From f9b70ea77a3b3059d9c784e2fe4b90000e75d3d4 Mon Sep 17 00:00:00 2001 From: Alexey Korepanov Date: Sat, 22 Jun 2024 09:30:16 +0200 Subject: [PATCH 01/70] Add JSONCompactWithProgressRowOutputFormat --- src/Formats/registerFormats.cpp | 2 + ...JSONCompactWithProgressRowOutputFormat.cpp | 125 ++++++++++++++++++ .../JSONCompactWithProgressRowOutputFormat.h | 53 ++++++++ 3 files changed, 180 insertions(+) create mode 100644 src/Processors/Formats/Impl/JSONCompactWithProgressRowOutputFormat.cpp create mode 100644 src/Processors/Formats/Impl/JSONCompactWithProgressRowOutputFormat.h diff --git a/src/Formats/registerFormats.cpp b/src/Formats/registerFormats.cpp index 57ca1bb49c8..770b747fafd 100644 --- a/src/Formats/registerFormats.cpp +++ b/src/Formats/registerFormats.cpp @@ -95,6 +95,7 @@ void registerOutputFormatMarkdown(FormatFactory & factory); void registerOutputFormatPostgreSQLWire(FormatFactory & factory); void registerOutputFormatPrometheus(FormatFactory & factory); void registerOutputFormatSQLInsert(FormatFactory & factory); +void registerOutputFormatJSONCompactWithProgress(FormatFactory & factory); /// Input only formats. @@ -242,6 +243,7 @@ void registerFormats() registerOutputFormatCapnProto(factory); registerOutputFormatPrometheus(factory); registerOutputFormatSQLInsert(factory); + registerOutputFormatJSONCompactWithProgress(factory); registerInputFormatRegexp(factory); registerInputFormatJSONAsString(factory); diff --git a/src/Processors/Formats/Impl/JSONCompactWithProgressRowOutputFormat.cpp b/src/Processors/Formats/Impl/JSONCompactWithProgressRowOutputFormat.cpp new file mode 100644 index 00000000000..78cf5b9a003 --- /dev/null +++ b/src/Processors/Formats/Impl/JSONCompactWithProgressRowOutputFormat.cpp @@ -0,0 +1,125 @@ +#include +#include +#include + +#include + +#include + + + +namespace DB +{ + +JSONCompactWithProgressRowOutputFormat::JSONCompactWithProgressRowOutputFormat( + WriteBuffer & out_, + const Block & header, + const FormatSettings & settings_, + bool yield_strings_) + : JSONRowOutputFormat(out_, header, settings_, yield_strings_) +{ +} + +void JSONCompactWithProgressRowOutputFormat::writeField(const IColumn & column, const ISerialization & serialization, size_t row_num) +{ + JSONUtils::writeFieldFromColumn(column, serialization, row_num, yield_strings, settings, *ostr); + ++field_number; + LOG_DEBUG(getLogger("JSONCompactWithProgressRowOutputFormat"), "Field number: {}", field_number); +} + +void JSONCompactWithProgressRowOutputFormat::writeFieldDelimiter() +{ + JSONUtils::writeFieldCompactDelimiter(*ostr); +} + +void JSONCompactWithProgressRowOutputFormat::writeRowStartDelimiter() +{ + if (has_progress) + writeProgress(); + JSONUtils::writeCompactArrayStart(*ostr, 2); +} + +void JSONCompactWithProgressRowOutputFormat::writeRowEndDelimiter() +{ + JSONUtils::writeCompactArrayEnd(*ostr); + field_number = 0; + ++row_count; +} + +void JSONCompactWithProgressRowOutputFormat::writeBeforeTotals() +{ + JSONUtils::writeFieldDelimiter(*ostr, 2); + JSONUtils::writeCompactArrayStart(*ostr, 1, "totals"); +} + +void JSONCompactWithProgressRowOutputFormat::writeTotals(const Columns & columns, size_t row_num) +{ + JSONUtils::writeCompactColumns(columns, serializations, row_num, yield_strings, settings, *ostr); +} + +void JSONCompactWithProgressRowOutputFormat::writeAfterTotals() +{ + JSONUtils::writeCompactArrayEnd(*ostr); +} + +void JSONCompactWithProgressRowOutputFormat::writeExtremesElement(const char * title, const Columns & columns, size_t row_num) +{ + 
JSONUtils::writeCompactArrayStart(*ostr, 2, title); + JSONUtils::writeCompactColumns(columns, serializations, row_num, yield_strings, settings, *ostr); + JSONUtils::writeCompactArrayEnd(*ostr); +} + +void JSONCompactWithProgressRowOutputFormat::onProgress(const Progress & value) +{ + LOG_DEBUG(getLogger("JSONCompactWithProgressRowOutputFormat"), "onProgress: {}", value.read_rows); + + progress.incrementPiecewiseAtomically(value); + String progress_line; + WriteBufferFromString buf(progress_line); + writeCString("{\"progress\":", buf); + progress.writeJSON(buf); + writeCString("}\n", buf); + buf.finalize(); + std::lock_guard lock(progress_lines_mutex); + progress_lines.emplace_back(std::move(progress_line)); + has_progress = true; +} + + +void JSONCompactWithProgressRowOutputFormat::flush() +{ + if (has_progress) + writeProgress(); + JSONRowOutputFormat::flush(); +} + +void JSONCompactWithProgressRowOutputFormat::writeSuffix() +{ + if (has_progress) + writeProgress(); + JSONRowOutputFormat::writeSuffix(); +} + +void JSONCompactWithProgressRowOutputFormat::writeProgress() +{ + std::lock_guard lock(progress_lines_mutex); + for (const auto & progress_line : progress_lines) + writeString(progress_line, *ostr); + progress_lines.clear(); + has_progress = false; +} + +void registerOutputFormatJSONCompactWithProgress(FormatFactory & factory) +{ + factory.registerOutputFormat("JSONCompactWithProgress", []( + WriteBuffer & buf, + const Block & sample, + const FormatSettings & format_settings) + { + return std::make_shared(buf, sample, format_settings, false); + }); + + factory.markOutputFormatSupportsParallelFormatting("JSONCompactWithProgress"); +} + +} diff --git a/src/Processors/Formats/Impl/JSONCompactWithProgressRowOutputFormat.h b/src/Processors/Formats/Impl/JSONCompactWithProgressRowOutputFormat.h new file mode 100644 index 00000000000..4bc10d41f19 --- /dev/null +++ b/src/Processors/Formats/Impl/JSONCompactWithProgressRowOutputFormat.h @@ -0,0 +1,53 @@ +#pragma once + +#include +#include +#include +#include + + +namespace DB +{ + +struct FormatSettings; + +/** The stream for outputting data in the JSONCompact- formats. + */ +class JSONCompactWithProgressRowOutputFormat final : public JSONRowOutputFormat +{ +public: + JSONCompactWithProgressRowOutputFormat( + WriteBuffer & out_, + const Block & header, + const FormatSettings & settings_, + bool yield_strings_); + + String getName() const override { return "JSONCompactWithProgressRowOutputFormat"; } + + void onProgress(const Progress & value) override; + void flush() override; + +private: + void writeField(const IColumn & column, const ISerialization & serialization, size_t row_num) override; + void writeFieldDelimiter() override; + void writeRowStartDelimiter() override; + void writeRowEndDelimiter() override; + bool supportTotals() const override { return true; } + bool supportExtremes() const override { return true; } + void writeBeforeTotals() override; + void writeAfterTotals() override; + void writeExtremesElement(const char * title, const Columns & columns, size_t row_num) override; + void writeTotals(const Columns & columns, size_t row_num) override; + + void writeProgress(); + void writeSuffix() override; + + Progress progress; + std::vector progress_lines; + std::mutex progress_lines_mutex; + /// To not lock mutex and check progress_lines every row, + /// we will use atomic flag that progress_lines is not empty. 
+ std::atomic_bool has_progress = false; +}; + +} From 20860ed8b04f6588de12b4e59baf932f02f90d27 Mon Sep 17 00:00:00 2001 From: Alexey Korepanov Date: Sun, 23 Jun 2024 14:34:54 +0200 Subject: [PATCH 02/70] Enable onProgress calls to JSONCompactWithProgressRowOutputFormat --- .../Formats/Impl/JSONCompactWithProgressRowOutputFormat.cpp | 2 -- 1 file changed, 2 deletions(-) diff --git a/src/Processors/Formats/Impl/JSONCompactWithProgressRowOutputFormat.cpp b/src/Processors/Formats/Impl/JSONCompactWithProgressRowOutputFormat.cpp index 78cf5b9a003..2cc7d99f8e7 100644 --- a/src/Processors/Formats/Impl/JSONCompactWithProgressRowOutputFormat.cpp +++ b/src/Processors/Formats/Impl/JSONCompactWithProgressRowOutputFormat.cpp @@ -118,8 +118,6 @@ void registerOutputFormatJSONCompactWithProgress(FormatFactory & factory) { return std::make_shared(buf, sample, format_settings, false); }); - - factory.markOutputFormatSupportsParallelFormatting("JSONCompactWithProgress"); } } From e1c60c4e40a03db459e71dd2b54b082d5205654d Mon Sep 17 00:00:00 2001 From: Alexey Korepanov Date: Mon, 24 Jun 2024 10:11:46 +0200 Subject: [PATCH 03/70] Remove debug output --- .../Formats/Impl/JSONCompactWithProgressRowOutputFormat.cpp | 6 ------ 1 file changed, 6 deletions(-) diff --git a/src/Processors/Formats/Impl/JSONCompactWithProgressRowOutputFormat.cpp b/src/Processors/Formats/Impl/JSONCompactWithProgressRowOutputFormat.cpp index 2cc7d99f8e7..39532fb76fb 100644 --- a/src/Processors/Formats/Impl/JSONCompactWithProgressRowOutputFormat.cpp +++ b/src/Processors/Formats/Impl/JSONCompactWithProgressRowOutputFormat.cpp @@ -4,9 +4,6 @@ #include -#include - - namespace DB { @@ -24,7 +21,6 @@ void JSONCompactWithProgressRowOutputFormat::writeField(const IColumn & column, { JSONUtils::writeFieldFromColumn(column, serialization, row_num, yield_strings, settings, *ostr); ++field_number; - LOG_DEBUG(getLogger("JSONCompactWithProgressRowOutputFormat"), "Field number: {}", field_number); } void JSONCompactWithProgressRowOutputFormat::writeFieldDelimiter() @@ -71,8 +67,6 @@ void JSONCompactWithProgressRowOutputFormat::writeExtremesElement(const char * t void JSONCompactWithProgressRowOutputFormat::onProgress(const Progress & value) { - LOG_DEBUG(getLogger("JSONCompactWithProgressRowOutputFormat"), "onProgress: {}", value.read_rows); - progress.incrementPiecewiseAtomically(value); String progress_line; WriteBufferFromString buf(progress_line); From 4e350ff44a4dbdecb1c044fb10df131f6c31ae3d Mon Sep 17 00:00:00 2001 From: Alexander Tokmakov Date: Thu, 4 Jul 2024 18:48:41 +0200 Subject: [PATCH 04/70] don't allow explicit uuids and rmt args in replicated db --- src/Core/Settings.h | 3 +- src/Databases/DatabaseReplicated.cpp | 12 ----- src/Interpreters/InterpreterCreateQuery.cpp | 20 ++++++++ ...tractZooKeeperPathFromReplicatedTableDef.h | 2 +- .../MergeTree/registerStorageMergeTree.cpp | 46 ++++++++++++++----- tests/config/users.d/database_replicated.xml | 1 + .../configs/settings.xml | 1 + .../configs/settings2.xml | 1 + .../02858_explicit_uuid_and_zk_path.reference | 10 ++++ .../02858_explicit_uuid_and_zk_path.sh | 41 +++++++++++++++++ 10 files changed, 112 insertions(+), 25 deletions(-) create mode 100644 tests/queries/0_stateless/02858_explicit_uuid_and_zk_path.reference create mode 100755 tests/queries/0_stateless/02858_explicit_uuid_and_zk_path.sh diff --git a/src/Core/Settings.h b/src/Core/Settings.h index d84e5b149f6..c162c75b6a0 100644 --- a/src/Core/Settings.h +++ b/src/Core/Settings.h @@ -731,7 +731,8 @@ class IColumn; M(UInt64, 
max_distributed_depth, 5, "Maximum distributed query depth", 0) \ M(Bool, database_replicated_always_detach_permanently, false, "Execute DETACH TABLE as DETACH TABLE PERMANENTLY if database engine is Replicated", 0) \ M(Bool, database_replicated_allow_only_replicated_engine, false, "Allow to create only Replicated tables in database with engine Replicated", 0) \ - M(Bool, database_replicated_allow_replicated_engine_arguments, true, "Allow to create only Replicated tables in database with engine Replicated with explicit arguments", 0) \ + M(UInt64, database_replicated_allow_replicated_engine_arguments, 0, "0 - Don't allow to explicitly specify ZooKeeper path and replica name for *MergeTree tables in Replicated databases. 1 - Allow. 2 - Allow, but ignore the specified path and use default one instead.", 0) \ + M(UInt64, database_replicated_allow_explicit_uuid, 0, "0 - Don't allow to explicitly specify UUIDs for tables in Replicated databases. 1 - Allow. 2 - Allow, but ignore the specified UUID and generate a random one instead.", 0) \ M(Bool, cloud_mode, false, "Only available in ClickHouse Cloud", 0) \ M(UInt64, cloud_mode_engine, 1, "Only available in ClickHouse Cloud", 0) \ M(DistributedDDLOutputMode, distributed_ddl_output_mode, DistributedDDLOutputMode::THROW, "Format of distributed DDL query result, one of: 'none', 'throw', 'null_status_on_timeout', 'never_throw', 'none_only_active', 'throw_only_active', 'null_status_on_timeout_only_active'", 0) \ diff --git a/src/Databases/DatabaseReplicated.cpp b/src/Databases/DatabaseReplicated.cpp index 4ca9afc49eb..507d1a9affc 100644 --- a/src/Databases/DatabaseReplicated.cpp +++ b/src/Databases/DatabaseReplicated.cpp @@ -774,18 +774,6 @@ void DatabaseReplicated::checkQueryValid(const ASTPtr & query, ContextPtr query_ bool maybe_replica_macros = info.expanded_other; bool enable_functional_tests_helper = getContext()->getConfigRef().has("_functional_tests_helper_database_replicated_replace_args_macros"); - if (!enable_functional_tests_helper) - { - if (query_context->getSettingsRef().database_replicated_allow_replicated_engine_arguments) - LOG_WARNING(log, "It's not recommended to explicitly specify zookeeper_path and replica_name in ReplicatedMergeTree arguments"); - else - throw Exception(ErrorCodes::INCORRECT_QUERY, - "It's not allowed to specify explicit zookeeper_path and replica_name " - "for ReplicatedMergeTree arguments in Replicated database. 
If you really want to " - "specify them explicitly, enable setting " - "database_replicated_allow_replicated_engine_arguments."); - } - if (maybe_shard_macros && maybe_replica_macros) return; diff --git a/src/Interpreters/InterpreterCreateQuery.cpp b/src/Interpreters/InterpreterCreateQuery.cpp index a990eb651ce..28827a1fe37 100644 --- a/src/Interpreters/InterpreterCreateQuery.cpp +++ b/src/Interpreters/InterpreterCreateQuery.cpp @@ -1055,6 +1055,26 @@ void InterpreterCreateQuery::assertOrSetUUID(ASTCreateQuery & create, const Data bool from_path = create.attach_from_path.has_value(); bool is_on_cluster = getContext()->getClientInfo().query_kind == ClientInfo::QueryKind::SECONDARY_QUERY; + if (database->getEngineName() == "Replicated" && create.uuid != UUIDHelpers::Nil && !is_replicated_database_internal) + { + if (getContext()->getSettingsRef().database_replicated_allow_explicit_uuid == 0) + { + throw Exception(ErrorCodes::BAD_ARGUMENTS, "It's not allowed to explicitly specify UUIDs for tables in Replicated databases, " + "see database_replicated_allow_explicit_uuid"); + } + else if (getContext()->getSettingsRef().database_replicated_allow_explicit_uuid == 0) + { + LOG_WARNING(&Poco::Logger::get("InterpreterCreateQuery"), "It's not recommended to explicitly specify UUIDs for tables in Replicated databases"); + } + else // if (getContext()->getSettingsRef().database_replicated_allow_explicit_uuid == 2) + { + UUID old_uuid = create.uuid; + create.generateRandomUUID(/*always_generate_new_uuid*/ true); + LOG_INFO(&Poco::Logger::get("InterpreterCreateQuery"), "Replaced a user-provided UUID ({}) with a random one ({}) " + "to make sure it's unique", old_uuid, create.uuid); + } + } + if (is_replicated_database_internal && !internal) { if (create.uuid == UUIDHelpers::Nil) diff --git a/src/Storages/MergeTree/extractZooKeeperPathFromReplicatedTableDef.h b/src/Storages/MergeTree/extractZooKeeperPathFromReplicatedTableDef.h index 5ef5e1db62e..212dc048868 100644 --- a/src/Storages/MergeTree/extractZooKeeperPathFromReplicatedTableDef.h +++ b/src/Storages/MergeTree/extractZooKeeperPathFromReplicatedTableDef.h @@ -14,6 +14,6 @@ using ContextPtr = std::shared_ptr; /// Extracts a zookeeper path from a specified CREATE TABLE query. /// The function checks the table engine and if it is Replicated*MergeTree then it takes the first argument and expands macros in it. /// Returns std::nullopt if the specified CREATE query doesn't describe a Replicated table or its arguments can't be evaluated. 
-std::optional extractZooKeeperPathFromReplicatedTableDef(const ASTCreateQuery & create_query, const ContextPtr & context); +std::optional extractZooKeeperPathFromReplicatedTableDef(const ASTCreateQuery & create_query, const ContextPtr & local_context); } diff --git a/src/Storages/MergeTree/registerStorageMergeTree.cpp b/src/Storages/MergeTree/registerStorageMergeTree.cpp index d234103e52b..66f7b2114bf 100644 --- a/src/Storages/MergeTree/registerStorageMergeTree.cpp +++ b/src/Storages/MergeTree/registerStorageMergeTree.cpp @@ -9,6 +9,7 @@ #include #include #include +#include #include #include @@ -185,7 +186,7 @@ static void extractZooKeeperPathAndReplicaNameFromEngineArgs( const String & engine_name, ASTs & engine_args, LoadingStrictnessLevel mode, - const ContextPtr & context, + const ContextPtr & local_context, String & zookeeper_path, String & replica_name, RenamingRestrictions & renaming_restrictions) @@ -202,11 +203,11 @@ static void extractZooKeeperPathAndReplicaNameFromEngineArgs( { /// Allow expressions in engine arguments. /// In new syntax argument can be literal or identifier or array/tuple of identifiers. - evaluateEngineArgs(engine_args, context); + evaluateEngineArgs(engine_args, local_context); } - bool is_on_cluster = context->getClientInfo().query_kind == ClientInfo::QueryKind::SECONDARY_QUERY; - bool is_replicated_database = context->getClientInfo().query_kind == ClientInfo::QueryKind::SECONDARY_QUERY && + bool is_on_cluster = local_context->getClientInfo().query_kind == ClientInfo::QueryKind::SECONDARY_QUERY; + bool is_replicated_database = local_context->getClientInfo().query_kind == ClientInfo::QueryKind::SECONDARY_QUERY && DatabaseCatalog::instance().getDatabase(table_id.database_name)->getEngineName() == "Replicated"; /// Allow implicit {uuid} macros only for zookeeper_path in ON CLUSTER queries @@ -226,10 +227,10 @@ static void extractZooKeeperPathAndReplicaNameFromEngineArgs( /// We did unfold it in previous versions to make moving table from Atomic to Ordinary database work correctly, /// but now it's not allowed (and it was the only reason to unfold {uuid} macro). info.table_id.uuid = UUIDHelpers::Nil; - zookeeper_path = context->getMacros()->expand(zookeeper_path, info); + zookeeper_path = local_context->getMacros()->expand(zookeeper_path, info); info.level = 0; - replica_name = context->getMacros()->expand(replica_name, info); + replica_name = local_context->getMacros()->expand(replica_name, info); } ast_zk_path->value = zookeeper_path; @@ -247,11 +248,11 @@ static void extractZooKeeperPathAndReplicaNameFromEngineArgs( } if (!allow_uuid_macro) info.table_id.uuid = UUIDHelpers::Nil; - zookeeper_path = context->getMacros()->expand(zookeeper_path, info); + zookeeper_path = local_context->getMacros()->expand(zookeeper_path, info); info.level = 0; info.table_id.uuid = UUIDHelpers::Nil; - replica_name = context->getMacros()->expand(replica_name, info); + replica_name = local_context->getMacros()->expand(replica_name, info); /// We do not allow renaming table with these macros in metadata, because zookeeper_path will be broken after RENAME TABLE. 
/// NOTE: it may happen if table was created by older version of ClickHouse (< 20.10) and macros was not unfolded on table creation @@ -268,9 +269,24 @@ static void extractZooKeeperPathAndReplicaNameFromEngineArgs( bool has_arguments = (arg_num + 2 <= arg_cnt); bool has_valid_arguments = has_arguments && engine_args[arg_num]->as() && engine_args[arg_num + 1]->as(); + const auto & server_settings = local_context->getServerSettings(); if (has_valid_arguments) { + if (is_replicated_database && local_context->getSettingsRef().database_replicated_allow_replicated_engine_arguments == 0) + { + throw Exception(ErrorCodes::BAD_ARGUMENTS, + "It's not allowed to specify explicit zookeeper_path and replica_name " + "for ReplicatedMergeTree arguments in Replicated database. If you really want to " + "specify them explicitly, enable setting " + "database_replicated_allow_replicated_engine_arguments."); + } + else if (is_replicated_database && local_context->getSettingsRef().database_replicated_allow_replicated_engine_arguments == 1) + { + LOG_WARNING(&Poco::Logger::get("registerStorageMergeTree"), "It's not recommended to explicitly specify " + "zookeeper_path and replica_name in ReplicatedMergeTree arguments"); + } + /// Get path and name from engine arguments auto * ast_zk_path = engine_args[arg_num]->as(); if (ast_zk_path && ast_zk_path->value.getType() == Field::Types::String) @@ -284,6 +300,15 @@ static void extractZooKeeperPathAndReplicaNameFromEngineArgs( else throw Exception(ErrorCodes::BAD_ARGUMENTS, "Replica name must be a string literal{}", verbose_help_message); + + if (is_replicated_database && local_context->getSettingsRef().database_replicated_allow_replicated_engine_arguments == 2) + { + LOG_INFO(&Poco::Logger::get("registerStorageMergeTree"), "Replacing user-provided ZooKeeper path and replica name ({}, {}) " + "with default arguments", zookeeper_path, replica_name); + engine_args[arg_num]->as()->value = zookeeper_path = server_settings.default_replica_path; + engine_args[arg_num + 1]->as()->value = replica_name = server_settings.default_replica_name; + } + expand_macro(ast_zk_path, ast_replica_name); } else if (is_extended_storage_def @@ -293,7 +318,6 @@ static void extractZooKeeperPathAndReplicaNameFromEngineArgs( { /// Try use default values if arguments are not specified. /// Note: {uuid} macro works for ON CLUSTER queries when database engine is Atomic. - const auto & server_settings = context->getServerSettings(); zookeeper_path = server_settings.default_replica_path; /// TODO maybe use hostname if {replica} is not defined? replica_name = server_settings.default_replica_name; @@ -318,7 +342,7 @@ static void extractZooKeeperPathAndReplicaNameFromEngineArgs( } /// Extracts a zookeeper path from a specified CREATE TABLE query. 
-std::optional extractZooKeeperPathFromReplicatedTableDef(const ASTCreateQuery & query, const ContextPtr & context) +std::optional extractZooKeeperPathFromReplicatedTableDef(const ASTCreateQuery & query, const ContextPtr & local_context) { if (!query.storage || !query.storage->engine) return {}; @@ -342,7 +366,7 @@ std::optional extractZooKeeperPathFromReplicatedTableDef(const ASTCreate try { - extractZooKeeperPathAndReplicaNameFromEngineArgs(query, table_id, engine_name, engine_args, mode, context, + extractZooKeeperPathAndReplicaNameFromEngineArgs(query, table_id, engine_name, engine_args, mode, local_context, zookeeper_path, replica_name, renaming_restrictions); } catch (Exception & e) diff --git a/tests/config/users.d/database_replicated.xml b/tests/config/users.d/database_replicated.xml index c049c3559fc..1c2cf2ac22b 100644 --- a/tests/config/users.d/database_replicated.xml +++ b/tests/config/users.d/database_replicated.xml @@ -6,6 +6,7 @@ 120 1 1 + 3 diff --git a/tests/integration/test_replicated_database/configs/settings.xml b/tests/integration/test_replicated_database/configs/settings.xml index c637fe8eead..4b1122b1b5f 100644 --- a/tests/integration/test_replicated_database/configs/settings.xml +++ b/tests/integration/test_replicated_database/configs/settings.xml @@ -5,6 +5,7 @@ 1 0 0 + 3 diff --git a/tests/integration/test_replicated_database/configs/settings2.xml b/tests/integration/test_replicated_database/configs/settings2.xml index dad5740a8ae..9a3038871e1 100644 --- a/tests/integration/test_replicated_database/configs/settings2.xml +++ b/tests/integration/test_replicated_database/configs/settings2.xml @@ -5,6 +5,7 @@ 1 0 0 + 3 0 diff --git a/tests/queries/0_stateless/02858_explicit_uuid_and_zk_path.reference b/tests/queries/0_stateless/02858_explicit_uuid_and_zk_path.reference new file mode 100644 index 00000000000..1d3a90daccd --- /dev/null +++ b/tests/queries/0_stateless/02858_explicit_uuid_and_zk_path.reference @@ -0,0 +1,10 @@ +2 +3 +m1 +m2 +rmt1 +rmt2 +02858000-1000-4000-8000-0000000000 +0 +CREATE TABLE rdb_default.rmt1\n(\n `n` Int32\n)\nENGINE = ReplicatedMergeTree(\'/test/default/auto_{shard}\', \'1auto_{replica}\')\nORDER BY n\nSETTINGS index_granularity = 8192 +CREATE TABLE rdb_default.rmt2\n(\n `n` Int32\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/{uuid}/{shard}\', \'{replica}\')\nORDER BY n\nSETTINGS index_granularity = 8192 diff --git a/tests/queries/0_stateless/02858_explicit_uuid_and_zk_path.sh b/tests/queries/0_stateless/02858_explicit_uuid_and_zk_path.sh new file mode 100755 index 00000000000..f2c2f47cd0d --- /dev/null +++ b/tests/queries/0_stateless/02858_explicit_uuid_and_zk_path.sh @@ -0,0 +1,41 @@ +#!/usr/bin/env bash + +CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. 
"$CURDIR"/../shell_config.sh + +db=$CLICKHOUSE_DATABASE +if [[ $($CLICKHOUSE_CLIENT -q "SELECT engine = 'Replicated' FROM system.databases WHERE name='$CLICKHOUSE_DATABASE'") != 1 ]]; then + $CLICKHOUSE_CLIENT -q "CREATE DATABASE rdb_$CLICKHOUSE_DATABASE ENGINE=Replicated('/test/$CLICKHOUSE_DATABASE/rdb', '1', '1')" + db="rdb_$CLICKHOUSE_DATABASE" +fi + +$CLICKHOUSE_CLIENT --database_replicated_allow_explicit_uuid=0 -q "CREATE TABLE $db.m0 +UUID '02858000-1000-4000-8000-000000000000' (n int) ENGINE=Memory" 2>&1| grep -Fac "database_replicated_allow_explicit_uuid" + +$CLICKHOUSE_CLIENT --database_replicated_allow_explicit_uuid=1 -q "CREATE TABLE $db.m1 +UUID '02858000-1000-4000-8000-0000000000$(($RANDOM % 10))$(($RANDOM % 10))' (n int) ENGINE=Memory" + +$CLICKHOUSE_CLIENT --database_replicated_allow_explicit_uuid=2 -q "CREATE TABLE $db.m2 +UUID '02858000-1000-4000-8000-000000000002' (n int) ENGINE=Memory" + + +$CLICKHOUSE_CLIENT --database_replicated_allow_replicated_engine_arguments=0 -q "CREATE TABLE $db.rmt0 (n int) +ENGINE=ReplicatedMergeTree('/test/$CLICKHOUSE_DATABASE', '1') ORDER BY n" 2>&1| grep -Fac "database_replicated_allow_replicated_engine_arguments" + +$CLICKHOUSE_CLIENT --database_replicated_allow_replicated_engine_arguments=1 -q "CREATE TABLE $db.rmt1 (n int) +ENGINE=ReplicatedMergeTree('/test/$CLICKHOUSE_DATABASE', '1') ORDER BY n" + +$CLICKHOUSE_CLIENT --database_replicated_allow_replicated_engine_arguments=2 -q "CREATE TABLE $db.rmt2 (n int) +ENGINE=ReplicatedMergeTree('/test/$CLICKHOUSE_DATABASE', '1') ORDER BY n" + + +$CLICKHOUSE_CLIENT -q "SELECT name FROM system.tables WHERE database='$db' ORDER BY name" + +$CLICKHOUSE_CLIENT -q "SELECT substring(toString(uuid) as s, 1, length(s) - 2) FROM system.tables WHERE database='$db' and name='m1'" +$CLICKHOUSE_CLIENT -q "SELECT toString(uuid) LIKE '02858000%' FROM system.tables WHERE database='$db' and name='m2'" + +$CLICKHOUSE_CLIENT -q "SHOW CREATE $db.rmt1" +$CLICKHOUSE_CLIENT -q "SHOW CREATE $db.rmt2" + +$CLICKHOUSE_CLIENT -q "DROP DATABASE IF EXISTS rdb_$CLICKHOUSE_DATABASE" From aa9591419d3f0cb1575ddd2cf7b82f2cfc397e2b Mon Sep 17 00:00:00 2001 From: Alexander Tokmakov Date: Thu, 4 Jul 2024 19:15:06 +0200 Subject: [PATCH 05/70] fix --- .../0_stateless/02858_explicit_uuid_and_zk_path.sh | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/tests/queries/0_stateless/02858_explicit_uuid_and_zk_path.sh b/tests/queries/0_stateless/02858_explicit_uuid_and_zk_path.sh index f2c2f47cd0d..1b5d1a06a9b 100755 --- a/tests/queries/0_stateless/02858_explicit_uuid_and_zk_path.sh +++ b/tests/queries/0_stateless/02858_explicit_uuid_and_zk_path.sh @@ -6,7 +6,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) db=$CLICKHOUSE_DATABASE if [[ $($CLICKHOUSE_CLIENT -q "SELECT engine = 'Replicated' FROM system.databases WHERE name='$CLICKHOUSE_DATABASE'") != 1 ]]; then - $CLICKHOUSE_CLIENT -q "CREATE DATABASE rdb_$CLICKHOUSE_DATABASE ENGINE=Replicated('/test/$CLICKHOUSE_DATABASE/rdb', '1', '1')" + $CLICKHOUSE_CLIENT -q "CREATE DATABASE rdb_$CLICKHOUSE_DATABASE ENGINE=Replicated('/test/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/rdb', '1', '1')" db="rdb_$CLICKHOUSE_DATABASE" fi @@ -21,13 +21,13 @@ UUID '02858000-1000-4000-8000-000000000002' (n int) ENGINE=Memory" $CLICKHOUSE_CLIENT --database_replicated_allow_replicated_engine_arguments=0 -q "CREATE TABLE $db.rmt0 (n int) -ENGINE=ReplicatedMergeTree('/test/$CLICKHOUSE_DATABASE', '1') ORDER BY n" 2>&1| grep -Fac "database_replicated_allow_replicated_engine_arguments" 
+ENGINE=ReplicatedMergeTree('/test/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX', '1') ORDER BY n" 2>&1| grep -Fac "database_replicated_allow_replicated_engine_arguments" $CLICKHOUSE_CLIENT --database_replicated_allow_replicated_engine_arguments=1 -q "CREATE TABLE $db.rmt1 (n int) -ENGINE=ReplicatedMergeTree('/test/$CLICKHOUSE_DATABASE', '1') ORDER BY n" +ENGINE=ReplicatedMergeTree('/test/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX', '1') ORDER BY n" $CLICKHOUSE_CLIENT --database_replicated_allow_replicated_engine_arguments=2 -q "CREATE TABLE $db.rmt2 (n int) -ENGINE=ReplicatedMergeTree('/test/$CLICKHOUSE_DATABASE', '1') ORDER BY n" +ENGINE=ReplicatedMergeTree('/test/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX', '1') ORDER BY n" $CLICKHOUSE_CLIENT -q "SELECT name FROM system.tables WHERE database='$db' ORDER BY name" From 613636bb1b324acf7500c8a760abf5aea764418b Mon Sep 17 00:00:00 2001 From: Alexander Tokmakov Date: Thu, 4 Jul 2024 20:49:10 +0200 Subject: [PATCH 06/70] fix --- src/Core/SettingsChangesHistory.cpp | 2 ++ src/Databases/DatabaseReplicated.cpp | 3 ++- src/Interpreters/InterpreterCreateQuery.cpp | 6 +++--- .../MergeTree/registerStorageMergeTree.cpp | 2 +- .../01148_zookeeper_path_macros_unfolding.sql | 2 ++ ...atabase_replicated_no_arguments_for_rmt.sh | 2 +- .../02858_explicit_uuid_and_zk_path.reference | 2 +- .../02858_explicit_uuid_and_zk_path.sh | 21 +++++++++++-------- 8 files changed, 24 insertions(+), 16 deletions(-) diff --git a/src/Core/SettingsChangesHistory.cpp b/src/Core/SettingsChangesHistory.cpp index 6af6b4b15aa..6c730940c87 100644 --- a/src/Core/SettingsChangesHistory.cpp +++ b/src/Core/SettingsChangesHistory.cpp @@ -63,6 +63,8 @@ static std::initializer_listgetSettingsRef().database_replicated_allow_replicated_engine_arguments == 0) + throw Exception(ErrorCodes::INCORRECT_QUERY, "Explicit zookeeper_path and replica_name are specified in ReplicatedMergeTree arguments. 
" "If you really want to specify it explicitly, then you should use some macros " "to distinguish different shards and replicas"); diff --git a/src/Interpreters/InterpreterCreateQuery.cpp b/src/Interpreters/InterpreterCreateQuery.cpp index c32a32be5cc..2862a907bec 100644 --- a/src/Interpreters/InterpreterCreateQuery.cpp +++ b/src/Interpreters/InterpreterCreateQuery.cpp @@ -1062,15 +1062,15 @@ void InterpreterCreateQuery::assertOrSetUUID(ASTCreateQuery & create, const Data throw Exception(ErrorCodes::BAD_ARGUMENTS, "It's not allowed to explicitly specify UUIDs for tables in Replicated databases, " "see database_replicated_allow_explicit_uuid"); } - else if (getContext()->getSettingsRef().database_replicated_allow_explicit_uuid == 0) + else if (getContext()->getSettingsRef().database_replicated_allow_explicit_uuid == 1) { LOG_WARNING(&Poco::Logger::get("InterpreterCreateQuery"), "It's not recommended to explicitly specify UUIDs for tables in Replicated databases"); } - else // if (getContext()->getSettingsRef().database_replicated_allow_explicit_uuid == 2) + else if (getContext()->getSettingsRef().database_replicated_allow_explicit_uuid == 2) { UUID old_uuid = create.uuid; create.generateRandomUUID(/*always_generate_new_uuid*/ true); - LOG_INFO(&Poco::Logger::get("InterpreterCreateQuery"), "Replaced a user-provided UUID ({}) with a random one ({}) " + LOG_WARNING(&Poco::Logger::get("InterpreterCreateQuery"), "Replaced a user-provided UUID ({}) with a random one ({}) " "to make sure it's unique", old_uuid, create.uuid); } } diff --git a/src/Storages/MergeTree/registerStorageMergeTree.cpp b/src/Storages/MergeTree/registerStorageMergeTree.cpp index 66f7b2114bf..d27c786d422 100644 --- a/src/Storages/MergeTree/registerStorageMergeTree.cpp +++ b/src/Storages/MergeTree/registerStorageMergeTree.cpp @@ -303,7 +303,7 @@ static void extractZooKeeperPathAndReplicaNameFromEngineArgs( if (is_replicated_database && local_context->getSettingsRef().database_replicated_allow_replicated_engine_arguments == 2) { - LOG_INFO(&Poco::Logger::get("registerStorageMergeTree"), "Replacing user-provided ZooKeeper path and replica name ({}, {}) " + LOG_WARNING(&Poco::Logger::get("registerStorageMergeTree"), "Replacing user-provided ZooKeeper path and replica name ({}, {}) " "with default arguments", zookeeper_path, replica_name); engine_args[arg_num]->as()->value = zookeeper_path = server_settings.default_replica_path; engine_args[arg_num + 1]->as()->value = replica_name = server_settings.default_replica_name; diff --git a/tests/queries/0_stateless/01148_zookeeper_path_macros_unfolding.sql b/tests/queries/0_stateless/01148_zookeeper_path_macros_unfolding.sql index a585ef1c324..c689542e4c3 100644 --- a/tests/queries/0_stateless/01148_zookeeper_path_macros_unfolding.sql +++ b/tests/queries/0_stateless/01148_zookeeper_path_macros_unfolding.sql @@ -7,6 +7,8 @@ DROP TABLE IF EXISTS rmt1; DROP TABLE IF EXISTS rmt2; DROP TABLE IF EXISTS rmt3; +SET database_replicated_allow_replicated_engine_arguments=1; + CREATE TABLE rmt (n UInt64, s String) ENGINE = ReplicatedMergeTree('/clickhouse/test_01148/{shard}/{database}/{table}', '{replica}') ORDER BY n; SHOW CREATE TABLE rmt; RENAME TABLE rmt TO rmt1; diff --git a/tests/queries/0_stateless/02514_database_replicated_no_arguments_for_rmt.sh b/tests/queries/0_stateless/02514_database_replicated_no_arguments_for_rmt.sh index a0f228e6af4..7b600ade9a0 100755 --- a/tests/queries/0_stateless/02514_database_replicated_no_arguments_for_rmt.sh +++ 
b/tests/queries/0_stateless/02514_database_replicated_no_arguments_for_rmt.sh @@ -15,7 +15,7 @@ ${CLICKHOUSE_CLIENT} -q "GRANT CREATE TABLE ON ${CLICKHOUSE_DATABASE}_db.* TO us ${CLICKHOUSE_CLIENT} -q "GRANT TABLE ENGINE ON ReplicatedMergeTree TO user_${CLICKHOUSE_DATABASE}" ${CLICKHOUSE_CLIENT} -q "CREATE DATABASE ${CLICKHOUSE_DATABASE}_db engine = Replicated('/clickhouse/databases/${CLICKHOUSE_TEST_ZOOKEEPER_PREFIX}/${CLICKHOUSE_DATABASE}_db', '{shard}', '{replica}')" ${CLICKHOUSE_CLIENT} --distributed_ddl_output_mode=none --user "user_${CLICKHOUSE_DATABASE}" -n --query "CREATE TABLE ${CLICKHOUSE_DATABASE}_db.tab_rmt_ok (x UInt32) engine = ReplicatedMergeTree order by x;" -${CLICKHOUSE_CLIENT} --distributed_ddl_output_mode=none --user "user_${CLICKHOUSE_DATABASE}" -n --query "CREATE TABLE ${CLICKHOUSE_DATABASE}_db.tab_rmt_fail (x UInt32) engine = ReplicatedMergeTree('/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/root/{shard}', '{replica}') order by x; -- { serverError 80 }" +${CLICKHOUSE_CLIENT} --distributed_ddl_output_mode=none --user "user_${CLICKHOUSE_DATABASE}" -n --query "CREATE TABLE ${CLICKHOUSE_DATABASE}_db.tab_rmt_fail (x UInt32) engine = ReplicatedMergeTree('/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/root/{shard}', '{replica}') order by x; -- { serverError 36 }" ${CLICKHOUSE_CLIENT} --query "DROP DATABASE ${CLICKHOUSE_DATABASE}_db" ${CLICKHOUSE_CLIENT} -q "DROP USER user_${CLICKHOUSE_DATABASE}" diff --git a/tests/queries/0_stateless/02858_explicit_uuid_and_zk_path.reference b/tests/queries/0_stateless/02858_explicit_uuid_and_zk_path.reference index 1d3a90daccd..1241d9714f9 100644 --- a/tests/queries/0_stateless/02858_explicit_uuid_and_zk_path.reference +++ b/tests/queries/0_stateless/02858_explicit_uuid_and_zk_path.reference @@ -6,5 +6,5 @@ rmt1 rmt2 02858000-1000-4000-8000-0000000000 0 -CREATE TABLE rdb_default.rmt1\n(\n `n` Int32\n)\nENGINE = ReplicatedMergeTree(\'/test/default/auto_{shard}\', \'1auto_{replica}\')\nORDER BY n\nSETTINGS index_granularity = 8192 +CREATE TABLE rdb_default.rmt1\n(\n `n` Int32\n)\nENGINE = ReplicatedMergeTree(\'/test/02858_explicit_uuid_and_zk_path_default/rmt/auto_{shard}\', \'1auto_{replica}\')\nORDER BY n\nSETTINGS index_granularity = 8192 CREATE TABLE rdb_default.rmt2\n(\n `n` Int32\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/{uuid}/{shard}\', \'{replica}\')\nORDER BY n\nSETTINGS index_granularity = 8192 diff --git a/tests/queries/0_stateless/02858_explicit_uuid_and_zk_path.sh b/tests/queries/0_stateless/02858_explicit_uuid_and_zk_path.sh index 1b5d1a06a9b..b968c02a07a 100755 --- a/tests/queries/0_stateless/02858_explicit_uuid_and_zk_path.sh +++ b/tests/queries/0_stateless/02858_explicit_uuid_and_zk_path.sh @@ -1,33 +1,36 @@ #!/usr/bin/env bash +CLICKHOUSE_CLIENT_SERVER_LOGS_LEVEL=error + CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . 
"$CURDIR"/../shell_config.sh + db=$CLICKHOUSE_DATABASE if [[ $($CLICKHOUSE_CLIENT -q "SELECT engine = 'Replicated' FROM system.databases WHERE name='$CLICKHOUSE_DATABASE'") != 1 ]]; then $CLICKHOUSE_CLIENT -q "CREATE DATABASE rdb_$CLICKHOUSE_DATABASE ENGINE=Replicated('/test/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/rdb', '1', '1')" db="rdb_$CLICKHOUSE_DATABASE" fi -$CLICKHOUSE_CLIENT --database_replicated_allow_explicit_uuid=0 -q "CREATE TABLE $db.m0 +$CLICKHOUSE_CLIENT --distributed_ddl_output_mode=none --database_replicated_allow_explicit_uuid=0 -q "CREATE TABLE $db.m0 UUID '02858000-1000-4000-8000-000000000000' (n int) ENGINE=Memory" 2>&1| grep -Fac "database_replicated_allow_explicit_uuid" -$CLICKHOUSE_CLIENT --database_replicated_allow_explicit_uuid=1 -q "CREATE TABLE $db.m1 +$CLICKHOUSE_CLIENT --distributed_ddl_output_mode=none --database_replicated_allow_explicit_uuid=1 -q "CREATE TABLE $db.m1 UUID '02858000-1000-4000-8000-0000000000$(($RANDOM % 10))$(($RANDOM % 10))' (n int) ENGINE=Memory" -$CLICKHOUSE_CLIENT --database_replicated_allow_explicit_uuid=2 -q "CREATE TABLE $db.m2 +$CLICKHOUSE_CLIENT --distributed_ddl_output_mode=none --database_replicated_allow_explicit_uuid=2 -q "CREATE TABLE $db.m2 UUID '02858000-1000-4000-8000-000000000002' (n int) ENGINE=Memory" -$CLICKHOUSE_CLIENT --database_replicated_allow_replicated_engine_arguments=0 -q "CREATE TABLE $db.rmt0 (n int) -ENGINE=ReplicatedMergeTree('/test/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX', '1') ORDER BY n" 2>&1| grep -Fac "database_replicated_allow_replicated_engine_arguments" +$CLICKHOUSE_CLIENT --distributed_ddl_output_mode=none --database_replicated_allow_replicated_engine_arguments=0 -q "CREATE TABLE $db.rmt0 (n int) +ENGINE=ReplicatedMergeTree('/test/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/rmt', '1') ORDER BY n" 2>&1| grep -Fac "database_replicated_allow_replicated_engine_arguments" -$CLICKHOUSE_CLIENT --database_replicated_allow_replicated_engine_arguments=1 -q "CREATE TABLE $db.rmt1 (n int) -ENGINE=ReplicatedMergeTree('/test/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX', '1') ORDER BY n" +$CLICKHOUSE_CLIENT --distributed_ddl_output_mode=none --database_replicated_allow_replicated_engine_arguments=1 -q "CREATE TABLE $db.rmt1 (n int) +ENGINE=ReplicatedMergeTree('/test/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/rmt', '1') ORDER BY n" -$CLICKHOUSE_CLIENT --database_replicated_allow_replicated_engine_arguments=2 -q "CREATE TABLE $db.rmt2 (n int) -ENGINE=ReplicatedMergeTree('/test/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX', '1') ORDER BY n" +$CLICKHOUSE_CLIENT --distributed_ddl_output_mode=none --database_replicated_allow_replicated_engine_arguments=2 -q "CREATE TABLE $db.rmt2 (n int) +ENGINE=ReplicatedMergeTree('/test/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/rmt', '1') ORDER BY n" $CLICKHOUSE_CLIENT -q "SELECT name FROM system.tables WHERE database='$db' ORDER BY name" From 0e6fd2251e9048bbdb8535c3af0971fb0c670350 Mon Sep 17 00:00:00 2001 From: Alexander Tokmakov Date: Fri, 5 Jul 2024 17:15:04 +0200 Subject: [PATCH 07/70] fix --- .../0_stateless/02858_explicit_uuid_and_zk_path.reference | 2 +- .../queries/0_stateless/02858_explicit_uuid_and_zk_path.sh | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/tests/queries/0_stateless/02858_explicit_uuid_and_zk_path.reference b/tests/queries/0_stateless/02858_explicit_uuid_and_zk_path.reference index 1241d9714f9..cb6d0135adf 100644 --- a/tests/queries/0_stateless/02858_explicit_uuid_and_zk_path.reference +++ b/tests/queries/0_stateless/02858_explicit_uuid_and_zk_path.reference @@ -6,5 +6,5 @@ rmt1 rmt2 
02858000-1000-4000-8000-0000000000 0 -CREATE TABLE rdb_default.rmt1\n(\n `n` Int32\n)\nENGINE = ReplicatedMergeTree(\'/test/02858_explicit_uuid_and_zk_path_default/rmt/auto_{shard}\', \'1auto_{replica}\')\nORDER BY n\nSETTINGS index_granularity = 8192 +CREATE TABLE rdb_default.rmt1\n(\n `n` Int32\n)\nENGINE = ReplicatedMergeTree(\'/test/02858_explicit_uuid_and_zk_path_default/rmt/{shard}\', \'_{replica}\')\nORDER BY n\nSETTINGS index_granularity = 8192 CREATE TABLE rdb_default.rmt2\n(\n `n` Int32\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/{uuid}/{shard}\', \'{replica}\')\nORDER BY n\nSETTINGS index_granularity = 8192 diff --git a/tests/queries/0_stateless/02858_explicit_uuid_and_zk_path.sh b/tests/queries/0_stateless/02858_explicit_uuid_and_zk_path.sh index b968c02a07a..a959446c822 100755 --- a/tests/queries/0_stateless/02858_explicit_uuid_and_zk_path.sh +++ b/tests/queries/0_stateless/02858_explicit_uuid_and_zk_path.sh @@ -24,13 +24,13 @@ UUID '02858000-1000-4000-8000-000000000002' (n int) ENGINE=Memory" $CLICKHOUSE_CLIENT --distributed_ddl_output_mode=none --database_replicated_allow_replicated_engine_arguments=0 -q "CREATE TABLE $db.rmt0 (n int) -ENGINE=ReplicatedMergeTree('/test/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/rmt', '1') ORDER BY n" 2>&1| grep -Fac "database_replicated_allow_replicated_engine_arguments" +ENGINE=ReplicatedMergeTree('/test/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/rmt/{shard}', '_{replica}') ORDER BY n" 2>&1| grep -Fac "database_replicated_allow_replicated_engine_arguments" $CLICKHOUSE_CLIENT --distributed_ddl_output_mode=none --database_replicated_allow_replicated_engine_arguments=1 -q "CREATE TABLE $db.rmt1 (n int) -ENGINE=ReplicatedMergeTree('/test/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/rmt', '1') ORDER BY n" +ENGINE=ReplicatedMergeTree('/test/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/rmt/{shard}', '_{replica}') ORDER BY n" $CLICKHOUSE_CLIENT --distributed_ddl_output_mode=none --database_replicated_allow_replicated_engine_arguments=2 -q "CREATE TABLE $db.rmt2 (n int) -ENGINE=ReplicatedMergeTree('/test/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/rmt', '1') ORDER BY n" +ENGINE=ReplicatedMergeTree('/test/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/rmt/{shard}', '_{replica}') ORDER BY n" $CLICKHOUSE_CLIENT -q "SELECT name FROM system.tables WHERE database='$db' ORDER BY name" From e7105a3faa50ba012e6cff1e9e26e62fce6d75ba Mon Sep 17 00:00:00 2001 From: Alexander Tokmakov Date: Fri, 5 Jul 2024 20:51:30 +0200 Subject: [PATCH 08/70] fix --- src/Backups/RestorerFromBackup.cpp | 6 +++++- src/Interpreters/InterpreterCreateQuery.cpp | 2 +- .../0_stateless/02858_explicit_uuid_and_zk_path.reference | 2 +- .../queries/0_stateless/02858_explicit_uuid_and_zk_path.sh | 4 ++-- 4 files changed, 9 insertions(+), 5 deletions(-) diff --git a/src/Backups/RestorerFromBackup.cpp b/src/Backups/RestorerFromBackup.cpp index 454a0468e9f..63ca272db0b 100644 --- a/src/Backups/RestorerFromBackup.cpp +++ b/src/Backups/RestorerFromBackup.cpp @@ -903,11 +903,15 @@ void RestorerFromBackup::createTable(const QualifiedTableName & table_name) table_info.database = DatabaseCatalog::instance().getDatabase(table_name.database); DatabasePtr database = table_info.database; + auto query_context = Context::createCopy(context); + query_context->setSetting("database_replicated_allow_explicit_uuid", 3); + query_context->setSetting("database_replicated_allow_replicated_engine_arguments", 3); + /// Execute CREATE TABLE query (we call IDatabase::createTableRestoredFromBackup() to allow the database to do some /// database-specific things). 
database->createTableRestoredFromBackup( create_table_query, - context, + query_context, restore_coordination, std::chrono::duration_cast(create_table_timeout).count()); } diff --git a/src/Interpreters/InterpreterCreateQuery.cpp b/src/Interpreters/InterpreterCreateQuery.cpp index 2862a907bec..89f503f5fb1 100644 --- a/src/Interpreters/InterpreterCreateQuery.cpp +++ b/src/Interpreters/InterpreterCreateQuery.cpp @@ -1055,7 +1055,7 @@ void InterpreterCreateQuery::assertOrSetUUID(ASTCreateQuery & create, const Data bool from_path = create.attach_from_path.has_value(); bool is_on_cluster = getContext()->getClientInfo().query_kind == ClientInfo::QueryKind::SECONDARY_QUERY; - if (database->getEngineName() == "Replicated" && create.uuid != UUIDHelpers::Nil && !is_replicated_database_internal) + if (database->getEngineName() == "Replicated" && create.uuid != UUIDHelpers::Nil && !is_replicated_database_internal && !create.attach) { if (getContext()->getSettingsRef().database_replicated_allow_explicit_uuid == 0) { diff --git a/tests/queries/0_stateless/02858_explicit_uuid_and_zk_path.reference b/tests/queries/0_stateless/02858_explicit_uuid_and_zk_path.reference index cb6d0135adf..6444e10bb48 100644 --- a/tests/queries/0_stateless/02858_explicit_uuid_and_zk_path.reference +++ b/tests/queries/0_stateless/02858_explicit_uuid_and_zk_path.reference @@ -4,7 +4,7 @@ m1 m2 rmt1 rmt2 -02858000-1000-4000-8000-0000000000 +02858000-1000-4000-8000-000000000 0 CREATE TABLE rdb_default.rmt1\n(\n `n` Int32\n)\nENGINE = ReplicatedMergeTree(\'/test/02858_explicit_uuid_and_zk_path_default/rmt/{shard}\', \'_{replica}\')\nORDER BY n\nSETTINGS index_granularity = 8192 CREATE TABLE rdb_default.rmt2\n(\n `n` Int32\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/{uuid}/{shard}\', \'{replica}\')\nORDER BY n\nSETTINGS index_granularity = 8192 diff --git a/tests/queries/0_stateless/02858_explicit_uuid_and_zk_path.sh b/tests/queries/0_stateless/02858_explicit_uuid_and_zk_path.sh index a959446c822..b011aed613a 100755 --- a/tests/queries/0_stateless/02858_explicit_uuid_and_zk_path.sh +++ b/tests/queries/0_stateless/02858_explicit_uuid_and_zk_path.sh @@ -17,7 +17,7 @@ $CLICKHOUSE_CLIENT --distributed_ddl_output_mode=none --database_replicated_allo UUID '02858000-1000-4000-8000-000000000000' (n int) ENGINE=Memory" 2>&1| grep -Fac "database_replicated_allow_explicit_uuid" $CLICKHOUSE_CLIENT --distributed_ddl_output_mode=none --database_replicated_allow_explicit_uuid=1 -q "CREATE TABLE $db.m1 -UUID '02858000-1000-4000-8000-0000000000$(($RANDOM % 10))$(($RANDOM % 10))' (n int) ENGINE=Memory" +UUID '02858000-1000-4000-8000-000000000$(($RANDOM % 10))$(($RANDOM % 10))$(($RANDOM % 10))' (n int) ENGINE=Memory" $CLICKHOUSE_CLIENT --distributed_ddl_output_mode=none --database_replicated_allow_explicit_uuid=2 -q "CREATE TABLE $db.m2 UUID '02858000-1000-4000-8000-000000000002' (n int) ENGINE=Memory" @@ -35,7 +35,7 @@ ENGINE=ReplicatedMergeTree('/test/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/rmt/{shard}' $CLICKHOUSE_CLIENT -q "SELECT name FROM system.tables WHERE database='$db' ORDER BY name" -$CLICKHOUSE_CLIENT -q "SELECT substring(toString(uuid) as s, 1, length(s) - 2) FROM system.tables WHERE database='$db' and name='m1'" +$CLICKHOUSE_CLIENT -q "SELECT substring(toString(uuid) as s, 1, length(s) - 3) FROM system.tables WHERE database='$db' and name='m1'" $CLICKHOUSE_CLIENT -q "SELECT toString(uuid) LIKE '02858000%' FROM system.tables WHERE database='$db' and name='m2'" $CLICKHOUSE_CLIENT -q "SHOW CREATE $db.rmt1" From 
88736a0d74b068f271ee21c69760128f82adbb91 Mon Sep 17 00:00:00 2001 From: Alexey Korepanov Date: Sun, 7 Jul 2024 21:12:26 +0200 Subject: [PATCH 09/70] Update JSONCompactWithProgressRowOutputFormat to print JSON on each row --- src/Formats/JSONUtils.cpp | 64 +++++++++++++++++++ src/Formats/JSONUtils.h | 10 +++ ...JSONCompactWithProgressRowOutputFormat.cpp | 39 ++++++++++- .../JSONCompactWithProgressRowOutputFormat.h | 4 ++ 4 files changed, 115 insertions(+), 2 deletions(-) diff --git a/src/Formats/JSONUtils.cpp b/src/Formats/JSONUtils.cpp index f0985f4a6b7..16083441f3a 100644 --- a/src/Formats/JSONUtils.cpp +++ b/src/Formats/JSONUtils.cpp @@ -483,6 +483,33 @@ namespace JSONUtils writeArrayEnd(out, 1); } + + void writeCompactMetadata(const Names & names, const DataTypes & types, const FormatSettings & settings, WriteBuffer & out) + { + writeCompactArrayStart(out, 0, "meta"); + + for (size_t i = 0; i < names.size(); ++i) + { + writeCompactObjectStart(out); + writeTitle("name", out, 0, ""); + + /// The field names are pre-escaped to be put into JSON string literal. + writeChar('"', out); + writeString(names[i], out); + writeChar('"', out); + + writeFieldCompactDelimiter(out); + writeTitle("type", out, 0, ""); + writeJSONString(types[i]->getName(), out, settings); + writeCompactObjectEnd(out); + + if (i + 1 < names.size()) + writeFieldCompactDelimiter(out); + } + + writeCompactArrayEnd(out); + } + void writeAdditionalInfo( size_t rows, size_t rows_before_limit, @@ -523,6 +550,43 @@ namespace JSONUtils } } + void writeCompactAdditionalInfo( + size_t rows, + size_t rows_before_limit, + bool applied_limit, + const Stopwatch & watch, + const Progress & progress, + bool write_statistics, + WriteBuffer & out) + { + writeCompactObjectStart(out, 0, "statistics"); + writeTitle("rows", out, 0, ""); + writeIntText(rows, out); + writeFieldCompactDelimiter(out); + + if (applied_limit) + { + writeTitle("rows_before_limit_at_least", out, 0, ""); + writeIntText(rows_before_limit, out); + writeFieldCompactDelimiter(out); + } + + if (write_statistics) + { + writeTitle("elapsed", out, 0, ""); + writeText(watch.elapsedSeconds(), out); + writeFieldCompactDelimiter(out); + + writeTitle("rows_read", out, 0, ""); + writeText(progress.read_rows.load(), out); + writeFieldCompactDelimiter(out); + + writeTitle("bytes_read", out, 0, ""); + writeText(progress.read_bytes.load(), out); + } + writeCompactObjectEnd(out); + } + void writeException(const String & exception_message, WriteBuffer & out, const FormatSettings & settings, size_t indent) { writeTitle("exception", out, indent, " "); diff --git a/src/Formats/JSONUtils.h b/src/Formats/JSONUtils.h index 7ee111c1285..718b9b2f610 100644 --- a/src/Formats/JSONUtils.h +++ b/src/Formats/JSONUtils.h @@ -99,6 +99,7 @@ namespace JSONUtils WriteBuffer & out); void writeMetadata(const Names & names, const DataTypes & types, const FormatSettings & settings, WriteBuffer & out); + void writeCompactMetadata(const Names & names, const DataTypes & types, const FormatSettings & settings, WriteBuffer & out); void writeAdditionalInfo( size_t rows, @@ -109,6 +110,15 @@ namespace JSONUtils bool write_statistics, WriteBuffer & out); + void writeCompactAdditionalInfo( + size_t rows, + size_t rows_before_limit, + bool applied_limit, + const Stopwatch & watch, + const Progress & progress, + bool write_statistics, + WriteBuffer & out); + void writeException(const String & exception_message, WriteBuffer & out, const FormatSettings & settings, size_t indent = 0); void skipColon(ReadBuffer & in); diff 
--git a/src/Processors/Formats/Impl/JSONCompactWithProgressRowOutputFormat.cpp b/src/Processors/Formats/Impl/JSONCompactWithProgressRowOutputFormat.cpp index 39532fb76fb..7a19b56d0ca 100644 --- a/src/Processors/Formats/Impl/JSONCompactWithProgressRowOutputFormat.cpp +++ b/src/Processors/Formats/Impl/JSONCompactWithProgressRowOutputFormat.cpp @@ -17,6 +17,14 @@ JSONCompactWithProgressRowOutputFormat::JSONCompactWithProgressRowOutputFormat( { } +void JSONCompactWithProgressRowOutputFormat::writePrefix() +{ + JSONUtils::writeCompactObjectStart(*ostr); + JSONUtils::writeCompactMetadata(names, types, settings, *ostr); + JSONUtils::writeCompactObjectEnd(*ostr); + writeCString("}\n", *ostr); +} + void JSONCompactWithProgressRowOutputFormat::writeField(const IColumn & column, const ISerialization & serialization, size_t row_num) { JSONUtils::writeFieldFromColumn(column, serialization, row_num, yield_strings, settings, *ostr); @@ -32,16 +40,22 @@ void JSONCompactWithProgressRowOutputFormat::writeRowStartDelimiter() { if (has_progress) writeProgress(); - JSONUtils::writeCompactArrayStart(*ostr, 2); + writeCString("{\"data\":", *ostr); + JSONUtils::writeCompactArrayStart(*ostr); } void JSONCompactWithProgressRowOutputFormat::writeRowEndDelimiter() { JSONUtils::writeCompactArrayEnd(*ostr); + writeCString("}\n", *ostr); field_number = 0; ++row_count; } +void JSONCompactWithProgressRowOutputFormat::writeRowBetweenDelimiter() +{ +} + void JSONCompactWithProgressRowOutputFormat::writeBeforeTotals() { JSONUtils::writeFieldDelimiter(*ostr, 2); @@ -91,7 +105,6 @@ void JSONCompactWithProgressRowOutputFormat::writeSuffix() { if (has_progress) writeProgress(); - JSONRowOutputFormat::writeSuffix(); } void JSONCompactWithProgressRowOutputFormat::writeProgress() @@ -103,6 +116,28 @@ void JSONCompactWithProgressRowOutputFormat::writeProgress() has_progress = false; } +void JSONCompactWithProgressRowOutputFormat::finalizeImpl() +{ + JSONUtils::writeCompactAdditionalInfo( + row_count, + statistics.rows_before_limit, + statistics.applied_limit, + statistics.watch, + statistics.progress, + settings.write_statistics && exception_message.empty(), + *ostr); + + exception_message = "Test exception message."; + if (!exception_message.empty()) + { + writeCString("\n", *ostr); + JSONUtils::writeCompactObjectStart(*ostr); + JSONUtils::writeException(exception_message, *ostr, settings, 0); + JSONUtils::writeCompactObjectEnd(*ostr); + } + ostr->next(); +} + void registerOutputFormatJSONCompactWithProgress(FormatFactory & factory) { factory.registerOutputFormat("JSONCompactWithProgress", []( diff --git a/src/Processors/Formats/Impl/JSONCompactWithProgressRowOutputFormat.h b/src/Processors/Formats/Impl/JSONCompactWithProgressRowOutputFormat.h index 4bc10d41f19..0c7e6b295e8 100644 --- a/src/Processors/Formats/Impl/JSONCompactWithProgressRowOutputFormat.h +++ b/src/Processors/Formats/Impl/JSONCompactWithProgressRowOutputFormat.h @@ -32,6 +32,7 @@ private: void writeFieldDelimiter() override; void writeRowStartDelimiter() override; void writeRowEndDelimiter() override; + void writeRowBetweenDelimiter() override; bool supportTotals() const override { return true; } bool supportExtremes() const override { return true; } void writeBeforeTotals() override; @@ -40,7 +41,10 @@ private: void writeTotals(const Columns & columns, size_t row_num) override; void writeProgress(); + void writePrefix() override; void writeSuffix() override; + void finalizeImpl() override; + Progress progress; std::vector progress_lines; From 
2ae9490a681d3d522dd30e96b9f36b2d638ffd10 Mon Sep 17 00:00:00 2001 From: Alexey Korepanov Date: Sun, 7 Jul 2024 21:22:02 +0200 Subject: [PATCH 10/70] Fix progress rows and bytes read --- .../Formats/Impl/JSONCompactWithProgressRowOutputFormat.cpp | 5 ++--- .../Formats/Impl/JSONCompactWithProgressRowOutputFormat.h | 1 - 2 files changed, 2 insertions(+), 4 deletions(-) diff --git a/src/Processors/Formats/Impl/JSONCompactWithProgressRowOutputFormat.cpp b/src/Processors/Formats/Impl/JSONCompactWithProgressRowOutputFormat.cpp index 7a19b56d0ca..d72a6556c2b 100644 --- a/src/Processors/Formats/Impl/JSONCompactWithProgressRowOutputFormat.cpp +++ b/src/Processors/Formats/Impl/JSONCompactWithProgressRowOutputFormat.cpp @@ -81,11 +81,11 @@ void JSONCompactWithProgressRowOutputFormat::writeExtremesElement(const char * t void JSONCompactWithProgressRowOutputFormat::onProgress(const Progress & value) { - progress.incrementPiecewiseAtomically(value); + statistics.progress.incrementPiecewiseAtomically(value); String progress_line; WriteBufferFromString buf(progress_line); writeCString("{\"progress\":", buf); - progress.writeJSON(buf); + statistics.progress.writeJSON(buf); writeCString("}\n", buf); buf.finalize(); std::lock_guard lock(progress_lines_mutex); @@ -127,7 +127,6 @@ void JSONCompactWithProgressRowOutputFormat::finalizeImpl() settings.write_statistics && exception_message.empty(), *ostr); - exception_message = "Test exception message."; if (!exception_message.empty()) { writeCString("\n", *ostr); diff --git a/src/Processors/Formats/Impl/JSONCompactWithProgressRowOutputFormat.h b/src/Processors/Formats/Impl/JSONCompactWithProgressRowOutputFormat.h index 0c7e6b295e8..669e342c583 100644 --- a/src/Processors/Formats/Impl/JSONCompactWithProgressRowOutputFormat.h +++ b/src/Processors/Formats/Impl/JSONCompactWithProgressRowOutputFormat.h @@ -46,7 +46,6 @@ private: void finalizeImpl() override; - Progress progress; std::vector progress_lines; std::mutex progress_lines_mutex; /// To not lock mutex and check progress_lines every row, From 2469647609c53ac968d7fba60867c78ecebe522c Mon Sep 17 00:00:00 2001 From: Alexey Korepanov Date: Sun, 7 Jul 2024 21:30:36 +0200 Subject: [PATCH 11/70] Fix statistics section --- src/Formats/JSONUtils.cpp | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/Formats/JSONUtils.cpp b/src/Formats/JSONUtils.cpp index 16083441f3a..0918658cf6f 100644 --- a/src/Formats/JSONUtils.cpp +++ b/src/Formats/JSONUtils.cpp @@ -559,6 +559,7 @@ namespace JSONUtils bool write_statistics, WriteBuffer & out) { + writeCompactObjectStart(out); writeCompactObjectStart(out, 0, "statistics"); writeTitle("rows", out, 0, ""); writeIntText(rows, out); @@ -585,6 +586,7 @@ namespace JSONUtils writeText(progress.read_bytes.load(), out); } writeCompactObjectEnd(out); + writeCompactObjectEnd(out); } void writeException(const String & exception_message, WriteBuffer & out, const FormatSettings & settings, size_t indent) From 00e47d64b0c12023d926b383c6dea327e9806fc7 Mon Sep 17 00:00:00 2001 From: Alexey Korepanov Date: Mon, 8 Jul 2024 09:02:07 +0200 Subject: [PATCH 12/70] Add JSONCompactWithProgressStrings format --- .../Impl/JSONCompactWithProgressRowOutputFormat.cpp | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/src/Processors/Formats/Impl/JSONCompactWithProgressRowOutputFormat.cpp b/src/Processors/Formats/Impl/JSONCompactWithProgressRowOutputFormat.cpp index d72a6556c2b..91608cd5050 100644 --- a/src/Processors/Formats/Impl/JSONCompactWithProgressRowOutputFormat.cpp +++ 
b/src/Processors/Formats/Impl/JSONCompactWithProgressRowOutputFormat.cpp @@ -146,6 +146,14 @@ void registerOutputFormatJSONCompactWithProgress(FormatFactory & factory) { return std::make_shared(buf, sample, format_settings, false); }); + + factory.registerOutputFormat("JSONCompactWithProgressStrings", []( + WriteBuffer & buf, + const Block & sample, + const FormatSettings & format_settings) + { + return std::make_shared(buf, sample, format_settings, true); + }); } } From 58d34e306cec5b41ac372934e43639c2df24e54e Mon Sep 17 00:00:00 2001 From: Alexey Korepanov Date: Mon, 8 Jul 2024 09:08:03 +0200 Subject: [PATCH 13/70] Add documentation on JSONCompactWithProgress --- docs/en/interfaces/formats.md | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/docs/en/interfaces/formats.md b/docs/en/interfaces/formats.md index ffdd7e2ca25..6161cddefdd 100644 --- a/docs/en/interfaces/formats.md +++ b/docs/en/interfaces/formats.md @@ -38,6 +38,7 @@ The supported formats are: | [JSONCompact](#jsoncompact) | ✔ | ✔ | | [JSONCompactStrings](#jsoncompactstrings) | ✗ | ✔ | | [JSONCompactColumns](#jsoncompactcolumns) | ✔ | ✔ | +| [JSONCompactWithProgress](#jsoncompactwithprogress) | ✗ | ✔ | | [JSONEachRow](#jsoneachrow) | ✔ | ✔ | | [PrettyJSONEachRow](#prettyjsoneachrow) | ✗ | ✔ | | [JSONEachRowWithProgress](#jsoneachrowwithprogress) | ✗ | ✔ | @@ -925,6 +926,23 @@ Example: Columns that are not present in the block will be filled with default values (you can use [input_format_defaults_for_omitted_fields](/docs/en/operations/settings/settings-formats.md/#input_format_defaults_for_omitted_fields) setting here) +## JSONCompactWithProgress (#jsoncompactwithprogress) + +In this format, ClickHouse outputs each row as a separated, newline-delimited JSON Object. + +Each row is either a metadata object, data object, progress information or statistics object. + +Example: + +```json +{"meta": [{"name":"id", "type":"UInt32"}, {"name":"name", "type":"String"}]}} +{"progress":{"read_rows":"8","read_bytes":"168","written_rows":"0","written_bytes":"0","total_rows_to_read":"2","result_rows":"0","result_bytes":"0","elapsed_ns":"0"}} +{"data":["1", "John Doe"]} +{"data":["2", "Joe Doe"]} +{"statistics": {"rows":2, "rows_before_limit_at_least":8, "elapsed":0.001995, "rows_read":8, "bytes_read":168}} +``` + + ## JSONEachRow {#jsoneachrow} In this format, ClickHouse outputs each row as a separated, newline-delimited JSON Object. 
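
Usage sketch (not part of the patch series): once a server includes this change, the format can be requested like any other output format. The query, column aliases, and LIMIT below are illustrative assumptions only; the interleaving of progress, data, and statistics lines will vary with block size and timing.

```bash
# Hypothetical smoke test for the new output format (not taken from the patches above).
# system.numbers is a built-in table; the aliases and LIMIT are arbitrary choices.
clickhouse-client --query "
    SELECT number AS id, concat('user_', toString(number)) AS name
    FROM system.numbers
    LIMIT 3
    FORMAT JSONCompactWithProgress"
```
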
From 78cb02503f05d1c1c84dbb17fd51700d74f697df Mon Sep 17 00:00:00 2001 From: Alexey Korepanov Date: Mon, 8 Jul 2024 09:22:07 +0200 Subject: [PATCH 14/70] Remove extra space --- .../Formats/Impl/JSONCompactWithProgressRowOutputFormat.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Processors/Formats/Impl/JSONCompactWithProgressRowOutputFormat.cpp b/src/Processors/Formats/Impl/JSONCompactWithProgressRowOutputFormat.cpp index 91608cd5050..6be2d092305 100644 --- a/src/Processors/Formats/Impl/JSONCompactWithProgressRowOutputFormat.cpp +++ b/src/Processors/Formats/Impl/JSONCompactWithProgressRowOutputFormat.cpp @@ -111,7 +111,7 @@ void JSONCompactWithProgressRowOutputFormat::writeProgress() { std::lock_guard lock(progress_lines_mutex); for (const auto & progress_line : progress_lines) - writeString(progress_line, *ostr); + writeString(progress_line, *ostr); progress_lines.clear(); has_progress = false; } From 7fbcaeaae2bddc07898fd4e2eb7aa9331e2d0df5 Mon Sep 17 00:00:00 2001 From: Alexey Korepanov Date: Mon, 8 Jul 2024 09:29:14 +0200 Subject: [PATCH 15/70] Remove wrong comment --- .../Formats/Impl/JSONCompactWithProgressRowOutputFormat.h | 2 -- 1 file changed, 2 deletions(-) diff --git a/src/Processors/Formats/Impl/JSONCompactWithProgressRowOutputFormat.h b/src/Processors/Formats/Impl/JSONCompactWithProgressRowOutputFormat.h index 669e342c583..dc7a70229e7 100644 --- a/src/Processors/Formats/Impl/JSONCompactWithProgressRowOutputFormat.h +++ b/src/Processors/Formats/Impl/JSONCompactWithProgressRowOutputFormat.h @@ -11,8 +11,6 @@ namespace DB struct FormatSettings; -/** The stream for outputting data in the JSONCompact- formats. - */ class JSONCompactWithProgressRowOutputFormat final : public JSONRowOutputFormat { public: From a3360d0e63c08132ecd73423d2dd9fc576b54589 Mon Sep 17 00:00:00 2001 From: Alexey Korepanov Date: Mon, 8 Jul 2024 13:53:24 +0200 Subject: [PATCH 16/70] Fix code style --- ...JSONCompactWithProgressRowOutputFormat.cpp | 29 +++++++------------ .../JSONCompactWithProgressRowOutputFormat.h | 6 +--- 2 files changed, 11 insertions(+), 24 deletions(-) diff --git a/src/Processors/Formats/Impl/JSONCompactWithProgressRowOutputFormat.cpp b/src/Processors/Formats/Impl/JSONCompactWithProgressRowOutputFormat.cpp index 6be2d092305..9603ce0265d 100644 --- a/src/Processors/Formats/Impl/JSONCompactWithProgressRowOutputFormat.cpp +++ b/src/Processors/Formats/Impl/JSONCompactWithProgressRowOutputFormat.cpp @@ -1,6 +1,6 @@ -#include #include #include +#include #include @@ -9,10 +9,7 @@ namespace DB { JSONCompactWithProgressRowOutputFormat::JSONCompactWithProgressRowOutputFormat( - WriteBuffer & out_, - const Block & header, - const FormatSettings & settings_, - bool yield_strings_) + WriteBuffer & out_, const Block & header, const FormatSettings & settings_, bool yield_strings_) : JSONRowOutputFormat(out_, header, settings_, yield_strings_) { } @@ -139,21 +136,15 @@ void JSONCompactWithProgressRowOutputFormat::finalizeImpl() void registerOutputFormatJSONCompactWithProgress(FormatFactory & factory) { - factory.registerOutputFormat("JSONCompactWithProgress", []( - WriteBuffer & buf, - const Block & sample, - const FormatSettings & format_settings) - { - return std::make_shared(buf, sample, format_settings, false); - }); + factory.registerOutputFormat( + "JSONCompactWithProgress", + [](WriteBuffer & buf, const Block & sample, const FormatSettings & format_settings) + { return std::make_shared(buf, sample, format_settings, false); }); - 
factory.registerOutputFormat("JSONCompactWithProgressStrings", []( - WriteBuffer & buf, - const Block & sample, - const FormatSettings & format_settings) - { - return std::make_shared(buf, sample, format_settings, true); - }); + factory.registerOutputFormat( + "JSONCompactWithProgressStrings", + [](WriteBuffer & buf, const Block & sample, const FormatSettings & format_settings) + { return std::make_shared(buf, sample, format_settings, true); }); } } diff --git a/src/Processors/Formats/Impl/JSONCompactWithProgressRowOutputFormat.h b/src/Processors/Formats/Impl/JSONCompactWithProgressRowOutputFormat.h index dc7a70229e7..1c21914d8cb 100644 --- a/src/Processors/Formats/Impl/JSONCompactWithProgressRowOutputFormat.h +++ b/src/Processors/Formats/Impl/JSONCompactWithProgressRowOutputFormat.h @@ -14,11 +14,7 @@ struct FormatSettings; class JSONCompactWithProgressRowOutputFormat final : public JSONRowOutputFormat { public: - JSONCompactWithProgressRowOutputFormat( - WriteBuffer & out_, - const Block & header, - const FormatSettings & settings_, - bool yield_strings_); + JSONCompactWithProgressRowOutputFormat(WriteBuffer & out_, const Block & header, const FormatSettings & settings_, bool yield_strings_); String getName() const override { return "JSONCompactWithProgressRowOutputFormat"; } From 6cb361413eff6f870eb2bd43876d82d1c2c5e882 Mon Sep 17 00:00:00 2001 From: Alexander Tokmakov Date: Tue, 9 Jul 2024 16:45:15 +0200 Subject: [PATCH 17/70] fix --- src/Databases/DatabaseReplicated.cpp | 3 +++ tests/integration/test_disk_over_web_server/test.py | 3 ++- .../00510_materizlized_view_and_deduplication_zookeeper.sql | 2 ++ tests/queries/0_stateless/00609_mv_index_in_in.sql | 1 + tests/queries/0_stateless/00738_lock_for_inner_table.sh | 2 ++ tests/queries/0_stateless/01153_attach_mv_uuid.sql | 1 + .../0_stateless/02858_explicit_uuid_and_zk_path.reference | 4 ++-- tests/queries/0_stateless/02858_explicit_uuid_and_zk_path.sh | 4 ++-- .../0_stateless/02888_replicated_merge_tree_creation.sh | 1 + 9 files changed, 16 insertions(+), 5 deletions(-) diff --git a/src/Databases/DatabaseReplicated.cpp b/src/Databases/DatabaseReplicated.cpp index bc9b2537d1e..25d4a3eb704 100644 --- a/src/Databases/DatabaseReplicated.cpp +++ b/src/Databases/DatabaseReplicated.cpp @@ -1008,6 +1008,9 @@ void DatabaseReplicated::recoverLostReplica(const ZooKeeperPtr & current_zookeep query_context->setSetting("allow_create_index_without_type", 1); query_context->setSetting("allow_experimental_s3queue", 1); + query_context->setSetting("database_replicated_allow_explicit_uuid", 3); + query_context->setSetting("database_replicated_allow_replicated_engine_arguments", 3); + auto txn = std::make_shared(current_zookeeper, zookeeper_path, false, ""); query_context->initZooKeeperMetadataTransaction(txn); return query_context; diff --git a/tests/integration/test_disk_over_web_server/test.py b/tests/integration/test_disk_over_web_server/test.py index 891ee8f00f5..ec0bef23731 100644 --- a/tests/integration/test_disk_over_web_server/test.py +++ b/tests/integration/test_disk_over_web_server/test.py @@ -311,7 +311,8 @@ def test_replicated_database(cluster): SETTINGS storage_policy = 'web'; """.format( uuids[0] - ) + ), + settings={"database_replicated_allow_explicit_uuid": 3}, ) node2 = cluster.instances["node2"] diff --git a/tests/queries/0_stateless/00510_materizlized_view_and_deduplication_zookeeper.sql b/tests/queries/0_stateless/00510_materizlized_view_and_deduplication_zookeeper.sql index d3c4da86b41..5d2f8192d04 100644 --- 
a/tests/queries/0_stateless/00510_materizlized_view_and_deduplication_zookeeper.sql +++ b/tests/queries/0_stateless/00510_materizlized_view_and_deduplication_zookeeper.sql @@ -6,6 +6,8 @@ DROP TABLE IF EXISTS without_deduplication; DROP TABLE IF EXISTS with_deduplication_mv; DROP TABLE IF EXISTS without_deduplication_mv; +SET database_replicated_allow_explicit_uuid=3; +SET database_replicated_allow_replicated_engine_arguments=3; CREATE TABLE with_deduplication(x UInt32) ENGINE ReplicatedMergeTree('/clickhouse/tables/{database}/test_00510/with_deduplication', 'r1') ORDER BY x; CREATE TABLE without_deduplication(x UInt32) diff --git a/tests/queries/0_stateless/00609_mv_index_in_in.sql b/tests/queries/0_stateless/00609_mv_index_in_in.sql index bd9f35350c1..89fc85651ad 100644 --- a/tests/queries/0_stateless/00609_mv_index_in_in.sql +++ b/tests/queries/0_stateless/00609_mv_index_in_in.sql @@ -6,6 +6,7 @@ DROP TABLE IF EXISTS test_mv_00609; create table test_00609 (a Int8) engine=Memory; insert into test_00609 values (1); +set database_replicated_allow_explicit_uuid=3; set allow_deprecated_syntax_for_merge_tree=1; create materialized view test_mv_00609 uuid '00000609-1000-4000-8000-000000000001' Engine=MergeTree(date, (a), 8192) populate as select a, toDate('2000-01-01') date from test_00609; diff --git a/tests/queries/0_stateless/00738_lock_for_inner_table.sh b/tests/queries/0_stateless/00738_lock_for_inner_table.sh index b62a639d8f4..9bc84dd1063 100755 --- a/tests/queries/0_stateless/00738_lock_for_inner_table.sh +++ b/tests/queries/0_stateless/00738_lock_for_inner_table.sh @@ -7,6 +7,8 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh +CLICKHOUSE_CLIENT="${CLICKHOUSE_CLIENT} --database_replicated_allow_explicit_uuid 3" + # there are some issues with Atomic database, let's generate it uniq # otherwise flaky check will not pass. 
uuid=$(${CLICKHOUSE_CLIENT} --query "SELECT reinterpretAsUUID(currentDatabase())") diff --git a/tests/queries/0_stateless/01153_attach_mv_uuid.sql b/tests/queries/0_stateless/01153_attach_mv_uuid.sql index 00cce8a1de4..6b167253d8f 100644 --- a/tests/queries/0_stateless/01153_attach_mv_uuid.sql +++ b/tests/queries/0_stateless/01153_attach_mv_uuid.sql @@ -14,6 +14,7 @@ INSERT INTO src VALUES (3), (4); SELECT * FROM mv ORDER BY n; DROP TABLE mv SYNC; +SET database_replicated_allow_explicit_uuid=3; SET show_table_uuid_in_table_create_query_if_not_nil=1; CREATE TABLE ".inner_id.e15f3ab5-6cae-4df3-b879-f40deafd82c2" (n Int32, n2 Int64) ENGINE = MergeTree PARTITION BY n % 10 ORDER BY n; ATTACH MATERIALIZED VIEW mv UUID 'e15f3ab5-6cae-4df3-b879-f40deafd82c2' (n Int32, n2 Int64) ENGINE = MergeTree PARTITION BY n % 10 ORDER BY n AS SELECT n, n * n AS n2 FROM src; diff --git a/tests/queries/0_stateless/02858_explicit_uuid_and_zk_path.reference b/tests/queries/0_stateless/02858_explicit_uuid_and_zk_path.reference index 6444e10bb48..874494fb061 100644 --- a/tests/queries/0_stateless/02858_explicit_uuid_and_zk_path.reference +++ b/tests/queries/0_stateless/02858_explicit_uuid_and_zk_path.reference @@ -6,5 +6,5 @@ rmt1 rmt2 02858000-1000-4000-8000-000000000 0 -CREATE TABLE rdb_default.rmt1\n(\n `n` Int32\n)\nENGINE = ReplicatedMergeTree(\'/test/02858_explicit_uuid_and_zk_path_default/rmt/{shard}\', \'_{replica}\')\nORDER BY n\nSETTINGS index_granularity = 8192 -CREATE TABLE rdb_default.rmt2\n(\n `n` Int32\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/{uuid}/{shard}\', \'{replica}\')\nORDER BY n\nSETTINGS index_granularity = 8192 +CREATE TABLE default.rmt1\n(\n `n` Int32\n)\nENGINE = ReplicatedMergeTree(\'/test/02858_explicit_uuid_and_zk_path_default/rmt/{shard}\', \'_{replica}\')\nORDER BY n\nSETTINGS index_granularity = 8192 +CREATE TABLE default.rmt2\n(\n `n` Int32\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/{uuid}/{shard}\', \'{replica}\')\nORDER BY n\nSETTINGS index_granularity = 8192 diff --git a/tests/queries/0_stateless/02858_explicit_uuid_and_zk_path.sh b/tests/queries/0_stateless/02858_explicit_uuid_and_zk_path.sh index b011aed613a..81a9cef02ff 100755 --- a/tests/queries/0_stateless/02858_explicit_uuid_and_zk_path.sh +++ b/tests/queries/0_stateless/02858_explicit_uuid_and_zk_path.sh @@ -38,7 +38,7 @@ $CLICKHOUSE_CLIENT -q "SELECT name FROM system.tables WHERE database='$db' ORDER $CLICKHOUSE_CLIENT -q "SELECT substring(toString(uuid) as s, 1, length(s) - 3) FROM system.tables WHERE database='$db' and name='m1'" $CLICKHOUSE_CLIENT -q "SELECT toString(uuid) LIKE '02858000%' FROM system.tables WHERE database='$db' and name='m2'" -$CLICKHOUSE_CLIENT -q "SHOW CREATE $db.rmt1" -$CLICKHOUSE_CLIENT -q "SHOW CREATE $db.rmt2" +$CLICKHOUSE_CLIENT -q "SHOW CREATE $db.rmt1" | sed "s/$db/default/g" +$CLICKHOUSE_CLIENT -q "SHOW CREATE $db.rmt2" | sed "s/$db/default/g" $CLICKHOUSE_CLIENT -q "DROP DATABASE IF EXISTS rdb_$CLICKHOUSE_DATABASE" diff --git a/tests/queries/0_stateless/02888_replicated_merge_tree_creation.sh b/tests/queries/0_stateless/02888_replicated_merge_tree_creation.sh index b9603e75d2e..466f0d01a7f 100755 --- a/tests/queries/0_stateless/02888_replicated_merge_tree_creation.sh +++ b/tests/queries/0_stateless/02888_replicated_merge_tree_creation.sh @@ -5,6 +5,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . 
"$CUR_DIR"/../shell_config.sh +CLICKHOUSE_CLIENT="${CLICKHOUSE_CLIENT} --database_replicated_allow_explicit_uuid 3 --database_replicated_allow_replicated_engine_arguments 3" ${CLICKHOUSE_CLIENT} -q "DROP TABLE IF EXISTS test_exception_replicated SYNC" From 0631ff93778801768edde14c5fd5ed22701f6ec3 Mon Sep 17 00:00:00 2001 From: Alexey Korepanov Date: Tue, 23 Jul 2024 12:14:54 +0100 Subject: [PATCH 18/70] Add jsoncompactwithprogress to aspell-dict.txt --- utils/check-style/aspell-ignore/en/aspell-dict.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/utils/check-style/aspell-ignore/en/aspell-dict.txt b/utils/check-style/aspell-ignore/en/aspell-dict.txt index 3967179f078..2e403f44e4e 100644 --- a/utils/check-style/aspell-ignore/en/aspell-dict.txt +++ b/utils/check-style/aspell-ignore/en/aspell-dict.txt @@ -1855,6 +1855,7 @@ jsoncompactstrings jsoncompactstringseachrow jsoncompactstringseachrowwithnames jsoncompactstringseachrowwithnamesandtypes +jsoncompactwithprogress jsoneachrow jsoneachrowwithprogress jsonobjecteachrow From 53a55221845802e67f9586af615fcb5ed84b20eb Mon Sep 17 00:00:00 2001 From: Alexey Korepanov Date: Tue, 23 Jul 2024 12:19:06 +0100 Subject: [PATCH 19/70] Fix example output in the documentation --- docs/en/interfaces/formats.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/en/interfaces/formats.md b/docs/en/interfaces/formats.md index 6161cddefdd..8795b71e0ac 100644 --- a/docs/en/interfaces/formats.md +++ b/docs/en/interfaces/formats.md @@ -935,7 +935,7 @@ Each row is either a metadata object, data object, progress information or stati Example: ```json -{"meta": [{"name":"id", "type":"UInt32"}, {"name":"name", "type":"String"}]}} +{"meta": [{"name":"id", "type":"UInt32"}, {"name":"name", "type":"String"}]} {"progress":{"read_rows":"8","read_bytes":"168","written_rows":"0","written_bytes":"0","total_rows_to_read":"2","result_rows":"0","result_bytes":"0","elapsed_ns":"0"}} {"data":["1", "John Doe"]} {"data":["2", "Joe Doe"]} From a4a0611c1c0b40ea6b99d7e5cf3972fda201bb9d Mon Sep 17 00:00:00 2001 From: Alexey Korepanov Date: Tue, 23 Jul 2024 12:29:40 +0100 Subject: [PATCH 20/70] Fix docs spelling --- utils/check-style/aspell-ignore/en/aspell-dict.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/utils/check-style/aspell-ignore/en/aspell-dict.txt b/utils/check-style/aspell-ignore/en/aspell-dict.txt index 2e403f44e4e..c64cfcb0968 100644 --- a/utils/check-style/aspell-ignore/en/aspell-dict.txt +++ b/utils/check-style/aspell-ignore/en/aspell-dict.txt @@ -406,6 +406,7 @@ JSONCompactStrings JSONCompactStringsEachRow JSONCompactStringsEachRowWithNames JSONCompactStringsEachRowWithNamesAndTypes +JSONCompactWithProgress JSONEachRow JSONEachRowWithProgress JSONExtract From ef4ad01d7d57942a79b46ad05c10160d557ff75f Mon Sep 17 00:00:00 2001 From: Alexey Korepanov Date: Tue, 23 Jul 2024 15:03:01 +0100 Subject: [PATCH 21/70] Fix totals formatting --- .../Impl/JSONCompactWithProgressRowOutputFormat.cpp | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/src/Processors/Formats/Impl/JSONCompactWithProgressRowOutputFormat.cpp b/src/Processors/Formats/Impl/JSONCompactWithProgressRowOutputFormat.cpp index 9603ce0265d..ddf071324c6 100644 --- a/src/Processors/Formats/Impl/JSONCompactWithProgressRowOutputFormat.cpp +++ b/src/Processors/Formats/Impl/JSONCompactWithProgressRowOutputFormat.cpp @@ -55,8 +55,8 @@ void JSONCompactWithProgressRowOutputFormat::writeRowBetweenDelimiter() void JSONCompactWithProgressRowOutputFormat::writeBeforeTotals() { - 
JSONUtils::writeFieldDelimiter(*ostr, 2); - JSONUtils::writeCompactArrayStart(*ostr, 1, "totals"); + JSONUtils::writeCompactObjectStart(*ostr); + JSONUtils::writeCompactArrayStart(*ostr, 0, "totals"); } void JSONCompactWithProgressRowOutputFormat::writeTotals(const Columns & columns, size_t row_num) @@ -67,6 +67,8 @@ void JSONCompactWithProgressRowOutputFormat::writeTotals(const Columns & columns void JSONCompactWithProgressRowOutputFormat::writeAfterTotals() { JSONUtils::writeCompactArrayEnd(*ostr); + JSONUtils::writeCompactObjectEnd(*ostr); + writeCString("}\n", *ostr); } void JSONCompactWithProgressRowOutputFormat::writeExtremesElement(const char * title, const Columns & columns, size_t row_num) @@ -131,6 +133,7 @@ void JSONCompactWithProgressRowOutputFormat::finalizeImpl() JSONUtils::writeException(exception_message, *ostr, settings, 0); JSONUtils::writeCompactObjectEnd(*ostr); } + writeCString("\n", *ostr); ostr->next(); } From f3710e9a29e4b866bfd55886c18ebad954a48e42 Mon Sep 17 00:00:00 2001 From: Alexey Korepanov Date: Tue, 23 Jul 2024 15:04:05 +0100 Subject: [PATCH 22/70] Add stateless test for JSONCompactWithProgress output format --- ...03174_json_compact_with_progress.reference | 14 +++++++++++++ .../03174_json_compact_with_progress.sh | 20 +++++++++++++++++++ 2 files changed, 34 insertions(+) create mode 100644 tests/queries/0_stateless/03174_json_compact_with_progress.reference create mode 100755 tests/queries/0_stateless/03174_json_compact_with_progress.sh diff --git a/tests/queries/0_stateless/03174_json_compact_with_progress.reference b/tests/queries/0_stateless/03174_json_compact_with_progress.reference new file mode 100644 index 00000000000..6e83db33565 --- /dev/null +++ b/tests/queries/0_stateless/03174_json_compact_with_progress.reference @@ -0,0 +1,14 @@ +1 +{"meta": [{"name":"value", "type":"UInt8"}, {"name":"name", "type":"String"}]}} +{"data":[1, "a"]} +{"data":[2, "b"]} +{"data":[3, "c"]} +{"progress":{"read_rows":"3","read_bytes":"33","written_rows":"0","written_bytes":"0","total_rows_to_read":"3","result_rows":"0","result_bytes":"0","elapsed_ns":"ELAPSED_NS"}} +{"statistics": {"rows":3, "elapsed":ELAPSED, "rows_read":3, "bytes_read":33}} +2 +{"meta": [{"name":"name", "type":"String"}, {"name":"c", "type":"UInt64"}]}} +{"data":["a", "1"]} +{"data":["b", "1"]} +{"data":["c", "1"]} +{"totals": ["", "3"]}} +{"statistics": {"rows":3, "elapsed":ELAPSED, "rows_read":3, "bytes_read":30}} diff --git a/tests/queries/0_stateless/03174_json_compact_with_progress.sh b/tests/queries/0_stateless/03174_json_compact_with_progress.sh new file mode 100755 index 00000000000..c21f7228517 --- /dev/null +++ b/tests/queries/0_stateless/03174_json_compact_with_progress.sh @@ -0,0 +1,20 @@ +#!/usr/bin/env bash +# Tags: no-fasttest, no-parallel + +CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. 
"$CUR_DIR"/../shell_config.sh + +$CLICKHOUSE_CLIENT -q "DROP TABLE IF EXISTS test_table;" + +$CLICKHOUSE_CLIENT -q "SELECT 1;" +# Check JSONCompactWithProgress Output +$CLICKHOUSE_CLIENT -q "CREATE TABLE test_table (value UInt8, name String) ENGINE = MergeTree() ORDER BY value;" +$CLICKHOUSE_CLIENT -q "INSERT INTO test_table VALUES (1, 'a'), (2, 'b'), (3, 'c');" +$CLICKHOUSE_CLIENT -q "SELECT * FROM test_table FORMAT JSONCompactWithProgress settings max_block_size=2;" | sed -E 's/"elapsed_ns":"[0-9]+"/"elapsed_ns":"ELAPSED_NS"/g; s/"elapsed":[0-9]+\.[0-9]+/"elapsed":ELAPSED/g' + +$CLICKHOUSE_CLIENT -q "SELECT 2;" +# Check Totals +$CLICKHOUSE_CLIENT -q "SELECT name, count() AS c FROM test_table GROUP BY name WITH TOTALS ORDER BY name FORMAT JSONCompactWithProgress settings max_block_size=2;" | sed -E 's/"elapsed_ns":"[0-9]+"/"elapsed_ns":"ELAPSED_NS"/g; s/"elapsed":[0-9]+\.[0-9]+/"elapsed":ELAPSED/g' + +$CLICKHOUSE_CLIENT -q "DROP TABLE IF EXISTS test_table;" From 118e329e23b51673e7d62bb3ffa8a34a8ecdb330 Mon Sep 17 00:00:00 2001 From: Alexey Korepanov Date: Wed, 24 Jul 2024 13:09:47 +0100 Subject: [PATCH 23/70] Fix flaky test --- ...03174_json_compact_with_progress.reference | 19 ++++++++++--------- .../03174_json_compact_with_progress.sh | 4 ++-- 2 files changed, 12 insertions(+), 11 deletions(-) diff --git a/tests/queries/0_stateless/03174_json_compact_with_progress.reference b/tests/queries/0_stateless/03174_json_compact_with_progress.reference index 6e83db33565..b735c871fbd 100644 --- a/tests/queries/0_stateless/03174_json_compact_with_progress.reference +++ b/tests/queries/0_stateless/03174_json_compact_with_progress.reference @@ -1,14 +1,15 @@ 1 -{"meta": [{"name":"value", "type":"UInt8"}, {"name":"name", "type":"String"}]}} -{"data":[1, "a"]} -{"data":[2, "b"]} -{"data":[3, "c"]} +{"meta": [{"name":"value", "type":"UInt8"}, {"name":"name", "type":"String"}, {"name":"sleep(0.1)", "type":"UInt8"}]}} {"progress":{"read_rows":"3","read_bytes":"33","written_rows":"0","written_bytes":"0","total_rows_to_read":"3","result_rows":"0","result_bytes":"0","elapsed_ns":"ELAPSED_NS"}} +{"data":[1, "a", 0]} +{"data":[2, "b", 0]} +{"data":[3, "c", 0]} {"statistics": {"rows":3, "elapsed":ELAPSED, "rows_read":3, "bytes_read":33}} 2 -{"meta": [{"name":"name", "type":"String"}, {"name":"c", "type":"UInt64"}]}} -{"data":["a", "1"]} -{"data":["b", "1"]} -{"data":["c", "1"]} -{"totals": ["", "3"]}} +{"meta": [{"name":"name", "type":"String"}, {"name":"count()", "type":"UInt64"}, {"name":"c", "type":"UInt8"}]}} +{"progress":{"read_rows":"3","read_bytes":"30","written_rows":"0","written_bytes":"0","total_rows_to_read":"3","result_rows":"0","result_bytes":"0","elapsed_ns":"ELAPSED_NS"}} +{"data":["a", "1", 0]} +{"data":["b", "1", 0]} +{"data":["c", "1", 0]} +{"totals": ["", "3", 0]}} {"statistics": {"rows":3, "elapsed":ELAPSED, "rows_read":3, "bytes_read":30}} diff --git a/tests/queries/0_stateless/03174_json_compact_with_progress.sh b/tests/queries/0_stateless/03174_json_compact_with_progress.sh index c21f7228517..383668de858 100755 --- a/tests/queries/0_stateless/03174_json_compact_with_progress.sh +++ b/tests/queries/0_stateless/03174_json_compact_with_progress.sh @@ -11,10 +11,10 @@ $CLICKHOUSE_CLIENT -q "SELECT 1;" # Check JSONCompactWithProgress Output $CLICKHOUSE_CLIENT -q "CREATE TABLE test_table (value UInt8, name String) ENGINE = MergeTree() ORDER BY value;" $CLICKHOUSE_CLIENT -q "INSERT INTO test_table VALUES (1, 'a'), (2, 'b'), (3, 'c');" -$CLICKHOUSE_CLIENT -q "SELECT * FROM test_table FORMAT 
JSONCompactWithProgress settings max_block_size=2;" | sed -E 's/"elapsed_ns":"[0-9]+"/"elapsed_ns":"ELAPSED_NS"/g; s/"elapsed":[0-9]+\.[0-9]+/"elapsed":ELAPSED/g' +$CLICKHOUSE_CLIENT -q "SELECT *, sleep(0.1) FROM test_table FORMAT JSONCompactWithProgress settings max_block_size=2;" | sed -E 's/"elapsed_ns":"[0-9]+"/"elapsed_ns":"ELAPSED_NS"/g; s/"elapsed":[0-9]+\.[0-9]+/"elapsed":ELAPSED/g' $CLICKHOUSE_CLIENT -q "SELECT 2;" # Check Totals -$CLICKHOUSE_CLIENT -q "SELECT name, count() AS c FROM test_table GROUP BY name WITH TOTALS ORDER BY name FORMAT JSONCompactWithProgress settings max_block_size=2;" | sed -E 's/"elapsed_ns":"[0-9]+"/"elapsed_ns":"ELAPSED_NS"/g; s/"elapsed":[0-9]+\.[0-9]+/"elapsed":ELAPSED/g' +$CLICKHOUSE_CLIENT -q "SELECT name, count(), sleep(0.1) AS c FROM test_table GROUP BY name WITH TOTALS ORDER BY name FORMAT JSONCompactWithProgress settings max_block_size=2;" | sed -E 's/"elapsed_ns":"[0-9]+"/"elapsed_ns":"ELAPSED_NS"/g; s/"elapsed":[0-9]+\.[0-9]+/"elapsed":ELAPSED/g' $CLICKHOUSE_CLIENT -q "DROP TABLE IF EXISTS test_table;" From b0ec8c92fcc09857345acb4e4f627827556d17cd Mon Sep 17 00:00:00 2001 From: Alexey Korepanov Date: Wed, 24 Jul 2024 17:49:33 +0100 Subject: [PATCH 24/70] Fix totals json object --- .../Formats/Impl/JSONCompactWithProgressRowOutputFormat.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Processors/Formats/Impl/JSONCompactWithProgressRowOutputFormat.cpp b/src/Processors/Formats/Impl/JSONCompactWithProgressRowOutputFormat.cpp index ddf071324c6..0814e5f45d6 100644 --- a/src/Processors/Formats/Impl/JSONCompactWithProgressRowOutputFormat.cpp +++ b/src/Processors/Formats/Impl/JSONCompactWithProgressRowOutputFormat.cpp @@ -68,7 +68,7 @@ void JSONCompactWithProgressRowOutputFormat::writeAfterTotals() { JSONUtils::writeCompactArrayEnd(*ostr); JSONUtils::writeCompactObjectEnd(*ostr); - writeCString("}\n", *ostr); + writeCString("\n", *ostr); } void JSONCompactWithProgressRowOutputFormat::writeExtremesElement(const char * title, const Columns & columns, size_t row_num) From caaf14a0a91bd4779cc292dd674761a47148d24d Mon Sep 17 00:00:00 2001 From: Alexey Korepanov Date: Wed, 24 Jul 2024 17:56:14 +0100 Subject: [PATCH 25/70] Fix flaky test by ignoring progress lines when comparing output --- ...03174_json_compact_with_progress.reference | 20 +++++++++---------- .../03174_json_compact_with_progress.sh | 4 ++-- 2 files changed, 11 insertions(+), 13 deletions(-) diff --git a/tests/queries/0_stateless/03174_json_compact_with_progress.reference b/tests/queries/0_stateless/03174_json_compact_with_progress.reference index b735c871fbd..b45f296e5c0 100644 --- a/tests/queries/0_stateless/03174_json_compact_with_progress.reference +++ b/tests/queries/0_stateless/03174_json_compact_with_progress.reference @@ -1,15 +1,13 @@ 1 -{"meta": [{"name":"value", "type":"UInt8"}, {"name":"name", "type":"String"}, {"name":"sleep(0.1)", "type":"UInt8"}]}} -{"progress":{"read_rows":"3","read_bytes":"33","written_rows":"0","written_bytes":"0","total_rows_to_read":"3","result_rows":"0","result_bytes":"0","elapsed_ns":"ELAPSED_NS"}} -{"data":[1, "a", 0]} -{"data":[2, "b", 0]} -{"data":[3, "c", 0]} +{"meta": [{"name":"value", "type":"UInt8"}, {"name":"name", "type":"String"}]}} +{"data":[1, "a"]} +{"data":[2, "b"]} +{"data":[3, "c"]} {"statistics": {"rows":3, "elapsed":ELAPSED, "rows_read":3, "bytes_read":33}} 2 -{"meta": [{"name":"name", "type":"String"}, {"name":"count()", "type":"UInt64"}, {"name":"c", "type":"UInt8"}]}} 
-{"progress":{"read_rows":"3","read_bytes":"30","written_rows":"0","written_bytes":"0","total_rows_to_read":"3","result_rows":"0","result_bytes":"0","elapsed_ns":"ELAPSED_NS"}} -{"data":["a", "1", 0]} -{"data":["b", "1", 0]} -{"data":["c", "1", 0]} -{"totals": ["", "3", 0]}} +{"meta": [{"name":"name", "type":"String"}, {"name":"c", "type":"UInt64"}]}} +{"data":["a", "1"]} +{"data":["b", "1"]} +{"data":["c", "1"]} +{"totals": ["", "3"]} {"statistics": {"rows":3, "elapsed":ELAPSED, "rows_read":3, "bytes_read":30}} diff --git a/tests/queries/0_stateless/03174_json_compact_with_progress.sh b/tests/queries/0_stateless/03174_json_compact_with_progress.sh index 383668de858..b440dbe2891 100755 --- a/tests/queries/0_stateless/03174_json_compact_with_progress.sh +++ b/tests/queries/0_stateless/03174_json_compact_with_progress.sh @@ -11,10 +11,10 @@ $CLICKHOUSE_CLIENT -q "SELECT 1;" # Check JSONCompactWithProgress Output $CLICKHOUSE_CLIENT -q "CREATE TABLE test_table (value UInt8, name String) ENGINE = MergeTree() ORDER BY value;" $CLICKHOUSE_CLIENT -q "INSERT INTO test_table VALUES (1, 'a'), (2, 'b'), (3, 'c');" -$CLICKHOUSE_CLIENT -q "SELECT *, sleep(0.1) FROM test_table FORMAT JSONCompactWithProgress settings max_block_size=2;" | sed -E 's/"elapsed_ns":"[0-9]+"/"elapsed_ns":"ELAPSED_NS"/g; s/"elapsed":[0-9]+\.[0-9]+/"elapsed":ELAPSED/g' +$CLICKHOUSE_CLIENT -q "SELECT * FROM test_table FORMAT JSONCompactWithProgress settings max_block_size=2;" | grep -v --text "progress" | sed -E 's/"elapsed":[0-9]+\.[0-9]+/"elapsed":ELAPSED/g' $CLICKHOUSE_CLIENT -q "SELECT 2;" # Check Totals -$CLICKHOUSE_CLIENT -q "SELECT name, count(), sleep(0.1) AS c FROM test_table GROUP BY name WITH TOTALS ORDER BY name FORMAT JSONCompactWithProgress settings max_block_size=2;" | sed -E 's/"elapsed_ns":"[0-9]+"/"elapsed_ns":"ELAPSED_NS"/g; s/"elapsed":[0-9]+\.[0-9]+/"elapsed":ELAPSED/g' +$CLICKHOUSE_CLIENT -q "SELECT name, count() AS c FROM test_table GROUP BY name WITH TOTALS ORDER BY name FORMAT JSONCompactWithProgress settings max_block_size=2;" | grep -v --text "progress" | sed -E 's/"elapsed":[0-9]+\.[0-9]+/"elapsed":ELAPSED/g' $CLICKHOUSE_CLIENT -q "DROP TABLE IF EXISTS test_table;" From 63ccbcbdfc7aa012e1879b11f9f6f4a23f9a1a0c Mon Sep 17 00:00:00 2001 From: Alexey Korepanov Date: Thu, 25 Jul 2024 13:22:47 +0100 Subject: [PATCH 26/70] Remove test tags --- tests/queries/0_stateless/03174_json_compact_with_progress.sh | 1 - 1 file changed, 1 deletion(-) diff --git a/tests/queries/0_stateless/03174_json_compact_with_progress.sh b/tests/queries/0_stateless/03174_json_compact_with_progress.sh index b440dbe2891..8f524cdff76 100755 --- a/tests/queries/0_stateless/03174_json_compact_with_progress.sh +++ b/tests/queries/0_stateless/03174_json_compact_with_progress.sh @@ -1,5 +1,4 @@ #!/usr/bin/env bash -# Tags: no-fasttest, no-parallel CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh From 0f2024306deeb9b67b631391afc9d2c70d622ffc Mon Sep 17 00:00:00 2001 From: taiyang-li <654010905@qq.com> Date: Sat, 3 Aug 2024 17:03:27 +0800 Subject: [PATCH 27/70] optimize array() with single arguments and map() with double arguments --- src/Functions/array/array.cpp | 28 +++++++++++++++------ src/Functions/map.cpp | 47 +++++++++++++++++++++++------------ 2 files changed, 51 insertions(+), 24 deletions(-) diff --git a/src/Functions/array/array.cpp b/src/Functions/array/array.cpp index 03b51808799..202aa83216b 100644 --- a/src/Functions/array/array.cpp +++ b/src/Functions/array/array.cpp @@ -46,8 +46,10 
@@ public: size_t num_elements = arguments.size(); if (num_elements == 0) + { /// We should return constant empty array. return result_type->createColumnConstWithDefaultValue(input_rows_count); + } const DataTypePtr & elem_type = static_cast(*result_type).getNestedType(); @@ -59,7 +61,6 @@ public: Columns columns_holder(num_elements); ColumnRawPtrs column_ptrs(num_elements); - for (size_t i = 0; i < num_elements; ++i) { const auto & arg = arguments[i]; @@ -76,22 +77,33 @@ public: } /// Create and fill the result array. - auto out = ColumnArray::create(elem_type->createColumn()); IColumn & out_data = out->getData(); IColumn::Offsets & out_offsets = out->getOffsets(); out_data.reserve(input_rows_count * num_elements); - out_offsets.resize(input_rows_count); + out_offsets.resize_exact(input_rows_count); IColumn::Offset current_offset = 0; - for (size_t i = 0; i < input_rows_count; ++i) + if (num_elements == 1) { - for (size_t j = 0; j < num_elements; ++j) - out_data.insertFrom(*column_ptrs[j], i); + for (size_t i = 0; i < input_rows_count; ++i) + { + ++current_offset; + out_offsets[i] = current_offset; + } + out_data.insertManyFrom(*column_ptrs[0], 0, input_rows_count); + } + else + { + for (size_t i = 0; i < input_rows_count; ++i) + { + for (size_t j = 0; j < num_elements; ++j) + out_data.insertFrom(*column_ptrs[j], i); - current_offset += num_elements; - out_offsets[i] = current_offset; + current_offset += num_elements; + out_offsets[i] = current_offset; + } } return out; diff --git a/src/Functions/map.cpp b/src/Functions/map.cpp index 66cd10a3f0b..b869a475af0 100644 --- a/src/Functions/map.cpp +++ b/src/Functions/map.cpp @@ -1,14 +1,15 @@ -#include +#include +#include +#include +#include +#include +#include +#include #include #include -#include -#include -#include -#include -#include -#include -#include +#include #include +#include #include @@ -123,24 +124,38 @@ public: MutableColumnPtr keys_data = key_type->createColumn(); MutableColumnPtr values_data = value_type->createColumn(); - MutableColumnPtr offsets = DataTypeNumber().createColumn(); + MutableColumnPtr offsets = DataTypeUInt64().createColumn(); size_t total_elements = input_rows_count * num_elements / 2; keys_data->reserve(total_elements); values_data->reserve(total_elements); - offsets->reserve(input_rows_count); + auto & offsets_data = assert_cast(*offsets).getData(); + offsets_data.resize_exact(input_rows_count); IColumn::Offset current_offset = 0; - for (size_t i = 0; i < input_rows_count; ++i) + if (num_elements == 2) { - for (size_t j = 0; j < num_elements; j += 2) + for (size_t i = 0; i < input_rows_count; ++i) { - keys_data->insertFrom(*column_ptrs[j], i); - values_data->insertFrom(*column_ptrs[j + 1], i); + ++current_offset; + offsets_data[i] = current_offset; } + keys_data->insertManyFrom(*column_ptrs[0], 0, input_rows_count); + values_data->insertManyFrom(*column_ptrs[1], 0, input_rows_count); + } + else + { + for (size_t i = 0; i < input_rows_count; ++i) + { + for (size_t j = 0; j < num_elements; j += 2) + { + keys_data->insertFrom(*column_ptrs[j], i); + values_data->insertFrom(*column_ptrs[j + 1], i); + } - current_offset += num_elements / 2; - offsets->insert(current_offset); + current_offset += num_elements / 2; + offsets_data[i] = current_offset; + } } auto nested_column = ColumnArray::create( From e5d4eaa4580f5921a472174d805c852e8b057165 Mon Sep 17 00:00:00 2001 From: taiyang-li <654010905@qq.com> Date: Sat, 3 Aug 2024 21:08:21 +0800 Subject: [PATCH 28/70] fix bugs --- src/Functions/array/array.cpp | 2 +- 
src/Functions/map.cpp | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/src/Functions/array/array.cpp b/src/Functions/array/array.cpp index 202aa83216b..a15fe86a232 100644 --- a/src/Functions/array/array.cpp +++ b/src/Functions/array/array.cpp @@ -92,7 +92,7 @@ public: ++current_offset; out_offsets[i] = current_offset; } - out_data.insertManyFrom(*column_ptrs[0], 0, input_rows_count); + out_data.insertRangeFrom(*column_ptrs[0], 0, input_rows_count); } else { diff --git a/src/Functions/map.cpp b/src/Functions/map.cpp index b869a475af0..ebfa9d4eab4 100644 --- a/src/Functions/map.cpp +++ b/src/Functions/map.cpp @@ -140,8 +140,8 @@ public: ++current_offset; offsets_data[i] = current_offset; } - keys_data->insertManyFrom(*column_ptrs[0], 0, input_rows_count); - values_data->insertManyFrom(*column_ptrs[1], 0, input_rows_count); + keys_data->insertRangeFrom(*column_ptrs[0], 0, input_rows_count); + values_data->insertRangeFrom(*column_ptrs[1], 0, input_rows_count); } else { From a498259ec8c08d4f37f77c8a6870f519bc295801 Mon Sep 17 00:00:00 2001 From: taiyang-li <654010905@qq.com> Date: Sun, 4 Aug 2024 13:32:17 +0800 Subject: [PATCH 29/70] optimize array with mutiple arguments --- src/Functions/array/array.cpp | 212 +++++++++++++++++++++++++++++----- 1 file changed, 184 insertions(+), 28 deletions(-) diff --git a/src/Functions/array/array.cpp b/src/Functions/array/array.cpp index a15fe86a232..b25505e9ac1 100644 --- a/src/Functions/array/array.cpp +++ b/src/Functions/array/array.cpp @@ -1,10 +1,14 @@ -#include -#include +#include +#include +#include #include #include -#include -#include +#include +#include #include +#include +#include "Columns/ColumnFixedString.h" +#include "Columns/ColumnNullable.h" namespace DB @@ -43,7 +47,7 @@ public: ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr & result_type, size_t input_rows_count) const override { - size_t num_elements = arguments.size(); + const size_t num_elements = arguments.size(); if (num_elements == 0) { @@ -81,42 +85,194 @@ public: IColumn & out_data = out->getData(); IColumn::Offsets & out_offsets = out->getOffsets(); - out_data.reserve(input_rows_count * num_elements); + /// Fill out_offsets out_offsets.resize_exact(input_rows_count); - IColumn::Offset current_offset = 0; + for (size_t i = 0; i < input_rows_count; ++i) + { + current_offset += num_elements; + out_offsets[i] = current_offset; + } + + /// Fill out_data + out_data.reserve(input_rows_count * num_elements); if (num_elements == 1) - { - for (size_t i = 0; i < input_rows_count; ++i) - { - ++current_offset; - out_offsets[i] = current_offset; - } out_data.insertRangeFrom(*column_ptrs[0], 0, input_rows_count); - } else - { - for (size_t i = 0; i < input_rows_count; ++i) - { - for (size_t j = 0; j < num_elements; ++j) - out_data.insertFrom(*column_ptrs[j], i); - - current_offset += num_elements; - out_offsets[i] = current_offset; - } - } - + executeAny(column_ptrs, out_data, input_rows_count); return out; } - private: + bool executeAny(const ColumnRawPtrs & columns, IColumn & out_data, size_t input_rows_count) const + { + return false // NOLINT + || executeNumber(columns, out_data, input_rows_count) || executeNumber(columns, out_data, input_rows_count) + || executeNumber(columns, out_data, input_rows_count) || executeNumber(columns, out_data, input_rows_count) + || executeNumber(columns, out_data, input_rows_count) || executeNumber(columns, out_data, input_rows_count) + || executeNumber(columns, out_data, input_rows_count) || 
executeNumber(columns, out_data, input_rows_count) + || executeNumber(columns, out_data, input_rows_count) || executeNumber(columns, out_data, input_rows_count) + || executeNumber(columns, out_data, input_rows_count) || executeNumber(columns, out_data, input_rows_count) + || executeNumber(columns, out_data, input_rows_count) || executeNumber(columns, out_data, input_rows_count) + || executeNumber(columns, out_data, input_rows_count) + || executeNumber(columns, out_data, input_rows_count) + || executeNumber(columns, out_data, input_rows_count) + || executeNumber(columns, out_data, input_rows_count) + || executeNumber(columns, out_data, input_rows_count) || executeString(columns, out_data, input_rows_count) + || executeNullable(columns, out_data, input_rows_count) || executeTuple(columns, out_data, input_rows_count) + || executeFixedString(columns, out_data, input_rows_count) || executeGeneric(columns, out_data, input_rows_count); + } + + bool executeGeneric(const ColumnRawPtrs & columns, IColumn & out_data, size_t input_rows_count) const + { + for (size_t i = 0; i < input_rows_count; ++i) + for (const auto * column : columns) + out_data.insertFrom(*column, i); + return true; + } + + template + bool executeNumber(const ColumnRawPtrs & columns, IColumn & out_data, size_t input_rows_count) const + { + using Container = ColumnVectorOrDecimal::Container; + std::vector containers(columns.size(), nullptr); + for (size_t i = 0; i < columns.size(); ++i) + { + const ColumnVectorOrDecimal * concrete_column = checkAndGetColumn>(columns[i]); + if (!concrete_column) + return false; + + containers[i] = &concrete_column->getData(); + } + + ColumnVectorOrDecimal & concrete_out_data = assert_cast &>(out_data); + Container & out_container = concrete_out_data.getData(); + out_container.resize_exact(columns.size() * input_rows_count); + + for (size_t row_i = 0; row_i < input_rows_count; ++row_i) + { + const size_t base = row_i * columns.size(); + for (size_t col_i = 0; col_i < columns.size(); ++col_i) + out_container[base + col_i] = (*containers[col_i])[row_i]; + } + return true; + } + + bool executeString(const ColumnRawPtrs & columns, IColumn & out_data, size_t input_rows_count) const + { + size_t total_bytes = 0; + std::vector concrete_columns(columns.size(), nullptr); + for (size_t i = 0; i < columns.size(); ++i) + { + const ColumnString * concrete_column = checkAndGetColumn(columns[i]); + if (!concrete_column) + return false; + + total_bytes += concrete_column->getChars().size(); + concrete_columns[i] = concrete_column; + } + + ColumnString & concreate_out_data = assert_cast(out_data); + auto & out_chars = concreate_out_data.getChars(); + auto & out_offsets = concreate_out_data.getOffsets(); + out_chars.resize_exact(total_bytes); + out_offsets.resize_exact(input_rows_count * columns.size()); + + size_t curr_out_offset = 0; + for (size_t row_i = 0; row_i < input_rows_count; ++row_i) + { + const size_t base = row_i * columns.size(); + for (size_t col_i = 0; col_i < columns.size(); ++col_i) + { + StringRef ref = concrete_columns[col_i]->getDataAt(row_i); + memcpySmallAllowReadWriteOverflow15(&out_chars[curr_out_offset], ref.data, ref.size); + out_chars[curr_out_offset + ref.size] = 0; + + curr_out_offset += ref.size + 1; + out_offsets[base + col_i] = curr_out_offset; + } + } + return true; + } + + bool executeFixedString(const ColumnRawPtrs & columns, IColumn & out_data, size_t input_rows_count) const + { + size_t total_bytes = 0; + std::vector concrete_columns(columns.size(), nullptr); + for (size_t i = 0; i 
< columns.size(); ++i) + { + const ColumnFixedString * concrete_column = checkAndGetColumn(columns[i]); + if (!concrete_column) + return false; + + total_bytes += concrete_column->getChars().size(); + concrete_columns[i] = concrete_column; + } + + ColumnFixedString & concreate_out_data = assert_cast(out_data); + auto & out_chars = concreate_out_data.getChars(); + out_chars.resize_exact(total_bytes); + + const size_t n = concreate_out_data.getN(); + size_t curr_out_offset = 0; + for (size_t row_i = 0; row_i < input_rows_count; ++row_i) + { + for (size_t col_i = 0; col_i < columns.size(); ++col_i) + { + StringRef ref = concrete_columns[col_i]->getDataAt(row_i); + memcpySmallAllowReadWriteOverflow15(&out_chars[curr_out_offset], ref.data, n); + curr_out_offset += n; + } + } + return true; + } + + bool executeNullable(const ColumnRawPtrs & columns, IColumn & out_data, size_t input_rows_count) const + { + ColumnRawPtrs null_maps(columns.size(), nullptr); + ColumnRawPtrs nested_columns(columns.size(), nullptr); + for (size_t i = 0; i < columns.size(); ++i) + { + const ColumnNullable * concrete_column = checkAndGetColumn(columns[i]); + if (!concrete_column) + return false; + + null_maps[i] = &concrete_column->getNullMapColumn(); + nested_columns[i] = &concrete_column->getNestedColumn(); + } + + ColumnNullable & concreate_out_data = assert_cast(out_data); + auto & out_null_map = concreate_out_data.getNullMapColumn(); + auto & out_nested_column = concreate_out_data.getNestedColumn(); + executeAny(null_maps, out_null_map, input_rows_count); + executeAny(nested_columns, out_nested_column, input_rows_count); + return true; + } + + bool executeTuple(const ColumnRawPtrs & columns, IColumn & out_data, size_t input_rows_count) const + { + ColumnTuple * concreate_out_data = typeid_cast(&out_data); + if (!concreate_out_data) + return false; + + const size_t tuple_size = concreate_out_data->tupleSize(); + for (size_t i = 0; i < tuple_size; ++i) + { + ColumnRawPtrs elem_columns(columns.size(), nullptr); + for (size_t j = 0; j < columns.size(); ++j) + { + const ColumnTuple * concrete_column = assert_cast(columns[j]); + elem_columns[j] = &concrete_column->getColumn(i); + } + executeAny(elem_columns, concreate_out_data->getColumn(i), input_rows_count); + } + return true; + } + String getName() const override { return name; } - bool addField(DataTypePtr type_res, const Field & f, Array & arr) const; - bool use_variant_as_common_type = false; }; From 00a9c30363e16aa1893320c35052ad551b803394 Mon Sep 17 00:00:00 2001 From: taiyang-li <654010905@qq.com> Date: Sun, 4 Aug 2024 16:16:51 +0800 Subject: [PATCH 30/70] optimize map function --- src/Functions/map.cpp | 44 +++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 42 insertions(+), 2 deletions(-) diff --git a/src/Functions/map.cpp b/src/Functions/map.cpp index ebfa9d4eab4..994a03374c9 100644 --- a/src/Functions/map.cpp +++ b/src/Functions/map.cpp @@ -32,11 +32,18 @@ class FunctionMap : public IFunction public: static constexpr auto name = "map"; - explicit FunctionMap(bool use_variant_as_common_type_) : use_variant_as_common_type(use_variant_as_common_type_) {} + explicit FunctionMap(ContextPtr context_) + : context(std::move(context_)) + , use_variant_as_common_type( + context->getSettingsRef().allow_experimental_variant_type && context->getSettingsRef().use_variant_as_common_type) + , function_array(FunctionFactory::instance().get("array", context)) + , function_map_from_arrays(FunctionFactory::instance().get("mapFromArrays", context)) + { + } static 
FunctionPtr create(ContextPtr context) { - return std::make_shared(context->getSettingsRef().allow_experimental_variant_type && context->getSettingsRef().use_variant_as_common_type); + return std::make_shared(std::move(context)); } String getName() const override @@ -97,7 +104,36 @@ public: ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr & result_type, size_t input_rows_count) const override { size_t num_elements = arguments.size(); + if (num_elements == 0) + return result_type->createColumnConstWithDefaultValue(input_rows_count); + ColumnsWithTypeAndName key_args; + ColumnsWithTypeAndName value_args; + for (size_t i = 0; i < num_elements; i += 2) + { + key_args.emplace_back(arguments[i]); + value_args.emplace_back(arguments[i+1]); + } + + const auto & result_type_map = static_cast(*result_type); + const DataTypePtr & key_type = result_type_map.getKeyType(); + const DataTypePtr & value_type = result_type_map.getValueType(); + const DataTypePtr & key_array_type = std::make_shared(key_type); + const DataTypePtr & value_array_type = std::make_shared(value_type); + + /// key_array = array(args[0], args[2]...) + ColumnPtr key_array = function_array->build(key_args)->execute(key_args, key_array_type, input_rows_count); + /// value_array = array(args[1], args[3]...) + ColumnPtr value_array = function_array->build(value_args)->execute(value_args, value_array_type, input_rows_count); + + /// result = mapFromArrays(key_array, value_array) + ColumnsWithTypeAndName map_args{{key_array, key_array_type, ""}, {value_array, value_array_type, ""}}; + return function_map_from_arrays->build(map_args)->execute(map_args, result_type, input_rows_count); + } + /* + ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr & result_type, size_t input_rows_count) const override + { + size_t num_elements = arguments.size(); if (num_elements == 0) return result_type->createColumnConstWithDefaultValue(input_rows_count); @@ -164,9 +200,13 @@ public: return ColumnMap::create(nested_column); } + */ private: + ContextPtr context; bool use_variant_as_common_type = false; + FunctionOverloadResolverPtr function_array; + FunctionOverloadResolverPtr function_map_from_arrays; }; /// mapFromArrays(keys, values) is a function that allows you to make key-value pair from a pair of arrays From 122c9794a65cb6d0579ec41f6fb3b4848e5c989b Mon Sep 17 00:00:00 2001 From: taiyang-li <654010905@qq.com> Date: Sun, 4 Aug 2024 17:04:04 +0800 Subject: [PATCH 31/70] remove useless codes --- src/Functions/map.cpp | 71 ------------------------------------------- 1 file changed, 71 deletions(-) diff --git a/src/Functions/map.cpp b/src/Functions/map.cpp index 994a03374c9..4b1e41af796 100644 --- a/src/Functions/map.cpp +++ b/src/Functions/map.cpp @@ -130,77 +130,6 @@ public: ColumnsWithTypeAndName map_args{{key_array, key_array_type, ""}, {value_array, value_array_type, ""}}; return function_map_from_arrays->build(map_args)->execute(map_args, result_type, input_rows_count); } - /* - ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr & result_type, size_t input_rows_count) const override - { - size_t num_elements = arguments.size(); - if (num_elements == 0) - return result_type->createColumnConstWithDefaultValue(input_rows_count); - - const auto & result_type_map = static_cast(*result_type); - const DataTypePtr & key_type = result_type_map.getKeyType(); - const DataTypePtr & value_type = result_type_map.getValueType(); - - Columns columns_holder(num_elements); - 
ColumnRawPtrs column_ptrs(num_elements); - - for (size_t i = 0; i < num_elements; ++i) - { - const auto & arg = arguments[i]; - const auto to_type = i % 2 == 0 ? key_type : value_type; - - ColumnPtr preprocessed_column = castColumn(arg, to_type); - preprocessed_column = preprocessed_column->convertToFullColumnIfConst(); - - columns_holder[i] = std::move(preprocessed_column); - column_ptrs[i] = columns_holder[i].get(); - } - - /// Create and fill the result map. - - MutableColumnPtr keys_data = key_type->createColumn(); - MutableColumnPtr values_data = value_type->createColumn(); - MutableColumnPtr offsets = DataTypeUInt64().createColumn(); - - size_t total_elements = input_rows_count * num_elements / 2; - keys_data->reserve(total_elements); - values_data->reserve(total_elements); - auto & offsets_data = assert_cast(*offsets).getData(); - offsets_data.resize_exact(input_rows_count); - - IColumn::Offset current_offset = 0; - if (num_elements == 2) - { - for (size_t i = 0; i < input_rows_count; ++i) - { - ++current_offset; - offsets_data[i] = current_offset; - } - keys_data->insertRangeFrom(*column_ptrs[0], 0, input_rows_count); - values_data->insertRangeFrom(*column_ptrs[1], 0, input_rows_count); - } - else - { - for (size_t i = 0; i < input_rows_count; ++i) - { - for (size_t j = 0; j < num_elements; j += 2) - { - keys_data->insertFrom(*column_ptrs[j], i); - values_data->insertFrom(*column_ptrs[j + 1], i); - } - - current_offset += num_elements / 2; - offsets_data[i] = current_offset; - } - } - - auto nested_column = ColumnArray::create( - ColumnTuple::create(Columns{std::move(keys_data), std::move(values_data)}), - std::move(offsets)); - - return ColumnMap::create(nested_column); - } - */ private: ContextPtr context; From 045551f5098e36822c69cc2bb5852d7d16cfe41b Mon Sep 17 00:00:00 2001 From: taiyang-li <654010905@qq.com> Date: Mon, 5 Aug 2024 11:33:58 +0800 Subject: [PATCH 32/70] fix bugs --- src/Functions/map.cpp | 1 + 1 file changed, 1 insertion(+) diff --git a/src/Functions/map.cpp b/src/Functions/map.cpp index f49d66c0329..d9643087b11 100644 --- a/src/Functions/map.cpp +++ b/src/Functions/map.cpp @@ -157,6 +157,7 @@ public: bool isSuitableForShortCircuitArgumentsExecution(const DataTypesWithConstInfo & /*arguments*/) const override { return false; } bool useDefaultImplementationForNulls() const override { return false; } bool useDefaultImplementationForConstants() const override { return true; } + bool useDefaultImplementationForLowCardinalityColumns() const override { return false; } DataTypePtr getReturnTypeImpl(const DataTypes & arguments) const override { From f0f1015162ddfb7ef2e8046fa9c952c5c7e6b89a Mon Sep 17 00:00:00 2001 From: taiyang-li <654010905@qq.com> Date: Wed, 7 Aug 2024 10:47:13 +0800 Subject: [PATCH 33/70] fix style --- src/Functions/array/array.cpp | 30 +++++++++++++++--------------- 1 file changed, 15 insertions(+), 15 deletions(-) diff --git a/src/Functions/array/array.cpp b/src/Functions/array/array.cpp index 21b1841f3ed..f193acafe06 100644 --- a/src/Functions/array/array.cpp +++ b/src/Functions/array/array.cpp @@ -172,9 +172,9 @@ private: concrete_columns[i] = concrete_column; } - ColumnString & concreate_out_data = assert_cast(out_data); - auto & out_chars = concreate_out_data.getChars(); - auto & out_offsets = concreate_out_data.getOffsets(); + ColumnString & concrete_out_data = assert_cast(out_data); + auto & out_chars = concrete_out_data.getChars(); + auto & out_offsets = concrete_out_data.getOffsets(); out_chars.resize_exact(total_bytes); 
out_offsets.resize_exact(input_rows_count * columns.size()); @@ -197,7 +197,6 @@ private: bool executeFixedString(const ColumnRawPtrs & columns, IColumn & out_data, size_t input_rows_count) const { - size_t total_bytes = 0; std::vector concrete_columns(columns.size(), nullptr); for (size_t i = 0; i < columns.size(); ++i) { @@ -205,15 +204,16 @@ private: if (!concrete_column) return false; - total_bytes += concrete_column->getChars().size(); concrete_columns[i] = concrete_column; } - ColumnFixedString & concreate_out_data = assert_cast(out_data); - auto & out_chars = concreate_out_data.getChars(); + ColumnFixedString & concrete_out_data = assert_cast(out_data); + auto & out_chars = concrete_out_data.getChars(); + + const size_t n = concrete_out_data.getN(); + size_t total_bytes = concrete_out_data.getN() * columns.size() * input_rows_count; out_chars.resize_exact(total_bytes); - const size_t n = concreate_out_data.getN(); size_t curr_out_offset = 0; for (size_t row_i = 0; row_i < input_rows_count; ++row_i) { @@ -241,9 +241,9 @@ private: nested_columns[i] = &concrete_column->getNestedColumn(); } - ColumnNullable & concreate_out_data = assert_cast(out_data); - auto & out_null_map = concreate_out_data.getNullMapColumn(); - auto & out_nested_column = concreate_out_data.getNestedColumn(); + ColumnNullable & concrete_out_data = assert_cast(out_data); + auto & out_null_map = concrete_out_data.getNullMapColumn(); + auto & out_nested_column = concrete_out_data.getNestedColumn(); executeAny(null_maps, out_null_map, input_rows_count); executeAny(nested_columns, out_nested_column, input_rows_count); return true; @@ -251,11 +251,11 @@ private: bool executeTuple(const ColumnRawPtrs & columns, IColumn & out_data, size_t input_rows_count) const { - ColumnTuple * concreate_out_data = typeid_cast(&out_data); - if (!concreate_out_data) + ColumnTuple * concrete_out_data = typeid_cast(&out_data); + if (!concrete_out_data) return false; - const size_t tuple_size = concreate_out_data->tupleSize(); + const size_t tuple_size = concrete_out_data->tupleSize(); for (size_t i = 0; i < tuple_size; ++i) { ColumnRawPtrs elem_columns(columns.size(), nullptr); @@ -264,7 +264,7 @@ private: const ColumnTuple * concrete_column = assert_cast(columns[j]); elem_columns[j] = &concrete_column->getColumn(i); } - executeAny(elem_columns, concreate_out_data->getColumn(i), input_rows_count); + executeAny(elem_columns, concrete_out_data->getColumn(i), input_rows_count); } return true; } From cb9039e91d073942ff07a68e10336f8db38f4a79 Mon Sep 17 00:00:00 2001 From: Jiebin Sun Date: Tue, 6 Aug 2024 19:05:12 +0800 Subject: [PATCH 34/70] Add thread pool and cancellation if merge data with key This patch will add thread pool and cancellation if merge data with key. During the merge, if the data size is large, we may convert the singleLevelHash to twoLevelHash and merge in parallel. Test the patch with 2 x 80 vCPUs, Q8 and Q9 has got 10.3% and 7.6% performance improvement. 
Signed-off-by: Jiebin Sun --- .../AggregateFunctionUniq.h | 2 ++ src/AggregateFunctions/IAggregateFunction.h | 12 +++++++--- src/AggregateFunctions/UniqExactSet.h | 7 ++++++ src/Interpreters/Aggregator.cpp | 24 ++++++++++++------- src/Interpreters/Aggregator.h | 4 ++-- .../Transforms/AggregatingTransform.cpp | 2 +- 6 files changed, 36 insertions(+), 15 deletions(-) diff --git a/src/AggregateFunctions/AggregateFunctionUniq.h b/src/AggregateFunctions/AggregateFunctionUniq.h index cef23f766c7..35d6e599e38 100644 --- a/src/AggregateFunctions/AggregateFunctionUniq.h +++ b/src/AggregateFunctions/AggregateFunctionUniq.h @@ -459,6 +459,8 @@ public: bool isParallelizeMergePrepareNeeded() const override { return is_parallelize_merge_prepare_needed; } + constexpr static bool parallelizeMergeWithKey() { return true; } + void parallelizeMergePrepare(AggregateDataPtrs & places, ThreadPool & thread_pool, std::atomic & is_cancelled) const override { if constexpr (is_parallelize_merge_prepare_needed) diff --git a/src/AggregateFunctions/IAggregateFunction.h b/src/AggregateFunctions/IAggregateFunction.h index ee227db6d9d..f8e7051d635 100644 --- a/src/AggregateFunctions/IAggregateFunction.h +++ b/src/AggregateFunctions/IAggregateFunction.h @@ -145,6 +145,8 @@ public: virtual bool isParallelizeMergePrepareNeeded() const { return false; } + constexpr static bool parallelizeMergeWithKey() { return false; } + virtual void parallelizeMergePrepare(AggregateDataPtrs & /*places*/, ThreadPool & /*thread_pool*/, std::atomic & /*is_cancelled*/) const { throw Exception(ErrorCodes::NOT_IMPLEMENTED, "parallelizeMergePrepare() with thread pool parameter isn't implemented for {} ", getName()); @@ -169,7 +171,7 @@ public: /// Merges states (on which src places points to) with other states (on which dst places points to) of current aggregation function /// then destroy states (on which src places points to). - virtual void mergeAndDestroyBatch(AggregateDataPtr * dst_places, AggregateDataPtr * src_places, size_t size, size_t offset, Arena * arena) const = 0; + virtual void mergeAndDestroyBatch(AggregateDataPtr * dst_places, AggregateDataPtr * src_places, size_t size, size_t offset, ThreadPool & thread_pool, std::atomic & is_cancelled, Arena * arena) const = 0; /// Serializes state (to transmit it over the network, for example). 
virtual void serialize(ConstAggregateDataPtr __restrict place, WriteBuffer & buf, std::optional version = std::nullopt) const = 0; /// NOLINT @@ -499,11 +501,15 @@ public: static_cast(this)->merge(places[i] + place_offset, rhs[i], arena); } - void mergeAndDestroyBatch(AggregateDataPtr * dst_places, AggregateDataPtr * rhs_places, size_t size, size_t offset, Arena * arena) const override + void mergeAndDestroyBatch(AggregateDataPtr * dst_places, AggregateDataPtr * rhs_places, size_t size, size_t offset, ThreadPool & thread_pool, std::atomic & is_cancelled, Arena * arena) const override { for (size_t i = 0; i < size; ++i) { - static_cast(this)->merge(dst_places[i] + offset, rhs_places[i] + offset, arena); + if constexpr (Derived::parallelizeMergeWithKey()) + static_cast(this)->merge(dst_places[i] + offset, rhs_places[i] + offset, thread_pool, is_cancelled, arena); + else + static_cast(this)->merge(dst_places[i] + offset, rhs_places[i] + offset, arena); + static_cast(this)->destroy(rhs_places[i] + offset); } } diff --git a/src/AggregateFunctions/UniqExactSet.h b/src/AggregateFunctions/UniqExactSet.h index 2ae8c3a8386..25c6f7ac55f 100644 --- a/src/AggregateFunctions/UniqExactSet.h +++ b/src/AggregateFunctions/UniqExactSet.h @@ -101,6 +101,13 @@ public: auto merge(const UniqExactSet & other, ThreadPool * thread_pool = nullptr, std::atomic * is_cancelled = nullptr) { + /// If the size is large, we may convert the singleLevelHash to twoLevelHash and merge in parallel. + if (other.size() > 40000) + { + if (isSingleLevel()) + convertToTwoLevel(); + } + if (isSingleLevel() && other.isTwoLevel()) convertToTwoLevel(); diff --git a/src/Interpreters/Aggregator.cpp b/src/Interpreters/Aggregator.cpp index e073b7a49b6..71be3c1c30c 100644 --- a/src/Interpreters/Aggregator.cpp +++ b/src/Interpreters/Aggregator.cpp @@ -2371,7 +2371,7 @@ void NO_INLINE Aggregator::mergeDataNullKey( template void NO_INLINE Aggregator::mergeDataImpl( - Table & table_dst, Table & table_src, Arena * arena, bool use_compiled_functions [[maybe_unused]], bool prefetch) const + Table & table_dst, Table & table_src, Arena * arena, bool use_compiled_functions [[maybe_unused]], bool prefetch, ThreadPool & thread_pool, std::atomic & is_cancelled) const { if constexpr (Method::low_cardinality_optimization || Method::one_key_nullable_optimization) mergeDataNullKey(table_dst, table_src, arena); @@ -2410,7 +2410,7 @@ void NO_INLINE Aggregator::mergeDataImpl( { if (!is_aggregate_function_compiled[i]) aggregate_functions[i]->mergeAndDestroyBatch( - dst_places.data(), src_places.data(), dst_places.size(), offsets_of_aggregate_states[i], arena); + dst_places.data(), src_places.data(), dst_places.size(), offsets_of_aggregate_states[i], thread_pool, is_cancelled, arena); } return; @@ -2420,7 +2420,7 @@ void NO_INLINE Aggregator::mergeDataImpl( for (size_t i = 0; i < params.aggregates_size; ++i) { aggregate_functions[i]->mergeAndDestroyBatch( - dst_places.data(), src_places.data(), dst_places.size(), offsets_of_aggregate_states[i], arena); + dst_places.data(), src_places.data(), dst_places.size(), offsets_of_aggregate_states[i], thread_pool, is_cancelled, arena); } } @@ -2535,8 +2535,10 @@ void NO_INLINE Aggregator::mergeWithoutKeyDataImpl( template void NO_INLINE Aggregator::mergeSingleLevelDataImpl( - ManyAggregatedDataVariants & non_empty_data) const + ManyAggregatedDataVariants & non_empty_data, std::atomic & is_cancelled) const { + ThreadPool thread_pool{CurrentMetrics::AggregatorThreads, CurrentMetrics::AggregatorThreadsActive, 
CurrentMetrics::AggregatorThreadsScheduled, params.max_threads}; + AggregatedDataVariantsPtr & res = non_empty_data[0]; bool no_more_keys = false; @@ -2557,13 +2559,13 @@ void NO_INLINE Aggregator::mergeSingleLevelDataImpl( if (compiled_aggregate_functions_holder) { mergeDataImpl( - getDataVariant(*res).data, getDataVariant(current).data, res->aggregates_pool, true, prefetch); + getDataVariant(*res).data, getDataVariant(current).data, res->aggregates_pool, true, prefetch, thread_pool, is_cancelled); } else #endif { mergeDataImpl( - getDataVariant(*res).data, getDataVariant(current).data, res->aggregates_pool, false, prefetch); + getDataVariant(*res).data, getDataVariant(current).data, res->aggregates_pool, false, prefetch, thread_pool, is_cancelled); } } else if (res->without_key) @@ -2589,7 +2591,7 @@ void NO_INLINE Aggregator::mergeSingleLevelDataImpl( #define M(NAME) \ template void NO_INLINE Aggregator::mergeSingleLevelDataImpl( \ - ManyAggregatedDataVariants & non_empty_data) const; + ManyAggregatedDataVariants & non_empty_data, std::atomic & is_cancelled) const; APPLY_FOR_VARIANTS_SINGLE_LEVEL(M) #undef M @@ -2597,6 +2599,8 @@ template void NO_INLINE Aggregator::mergeBucketImpl( ManyAggregatedDataVariants & data, Int32 bucket, Arena * arena, std::atomic & is_cancelled) const { + ThreadPool thread_pool{CurrentMetrics::AggregatorThreads, CurrentMetrics::AggregatorThreadsActive, CurrentMetrics::AggregatorThreadsScheduled, params.max_threads}; + /// We merge all aggregation results to the first. AggregatedDataVariantsPtr & res = data[0]; @@ -2613,7 +2617,7 @@ void NO_INLINE Aggregator::mergeBucketImpl( if (compiled_aggregate_functions_holder) { mergeDataImpl( - getDataVariant(*res).data.impls[bucket], getDataVariant(current).data.impls[bucket], arena, true, prefetch); + getDataVariant(*res).data.impls[bucket], getDataVariant(current).data.impls[bucket], arena, true, prefetch, thread_pool, is_cancelled); } else #endif @@ -2623,7 +2627,9 @@ void NO_INLINE Aggregator::mergeBucketImpl( getDataVariant(current).data.impls[bucket], arena, false, - prefetch); + prefetch, + thread_pool, + is_cancelled); } } } diff --git a/src/Interpreters/Aggregator.h b/src/Interpreters/Aggregator.h index f4f1e9a1df3..c357da2d9de 100644 --- a/src/Interpreters/Aggregator.h +++ b/src/Interpreters/Aggregator.h @@ -455,7 +455,7 @@ private: /// Merge data from hash table `src` into `dst`. template - void mergeDataImpl(Table & table_dst, Table & table_src, Arena * arena, bool use_compiled_functions, bool prefetch) const; + void mergeDataImpl(Table & table_dst, Table & table_src, Arena * arena, bool use_compiled_functions, bool prefetch, ThreadPool & thread_pool, std::atomic & is_cancelled) const; /// Merge data from hash table `src` into `dst`, but only for keys that already exist in dst. In other cases, merge the data into `overflows`. 
template @@ -478,7 +478,7 @@ private: template void mergeSingleLevelDataImpl( - ManyAggregatedDataVariants & non_empty_data) const; + ManyAggregatedDataVariants & non_empty_data, std::atomic & is_cancelled) const; template using ConvertToBlockRes = std::conditional_t; diff --git a/src/Processors/Transforms/AggregatingTransform.cpp b/src/Processors/Transforms/AggregatingTransform.cpp index cdbe194cfac..5e7fba6d80c 100644 --- a/src/Processors/Transforms/AggregatingTransform.cpp +++ b/src/Processors/Transforms/AggregatingTransform.cpp @@ -495,7 +495,7 @@ private: #define M(NAME) \ else if (first->type == AggregatedDataVariants::Type::NAME) \ - params->aggregator.mergeSingleLevelDataImplNAME)::element_type>(*data); + params->aggregator.mergeSingleLevelDataImplNAME)::element_type>(*data, shared_data->is_cancelled); if (false) {} // NOLINT APPLY_FOR_VARIANTS_SINGLE_LEVEL(M) #undef M From 83fa51f400fba43cd4bb961419f7dd572ec240a0 Mon Sep 17 00:00:00 2001 From: Alexey Korepanov Date: Fri, 23 Aug 2024 21:42:54 +0200 Subject: [PATCH 35/70] PR fixes: update documentation on JSONCompactWithProgress format --- docs/en/interfaces/formats.md | 38 ++++++++++++++++++++++++++++++++++- 1 file changed, 37 insertions(+), 1 deletion(-) diff --git a/docs/en/interfaces/formats.md b/docs/en/interfaces/formats.md index 8795b71e0ac..f2ca803453e 100644 --- a/docs/en/interfaces/formats.md +++ b/docs/en/interfaces/formats.md @@ -930,7 +930,43 @@ Columns that are not present in the block will be filled with default values (yo In this format, ClickHouse outputs each row as a separated, newline-delimited JSON Object. -Each row is either a metadata object, data object, progress information or statistics object. +Each row is either a metadata object, data object, progress information or statistics object: + +1. **Metadata Object (`meta`)** + - Describes the structure of the data rows. + - Fields: `name` (column name), `type` (data type, e.g., `UInt32`, `String`, etc.). + - Example: `{"meta": [{"name":"id", "type":"UInt32"}, {"name":"name", "type":"String"}]}` + - Appears before any data objects. + +2. **Data Object (`data`)** + - Represents a row of query results. + - Fields: An array with values corresponding to the columns defined in the metadata. + - Example: `{"data":["1", "John Doe"]}` + - Appears after the metadata object, one per row. + +3. **Progress Information Object (`progress`)** + - Provides real-time progress feedback during query execution. + - Fields: `read_rows`, `read_bytes`, `written_rows`, `written_bytes`, `total_rows_to_read`, `result_rows`, `result_bytes`, `elapsed_ns`. + - Example: `{"progress":{"read_rows":"8","read_bytes":"168"}}` + - May appear intermittently. + +4. **Statistics Object (`statistics`)** + - Summarizes query execution statistics. + - Fields: `rows`, `rows_before_limit_at_least`, `elapsed`, `rows_read`, `bytes_read`. + - Example: `{"statistics": {"rows":2, "elapsed":0.001995, "rows_read":8}}` + - Appears at the end. + +5. **Exception Object (`exception`)** + - Represents an error that occurred during query execution. + - Fields: A single text field containing the error message. + - Example: `{"exception": "Code: 395. DB::Exception: Value passed to 'throwIf' function is non-zero..."}` + - Appears when an error is encountered. + +6. **Totals Object (`totals`)** + - Provides the totals for each numeric column in the result set. + - Fields: An array with total values corresponding to the columns defined in the metadata. 
+ - Example: `{"totals": ["", "3"]}` + - Appears at the end of the data rows, if applicable. Example: From b772140514aa47ada3556f1d24712eae734a5645 Mon Sep 17 00:00:00 2001 From: Alexey Korepanov Date: Fri, 23 Aug 2024 22:56:24 +0200 Subject: [PATCH 36/70] Fix PR comments: check exception in the test --- .../0_stateless/03174_json_compact_with_progress.reference | 2 ++ tests/queries/0_stateless/03174_json_compact_with_progress.sh | 4 ++++ 2 files changed, 6 insertions(+) diff --git a/tests/queries/0_stateless/03174_json_compact_with_progress.reference b/tests/queries/0_stateless/03174_json_compact_with_progress.reference index b45f296e5c0..fe51e484bc1 100644 --- a/tests/queries/0_stateless/03174_json_compact_with_progress.reference +++ b/tests/queries/0_stateless/03174_json_compact_with_progress.reference @@ -11,3 +11,5 @@ {"data":["c", "1"]} {"totals": ["", "3"]} {"statistics": {"rows":3, "elapsed":ELAPSED, "rows_read":3, "bytes_read":30}} +3 +Value passed to 'throwIf' function is non-zero: diff --git a/tests/queries/0_stateless/03174_json_compact_with_progress.sh b/tests/queries/0_stateless/03174_json_compact_with_progress.sh index 8f524cdff76..b15dc7cfdb2 100755 --- a/tests/queries/0_stateless/03174_json_compact_with_progress.sh +++ b/tests/queries/0_stateless/03174_json_compact_with_progress.sh @@ -16,4 +16,8 @@ $CLICKHOUSE_CLIENT -q "SELECT 2;" # Check Totals $CLICKHOUSE_CLIENT -q "SELECT name, count() AS c FROM test_table GROUP BY name WITH TOTALS ORDER BY name FORMAT JSONCompactWithProgress settings max_block_size=2;" | grep -v --text "progress" | sed -E 's/"elapsed":[0-9]+\.[0-9]+/"elapsed":ELAPSED/g' +$CLICKHOUSE_CLIENT -q "SELECT 3;" +# Check exceptions +${CLICKHOUSE_CURL} -sS "$CLICKHOUSE_URL" -d "SELECT throwIf(number = 15), 1::Int64 as a, '\"' from numbers(100) format JSONCompactWithProgress settings output_format_json_quote_64bit_integers=1, max_block_size=10" | grep "exception" | cut -c42-88 + $CLICKHOUSE_CLIENT -q "DROP TABLE IF EXISTS test_table;" From 06f0267ed142d76109b2d6d48d3c413bdaf0fc17 Mon Sep 17 00:00:00 2001 From: Alexey Korepanov Date: Sun, 25 Aug 2024 11:27:43 +0200 Subject: [PATCH 37/70] Do not show statistics if exception happens --- ...JSONCompactWithProgressRowOutputFormat.cpp | 22 +++++++++---------- 1 file changed, 10 insertions(+), 12 deletions(-) diff --git a/src/Processors/Formats/Impl/JSONCompactWithProgressRowOutputFormat.cpp b/src/Processors/Formats/Impl/JSONCompactWithProgressRowOutputFormat.cpp index 0814e5f45d6..cb3398196f4 100644 --- a/src/Processors/Formats/Impl/JSONCompactWithProgressRowOutputFormat.cpp +++ b/src/Processors/Formats/Impl/JSONCompactWithProgressRowOutputFormat.cpp @@ -117,18 +117,16 @@ void JSONCompactWithProgressRowOutputFormat::writeProgress() void JSONCompactWithProgressRowOutputFormat::finalizeImpl() { - JSONUtils::writeCompactAdditionalInfo( - row_count, - statistics.rows_before_limit, - statistics.applied_limit, - statistics.watch, - statistics.progress, - settings.write_statistics && exception_message.empty(), - *ostr); - - if (!exception_message.empty()) - { - writeCString("\n", *ostr); + if (exception_message.empty()) { + JSONUtils::writeCompactAdditionalInfo( + row_count, + statistics.rows_before_limit, + statistics.applied_limit, + statistics.watch, + statistics.progress, + settings.write_statistics && exception_message.empty(), + *ostr); + } else { JSONUtils::writeCompactObjectStart(*ostr); JSONUtils::writeException(exception_message, *ostr, settings, 0); JSONUtils::writeCompactObjectEnd(*ostr); From 
58aab9b8c789090834dd1db7bb00568db7b66cac Mon Sep 17 00:00:00 2001 From: Alexey Korepanov Date: Sun, 25 Aug 2024 11:29:07 +0200 Subject: [PATCH 38/70] Remove wrong bracket in the meta section --- .../Formats/Impl/JSONCompactWithProgressRowOutputFormat.cpp | 2 +- .../0_stateless/03174_json_compact_with_progress.reference | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/src/Processors/Formats/Impl/JSONCompactWithProgressRowOutputFormat.cpp b/src/Processors/Formats/Impl/JSONCompactWithProgressRowOutputFormat.cpp index cb3398196f4..b5cbd6fb232 100644 --- a/src/Processors/Formats/Impl/JSONCompactWithProgressRowOutputFormat.cpp +++ b/src/Processors/Formats/Impl/JSONCompactWithProgressRowOutputFormat.cpp @@ -19,7 +19,7 @@ void JSONCompactWithProgressRowOutputFormat::writePrefix() JSONUtils::writeCompactObjectStart(*ostr); JSONUtils::writeCompactMetadata(names, types, settings, *ostr); JSONUtils::writeCompactObjectEnd(*ostr); - writeCString("}\n", *ostr); + writeCString("\n", *ostr); } void JSONCompactWithProgressRowOutputFormat::writeField(const IColumn & column, const ISerialization & serialization, size_t row_num) diff --git a/tests/queries/0_stateless/03174_json_compact_with_progress.reference b/tests/queries/0_stateless/03174_json_compact_with_progress.reference index fe51e484bc1..cdbe7cfcb3e 100644 --- a/tests/queries/0_stateless/03174_json_compact_with_progress.reference +++ b/tests/queries/0_stateless/03174_json_compact_with_progress.reference @@ -1,11 +1,11 @@ 1 -{"meta": [{"name":"value", "type":"UInt8"}, {"name":"name", "type":"String"}]}} +{"meta": [{"name":"value", "type":"UInt8"}, {"name":"name", "type":"String"}]} {"data":[1, "a"]} {"data":[2, "b"]} {"data":[3, "c"]} {"statistics": {"rows":3, "elapsed":ELAPSED, "rows_read":3, "bytes_read":33}} 2 -{"meta": [{"name":"name", "type":"String"}, {"name":"c", "type":"UInt64"}]}} +{"meta": [{"name":"name", "type":"String"}, {"name":"c", "type":"UInt64"}]} {"data":["a", "1"]} {"data":["b", "1"]} {"data":["c", "1"]} From af3f600b5cca7cadb304e0f69fc281b7015deca9 Mon Sep 17 00:00:00 2001 From: Alexey Korepanov Date: Sun, 25 Aug 2024 11:35:35 +0200 Subject: [PATCH 39/70] Fix commas in the statistics section --- src/Formats/JSONUtils.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/Formats/JSONUtils.cpp b/src/Formats/JSONUtils.cpp index 0918658cf6f..6c2c6fc3700 100644 --- a/src/Formats/JSONUtils.cpp +++ b/src/Formats/JSONUtils.cpp @@ -563,17 +563,17 @@ namespace JSONUtils writeCompactObjectStart(out, 0, "statistics"); writeTitle("rows", out, 0, ""); writeIntText(rows, out); - writeFieldCompactDelimiter(out); if (applied_limit) { + writeFieldCompactDelimiter(out); writeTitle("rows_before_limit_at_least", out, 0, ""); writeIntText(rows_before_limit, out); - writeFieldCompactDelimiter(out); } if (write_statistics) { + writeFieldCompactDelimiter(out); writeTitle("elapsed", out, 0, ""); writeText(watch.elapsedSeconds(), out); writeFieldCompactDelimiter(out); From 0e4f2b17cc75a1f09994ad583df69fd067e582f8 Mon Sep 17 00:00:00 2001 From: Alexey Korepanov Date: Sun, 25 Aug 2024 11:38:29 +0200 Subject: [PATCH 40/70] Remove unnecessary check --- .../Formats/Impl/JSONCompactWithProgressRowOutputFormat.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Processors/Formats/Impl/JSONCompactWithProgressRowOutputFormat.cpp b/src/Processors/Formats/Impl/JSONCompactWithProgressRowOutputFormat.cpp index b5cbd6fb232..d1d7355724f 100644 --- 
a/src/Processors/Formats/Impl/JSONCompactWithProgressRowOutputFormat.cpp +++ b/src/Processors/Formats/Impl/JSONCompactWithProgressRowOutputFormat.cpp @@ -124,7 +124,7 @@ void JSONCompactWithProgressRowOutputFormat::finalizeImpl() statistics.applied_limit, statistics.watch, statistics.progress, - settings.write_statistics && exception_message.empty(), + settings.write_statistics, *ostr); } else { JSONUtils::writeCompactObjectStart(*ostr); From 312d32aaf972e01a88fceb2fea60fce424bfc49f Mon Sep 17 00:00:00 2001 From: Alexey Korepanov Date: Sun, 25 Aug 2024 12:14:18 +0200 Subject: [PATCH 41/70] Fix formatting --- .../Impl/JSONCompactWithProgressRowOutputFormat.cpp | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/src/Processors/Formats/Impl/JSONCompactWithProgressRowOutputFormat.cpp b/src/Processors/Formats/Impl/JSONCompactWithProgressRowOutputFormat.cpp index d1d7355724f..e90864ecdf3 100644 --- a/src/Processors/Formats/Impl/JSONCompactWithProgressRowOutputFormat.cpp +++ b/src/Processors/Formats/Impl/JSONCompactWithProgressRowOutputFormat.cpp @@ -117,7 +117,8 @@ void JSONCompactWithProgressRowOutputFormat::writeProgress() void JSONCompactWithProgressRowOutputFormat::finalizeImpl() { - if (exception_message.empty()) { + if (exception_message.empty()) + { JSONUtils::writeCompactAdditionalInfo( row_count, statistics.rows_before_limit, @@ -126,7 +127,9 @@ void JSONCompactWithProgressRowOutputFormat::finalizeImpl() statistics.progress, settings.write_statistics, *ostr); - } else { + } + else + { JSONUtils::writeCompactObjectStart(*ostr); JSONUtils::writeException(exception_message, *ostr, settings, 0); JSONUtils::writeCompactObjectEnd(*ostr); From a3522072d4b38817559541353f3b70625f92b918 Mon Sep 17 00:00:00 2001 From: avogar Date: Mon, 2 Sep 2024 13:08:34 +0000 Subject: [PATCH 42/70] Respect format settings in Values format during conversion from expression to the destination type --- src/Interpreters/convertFieldToType.cpp | 30 +++++++++---------- src/Interpreters/convertFieldToType.h | 7 +++-- .../Formats/Impl/ValuesBlockInputFormat.cpp | 2 +- ...at_settings_in_fields_conversion.reference | 1 + ...t_format_settings_in_fields_conversion.sql | 7 +++++ 5 files changed, 28 insertions(+), 19 deletions(-) create mode 100644 tests/queries/0_stateless/03231_values_respect_format_settings_in_fields_conversion.reference create mode 100644 tests/queries/0_stateless/03231_values_respect_format_settings_in_fields_conversion.sql diff --git a/src/Interpreters/convertFieldToType.cpp b/src/Interpreters/convertFieldToType.cpp index 7e1b4e2fb0e..737353095b8 100644 --- a/src/Interpreters/convertFieldToType.cpp +++ b/src/Interpreters/convertFieldToType.cpp @@ -164,7 +164,7 @@ Field convertDecimalType(const Field & from, const To & type) } -Field convertFieldToTypeImpl(const Field & src, const IDataType & type, const IDataType * from_type_hint) +Field convertFieldToTypeImpl(const Field & src, const IDataType & type, const IDataType * from_type_hint, const FormatSettings & format_settings) { if (from_type_hint && from_type_hint->equals(type)) { @@ -359,7 +359,7 @@ Field convertFieldToTypeImpl(const Field & src, const IDataType & type, const ID Array res(src_arr_size); for (size_t i = 0; i < src_arr_size; ++i) { - res[i] = convertFieldToType(src_arr[i], element_type); + res[i] = convertFieldToType(src_arr[i], element_type, nullptr, format_settings); if (res[i].isNull() && !canContainNull(element_type)) { // See the comment for Tuples below. 
@@ -387,7 +387,7 @@ Field convertFieldToTypeImpl(const Field & src, const IDataType & type, const ID for (size_t i = 0; i < dst_tuple_size; ++i) { const auto & element_type = *(type_tuple->getElements()[i]); - res[i] = convertFieldToType(src_tuple[i], element_type); + res[i] = convertFieldToType(src_tuple[i], element_type, nullptr, format_settings); if (res[i].isNull() && !canContainNull(element_type)) { /* @@ -435,12 +435,12 @@ Field convertFieldToTypeImpl(const Field & src, const IDataType & type, const ID Tuple updated_entry(2); - updated_entry[0] = convertFieldToType(key, key_type); + updated_entry[0] = convertFieldToType(key, key_type, nullptr, format_settings); if (updated_entry[0].isNull() && !canContainNull(key_type)) have_unconvertible_element = true; - updated_entry[1] = convertFieldToType(value, value_type); + updated_entry[1] = convertFieldToType(value, value_type, nullptr, format_settings); if (updated_entry[1].isNull() && !canContainNull(value_type)) have_unconvertible_element = true; @@ -551,7 +551,7 @@ Field convertFieldToTypeImpl(const Field & src, const IDataType & type, const ID ReadBufferFromString in_buffer(src.safeGet()); try { - type_to_parse->getDefaultSerialization()->deserializeWholeText(*col, in_buffer, FormatSettings{}); + type_to_parse->getDefaultSerialization()->deserializeWholeText(*col, in_buffer, format_settings); } catch (Exception & e) { @@ -563,7 +563,7 @@ Field convertFieldToTypeImpl(const Field & src, const IDataType & type, const ID } Field parsed = (*col)[0]; - return convertFieldToType(parsed, type, from_type_hint); + return convertFieldToType(parsed, type, from_type_hint, format_settings); } throw Exception(ErrorCodes::TYPE_MISMATCH, "Type mismatch in IN or VALUES section. Expected: {}. Got: {}", @@ -573,7 +573,7 @@ Field convertFieldToTypeImpl(const Field & src, const IDataType & type, const ID } -Field convertFieldToType(const Field & from_value, const IDataType & to_type, const IDataType * from_type_hint) +Field convertFieldToType(const Field & from_value, const IDataType & to_type, const IDataType * from_type_hint, const FormatSettings & format_settings) { if (from_value.isNull()) return from_value; @@ -582,7 +582,7 @@ Field convertFieldToType(const Field & from_value, const IDataType & to_type, co return from_value; if (const auto * low_cardinality_type = typeid_cast(&to_type)) - return convertFieldToType(from_value, *low_cardinality_type->getDictionaryType(), from_type_hint); + return convertFieldToType(from_value, *low_cardinality_type->getDictionaryType(), from_type_hint, format_settings); else if (const auto * nullable_type = typeid_cast(&to_type)) { const IDataType & nested_type = *nullable_type->getNestedType(); @@ -593,20 +593,20 @@ Field convertFieldToType(const Field & from_value, const IDataType & to_type, co if (from_type_hint && from_type_hint->equals(nested_type)) return from_value; - return convertFieldToTypeImpl(from_value, nested_type, from_type_hint); + return convertFieldToTypeImpl(from_value, nested_type, from_type_hint, format_settings); } else - return convertFieldToTypeImpl(from_value, to_type, from_type_hint); + return convertFieldToTypeImpl(from_value, to_type, from_type_hint, format_settings); } -Field convertFieldToTypeOrThrow(const Field & from_value, const IDataType & to_type, const IDataType * from_type_hint) +Field convertFieldToTypeOrThrow(const Field & from_value, const IDataType & to_type, const IDataType * from_type_hint, const FormatSettings & format_settings) { bool is_null = from_value.isNull(); if (is_null 
&& !canContainNull(to_type)) throw Exception(ErrorCodes::TYPE_MISMATCH, "Cannot convert NULL to {}", to_type.getName()); - Field converted = convertFieldToType(from_value, to_type, from_type_hint); + Field converted = convertFieldToType(from_value, to_type, from_type_hint, format_settings); if (!is_null && converted.isNull()) throw Exception(ErrorCodes::ARGUMENT_OUT_OF_BOUND, @@ -626,9 +626,9 @@ static bool decimalEqualsFloat(Field field, Float64 float_value) return decimal_to_float == float_value; } -std::optional convertFieldToTypeStrict(const Field & from_value, const IDataType & from_type, const IDataType & to_type) +std::optional convertFieldToTypeStrict(const Field & from_value, const IDataType & from_type, const IDataType & to_type, const FormatSettings & format_settings) { - Field result_value = convertFieldToType(from_value, to_type, &from_type); + Field result_value = convertFieldToType(from_value, to_type, &from_type, format_settings); if (Field::isDecimal(from_value.getType()) && Field::isDecimal(result_value.getType())) { diff --git a/src/Interpreters/convertFieldToType.h b/src/Interpreters/convertFieldToType.h index 4aa09f8619e..c3c6271a157 100644 --- a/src/Interpreters/convertFieldToType.h +++ b/src/Interpreters/convertFieldToType.h @@ -1,6 +1,7 @@ #pragma once #include +#include namespace DB @@ -15,13 +16,13 @@ class IDataType; * Checks for the compatibility of types, checks values fall in the range of valid values of the type, makes type conversion. * If the value does not fall into the range - returns Null. */ -Field convertFieldToType(const Field & from_value, const IDataType & to_type, const IDataType * from_type_hint = nullptr); +Field convertFieldToType(const Field & from_value, const IDataType & to_type, const IDataType * from_type_hint = nullptr, const FormatSettings & format_settings = {}); /// Does the same, but throws ARGUMENT_OUT_OF_BOUND if value does not fall into the range. -Field convertFieldToTypeOrThrow(const Field & from_value, const IDataType & to_type, const IDataType * from_type_hint = nullptr); +Field convertFieldToTypeOrThrow(const Field & from_value, const IDataType & to_type, const IDataType * from_type_hint = nullptr, const FormatSettings & format_settings = {}); /// Applies stricter rules than convertFieldToType, doesn't allow loss of precision converting to Decimal. /// Returns `Field` if the conversion was successful and the result is equal to the original value, otherwise returns nullopt. -std::optional convertFieldToTypeStrict(const Field & from_value, const IDataType & from_type, const IDataType & to_type); +std::optional convertFieldToTypeStrict(const Field & from_value, const IDataType & from_type, const IDataType & to_type, const FormatSettings & format_settings = {}); } diff --git a/src/Processors/Formats/Impl/ValuesBlockInputFormat.cpp b/src/Processors/Formats/Impl/ValuesBlockInputFormat.cpp index 10d0e051665..16b88d0b8dc 100644 --- a/src/Processors/Formats/Impl/ValuesBlockInputFormat.cpp +++ b/src/Processors/Formats/Impl/ValuesBlockInputFormat.cpp @@ -542,7 +542,7 @@ bool ValuesBlockInputFormat::parseExpression(IColumn & column, size_t column_idx if (format_settings.null_as_default) tryToReplaceNullFieldsInComplexTypesWithDefaultValues(expression_value, type); - Field value = convertFieldToType(expression_value, type, value_raw.second.get()); + Field value = convertFieldToType(expression_value, type, value_raw.second.get(), format_settings); /// Check that we are indeed allowed to insert a NULL. 
if (value.isNull() && !type.isNullable() && !type.isLowCardinalityNullable()) diff --git a/tests/queries/0_stateless/03231_values_respect_format_settings_in_fields_conversion.reference b/tests/queries/0_stateless/03231_values_respect_format_settings_in_fields_conversion.reference new file mode 100644 index 00000000000..8dbf92d6590 --- /dev/null +++ b/tests/queries/0_stateless/03231_values_respect_format_settings_in_fields_conversion.reference @@ -0,0 +1 @@ +{'Hello':'2020-01-01 00:00:00'} diff --git a/tests/queries/0_stateless/03231_values_respect_format_settings_in_fields_conversion.sql b/tests/queries/0_stateless/03231_values_respect_format_settings_in_fields_conversion.sql new file mode 100644 index 00000000000..484a16bb22f --- /dev/null +++ b/tests/queries/0_stateless/03231_values_respect_format_settings_in_fields_conversion.sql @@ -0,0 +1,7 @@ +drop table if exists test; +create table test (map Map(String, DateTime)) engine=Memory; +set date_time_input_format='best_effort'; +insert into test values (map('Hello', '01/01/2020')); +select * from test; +drop table test; + From ecfe9d97821658bd43c7034f3cb2dc85825c30bf Mon Sep 17 00:00:00 2001 From: Michael Stetsyuk Date: Fri, 30 Aug 2024 14:34:24 +0000 Subject: [PATCH 43/70] make replicated database creation operations idempotent --- src/Databases/DatabaseReplicated.cpp | 103 ++++++++++++++---- src/Interpreters/InterpreterCreateQuery.cpp | 21 ++-- .../test_replicated_database/test.py | 35 ++++++ 3 files changed, 127 insertions(+), 32 deletions(-) diff --git a/src/Databases/DatabaseReplicated.cpp b/src/Databases/DatabaseReplicated.cpp index 8e3378bcc12..26a834eea4c 100644 --- a/src/Databases/DatabaseReplicated.cpp +++ b/src/Databases/DatabaseReplicated.cpp @@ -441,7 +441,8 @@ void DatabaseReplicated::tryConnectToZooKeeperAndInitDatabase(LoadingStrictnessL bool is_create_query = mode == LoadingStrictnessLevel::CREATE; String replica_host_id; - if (current_zookeeper->tryGet(replica_path, replica_host_id)) + bool replica_exists_in_zk = current_zookeeper->tryGet(replica_path, replica_host_id); + if (replica_exists_in_zk) { if (replica_host_id == DROPPED_MARK && !is_create_query) { @@ -454,7 +455,7 @@ void DatabaseReplicated::tryConnectToZooKeeperAndInitDatabase(LoadingStrictnessL String host_id = getHostID(getContext(), db_uuid, cluster_auth_info.cluster_secure_connection); String host_id_default = getHostID(getContext(), db_uuid, false); - if (is_create_query || (replica_host_id != host_id && replica_host_id != host_id_default)) + if (replica_host_id != host_id && replica_host_id != host_id_default) { throw Exception( ErrorCodes::REPLICA_ALREADY_EXISTS, @@ -485,12 +486,13 @@ void DatabaseReplicated::tryConnectToZooKeeperAndInitDatabase(LoadingStrictnessL createEmptyLogEntry(current_zookeeper); } } - else if (is_create_query) + + if (is_create_query) { - /// Create new replica. Throws if replica with the same name already exists + /// Create replica nodes in ZooKeeper. If newly initialized nodes already exist, reuse them. createReplicaNodesInZooKeeper(current_zookeeper); } - else + else if (!replica_exists_in_zk) { /// It's not CREATE query, but replica does not exist. Probably it was dropped. /// Do not create anything, continue as readonly. 
@@ -606,37 +608,92 @@ void DatabaseReplicated::createReplicaNodesInZooKeeper(const zkutil::ZooKeeperPt "already contains some data and it does not look like Replicated database path.", zookeeper_path); /// Write host name to replica_path, it will protect from multiple replicas with the same name - auto host_id = getHostID(getContext(), db_uuid, cluster_auth_info.cluster_secure_connection); + const auto host_id = getHostID(getContext(), db_uuid, cluster_auth_info.cluster_secure_connection); + + const std::vector check_paths = { + replica_path, + replica_path + "/replica_group", + replica_path + "/digest", + + /// Needed to mark all the queries + /// in the range (max log ptr at replica ZooKeeper nodes creation, max log ptr after replica recovery] as successful. + /// Previously, this method was not idempotent and max_log_ptr_at_creation could be stored in memory. + /// we need to store max_log_ptr_at_creation in ZooKeeper to make this method idempotent during replica creation. + replica_path + "/max_log_ptr_at_creation", + }; + bool nodes_exist = true; + auto check_responses = current_zookeeper->tryGet(check_paths); + for (size_t i = 0; i < check_responses.size(); ++i) + { + const auto response = check_responses[i]; + + if (response.error == Coordination::Error::ZNONODE) + { + nodes_exist = false; + break; + } else if (response.error != Coordination::Error::ZOK) + { + throw zkutil::KeeperException::fromPath(response.error, check_paths[i]); + } + } + + if (nodes_exist) + { + const std::vector expected_data = { + host_id, + replica_group_name, + "0", + }; + for (size_t i = 0; i != expected_data.size(); ++i) + { + if (check_responses[i].data != expected_data[i]) + { + throw Exception( + ErrorCodes::LOGICAL_ERROR, + "Replica node {} in ZooKeeper already exists and contains unexpected value: {}", + quoteString(check_paths[i]), quoteString(check_responses[i].data)); + } + } + + LOG_DEBUG(log, "Newly initialized replica nodes found in ZooKeeper, reusing them"); + max_log_ptr_at_creation = parse(check_responses[check_responses.size() - 1].data); + createEmptyLogEntry(current_zookeeper); + return; + } for (int attempts = 10; attempts > 0; --attempts) { Coordination::Stat stat; - String max_log_ptr_str = current_zookeeper->get(zookeeper_path + "/max_log_ptr", &stat); + const String max_log_ptr_str = current_zookeeper->get(zookeeper_path + "/max_log_ptr", &stat); - Coordination::Requests ops; - ops.emplace_back(zkutil::makeCreateRequest(replica_path, host_id, zkutil::CreateMode::Persistent)); - ops.emplace_back(zkutil::makeCreateRequest(replica_path + "/log_ptr", "0", zkutil::CreateMode::Persistent)); - ops.emplace_back(zkutil::makeCreateRequest(replica_path + "/digest", "0", zkutil::CreateMode::Persistent)); - ops.emplace_back(zkutil::makeCreateRequest(replica_path + "/replica_group", replica_group_name, zkutil::CreateMode::Persistent)); - /// In addition to creating the replica nodes, we record the max_log_ptr at the instant where - /// we declared ourself as an existing replica. We'll need this during recoverLostReplica to - /// notify other nodes that issued new queries while this node was recovering. 
- ops.emplace_back(zkutil::makeCheckRequest(zookeeper_path + "/max_log_ptr", stat.version)); + const Coordination::Requests ops = { + zkutil::makeCreateRequest(replica_path, host_id, zkutil::CreateMode::Persistent), + zkutil::makeCreateRequest(replica_path + "/log_ptr", "0", zkutil::CreateMode::Persistent), + zkutil::makeCreateRequest(replica_path + "/digest", "0", zkutil::CreateMode::Persistent), + zkutil::makeCreateRequest(replica_path + "/replica_group", replica_group_name, zkutil::CreateMode::Persistent), + + /// In addition to creating the replica nodes, we record the max_log_ptr at the instant where + /// we declared ourself as an existing replica. We'll need this during recoverLostReplica to + /// notify other nodes that issued new queries while this node was recovering. + zkutil::makeCheckRequest(zookeeper_path + "/max_log_ptr", stat.version), + zkutil::makeCreateRequest(replica_path + "/max_log_ptr_at_creation", max_log_ptr_str, zkutil::CreateMode::Persistent), + }; + + Coordination::Responses ops_responses; + const auto code = current_zookeeper->tryMulti(ops, ops_responses); - Coordination::Responses responses; - const auto code = current_zookeeper->tryMulti(ops, responses); if (code == Coordination::Error::ZOK) { max_log_ptr_at_creation = parse(max_log_ptr_str); - break; + createEmptyLogEntry(current_zookeeper); + return; } - else if (code == Coordination::Error::ZNODEEXISTS || attempts == 1) + + if (attempts == 1) { - /// If its our last attempt, or if the replica already exists, fail immediately. - zkutil::KeeperMultiException::check(code, ops, responses); + zkutil::KeeperMultiException::check(code, ops, ops_responses); } } - createEmptyLogEntry(current_zookeeper); } void DatabaseReplicated::beforeLoadingMetadata(ContextMutablePtr context_, LoadingStrictnessLevel mode) diff --git a/src/Interpreters/InterpreterCreateQuery.cpp b/src/Interpreters/InterpreterCreateQuery.cpp index 80cb0510b35..a1ed9f40685 100644 --- a/src/Interpreters/InterpreterCreateQuery.cpp +++ b/src/Interpreters/InterpreterCreateQuery.cpp @@ -228,8 +228,8 @@ BlockIO InterpreterCreateQuery::createDatabase(ASTCreateQuery & create) metadata_path = metadata_path / "store" / DatabaseCatalog::getPathForUUID(create.uuid); - if (!create.attach && fs::exists(metadata_path)) - throw Exception(ErrorCodes::DATABASE_ALREADY_EXISTS, "Metadata directory {} already exists", metadata_path.string()); + if (!create.attach && fs::exists(metadata_path) && !fs::is_empty(metadata_path)) + throw Exception(ErrorCodes::DATABASE_ALREADY_EXISTS, "Metadata directory {} already exists and is not empty", metadata_path.string()); } else if (create.storage->engine->name == "MaterializeMySQL" || create.storage->engine->name == "MaterializedMySQL") @@ -329,6 +329,9 @@ BlockIO InterpreterCreateQuery::createDatabase(ASTCreateQuery & create) writeChar('\n', statement_buf); String statement = statement_buf.str(); + /// Needed to make database creation retriable if it fails after the file is created + fs::remove(metadata_file_tmp_path); + /// Exclusive flag guarantees, that database is not created right now in another thread. 
WriteBufferFromFile out(metadata_file_tmp_path, statement.size(), O_WRONLY | O_CREAT | O_EXCL); writeString(statement, out); @@ -350,13 +353,6 @@ BlockIO InterpreterCreateQuery::createDatabase(ASTCreateQuery & create) DatabaseCatalog::instance().attachDatabase(database_name, database); added = true; - if (need_write_metadata) - { - /// Prevents from overwriting metadata of detached database - renameNoReplace(metadata_file_tmp_path, metadata_file_path); - renamed = true; - } - if (!load_database_without_tables) { /// We use global context here, because storages lifetime is bigger than query context lifetime @@ -368,6 +364,13 @@ BlockIO InterpreterCreateQuery::createDatabase(ASTCreateQuery & create) /// Only then prioritize, schedule and wait all the startup tasks waitLoad(currentPoolOr(TablesLoaderForegroundPoolId), startup_tasks); } + + if (need_write_metadata) + { + /// Prevents from overwriting metadata of detached database + renameNoReplace(metadata_file_tmp_path, metadata_file_path); + renamed = true; + } } catch (...) { diff --git a/tests/integration/test_replicated_database/test.py b/tests/integration/test_replicated_database/test.py index 60a6e099b22..58e99d4c408 100644 --- a/tests/integration/test_replicated_database/test.py +++ b/tests/integration/test_replicated_database/test.py @@ -1,5 +1,8 @@ +import concurrent.futures import os +import random import shutil +import string import time import re import pytest @@ -1549,3 +1552,35 @@ def test_all_groups_cluster(started_cluster): assert "bad_settings_node\ndummy_node\n" == bad_settings_node.query( "select host_name from system.clusters where name='all_groups.db_cluster' order by host_name" ) + + +def test_database_creation_idempotency(started_cluster): + def randomize_database_name(database_name, random_suffix_length=20): + letters = string.ascii_letters + string.digits + return f"{database_name}{''.join(random.choice(letters) for _ in range(random_suffix_length))}" + + databases = [randomize_database_name("rdb") for _ in range(100)] + + def create_database(name): + main_node.query( + f""" + CREATE DATABASE {name} + ENGINE = Replicated('/test/test_database_creation_idempotency/{name}', 'shard', 'replica') + """ + ) + + with concurrent.futures.ThreadPoolExecutor() as executor: + for name in databases: + executor.submit(create_database, name) + + main_node.restart_clickhouse(kill=True) + + with concurrent.futures.ThreadPoolExecutor() as executor: + futures = [executor.submit(create_database, name) for name in databases] + concurrent.futures.wait(futures) + + assert int( + main_node.query( + f"SELECT count() FROM system.databases WHERE name IN {databases}" + ).strip() + ) == len(databases) From e28066645b16a1e6b2c7136bc2b2ca0a9d772f72 Mon Sep 17 00:00:00 2001 From: Robert Schulze Date: Wed, 4 Sep 2024 09:31:06 +0000 Subject: [PATCH 44/70] Sort --- contrib/icu-cmake/CMakeLists.txt | 354 +++++++++++++++---------------- 1 file changed, 177 insertions(+), 177 deletions(-) diff --git a/contrib/icu-cmake/CMakeLists.txt b/contrib/icu-cmake/CMakeLists.txt index adeaa7dcf33..178af2ade6c 100644 --- a/contrib/icu-cmake/CMakeLists.txt +++ b/contrib/icu-cmake/CMakeLists.txt @@ -15,162 +15,63 @@ set(ICUDATA_SOURCE_DIR "${ClickHouse_SOURCE_DIR}/contrib/icudata/") # These lists of sources were generated from build log of the original ICU build system (configure + make). 
set(ICUUC_SOURCES -"${ICU_SOURCE_DIR}/common/errorcode.cpp" -"${ICU_SOURCE_DIR}/common/putil.cpp" -"${ICU_SOURCE_DIR}/common/umath.cpp" -"${ICU_SOURCE_DIR}/common/utypes.cpp" -"${ICU_SOURCE_DIR}/common/uinvchar.cpp" -"${ICU_SOURCE_DIR}/common/umutex.cpp" -"${ICU_SOURCE_DIR}/common/ucln_cmn.cpp" -"${ICU_SOURCE_DIR}/common/uinit.cpp" -"${ICU_SOURCE_DIR}/common/uobject.cpp" -"${ICU_SOURCE_DIR}/common/cmemory.cpp" -"${ICU_SOURCE_DIR}/common/charstr.cpp" -"${ICU_SOURCE_DIR}/common/cstr.cpp" -"${ICU_SOURCE_DIR}/common/udata.cpp" -"${ICU_SOURCE_DIR}/common/ucmndata.cpp" -"${ICU_SOURCE_DIR}/common/udatamem.cpp" -"${ICU_SOURCE_DIR}/common/umapfile.cpp" -"${ICU_SOURCE_DIR}/common/udataswp.cpp" -"${ICU_SOURCE_DIR}/common/utrie_swap.cpp" -"${ICU_SOURCE_DIR}/common/ucol_swp.cpp" -"${ICU_SOURCE_DIR}/common/utrace.cpp" -"${ICU_SOURCE_DIR}/common/uhash.cpp" -"${ICU_SOURCE_DIR}/common/uhash_us.cpp" -"${ICU_SOURCE_DIR}/common/uenum.cpp" -"${ICU_SOURCE_DIR}/common/ustrenum.cpp" -"${ICU_SOURCE_DIR}/common/uvector.cpp" -"${ICU_SOURCE_DIR}/common/ustack.cpp" -"${ICU_SOURCE_DIR}/common/uvectr32.cpp" -"${ICU_SOURCE_DIR}/common/uvectr64.cpp" -"${ICU_SOURCE_DIR}/common/ucnv.cpp" -"${ICU_SOURCE_DIR}/common/ucnv_bld.cpp" -"${ICU_SOURCE_DIR}/common/ucnv_cnv.cpp" -"${ICU_SOURCE_DIR}/common/ucnv_io.cpp" -"${ICU_SOURCE_DIR}/common/ucnv_cb.cpp" -"${ICU_SOURCE_DIR}/common/ucnv_err.cpp" -"${ICU_SOURCE_DIR}/common/ucnvlat1.cpp" -"${ICU_SOURCE_DIR}/common/ucnv_u7.cpp" -"${ICU_SOURCE_DIR}/common/ucnv_u8.cpp" -"${ICU_SOURCE_DIR}/common/ucnv_u16.cpp" -"${ICU_SOURCE_DIR}/common/ucnv_u32.cpp" -"${ICU_SOURCE_DIR}/common/ucnvscsu.cpp" -"${ICU_SOURCE_DIR}/common/ucnvbocu.cpp" -"${ICU_SOURCE_DIR}/common/ucnv_ext.cpp" -"${ICU_SOURCE_DIR}/common/ucnvmbcs.cpp" -"${ICU_SOURCE_DIR}/common/ucnv2022.cpp" -"${ICU_SOURCE_DIR}/common/ucnvhz.cpp" -"${ICU_SOURCE_DIR}/common/ucnv_lmb.cpp" -"${ICU_SOURCE_DIR}/common/ucnvisci.cpp" -"${ICU_SOURCE_DIR}/common/ucnvdisp.cpp" -"${ICU_SOURCE_DIR}/common/ucnv_set.cpp" -"${ICU_SOURCE_DIR}/common/ucnv_ct.cpp" -"${ICU_SOURCE_DIR}/common/resource.cpp" -"${ICU_SOURCE_DIR}/common/uresbund.cpp" -"${ICU_SOURCE_DIR}/common/ures_cnv.cpp" -"${ICU_SOURCE_DIR}/common/uresdata.cpp" -"${ICU_SOURCE_DIR}/common/resbund.cpp" -"${ICU_SOURCE_DIR}/common/resbund_cnv.cpp" -"${ICU_SOURCE_DIR}/common/ucurr.cpp" -"${ICU_SOURCE_DIR}/common/localebuilder.cpp" -"${ICU_SOURCE_DIR}/common/localeprioritylist.cpp" -"${ICU_SOURCE_DIR}/common/messagepattern.cpp" -"${ICU_SOURCE_DIR}/common/ucat.cpp" -"${ICU_SOURCE_DIR}/common/locmap.cpp" -"${ICU_SOURCE_DIR}/common/uloc.cpp" -"${ICU_SOURCE_DIR}/common/locid.cpp" -"${ICU_SOURCE_DIR}/common/locutil.cpp" -"${ICU_SOURCE_DIR}/common/locavailable.cpp" -"${ICU_SOURCE_DIR}/common/locdispnames.cpp" -"${ICU_SOURCE_DIR}/common/locdspnm.cpp" -"${ICU_SOURCE_DIR}/common/loclikely.cpp" -"${ICU_SOURCE_DIR}/common/locresdata.cpp" -"${ICU_SOURCE_DIR}/common/lsr.cpp" -"${ICU_SOURCE_DIR}/common/loclikelysubtags.cpp" -"${ICU_SOURCE_DIR}/common/locdistance.cpp" -"${ICU_SOURCE_DIR}/common/localematcher.cpp" -"${ICU_SOURCE_DIR}/common/bytestream.cpp" -"${ICU_SOURCE_DIR}/common/stringpiece.cpp" -"${ICU_SOURCE_DIR}/common/bytesinkutil.cpp" -"${ICU_SOURCE_DIR}/common/stringtriebuilder.cpp" -"${ICU_SOURCE_DIR}/common/bytestriebuilder.cpp" -"${ICU_SOURCE_DIR}/common/bytestrie.cpp" -"${ICU_SOURCE_DIR}/common/bytestrieiterator.cpp" -"${ICU_SOURCE_DIR}/common/ucharstrie.cpp" -"${ICU_SOURCE_DIR}/common/ucharstriebuilder.cpp" -"${ICU_SOURCE_DIR}/common/ucharstrieiterator.cpp" -"${ICU_SOURCE_DIR}/common/dictionarydata.cpp" 
-"${ICU_SOURCE_DIR}/common/edits.cpp" "${ICU_SOURCE_DIR}/common/appendable.cpp" -"${ICU_SOURCE_DIR}/common/ustr_cnv.cpp" -"${ICU_SOURCE_DIR}/common/unistr_cnv.cpp" -"${ICU_SOURCE_DIR}/common/unistr.cpp" -"${ICU_SOURCE_DIR}/common/unistr_case.cpp" -"${ICU_SOURCE_DIR}/common/unistr_props.cpp" -"${ICU_SOURCE_DIR}/common/utf_impl.cpp" -"${ICU_SOURCE_DIR}/common/ustring.cpp" -"${ICU_SOURCE_DIR}/common/ustrcase.cpp" -"${ICU_SOURCE_DIR}/common/ucasemap.cpp" -"${ICU_SOURCE_DIR}/common/ucasemap_titlecase_brkiter.cpp" -"${ICU_SOURCE_DIR}/common/cstring.cpp" -"${ICU_SOURCE_DIR}/common/ustrfmt.cpp" -"${ICU_SOURCE_DIR}/common/ustrtrns.cpp" -"${ICU_SOURCE_DIR}/common/ustr_wcs.cpp" -"${ICU_SOURCE_DIR}/common/utext.cpp" -"${ICU_SOURCE_DIR}/common/unistr_case_locale.cpp" -"${ICU_SOURCE_DIR}/common/ustrcase_locale.cpp" -"${ICU_SOURCE_DIR}/common/unistr_titlecase_brkiter.cpp" -"${ICU_SOURCE_DIR}/common/ustr_titlecase_brkiter.cpp" -"${ICU_SOURCE_DIR}/common/normalizer2impl.cpp" -"${ICU_SOURCE_DIR}/common/normalizer2.cpp" -"${ICU_SOURCE_DIR}/common/filterednormalizer2.cpp" -"${ICU_SOURCE_DIR}/common/normlzr.cpp" -"${ICU_SOURCE_DIR}/common/unorm.cpp" -"${ICU_SOURCE_DIR}/common/unormcmp.cpp" -"${ICU_SOURCE_DIR}/common/loadednormalizer2impl.cpp" -"${ICU_SOURCE_DIR}/common/chariter.cpp" -"${ICU_SOURCE_DIR}/common/schriter.cpp" -"${ICU_SOURCE_DIR}/common/uchriter.cpp" -"${ICU_SOURCE_DIR}/common/uiter.cpp" -"${ICU_SOURCE_DIR}/common/patternprops.cpp" -"${ICU_SOURCE_DIR}/common/uchar.cpp" -"${ICU_SOURCE_DIR}/common/uprops.cpp" -"${ICU_SOURCE_DIR}/common/ucase.cpp" -"${ICU_SOURCE_DIR}/common/propname.cpp" -"${ICU_SOURCE_DIR}/common/ubidi_props.cpp" -"${ICU_SOURCE_DIR}/common/characterproperties.cpp" -"${ICU_SOURCE_DIR}/common/ubidi.cpp" -"${ICU_SOURCE_DIR}/common/ubidiwrt.cpp" -"${ICU_SOURCE_DIR}/common/ubidiln.cpp" -"${ICU_SOURCE_DIR}/common/ushape.cpp" -"${ICU_SOURCE_DIR}/common/uscript.cpp" -"${ICU_SOURCE_DIR}/common/uscript_props.cpp" -"${ICU_SOURCE_DIR}/common/usc_impl.cpp" -"${ICU_SOURCE_DIR}/common/unames.cpp" -"${ICU_SOURCE_DIR}/common/utrie.cpp" -"${ICU_SOURCE_DIR}/common/utrie2.cpp" -"${ICU_SOURCE_DIR}/common/utrie2_builder.cpp" -"${ICU_SOURCE_DIR}/common/ucptrie.cpp" -"${ICU_SOURCE_DIR}/common/umutablecptrie.cpp" "${ICU_SOURCE_DIR}/common/bmpset.cpp" -"${ICU_SOURCE_DIR}/common/unisetspan.cpp" -"${ICU_SOURCE_DIR}/common/uset_props.cpp" -"${ICU_SOURCE_DIR}/common/uniset_props.cpp" -"${ICU_SOURCE_DIR}/common/uniset_closure.cpp" -"${ICU_SOURCE_DIR}/common/uset.cpp" -"${ICU_SOURCE_DIR}/common/uniset.cpp" -"${ICU_SOURCE_DIR}/common/usetiter.cpp" -"${ICU_SOURCE_DIR}/common/ruleiter.cpp" -"${ICU_SOURCE_DIR}/common/caniter.cpp" -"${ICU_SOURCE_DIR}/common/unifilt.cpp" -"${ICU_SOURCE_DIR}/common/unifunct.cpp" -"${ICU_SOURCE_DIR}/common/uarrsort.cpp" -"${ICU_SOURCE_DIR}/common/brkiter.cpp" -"${ICU_SOURCE_DIR}/common/ubrk.cpp" "${ICU_SOURCE_DIR}/common/brkeng.cpp" +"${ICU_SOURCE_DIR}/common/brkiter.cpp" +"${ICU_SOURCE_DIR}/common/bytesinkutil.cpp" +"${ICU_SOURCE_DIR}/common/bytestream.cpp" +"${ICU_SOURCE_DIR}/common/bytestrie.cpp" +"${ICU_SOURCE_DIR}/common/bytestriebuilder.cpp" +"${ICU_SOURCE_DIR}/common/bytestrieiterator.cpp" +"${ICU_SOURCE_DIR}/common/caniter.cpp" +"${ICU_SOURCE_DIR}/common/characterproperties.cpp" +"${ICU_SOURCE_DIR}/common/chariter.cpp" +"${ICU_SOURCE_DIR}/common/charstr.cpp" +"${ICU_SOURCE_DIR}/common/cmemory.cpp" +"${ICU_SOURCE_DIR}/common/cstr.cpp" +"${ICU_SOURCE_DIR}/common/cstring.cpp" +"${ICU_SOURCE_DIR}/common/cwchar.cpp" "${ICU_SOURCE_DIR}/common/dictbe.cpp" 
+"${ICU_SOURCE_DIR}/common/dictionarydata.cpp" +"${ICU_SOURCE_DIR}/common/dtintrv.cpp" +"${ICU_SOURCE_DIR}/common/edits.cpp" +"${ICU_SOURCE_DIR}/common/emojiprops.cpp" +"${ICU_SOURCE_DIR}/common/errorcode.cpp" "${ICU_SOURCE_DIR}/common/filteredbrk.cpp" +"${ICU_SOURCE_DIR}/common/filterednormalizer2.cpp" +"${ICU_SOURCE_DIR}/common/icudataver.cpp" +"${ICU_SOURCE_DIR}/common/icuplug.cpp" +"${ICU_SOURCE_DIR}/common/loadednormalizer2impl.cpp" +"${ICU_SOURCE_DIR}/common/localebuilder.cpp" +"${ICU_SOURCE_DIR}/common/localematcher.cpp" +"${ICU_SOURCE_DIR}/common/localeprioritylist.cpp" +"${ICU_SOURCE_DIR}/common/locavailable.cpp" +"${ICU_SOURCE_DIR}/common/locbased.cpp" +"${ICU_SOURCE_DIR}/common/locdispnames.cpp" +"${ICU_SOURCE_DIR}/common/locdistance.cpp" +"${ICU_SOURCE_DIR}/common/locdspnm.cpp" +"${ICU_SOURCE_DIR}/common/locid.cpp" +"${ICU_SOURCE_DIR}/common/loclikely.cpp" +"${ICU_SOURCE_DIR}/common/loclikelysubtags.cpp" +"${ICU_SOURCE_DIR}/common/locmap.cpp" +"${ICU_SOURCE_DIR}/common/locresdata.cpp" +"${ICU_SOURCE_DIR}/common/locutil.cpp" +"${ICU_SOURCE_DIR}/common/lsr.cpp" +"${ICU_SOURCE_DIR}/common/lstmbe.cpp") +"${ICU_SOURCE_DIR}/common/messagepattern.cpp" +"${ICU_SOURCE_DIR}/common/normalizer2.cpp" +"${ICU_SOURCE_DIR}/common/normalizer2impl.cpp" +"${ICU_SOURCE_DIR}/common/normlzr.cpp" +"${ICU_SOURCE_DIR}/common/parsepos.cpp" +"${ICU_SOURCE_DIR}/common/patternprops.cpp" +"${ICU_SOURCE_DIR}/common/pluralmap.cpp" +"${ICU_SOURCE_DIR}/common/propname.cpp" +"${ICU_SOURCE_DIR}/common/propsvec.cpp" +"${ICU_SOURCE_DIR}/common/punycode.cpp" +"${ICU_SOURCE_DIR}/common/putil.cpp" "${ICU_SOURCE_DIR}/common/rbbi.cpp" +"${ICU_SOURCE_DIR}/common/rbbi_cache.cpp" "${ICU_SOURCE_DIR}/common/rbbidata.cpp" "${ICU_SOURCE_DIR}/common/rbbinode.cpp" "${ICU_SOURCE_DIR}/common/rbbirb.cpp" @@ -178,41 +79,140 @@ set(ICUUC_SOURCES "${ICU_SOURCE_DIR}/common/rbbisetb.cpp" "${ICU_SOURCE_DIR}/common/rbbistbl.cpp" "${ICU_SOURCE_DIR}/common/rbbitblb.cpp" -"${ICU_SOURCE_DIR}/common/rbbi_cache.cpp" +"${ICU_SOURCE_DIR}/common/resbund.cpp" +"${ICU_SOURCE_DIR}/common/resbund_cnv.cpp" +"${ICU_SOURCE_DIR}/common/resource.cpp" +"${ICU_SOURCE_DIR}/common/restrace.cpp" +"${ICU_SOURCE_DIR}/common/ruleiter.cpp" +"${ICU_SOURCE_DIR}/common/schriter.cpp" "${ICU_SOURCE_DIR}/common/serv.cpp" -"${ICU_SOURCE_DIR}/common/servnotf.cpp" -"${ICU_SOURCE_DIR}/common/servls.cpp" "${ICU_SOURCE_DIR}/common/servlk.cpp" "${ICU_SOURCE_DIR}/common/servlkf.cpp" +"${ICU_SOURCE_DIR}/common/servls.cpp" +"${ICU_SOURCE_DIR}/common/servnotf.cpp" "${ICU_SOURCE_DIR}/common/servrbf.cpp" "${ICU_SOURCE_DIR}/common/servslkf.cpp" -"${ICU_SOURCE_DIR}/common/uidna.cpp" -"${ICU_SOURCE_DIR}/common/usprep.cpp" -"${ICU_SOURCE_DIR}/common/uts46.cpp" -"${ICU_SOURCE_DIR}/common/punycode.cpp" -"${ICU_SOURCE_DIR}/common/util.cpp" -"${ICU_SOURCE_DIR}/common/util_props.cpp" -"${ICU_SOURCE_DIR}/common/parsepos.cpp" -"${ICU_SOURCE_DIR}/common/locbased.cpp" -"${ICU_SOURCE_DIR}/common/cwchar.cpp" -"${ICU_SOURCE_DIR}/common/wintz.cpp" -"${ICU_SOURCE_DIR}/common/dtintrv.cpp" -"${ICU_SOURCE_DIR}/common/ucnvsel.cpp" -"${ICU_SOURCE_DIR}/common/propsvec.cpp" -"${ICU_SOURCE_DIR}/common/ulist.cpp" -"${ICU_SOURCE_DIR}/common/uloc_tag.cpp" -"${ICU_SOURCE_DIR}/common/icudataver.cpp" -"${ICU_SOURCE_DIR}/common/icuplug.cpp" "${ICU_SOURCE_DIR}/common/sharedobject.cpp" "${ICU_SOURCE_DIR}/common/simpleformatter.cpp" -"${ICU_SOURCE_DIR}/common/unifiedcache.cpp" -"${ICU_SOURCE_DIR}/common/uloc_keytype.cpp" -"${ICU_SOURCE_DIR}/common/ubiditransform.cpp" -"${ICU_SOURCE_DIR}/common/pluralmap.cpp" 
"${ICU_SOURCE_DIR}/common/static_unicode_sets.cpp" -"${ICU_SOURCE_DIR}/common/restrace.cpp" -"${ICU_SOURCE_DIR}/common/emojiprops.cpp" -"${ICU_SOURCE_DIR}/common/lstmbe.cpp") +"${ICU_SOURCE_DIR}/common/stringpiece.cpp" +"${ICU_SOURCE_DIR}/common/stringtriebuilder.cpp" +"${ICU_SOURCE_DIR}/common/uarrsort.cpp" +"${ICU_SOURCE_DIR}/common/ubidi.cpp" +"${ICU_SOURCE_DIR}/common/ubidi_props.cpp" +"${ICU_SOURCE_DIR}/common/ubidiln.cpp" +"${ICU_SOURCE_DIR}/common/ubiditransform.cpp" +"${ICU_SOURCE_DIR}/common/ubidiwrt.cpp" +"${ICU_SOURCE_DIR}/common/ubrk.cpp" +"${ICU_SOURCE_DIR}/common/ucase.cpp" +"${ICU_SOURCE_DIR}/common/ucasemap.cpp" +"${ICU_SOURCE_DIR}/common/ucasemap_titlecase_brkiter.cpp" +"${ICU_SOURCE_DIR}/common/ucat.cpp" +"${ICU_SOURCE_DIR}/common/uchar.cpp" +"${ICU_SOURCE_DIR}/common/ucharstrie.cpp" +"${ICU_SOURCE_DIR}/common/ucharstriebuilder.cpp" +"${ICU_SOURCE_DIR}/common/ucharstrieiterator.cpp" +"${ICU_SOURCE_DIR}/common/uchriter.cpp" +"${ICU_SOURCE_DIR}/common/ucln_cmn.cpp" +"${ICU_SOURCE_DIR}/common/ucmndata.cpp" +"${ICU_SOURCE_DIR}/common/ucnv.cpp" +"${ICU_SOURCE_DIR}/common/ucnv2022.cpp" +"${ICU_SOURCE_DIR}/common/ucnv_bld.cpp" +"${ICU_SOURCE_DIR}/common/ucnv_cb.cpp" +"${ICU_SOURCE_DIR}/common/ucnv_cnv.cpp" +"${ICU_SOURCE_DIR}/common/ucnv_ct.cpp" +"${ICU_SOURCE_DIR}/common/ucnv_err.cpp" +"${ICU_SOURCE_DIR}/common/ucnv_ext.cpp" +"${ICU_SOURCE_DIR}/common/ucnv_io.cpp" +"${ICU_SOURCE_DIR}/common/ucnv_lmb.cpp" +"${ICU_SOURCE_DIR}/common/ucnv_set.cpp" +"${ICU_SOURCE_DIR}/common/ucnv_u16.cpp" +"${ICU_SOURCE_DIR}/common/ucnv_u32.cpp" +"${ICU_SOURCE_DIR}/common/ucnv_u7.cpp" +"${ICU_SOURCE_DIR}/common/ucnv_u8.cpp" +"${ICU_SOURCE_DIR}/common/ucnvbocu.cpp" +"${ICU_SOURCE_DIR}/common/ucnvdisp.cpp" +"${ICU_SOURCE_DIR}/common/ucnvhz.cpp" +"${ICU_SOURCE_DIR}/common/ucnvisci.cpp" +"${ICU_SOURCE_DIR}/common/ucnvlat1.cpp" +"${ICU_SOURCE_DIR}/common/ucnvmbcs.cpp" +"${ICU_SOURCE_DIR}/common/ucnvscsu.cpp" +"${ICU_SOURCE_DIR}/common/ucnvsel.cpp" +"${ICU_SOURCE_DIR}/common/ucol_swp.cpp" +"${ICU_SOURCE_DIR}/common/ucptrie.cpp" +"${ICU_SOURCE_DIR}/common/ucurr.cpp" +"${ICU_SOURCE_DIR}/common/udata.cpp" +"${ICU_SOURCE_DIR}/common/udatamem.cpp" +"${ICU_SOURCE_DIR}/common/udataswp.cpp" +"${ICU_SOURCE_DIR}/common/uenum.cpp" +"${ICU_SOURCE_DIR}/common/uhash.cpp" +"${ICU_SOURCE_DIR}/common/uhash_us.cpp" +"${ICU_SOURCE_DIR}/common/uidna.cpp" +"${ICU_SOURCE_DIR}/common/uinit.cpp" +"${ICU_SOURCE_DIR}/common/uinvchar.cpp" +"${ICU_SOURCE_DIR}/common/uiter.cpp" +"${ICU_SOURCE_DIR}/common/ulist.cpp" +"${ICU_SOURCE_DIR}/common/uloc.cpp" +"${ICU_SOURCE_DIR}/common/uloc_keytype.cpp" +"${ICU_SOURCE_DIR}/common/uloc_tag.cpp" +"${ICU_SOURCE_DIR}/common/umapfile.cpp" +"${ICU_SOURCE_DIR}/common/umath.cpp" +"${ICU_SOURCE_DIR}/common/umutablecptrie.cpp" +"${ICU_SOURCE_DIR}/common/umutex.cpp" +"${ICU_SOURCE_DIR}/common/unames.cpp" +"${ICU_SOURCE_DIR}/common/unifiedcache.cpp" +"${ICU_SOURCE_DIR}/common/unifilt.cpp" +"${ICU_SOURCE_DIR}/common/unifunct.cpp" +"${ICU_SOURCE_DIR}/common/uniset.cpp" +"${ICU_SOURCE_DIR}/common/uniset_closure.cpp" +"${ICU_SOURCE_DIR}/common/uniset_props.cpp" +"${ICU_SOURCE_DIR}/common/unisetspan.cpp" +"${ICU_SOURCE_DIR}/common/unistr.cpp" +"${ICU_SOURCE_DIR}/common/unistr_case.cpp" +"${ICU_SOURCE_DIR}/common/unistr_case_locale.cpp" +"${ICU_SOURCE_DIR}/common/unistr_cnv.cpp" +"${ICU_SOURCE_DIR}/common/unistr_props.cpp" +"${ICU_SOURCE_DIR}/common/unistr_titlecase_brkiter.cpp" +"${ICU_SOURCE_DIR}/common/unorm.cpp" +"${ICU_SOURCE_DIR}/common/unormcmp.cpp" +"${ICU_SOURCE_DIR}/common/uobject.cpp" 
+"${ICU_SOURCE_DIR}/common/uprops.cpp" +"${ICU_SOURCE_DIR}/common/ures_cnv.cpp" +"${ICU_SOURCE_DIR}/common/uresbund.cpp" +"${ICU_SOURCE_DIR}/common/uresdata.cpp" +"${ICU_SOURCE_DIR}/common/usc_impl.cpp" +"${ICU_SOURCE_DIR}/common/uscript.cpp" +"${ICU_SOURCE_DIR}/common/uscript_props.cpp" +"${ICU_SOURCE_DIR}/common/uset.cpp" +"${ICU_SOURCE_DIR}/common/uset_props.cpp" +"${ICU_SOURCE_DIR}/common/usetiter.cpp" +"${ICU_SOURCE_DIR}/common/ushape.cpp" +"${ICU_SOURCE_DIR}/common/usprep.cpp" +"${ICU_SOURCE_DIR}/common/ustack.cpp" +"${ICU_SOURCE_DIR}/common/ustr_cnv.cpp" +"${ICU_SOURCE_DIR}/common/ustr_titlecase_brkiter.cpp" +"${ICU_SOURCE_DIR}/common/ustr_wcs.cpp" +"${ICU_SOURCE_DIR}/common/ustrcase.cpp" +"${ICU_SOURCE_DIR}/common/ustrcase_locale.cpp" +"${ICU_SOURCE_DIR}/common/ustrenum.cpp" +"${ICU_SOURCE_DIR}/common/ustrfmt.cpp" +"${ICU_SOURCE_DIR}/common/ustring.cpp" +"${ICU_SOURCE_DIR}/common/ustrtrns.cpp" +"${ICU_SOURCE_DIR}/common/utext.cpp" +"${ICU_SOURCE_DIR}/common/utf_impl.cpp" +"${ICU_SOURCE_DIR}/common/util.cpp" +"${ICU_SOURCE_DIR}/common/util_props.cpp" +"${ICU_SOURCE_DIR}/common/utrace.cpp" +"${ICU_SOURCE_DIR}/common/utrie.cpp" +"${ICU_SOURCE_DIR}/common/utrie2.cpp" +"${ICU_SOURCE_DIR}/common/utrie2_builder.cpp" +"${ICU_SOURCE_DIR}/common/utrie_swap.cpp" +"${ICU_SOURCE_DIR}/common/uts46.cpp" +"${ICU_SOURCE_DIR}/common/utypes.cpp" +"${ICU_SOURCE_DIR}/common/uvector.cpp" +"${ICU_SOURCE_DIR}/common/uvectr32.cpp" +"${ICU_SOURCE_DIR}/common/uvectr64.cpp" +"${ICU_SOURCE_DIR}/common/wintz.cpp" set(ICUI18N_SOURCES "${ICU_SOURCE_DIR}/i18n/ucln_in.cpp" From e047c7c4194bc32d2b0308a941ebd85f306f2489 Mon Sep 17 00:00:00 2001 From: Robert Schulze Date: Wed, 4 Sep 2024 09:31:46 +0000 Subject: [PATCH 45/70] Update sources --- contrib/icu-cmake/CMakeLists.txt | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/contrib/icu-cmake/CMakeLists.txt b/contrib/icu-cmake/CMakeLists.txt index 178af2ade6c..ac849332429 100644 --- a/contrib/icu-cmake/CMakeLists.txt +++ b/contrib/icu-cmake/CMakeLists.txt @@ -58,8 +58,9 @@ set(ICUUC_SOURCES "${ICU_SOURCE_DIR}/common/locresdata.cpp" "${ICU_SOURCE_DIR}/common/locutil.cpp" "${ICU_SOURCE_DIR}/common/lsr.cpp" -"${ICU_SOURCE_DIR}/common/lstmbe.cpp") +"${ICU_SOURCE_DIR}/common/lstmbe.cpp" "${ICU_SOURCE_DIR}/common/messagepattern.cpp" +"${ICU_SOURCE_DIR}/common/mlbe.cpp" "${ICU_SOURCE_DIR}/common/normalizer2.cpp" "${ICU_SOURCE_DIR}/common/normalizer2impl.cpp" "${ICU_SOURCE_DIR}/common/normlzr.cpp" @@ -155,6 +156,8 @@ set(ICUUC_SOURCES "${ICU_SOURCE_DIR}/common/uloc.cpp" "${ICU_SOURCE_DIR}/common/uloc_keytype.cpp" "${ICU_SOURCE_DIR}/common/uloc_tag.cpp" +"${ICU_SOURCE_DIR}/common/ulocale.cpp" +"${ICU_SOURCE_DIR}/common/ulocbuilder.cpp" "${ICU_SOURCE_DIR}/common/umapfile.cpp" "${ICU_SOURCE_DIR}/common/umath.cpp" "${ICU_SOURCE_DIR}/common/umutablecptrie.cpp" @@ -212,7 +215,7 @@ set(ICUUC_SOURCES "${ICU_SOURCE_DIR}/common/uvector.cpp" "${ICU_SOURCE_DIR}/common/uvectr32.cpp" "${ICU_SOURCE_DIR}/common/uvectr64.cpp" -"${ICU_SOURCE_DIR}/common/wintz.cpp" +"${ICU_SOURCE_DIR}/common/wintz.cpp") set(ICUI18N_SOURCES "${ICU_SOURCE_DIR}/i18n/ucln_in.cpp" From 1c6e96071943bfa0f02412e23920625827fa6e35 Mon Sep 17 00:00:00 2001 From: Robert Schulze Date: Wed, 4 Sep 2024 09:35:06 +0000 Subject: [PATCH 46/70] Sort --- contrib/icu-cmake/CMakeLists.txt | 370 +++++++++++++++---------------- 1 file changed, 185 insertions(+), 185 deletions(-) diff --git a/contrib/icu-cmake/CMakeLists.txt b/contrib/icu-cmake/CMakeLists.txt index ac849332429..9d6e9ac0a62 100644 --- 
a/contrib/icu-cmake/CMakeLists.txt +++ b/contrib/icu-cmake/CMakeLists.txt @@ -218,129 +218,42 @@ set(ICUUC_SOURCES "${ICU_SOURCE_DIR}/common/wintz.cpp") set(ICUI18N_SOURCES -"${ICU_SOURCE_DIR}/i18n/ucln_in.cpp" -"${ICU_SOURCE_DIR}/i18n/fmtable.cpp" -"${ICU_SOURCE_DIR}/i18n/format.cpp" -"${ICU_SOURCE_DIR}/i18n/msgfmt.cpp" -"${ICU_SOURCE_DIR}/i18n/umsg.cpp" -"${ICU_SOURCE_DIR}/i18n/numfmt.cpp" -"${ICU_SOURCE_DIR}/i18n/unum.cpp" -"${ICU_SOURCE_DIR}/i18n/decimfmt.cpp" -"${ICU_SOURCE_DIR}/i18n/dcfmtsym.cpp" -"${ICU_SOURCE_DIR}/i18n/fmtable_cnv.cpp" -"${ICU_SOURCE_DIR}/i18n/choicfmt.cpp" -"${ICU_SOURCE_DIR}/i18n/datefmt.cpp" -"${ICU_SOURCE_DIR}/i18n/smpdtfmt.cpp" -"${ICU_SOURCE_DIR}/i18n/reldtfmt.cpp" -"${ICU_SOURCE_DIR}/i18n/dtfmtsym.cpp" -"${ICU_SOURCE_DIR}/i18n/udat.cpp" -"${ICU_SOURCE_DIR}/i18n/dtptngen.cpp" -"${ICU_SOURCE_DIR}/i18n/udatpg.cpp" -"${ICU_SOURCE_DIR}/i18n/nfrs.cpp" -"${ICU_SOURCE_DIR}/i18n/nfrule.cpp" -"${ICU_SOURCE_DIR}/i18n/nfsubs.cpp" -"${ICU_SOURCE_DIR}/i18n/rbnf.cpp" -"${ICU_SOURCE_DIR}/i18n/numsys.cpp" -"${ICU_SOURCE_DIR}/i18n/unumsys.cpp" -"${ICU_SOURCE_DIR}/i18n/ucsdet.cpp" -"${ICU_SOURCE_DIR}/i18n/ucal.cpp" -"${ICU_SOURCE_DIR}/i18n/calendar.cpp" -"${ICU_SOURCE_DIR}/i18n/gregocal.cpp" -"${ICU_SOURCE_DIR}/i18n/timezone.cpp" -"${ICU_SOURCE_DIR}/i18n/simpletz.cpp" -"${ICU_SOURCE_DIR}/i18n/olsontz.cpp" +"${ICU_SOURCE_DIR}/i18n/alphaindex.cpp" +"${ICU_SOURCE_DIR}/i18n/anytrans.cpp" "${ICU_SOURCE_DIR}/i18n/astro.cpp" -"${ICU_SOURCE_DIR}/i18n/taiwncal.cpp" +"${ICU_SOURCE_DIR}/i18n/basictz.cpp" +"${ICU_SOURCE_DIR}/i18n/bocsu.cpp" +"${ICU_SOURCE_DIR}/i18n/brktrans.cpp" "${ICU_SOURCE_DIR}/i18n/buddhcal.cpp" -"${ICU_SOURCE_DIR}/i18n/persncal.cpp" -"${ICU_SOURCE_DIR}/i18n/islamcal.cpp" -"${ICU_SOURCE_DIR}/i18n/japancal.cpp" -"${ICU_SOURCE_DIR}/i18n/gregoimp.cpp" -"${ICU_SOURCE_DIR}/i18n/hebrwcal.cpp" -"${ICU_SOURCE_DIR}/i18n/indiancal.cpp" -"${ICU_SOURCE_DIR}/i18n/chnsecal.cpp" +"${ICU_SOURCE_DIR}/i18n/calendar.cpp" +"${ICU_SOURCE_DIR}/i18n/casetrn.cpp" "${ICU_SOURCE_DIR}/i18n/cecal.cpp" -"${ICU_SOURCE_DIR}/i18n/coptccal.cpp" -"${ICU_SOURCE_DIR}/i18n/dangical.cpp" -"${ICU_SOURCE_DIR}/i18n/ethpccal.cpp" +"${ICU_SOURCE_DIR}/i18n/chnsecal.cpp" +"${ICU_SOURCE_DIR}/i18n/choicfmt.cpp" "${ICU_SOURCE_DIR}/i18n/coleitr.cpp" "${ICU_SOURCE_DIR}/i18n/coll.cpp" -"${ICU_SOURCE_DIR}/i18n/sortkey.cpp" -"${ICU_SOURCE_DIR}/i18n/bocsu.cpp" -"${ICU_SOURCE_DIR}/i18n/ucoleitr.cpp" -"${ICU_SOURCE_DIR}/i18n/ucol.cpp" -"${ICU_SOURCE_DIR}/i18n/ucol_res.cpp" -"${ICU_SOURCE_DIR}/i18n/ucol_sit.cpp" "${ICU_SOURCE_DIR}/i18n/collation.cpp" -"${ICU_SOURCE_DIR}/i18n/collationsettings.cpp" +"${ICU_SOURCE_DIR}/i18n/collationbuilder.cpp" +"${ICU_SOURCE_DIR}/i18n/collationcompare.cpp" "${ICU_SOURCE_DIR}/i18n/collationdata.cpp" -"${ICU_SOURCE_DIR}/i18n/collationtailoring.cpp" +"${ICU_SOURCE_DIR}/i18n/collationdatabuilder.cpp" "${ICU_SOURCE_DIR}/i18n/collationdatareader.cpp" "${ICU_SOURCE_DIR}/i18n/collationdatawriter.cpp" +"${ICU_SOURCE_DIR}/i18n/collationfastlatin.cpp" +"${ICU_SOURCE_DIR}/i18n/collationfastlatinbuilder.cpp" "${ICU_SOURCE_DIR}/i18n/collationfcd.cpp" "${ICU_SOURCE_DIR}/i18n/collationiterator.cpp" -"${ICU_SOURCE_DIR}/i18n/utf16collationiterator.cpp" -"${ICU_SOURCE_DIR}/i18n/utf8collationiterator.cpp" -"${ICU_SOURCE_DIR}/i18n/uitercollationiterator.cpp" -"${ICU_SOURCE_DIR}/i18n/collationsets.cpp" -"${ICU_SOURCE_DIR}/i18n/collationcompare.cpp" -"${ICU_SOURCE_DIR}/i18n/collationfastlatin.cpp" "${ICU_SOURCE_DIR}/i18n/collationkeys.cpp" -"${ICU_SOURCE_DIR}/i18n/rulebasedcollator.cpp" 
"${ICU_SOURCE_DIR}/i18n/collationroot.cpp" "${ICU_SOURCE_DIR}/i18n/collationrootelements.cpp" -"${ICU_SOURCE_DIR}/i18n/collationdatabuilder.cpp" -"${ICU_SOURCE_DIR}/i18n/collationweights.cpp" "${ICU_SOURCE_DIR}/i18n/collationruleparser.cpp" -"${ICU_SOURCE_DIR}/i18n/collationbuilder.cpp" -"${ICU_SOURCE_DIR}/i18n/collationfastlatinbuilder.cpp" -"${ICU_SOURCE_DIR}/i18n/listformatter.cpp" -"${ICU_SOURCE_DIR}/i18n/ulistformatter.cpp" -"${ICU_SOURCE_DIR}/i18n/strmatch.cpp" -"${ICU_SOURCE_DIR}/i18n/usearch.cpp" -"${ICU_SOURCE_DIR}/i18n/search.cpp" -"${ICU_SOURCE_DIR}/i18n/stsearch.cpp" -"${ICU_SOURCE_DIR}/i18n/translit.cpp" -"${ICU_SOURCE_DIR}/i18n/utrans.cpp" -"${ICU_SOURCE_DIR}/i18n/esctrn.cpp" -"${ICU_SOURCE_DIR}/i18n/unesctrn.cpp" -"${ICU_SOURCE_DIR}/i18n/funcrepl.cpp" -"${ICU_SOURCE_DIR}/i18n/strrepl.cpp" -"${ICU_SOURCE_DIR}/i18n/tridpars.cpp" +"${ICU_SOURCE_DIR}/i18n/collationsets.cpp" +"${ICU_SOURCE_DIR}/i18n/collationsettings.cpp" +"${ICU_SOURCE_DIR}/i18n/collationtailoring.cpp" +"${ICU_SOURCE_DIR}/i18n/collationweights.cpp" +"${ICU_SOURCE_DIR}/i18n/compactdecimalformat.cpp" +"${ICU_SOURCE_DIR}/i18n/coptccal.cpp" "${ICU_SOURCE_DIR}/i18n/cpdtrans.cpp" -"${ICU_SOURCE_DIR}/i18n/rbt.cpp" -"${ICU_SOURCE_DIR}/i18n/rbt_data.cpp" -"${ICU_SOURCE_DIR}/i18n/rbt_pars.cpp" -"${ICU_SOURCE_DIR}/i18n/rbt_rule.cpp" -"${ICU_SOURCE_DIR}/i18n/rbt_set.cpp" -"${ICU_SOURCE_DIR}/i18n/nultrans.cpp" -"${ICU_SOURCE_DIR}/i18n/remtrans.cpp" -"${ICU_SOURCE_DIR}/i18n/casetrn.cpp" -"${ICU_SOURCE_DIR}/i18n/titletrn.cpp" -"${ICU_SOURCE_DIR}/i18n/tolowtrn.cpp" -"${ICU_SOURCE_DIR}/i18n/toupptrn.cpp" -"${ICU_SOURCE_DIR}/i18n/anytrans.cpp" -"${ICU_SOURCE_DIR}/i18n/name2uni.cpp" -"${ICU_SOURCE_DIR}/i18n/uni2name.cpp" -"${ICU_SOURCE_DIR}/i18n/nortrans.cpp" -"${ICU_SOURCE_DIR}/i18n/quant.cpp" -"${ICU_SOURCE_DIR}/i18n/transreg.cpp" -"${ICU_SOURCE_DIR}/i18n/brktrans.cpp" -"${ICU_SOURCE_DIR}/i18n/regexcmp.cpp" -"${ICU_SOURCE_DIR}/i18n/rematch.cpp" -"${ICU_SOURCE_DIR}/i18n/repattrn.cpp" -"${ICU_SOURCE_DIR}/i18n/regexst.cpp" -"${ICU_SOURCE_DIR}/i18n/regextxt.cpp" -"${ICU_SOURCE_DIR}/i18n/regeximp.cpp" -"${ICU_SOURCE_DIR}/i18n/uregex.cpp" -"${ICU_SOURCE_DIR}/i18n/uregexc.cpp" -"${ICU_SOURCE_DIR}/i18n/ulocdata.cpp" -"${ICU_SOURCE_DIR}/i18n/measfmt.cpp" -"${ICU_SOURCE_DIR}/i18n/currfmt.cpp" -"${ICU_SOURCE_DIR}/i18n/curramt.cpp" -"${ICU_SOURCE_DIR}/i18n/currunit.cpp" -"${ICU_SOURCE_DIR}/i18n/measure.cpp" -"${ICU_SOURCE_DIR}/i18n/utmscale.cpp" "${ICU_SOURCE_DIR}/i18n/csdetect.cpp" "${ICU_SOURCE_DIR}/i18n/csmatch.cpp" "${ICU_SOURCE_DIR}/i18n/csr2022.cpp" @@ -349,60 +262,67 @@ set(ICUI18N_SOURCES "${ICU_SOURCE_DIR}/i18n/csrsbcs.cpp" "${ICU_SOURCE_DIR}/i18n/csrucode.cpp" "${ICU_SOURCE_DIR}/i18n/csrutf8.cpp" -"${ICU_SOURCE_DIR}/i18n/inputext.cpp" -"${ICU_SOURCE_DIR}/i18n/wintzimpl.cpp" -"${ICU_SOURCE_DIR}/i18n/windtfmt.cpp" -"${ICU_SOURCE_DIR}/i18n/winnmfmt.cpp" -"${ICU_SOURCE_DIR}/i18n/basictz.cpp" -"${ICU_SOURCE_DIR}/i18n/dtrule.cpp" -"${ICU_SOURCE_DIR}/i18n/rbtz.cpp" -"${ICU_SOURCE_DIR}/i18n/tzrule.cpp" -"${ICU_SOURCE_DIR}/i18n/tztrans.cpp" -"${ICU_SOURCE_DIR}/i18n/vtzone.cpp" -"${ICU_SOURCE_DIR}/i18n/zonemeta.cpp" -"${ICU_SOURCE_DIR}/i18n/standardplural.cpp" -"${ICU_SOURCE_DIR}/i18n/upluralrules.cpp" -"${ICU_SOURCE_DIR}/i18n/plurrule.cpp" -"${ICU_SOURCE_DIR}/i18n/plurfmt.cpp" -"${ICU_SOURCE_DIR}/i18n/selfmt.cpp" +"${ICU_SOURCE_DIR}/i18n/curramt.cpp" +"${ICU_SOURCE_DIR}/i18n/currfmt.cpp" +"${ICU_SOURCE_DIR}/i18n/currpinf.cpp" +"${ICU_SOURCE_DIR}/i18n/currunit.cpp" +"${ICU_SOURCE_DIR}/i18n/dangical.cpp" +"${ICU_SOURCE_DIR}/i18n/datefmt.cpp" 
+"${ICU_SOURCE_DIR}/i18n/dayperiodrules.cpp" +"${ICU_SOURCE_DIR}/i18n/dcfmtsym.cpp" +"${ICU_SOURCE_DIR}/i18n/decContext.cpp" +"${ICU_SOURCE_DIR}/i18n/decNumber.cpp" +"${ICU_SOURCE_DIR}/i18n/decimfmt.cpp" +"${ICU_SOURCE_DIR}/i18n/double-conversion-bignum-dtoa.cpp" +"${ICU_SOURCE_DIR}/i18n/double-conversion-bignum.cpp" +"${ICU_SOURCE_DIR}/i18n/double-conversion-cached-powers.cpp" +"${ICU_SOURCE_DIR}/i18n/double-conversion-double-to-string.cpp" +"${ICU_SOURCE_DIR}/i18n/double-conversion-fast-dtoa.cpp" +"${ICU_SOURCE_DIR}/i18n/double-conversion-string-to-double.cpp" +"${ICU_SOURCE_DIR}/i18n/double-conversion-strtod.cpp" +"${ICU_SOURCE_DIR}/i18n/dtfmtsym.cpp" "${ICU_SOURCE_DIR}/i18n/dtitvfmt.cpp" "${ICU_SOURCE_DIR}/i18n/dtitvinf.cpp" -"${ICU_SOURCE_DIR}/i18n/udateintervalformat.cpp" -"${ICU_SOURCE_DIR}/i18n/tmunit.cpp" -"${ICU_SOURCE_DIR}/i18n/tmutamt.cpp" -"${ICU_SOURCE_DIR}/i18n/tmutfmt.cpp" -"${ICU_SOURCE_DIR}/i18n/currpinf.cpp" -"${ICU_SOURCE_DIR}/i18n/uspoof.cpp" -"${ICU_SOURCE_DIR}/i18n/uspoof_impl.cpp" -"${ICU_SOURCE_DIR}/i18n/uspoof_build.cpp" -"${ICU_SOURCE_DIR}/i18n/uspoof_conf.cpp" -"${ICU_SOURCE_DIR}/i18n/smpdtfst.cpp" -"${ICU_SOURCE_DIR}/i18n/ztrans.cpp" -"${ICU_SOURCE_DIR}/i18n/zrule.cpp" -"${ICU_SOURCE_DIR}/i18n/vzone.cpp" +"${ICU_SOURCE_DIR}/i18n/dtptngen.cpp" +"${ICU_SOURCE_DIR}/i18n/dtrule.cpp" +"${ICU_SOURCE_DIR}/i18n/erarules.cpp" +"${ICU_SOURCE_DIR}/i18n/esctrn.cpp" +"${ICU_SOURCE_DIR}/i18n/ethpccal.cpp" +"${ICU_SOURCE_DIR}/i18n/fmtable.cpp" +"${ICU_SOURCE_DIR}/i18n/fmtable_cnv.cpp" +"${ICU_SOURCE_DIR}/i18n/format.cpp" +"${ICU_SOURCE_DIR}/i18n/formatted_string_builder.cpp" +"${ICU_SOURCE_DIR}/i18n/formattedval_iterimpl.cpp" +"${ICU_SOURCE_DIR}/i18n/formattedval_sbimpl.cpp" +"${ICU_SOURCE_DIR}/i18n/formattedvalue.cpp" "${ICU_SOURCE_DIR}/i18n/fphdlimp.cpp" "${ICU_SOURCE_DIR}/i18n/fpositer.cpp" -"${ICU_SOURCE_DIR}/i18n/ufieldpositer.cpp" -"${ICU_SOURCE_DIR}/i18n/decNumber.cpp" -"${ICU_SOURCE_DIR}/i18n/decContext.cpp" -"${ICU_SOURCE_DIR}/i18n/alphaindex.cpp" -"${ICU_SOURCE_DIR}/i18n/tznames.cpp" -"${ICU_SOURCE_DIR}/i18n/tznames_impl.cpp" -"${ICU_SOURCE_DIR}/i18n/tzgnames.cpp" -"${ICU_SOURCE_DIR}/i18n/tzfmt.cpp" -"${ICU_SOURCE_DIR}/i18n/compactdecimalformat.cpp" +"${ICU_SOURCE_DIR}/i18n/funcrepl.cpp" "${ICU_SOURCE_DIR}/i18n/gender.cpp" -"${ICU_SOURCE_DIR}/i18n/region.cpp" -"${ICU_SOURCE_DIR}/i18n/scriptset.cpp" -"${ICU_SOURCE_DIR}/i18n/uregion.cpp" -"${ICU_SOURCE_DIR}/i18n/reldatefmt.cpp" -"${ICU_SOURCE_DIR}/i18n/quantityformatter.cpp" +"${ICU_SOURCE_DIR}/i18n/gregocal.cpp" +"${ICU_SOURCE_DIR}/i18n/gregoimp.cpp" +"${ICU_SOURCE_DIR}/i18n/hebrwcal.cpp" +"${ICU_SOURCE_DIR}/i18n/indiancal.cpp" +"${ICU_SOURCE_DIR}/i18n/inputext.cpp" +"${ICU_SOURCE_DIR}/i18n/islamcal.cpp" +"${ICU_SOURCE_DIR}/i18n/japancal.cpp" +"${ICU_SOURCE_DIR}/i18n/listformatter.cpp" +"${ICU_SOURCE_DIR}/i18n/measfmt.cpp" "${ICU_SOURCE_DIR}/i18n/measunit.cpp" -"${ICU_SOURCE_DIR}/i18n/sharedbreakiterator.cpp" -"${ICU_SOURCE_DIR}/i18n/scientificnumberformatter.cpp" -"${ICU_SOURCE_DIR}/i18n/dayperiodrules.cpp" +"${ICU_SOURCE_DIR}/i18n/measunit_extra.cpp" +"${ICU_SOURCE_DIR}/i18n/measure.cpp" +"${ICU_SOURCE_DIR}/i18n/msgfmt.cpp" +"${ICU_SOURCE_DIR}/i18n/name2uni.cpp" +"${ICU_SOURCE_DIR}/i18n/nfrs.cpp" +"${ICU_SOURCE_DIR}/i18n/nfrule.cpp" +"${ICU_SOURCE_DIR}/i18n/nfsubs.cpp" +"${ICU_SOURCE_DIR}/i18n/nortrans.cpp" +"${ICU_SOURCE_DIR}/i18n/nultrans.cpp" "${ICU_SOURCE_DIR}/i18n/number_affixutils.cpp" +"${ICU_SOURCE_DIR}/i18n/number_asformat.cpp" +"${ICU_SOURCE_DIR}/i18n/number_capi.cpp" 
"${ICU_SOURCE_DIR}/i18n/number_compact.cpp" +"${ICU_SOURCE_DIR}/i18n/number_currencysymbols.cpp" "${ICU_SOURCE_DIR}/i18n/number_decimalquantity.cpp" "${ICU_SOURCE_DIR}/i18n/number_decimfmtprops.cpp" "${ICU_SOURCE_DIR}/i18n/number_fluent.cpp" @@ -410,7 +330,9 @@ set(ICUI18N_SOURCES "${ICU_SOURCE_DIR}/i18n/number_grouping.cpp" "${ICU_SOURCE_DIR}/i18n/number_integerwidth.cpp" "${ICU_SOURCE_DIR}/i18n/number_longnames.cpp" +"${ICU_SOURCE_DIR}/i18n/number_mapper.cpp" "${ICU_SOURCE_DIR}/i18n/number_modifiers.cpp" +"${ICU_SOURCE_DIR}/i18n/number_multiplier.cpp" "${ICU_SOURCE_DIR}/i18n/number_notation.cpp" "${ICU_SOURCE_DIR}/i18n/number_output.cpp" "${ICU_SOURCE_DIR}/i18n/number_padding.cpp" @@ -418,46 +340,124 @@ set(ICUI18N_SOURCES "${ICU_SOURCE_DIR}/i18n/number_patternstring.cpp" "${ICU_SOURCE_DIR}/i18n/number_rounding.cpp" "${ICU_SOURCE_DIR}/i18n/number_scientific.cpp" -"${ICU_SOURCE_DIR}/i18n/number_utils.cpp" -"${ICU_SOURCE_DIR}/i18n/number_asformat.cpp" -"${ICU_SOURCE_DIR}/i18n/number_mapper.cpp" -"${ICU_SOURCE_DIR}/i18n/number_multiplier.cpp" -"${ICU_SOURCE_DIR}/i18n/number_currencysymbols.cpp" "${ICU_SOURCE_DIR}/i18n/number_skeletons.cpp" -"${ICU_SOURCE_DIR}/i18n/number_capi.cpp" -"${ICU_SOURCE_DIR}/i18n/double-conversion-string-to-double.cpp" -"${ICU_SOURCE_DIR}/i18n/double-conversion-double-to-string.cpp" -"${ICU_SOURCE_DIR}/i18n/double-conversion-bignum-dtoa.cpp" -"${ICU_SOURCE_DIR}/i18n/double-conversion-bignum.cpp" -"${ICU_SOURCE_DIR}/i18n/double-conversion-cached-powers.cpp" -"${ICU_SOURCE_DIR}/i18n/double-conversion-fast-dtoa.cpp" -"${ICU_SOURCE_DIR}/i18n/double-conversion-strtod.cpp" -"${ICU_SOURCE_DIR}/i18n/string_segment.cpp" -"${ICU_SOURCE_DIR}/i18n/numparse_parsednumber.cpp" -"${ICU_SOURCE_DIR}/i18n/numparse_impl.cpp" -"${ICU_SOURCE_DIR}/i18n/numparse_symbols.cpp" -"${ICU_SOURCE_DIR}/i18n/numparse_decimal.cpp" -"${ICU_SOURCE_DIR}/i18n/numparse_scientific.cpp" -"${ICU_SOURCE_DIR}/i18n/numparse_currency.cpp" -"${ICU_SOURCE_DIR}/i18n/numparse_affixes.cpp" -"${ICU_SOURCE_DIR}/i18n/numparse_compositions.cpp" -"${ICU_SOURCE_DIR}/i18n/numparse_validators.cpp" -"${ICU_SOURCE_DIR}/i18n/numrange_fluent.cpp" -"${ICU_SOURCE_DIR}/i18n/numrange_impl.cpp" -"${ICU_SOURCE_DIR}/i18n/erarules.cpp" -"${ICU_SOURCE_DIR}/i18n/formattedvalue.cpp" -"${ICU_SOURCE_DIR}/i18n/formattedval_iterimpl.cpp" -"${ICU_SOURCE_DIR}/i18n/formattedval_sbimpl.cpp" -"${ICU_SOURCE_DIR}/i18n/formatted_string_builder.cpp" -"${ICU_SOURCE_DIR}/i18n/measunit_extra.cpp" "${ICU_SOURCE_DIR}/i18n/number_symbolswrapper.cpp" "${ICU_SOURCE_DIR}/i18n/number_usageprefs.cpp" +"${ICU_SOURCE_DIR}/i18n/number_utils.cpp" +"${ICU_SOURCE_DIR}/i18n/numfmt.cpp" +"${ICU_SOURCE_DIR}/i18n/numparse_affixes.cpp" +"${ICU_SOURCE_DIR}/i18n/numparse_compositions.cpp" +"${ICU_SOURCE_DIR}/i18n/numparse_currency.cpp" +"${ICU_SOURCE_DIR}/i18n/numparse_decimal.cpp" +"${ICU_SOURCE_DIR}/i18n/numparse_impl.cpp" +"${ICU_SOURCE_DIR}/i18n/numparse_parsednumber.cpp" +"${ICU_SOURCE_DIR}/i18n/numparse_scientific.cpp" +"${ICU_SOURCE_DIR}/i18n/numparse_symbols.cpp" +"${ICU_SOURCE_DIR}/i18n/numparse_validators.cpp" "${ICU_SOURCE_DIR}/i18n/numrange_capi.cpp" +"${ICU_SOURCE_DIR}/i18n/numrange_fluent.cpp" +"${ICU_SOURCE_DIR}/i18n/numrange_impl.cpp" +"${ICU_SOURCE_DIR}/i18n/numsys.cpp" +"${ICU_SOURCE_DIR}/i18n/olsontz.cpp" +"${ICU_SOURCE_DIR}/i18n/persncal.cpp" "${ICU_SOURCE_DIR}/i18n/pluralranges.cpp" +"${ICU_SOURCE_DIR}/i18n/plurfmt.cpp" +"${ICU_SOURCE_DIR}/i18n/plurrule.cpp" +"${ICU_SOURCE_DIR}/i18n/quant.cpp" +"${ICU_SOURCE_DIR}/i18n/quantityformatter.cpp" 
+"${ICU_SOURCE_DIR}/i18n/rbnf.cpp" +"${ICU_SOURCE_DIR}/i18n/rbt.cpp" +"${ICU_SOURCE_DIR}/i18n/rbt_data.cpp" +"${ICU_SOURCE_DIR}/i18n/rbt_pars.cpp" +"${ICU_SOURCE_DIR}/i18n/rbt_rule.cpp" +"${ICU_SOURCE_DIR}/i18n/rbt_set.cpp" +"${ICU_SOURCE_DIR}/i18n/rbtz.cpp" +"${ICU_SOURCE_DIR}/i18n/regexcmp.cpp" +"${ICU_SOURCE_DIR}/i18n/regeximp.cpp" +"${ICU_SOURCE_DIR}/i18n/regexst.cpp" +"${ICU_SOURCE_DIR}/i18n/regextxt.cpp" +"${ICU_SOURCE_DIR}/i18n/region.cpp" +"${ICU_SOURCE_DIR}/i18n/reldatefmt.cpp" +"${ICU_SOURCE_DIR}/i18n/reldtfmt.cpp" +"${ICU_SOURCE_DIR}/i18n/rematch.cpp" +"${ICU_SOURCE_DIR}/i18n/remtrans.cpp" +"${ICU_SOURCE_DIR}/i18n/repattrn.cpp" +"${ICU_SOURCE_DIR}/i18n/rulebasedcollator.cpp" +"${ICU_SOURCE_DIR}/i18n/scientificnumberformatter.cpp" +"${ICU_SOURCE_DIR}/i18n/scriptset.cpp" +"${ICU_SOURCE_DIR}/i18n/search.cpp" +"${ICU_SOURCE_DIR}/i18n/selfmt.cpp" +"${ICU_SOURCE_DIR}/i18n/sharedbreakiterator.cpp" +"${ICU_SOURCE_DIR}/i18n/simpletz.cpp" +"${ICU_SOURCE_DIR}/i18n/smpdtfmt.cpp" +"${ICU_SOURCE_DIR}/i18n/smpdtfst.cpp" +"${ICU_SOURCE_DIR}/i18n/sortkey.cpp" +"${ICU_SOURCE_DIR}/i18n/standardplural.cpp" +"${ICU_SOURCE_DIR}/i18n/string_segment.cpp" +"${ICU_SOURCE_DIR}/i18n/strmatch.cpp" +"${ICU_SOURCE_DIR}/i18n/strrepl.cpp" +"${ICU_SOURCE_DIR}/i18n/stsearch.cpp" +"${ICU_SOURCE_DIR}/i18n/taiwncal.cpp" +"${ICU_SOURCE_DIR}/i18n/timezone.cpp" +"${ICU_SOURCE_DIR}/i18n/titletrn.cpp" +"${ICU_SOURCE_DIR}/i18n/tmunit.cpp" +"${ICU_SOURCE_DIR}/i18n/tmutamt.cpp" +"${ICU_SOURCE_DIR}/i18n/tmutfmt.cpp" +"${ICU_SOURCE_DIR}/i18n/tolowtrn.cpp" +"${ICU_SOURCE_DIR}/i18n/toupptrn.cpp" +"${ICU_SOURCE_DIR}/i18n/translit.cpp" +"${ICU_SOURCE_DIR}/i18n/transreg.cpp" +"${ICU_SOURCE_DIR}/i18n/tridpars.cpp" +"${ICU_SOURCE_DIR}/i18n/tzfmt.cpp" +"${ICU_SOURCE_DIR}/i18n/tzgnames.cpp" +"${ICU_SOURCE_DIR}/i18n/tznames.cpp" +"${ICU_SOURCE_DIR}/i18n/tznames_impl.cpp" +"${ICU_SOURCE_DIR}/i18n/tzrule.cpp" +"${ICU_SOURCE_DIR}/i18n/tztrans.cpp" +"${ICU_SOURCE_DIR}/i18n/ucal.cpp" +"${ICU_SOURCE_DIR}/i18n/ucln_in.cpp" +"${ICU_SOURCE_DIR}/i18n/ucol.cpp" +"${ICU_SOURCE_DIR}/i18n/ucol_res.cpp" +"${ICU_SOURCE_DIR}/i18n/ucol_sit.cpp" +"${ICU_SOURCE_DIR}/i18n/ucoleitr.cpp" +"${ICU_SOURCE_DIR}/i18n/ucsdet.cpp" +"${ICU_SOURCE_DIR}/i18n/udat.cpp" +"${ICU_SOURCE_DIR}/i18n/udateintervalformat.cpp" +"${ICU_SOURCE_DIR}/i18n/udatpg.cpp" +"${ICU_SOURCE_DIR}/i18n/ufieldpositer.cpp" +"${ICU_SOURCE_DIR}/i18n/uitercollationiterator.cpp" +"${ICU_SOURCE_DIR}/i18n/ulistformatter.cpp" +"${ICU_SOURCE_DIR}/i18n/ulocdata.cpp" +"${ICU_SOURCE_DIR}/i18n/umsg.cpp" +"${ICU_SOURCE_DIR}/i18n/unesctrn.cpp" +"${ICU_SOURCE_DIR}/i18n/uni2name.cpp" "${ICU_SOURCE_DIR}/i18n/units_complexconverter.cpp" "${ICU_SOURCE_DIR}/i18n/units_converter.cpp" "${ICU_SOURCE_DIR}/i18n/units_data.cpp" -"${ICU_SOURCE_DIR}/i18n/units_router.cpp") +"${ICU_SOURCE_DIR}/i18n/units_router.cpp" +"${ICU_SOURCE_DIR}/i18n/unum.cpp" +"${ICU_SOURCE_DIR}/i18n/unumsys.cpp" +"${ICU_SOURCE_DIR}/i18n/upluralrules.cpp" +"${ICU_SOURCE_DIR}/i18n/uregex.cpp" +"${ICU_SOURCE_DIR}/i18n/uregexc.cpp" +"${ICU_SOURCE_DIR}/i18n/uregion.cpp" +"${ICU_SOURCE_DIR}/i18n/usearch.cpp" +"${ICU_SOURCE_DIR}/i18n/uspoof.cpp" +"${ICU_SOURCE_DIR}/i18n/uspoof_build.cpp" +"${ICU_SOURCE_DIR}/i18n/uspoof_conf.cpp" +"${ICU_SOURCE_DIR}/i18n/uspoof_impl.cpp" +"${ICU_SOURCE_DIR}/i18n/utf16collationiterator.cpp" +"${ICU_SOURCE_DIR}/i18n/utf8collationiterator.cpp" +"${ICU_SOURCE_DIR}/i18n/utmscale.cpp" +"${ICU_SOURCE_DIR}/i18n/utrans.cpp" +"${ICU_SOURCE_DIR}/i18n/vtzone.cpp" +"${ICU_SOURCE_DIR}/i18n/vzone.cpp" 
+"${ICU_SOURCE_DIR}/i18n/windtfmt.cpp" +"${ICU_SOURCE_DIR}/i18n/winnmfmt.cpp" +"${ICU_SOURCE_DIR}/i18n/wintzimpl.cpp" +"${ICU_SOURCE_DIR}/i18n/zonemeta.cpp" +"${ICU_SOURCE_DIR}/i18n/zrule.cpp" +"${ICU_SOURCE_DIR}/i18n/ztrans.cpp") file(GENERATE OUTPUT "${CMAKE_CURRENT_BINARY_DIR}/empty.cpp" CONTENT " ") enable_language(ASM) From 6f03c51368e950ca5bc734616fd9a55a65504345 Mon Sep 17 00:00:00 2001 From: Robert Schulze Date: Wed, 4 Sep 2024 09:40:45 +0000 Subject: [PATCH 47/70] Update --- contrib/icu-cmake/CMakeLists.txt | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/contrib/icu-cmake/CMakeLists.txt b/contrib/icu-cmake/CMakeLists.txt index 9d6e9ac0a62..afaa189701d 100644 --- a/contrib/icu-cmake/CMakeLists.txt +++ b/contrib/icu-cmake/CMakeLists.txt @@ -273,6 +273,7 @@ set(ICUI18N_SOURCES "${ICU_SOURCE_DIR}/i18n/decContext.cpp" "${ICU_SOURCE_DIR}/i18n/decNumber.cpp" "${ICU_SOURCE_DIR}/i18n/decimfmt.cpp" +"${ICU_SOURCE_DIR}/i18n/displayoptions.cpp" "${ICU_SOURCE_DIR}/i18n/double-conversion-bignum-dtoa.cpp" "${ICU_SOURCE_DIR}/i18n/double-conversion-bignum.cpp" "${ICU_SOURCE_DIR}/i18n/double-conversion-cached-powers.cpp" @@ -305,12 +306,24 @@ set(ICUI18N_SOURCES "${ICU_SOURCE_DIR}/i18n/indiancal.cpp" "${ICU_SOURCE_DIR}/i18n/inputext.cpp" "${ICU_SOURCE_DIR}/i18n/islamcal.cpp" +"${ICU_SOURCE_DIR}/i18n/iso8601cal.cpp" "${ICU_SOURCE_DIR}/i18n/japancal.cpp" "${ICU_SOURCE_DIR}/i18n/listformatter.cpp" "${ICU_SOURCE_DIR}/i18n/measfmt.cpp" "${ICU_SOURCE_DIR}/i18n/measunit.cpp" "${ICU_SOURCE_DIR}/i18n/measunit_extra.cpp" "${ICU_SOURCE_DIR}/i18n/measure.cpp" +"${ICU_SOURCE_DIR}/i18n/messageformat2.cpp" +"${ICU_SOURCE_DIR}/i18n/messageformat2_arguments.cpp" +"${ICU_SOURCE_DIR}/i18n/messageformat2_checker.cpp" +"${ICU_SOURCE_DIR}/i18n/messageformat2_data_model.cpp" +"${ICU_SOURCE_DIR}/i18n/messageformat2_errors.cpp" +"${ICU_SOURCE_DIR}/i18n/messageformat2_evaluation.cpp" +"${ICU_SOURCE_DIR}/i18n/messageformat2_formattable.cpp" +"${ICU_SOURCE_DIR}/i18n/messageformat2_formatter.cpp" +"${ICU_SOURCE_DIR}/i18n/messageformat2_function_registry.cpp" +"${ICU_SOURCE_DIR}/i18n/messageformat2_parser.cpp" +"${ICU_SOURCE_DIR}/i18n/messageformat2_serializer.cpp" "${ICU_SOURCE_DIR}/i18n/msgfmt.cpp" "${ICU_SOURCE_DIR}/i18n/name2uni.cpp" "${ICU_SOURCE_DIR}/i18n/nfrs.cpp" @@ -340,6 +353,7 @@ set(ICUI18N_SOURCES "${ICU_SOURCE_DIR}/i18n/number_patternstring.cpp" "${ICU_SOURCE_DIR}/i18n/number_rounding.cpp" "${ICU_SOURCE_DIR}/i18n/number_scientific.cpp" +"${ICU_SOURCE_DIR}/i18n/number_simple.cpp" "${ICU_SOURCE_DIR}/i18n/number_skeletons.cpp" "${ICU_SOURCE_DIR}/i18n/number_symbolswrapper.cpp" "${ICU_SOURCE_DIR}/i18n/number_usageprefs.cpp" From 35ab639d1a602724204286f4b43460770f058272 Mon Sep 17 00:00:00 2001 From: Alexander Tokmakov Date: Tue, 3 Sep 2024 23:18:52 +0200 Subject: [PATCH 48/70] fix --- src/Core/SettingsChangesHistory.cpp | 263 +----------------- src/Databases/DatabaseReplicated.cpp | 3 +- src/Interpreters/InterpreterCreateQuery.cpp | 5 +- .../configs/settings.xml | 1 + .../configs/settings2.xml | 1 + 5 files changed, 10 insertions(+), 263 deletions(-) diff --git a/src/Core/SettingsChangesHistory.cpp b/src/Core/SettingsChangesHistory.cpp index 2e89cb85682..8e92f848bdd 100644 --- a/src/Core/SettingsChangesHistory.cpp +++ b/src/Core/SettingsChangesHistory.cpp @@ -57,265 +57,6 @@ String ClickHouseVersion::toString() const /// Note: please check if the key already exists to prevent duplicate entries. 
static std::initializer_list> settings_changes_history_initializer = { - {"24.7", {{"output_format_parquet_write_page_index", false, true, "Add a possibility to write page index into parquet files."}, - {"output_format_binary_encode_types_in_binary_format", false, false, "Added new setting to allow to write type names in binary format in RowBinaryWithNamesAndTypes output format"}, - {"input_format_binary_decode_types_in_binary_format", false, false, "Added new setting to allow to read type names in binary format in RowBinaryWithNamesAndTypes input format"}, - {"output_format_native_encode_types_in_binary_format", false, false, "Added new setting to allow to write type names in binary format in Native output format"}, - {"input_format_native_decode_types_in_binary_format", false, false, "Added new setting to allow to read type names in binary format in Native output format"}, - {"read_in_order_use_buffering", false, true, "Use buffering before merging while reading in order of primary key"}, - {"optimize_functions_to_subcolumns", false, true, "Enable optimization by default"}, - {"enable_named_columns_in_function_tuple", false, true, "Generate named tuples in function tuple() when all names are unique and can be treated as unquoted identifiers."}, - {"input_format_json_ignore_key_case", false, false, "Ignore json key case while read json field from string."}, - {"optimize_trivial_insert_select", true, false, "The optimization does not make sense in many cases."}, - {"collect_hash_table_stats_during_joins", false, true, "New setting."}, - {"max_size_to_preallocate_for_joins", 0, 100'000'000, "New setting."}, - {"input_format_orc_read_use_writer_time_zone", false, false, "Whether use the writer's time zone in ORC stripe for ORC row reader, the default ORC row reader's time zone is GMT."}, - {"lightweight_mutation_projection_mode", "throw", "throw", "When lightweight delete happens on a table with projection(s), the possible operations include throw the exception as projection exists, or drop all projection related to this table then do lightweight delete."}, - {"database_replicated_allow_heavy_create", true, false, "Long-running DDL queries (CREATE AS SELECT and POPULATE) for Replicated database engine was forbidden"}, - {"database_replicated_allow_replicated_engine_arguments", 1, 0, "Don't allow explicit arguments by default"}, - {"database_replicated_allow_explicit_uuid", 0, 0, "Added a new setting to disallow explicitly specifying table UUID"}, - {"query_plan_merge_filters", false, false, "Allow to merge filters in the query plan"}, - {"azure_sdk_max_retries", 10, 10, "Maximum number of retries in azure sdk"}, - {"azure_sdk_retry_initial_backoff_ms", 10, 10, "Minimal backoff between retries in azure sdk"}, - {"azure_sdk_retry_max_backoff_ms", 1000, 1000, "Maximal backoff between retries in azure sdk"}, - {"ignore_on_cluster_for_replicated_named_collections_queries", false, false, "Ignore ON CLUSTER clause for replicated named collections management queries."}, - {"postgresql_connection_attempt_timeout", 2, 2, "Allow to control 'connect_timeout' parameter of PostgreSQL connection."}, - {"postgresql_connection_pool_retries", 2, 2, "Allow to control the number of retries in PostgreSQL connection pool."} - }}, - {"24.6", {{"materialize_skip_indexes_on_insert", true, true, "Added new setting to allow to disable materialization of skip indexes on insert"}, - {"materialize_statistics_on_insert", true, true, "Added new setting to allow to disable materialization of statistics on insert"}, - 
{"input_format_parquet_use_native_reader", false, false, "When reading Parquet files, to use native reader instead of arrow reader."}, - {"hdfs_throw_on_zero_files_match", false, false, "Allow to throw an error when ListObjects request cannot match any files in HDFS engine instead of empty query result"}, - {"azure_throw_on_zero_files_match", false, false, "Allow to throw an error when ListObjects request cannot match any files in AzureBlobStorage engine instead of empty query result"}, - {"s3_validate_request_settings", true, true, "Allow to disable S3 request settings validation"}, - {"allow_experimental_full_text_index", false, false, "Enable experimental full-text index"}, - {"azure_skip_empty_files", false, false, "Allow to skip empty files in azure table engine"}, - {"hdfs_ignore_file_doesnt_exist", false, false, "Allow to return 0 rows when the requested files don't exist instead of throwing an exception in HDFS table engine"}, - {"azure_ignore_file_doesnt_exist", false, false, "Allow to return 0 rows when the requested files don't exist instead of throwing an exception in AzureBlobStorage table engine"}, - {"s3_ignore_file_doesnt_exist", false, false, "Allow to return 0 rows when the requested files don't exist instead of throwing an exception in S3 table engine"}, - {"s3_max_part_number", 10000, 10000, "Maximum part number number for s3 upload part"}, - {"s3_max_single_operation_copy_size", 32 * 1024 * 1024, 32 * 1024 * 1024, "Maximum size for a single copy operation in s3"}, - {"input_format_parquet_max_block_size", 8192, DEFAULT_BLOCK_SIZE, "Increase block size for parquet reader."}, - {"input_format_parquet_prefer_block_bytes", 0, DEFAULT_BLOCK_SIZE * 256, "Average block bytes output by parquet reader."}, - {"enable_blob_storage_log", true, true, "Write information about blob storage operations to system.blob_storage_log table"}, - {"allow_deprecated_snowflake_conversion_functions", true, false, "Disabled deprecated functions snowflakeToDateTime[64] and dateTime[64]ToSnowflake."}, - {"allow_statistic_optimize", false, false, "Old setting which popped up here being renamed."}, - {"allow_experimental_statistic", false, false, "Old setting which popped up here being renamed."}, - {"allow_statistics_optimize", false, false, "The setting was renamed. The previous name is `allow_statistic_optimize`."}, - {"allow_experimental_statistics", false, false, "The setting was renamed. The previous name is `allow_experimental_statistic`."}, - {"enable_vertical_final", false, true, "Enable vertical final by default again after fixing bug"}, - {"parallel_replicas_custom_key_range_lower", 0, 0, "Add settings to control the range filter when using parallel replicas with dynamic shards"}, - {"parallel_replicas_custom_key_range_upper", 0, 0, "Add settings to control the range filter when using parallel replicas with dynamic shards. A value of 0 disables the upper limit"}, - {"output_format_pretty_display_footer_column_names", 0, 1, "Add a setting to display column names in the footer if there are many rows. Threshold value is controlled by output_format_pretty_display_footer_column_names_min_rows."}, - {"output_format_pretty_display_footer_column_names_min_rows", 0, 50, "Add a setting to control the threshold value for setting output_format_pretty_display_footer_column_names_min_rows. 
Default 50."}, - {"output_format_csv_serialize_tuple_into_separate_columns", true, true, "A new way of how interpret tuples in CSV format was added."}, - {"input_format_csv_deserialize_separate_columns_into_tuple", true, true, "A new way of how interpret tuples in CSV format was added."}, - {"input_format_csv_try_infer_strings_from_quoted_tuples", true, true, "A new way of how interpret tuples in CSV format was added."}, - }}, - {"24.5", {{"allow_deprecated_error_prone_window_functions", true, false, "Allow usage of deprecated error prone window functions (neighbor, runningAccumulate, runningDifferenceStartingWithFirstValue, runningDifference)"}, - {"allow_experimental_join_condition", false, false, "Support join with inequal conditions which involve columns from both left and right table. e.g. t1.y < t2.y."}, - {"input_format_tsv_crlf_end_of_line", false, false, "Enables reading of CRLF line endings with TSV formats"}, - {"output_format_parquet_use_custom_encoder", false, true, "Enable custom Parquet encoder."}, - {"cross_join_min_rows_to_compress", 0, 10000000, "Minimal count of rows to compress block in CROSS JOIN. Zero value means - disable this threshold. This block is compressed when any of the two thresholds (by rows or by bytes) are reached."}, - {"cross_join_min_bytes_to_compress", 0, 1_GiB, "Minimal size of block to compress in CROSS JOIN. Zero value means - disable this threshold. This block is compressed when any of the two thresholds (by rows or by bytes) are reached."}, - {"http_max_chunk_size", 0, 0, "Internal limitation"}, - {"prefer_external_sort_block_bytes", 0, DEFAULT_BLOCK_SIZE * 256, "Prefer maximum block bytes for external sort, reduce the memory usage during merging."}, - {"input_format_force_null_for_omitted_fields", false, false, "Disable type-defaults for omitted fields when needed"}, - {"cast_string_to_dynamic_use_inference", false, false, "Add setting to allow converting String to Dynamic through parsing"}, - {"allow_experimental_dynamic_type", false, false, "Add new experimental Dynamic type"}, - {"azure_max_blocks_in_multipart_upload", 50000, 50000, "Maximum number of blocks in multipart upload for Azure."}, - }}, - {"24.4", {{"input_format_json_throw_on_bad_escape_sequence", true, true, "Allow to save JSON strings with bad escape sequences"}, - {"max_parsing_threads", 0, 0, "Add a separate setting to control number of threads in parallel parsing from files"}, - {"ignore_drop_queries_probability", 0, 0, "Allow to ignore drop queries in server with specified probability for testing purposes"}, - {"lightweight_deletes_sync", 2, 2, "The same as 'mutation_sync', but controls only execution of lightweight deletes"}, - {"query_cache_system_table_handling", "save", "throw", "The query cache no longer caches results of queries against system tables"}, - {"input_format_json_ignore_unnecessary_fields", false, true, "Ignore unnecessary fields and not parse them. 
Enabling this may not throw exceptions on json strings of invalid format or with duplicated fields"}, - {"input_format_hive_text_allow_variable_number_of_columns", false, true, "Ignore extra columns in Hive Text input (if file has more columns than expected) and treat missing fields in Hive Text input as default values."}, - {"allow_experimental_database_replicated", false, true, "Database engine Replicated is now in Beta stage"}, - {"temporary_data_in_cache_reserve_space_wait_lock_timeout_milliseconds", (10 * 60 * 1000), (10 * 60 * 1000), "Wait time to lock cache for sapce reservation in temporary data in filesystem cache"}, - {"optimize_rewrite_sum_if_to_count_if", false, true, "Only available for the analyzer, where it works correctly"}, - {"azure_allow_parallel_part_upload", "true", "true", "Use multiple threads for azure multipart upload."}, - {"max_recursive_cte_evaluation_depth", DBMS_RECURSIVE_CTE_MAX_EVALUATION_DEPTH, DBMS_RECURSIVE_CTE_MAX_EVALUATION_DEPTH, "Maximum limit on recursive CTE evaluation depth"}, - {"query_plan_convert_outer_join_to_inner_join", false, true, "Allow to convert OUTER JOIN to INNER JOIN if filter after JOIN always filters default values"}, - }}, - {"24.3", {{"s3_connect_timeout_ms", 1000, 1000, "Introduce new dedicated setting for s3 connection timeout"}, - {"allow_experimental_shared_merge_tree", false, true, "The setting is obsolete"}, - {"use_page_cache_for_disks_without_file_cache", false, false, "Added userspace page cache"}, - {"read_from_page_cache_if_exists_otherwise_bypass_cache", false, false, "Added userspace page cache"}, - {"page_cache_inject_eviction", false, false, "Added userspace page cache"}, - {"default_table_engine", "None", "MergeTree", "Set default table engine to MergeTree for better usability"}, - {"input_format_json_use_string_type_for_ambiguous_paths_in_named_tuples_inference_from_objects", false, false, "Allow to use String type for ambiguous paths during named tuple inference from JSON objects"}, - {"traverse_shadow_remote_data_paths", false, false, "Traverse shadow directory when query system.remote_data_paths."}, - {"throw_if_deduplication_in_dependent_materialized_views_enabled_with_async_insert", false, true, "Deduplication is dependent materialized view cannot work together with async inserts."}, - {"parallel_replicas_allow_in_with_subquery", false, true, "If true, subquery for IN will be executed on every follower replica"}, - {"log_processors_profiles", false, true, "Enable by default"}, - {"function_locate_has_mysql_compatible_argument_order", false, true, "Increase compatibility with MySQL's locate function."}, - {"allow_suspicious_primary_key", true, false, "Forbid suspicious PRIMARY KEY/ORDER BY for MergeTree (i.e. 
SimpleAggregateFunction)"}, - {"filesystem_cache_reserve_space_wait_lock_timeout_milliseconds", 1000, 1000, "Wait time to lock cache for sapce reservation in filesystem cache"}, - {"max_parser_backtracks", 0, 1000000, "Limiting the complexity of parsing"}, - {"analyzer_compatibility_join_using_top_level_identifier", false, false, "Force to resolve identifier in JOIN USING from projection"}, - {"distributed_insert_skip_read_only_replicas", false, false, "If true, INSERT into Distributed will skip read-only replicas"}, - {"keeper_max_retries", 10, 10, "Max retries for general keeper operations"}, - {"keeper_retry_initial_backoff_ms", 100, 100, "Initial backoff timeout for general keeper operations"}, - {"keeper_retry_max_backoff_ms", 5000, 5000, "Max backoff timeout for general keeper operations"}, - {"s3queue_allow_experimental_sharded_mode", false, false, "Enable experimental sharded mode of S3Queue table engine. It is experimental because it will be rewritten"}, - {"allow_experimental_analyzer", false, true, "Enable analyzer and planner by default."}, - {"merge_tree_read_split_ranges_into_intersecting_and_non_intersecting_injection_probability", 0.0, 0.0, "For testing of `PartsSplitter` - split read ranges into intersecting and non intersecting every time you read from MergeTree with the specified probability."}, - {"allow_get_client_http_header", false, false, "Introduced a new function."}, - {"output_format_pretty_row_numbers", false, true, "It is better for usability."}, - {"output_format_pretty_max_value_width_apply_for_single_value", true, false, "Single values in Pretty formats won't be cut."}, - {"output_format_parquet_string_as_string", false, true, "ClickHouse allows arbitrary binary data in the String data type, which is typically UTF-8. Parquet/ORC/Arrow Strings only support UTF-8. That's why you can choose which Arrow's data type to use for the ClickHouse String data type - String or Binary. While Binary would be more correct and compatible, using String by default will correspond to user expectations in most cases."}, - {"output_format_orc_string_as_string", false, true, "ClickHouse allows arbitrary binary data in the String data type, which is typically UTF-8. Parquet/ORC/Arrow Strings only support UTF-8. That's why you can choose which Arrow's data type to use for the ClickHouse String data type - String or Binary. While Binary would be more correct and compatible, using String by default will correspond to user expectations in most cases."}, - {"output_format_arrow_string_as_string", false, true, "ClickHouse allows arbitrary binary data in the String data type, which is typically UTF-8. Parquet/ORC/Arrow Strings only support UTF-8. That's why you can choose which Arrow's data type to use for the ClickHouse String data type - String or Binary. While Binary would be more correct and compatible, using String by default will correspond to user expectations in most cases."}, - {"output_format_parquet_compression_method", "lz4", "zstd", "Parquet/ORC/Arrow support many compression methods, including lz4 and zstd. ClickHouse supports each and every compression method. Some inferior tools, such as 'duckdb', lack support for the faster `lz4` compression method, that's why we set zstd by default."}, - {"output_format_orc_compression_method", "lz4", "zstd", "Parquet/ORC/Arrow support many compression methods, including lz4 and zstd. ClickHouse supports each and every compression method. 
Some inferior tools, such as 'duckdb', lack support for the faster `lz4` compression method, that's why we set zstd by default."}, - {"output_format_pretty_highlight_digit_groups", false, true, "If enabled and if output is a terminal, highlight every digit corresponding to the number of thousands, millions, etc. with underline."}, - {"geo_distance_returns_float64_on_float64_arguments", false, true, "Increase the default precision."}, - {"azure_max_inflight_parts_for_one_file", 20, 20, "The maximum number of a concurrent loaded parts in multipart upload request. 0 means unlimited."}, - {"azure_strict_upload_part_size", 0, 0, "The exact size of part to upload during multipart upload to Azure blob storage."}, - {"azure_min_upload_part_size", 16*1024*1024, 16*1024*1024, "The minimum size of part to upload during multipart upload to Azure blob storage."}, - {"azure_max_upload_part_size", 5ull*1024*1024*1024, 5ull*1024*1024*1024, "The maximum size of part to upload during multipart upload to Azure blob storage."}, - {"azure_upload_part_size_multiply_factor", 2, 2, "Multiply azure_min_upload_part_size by this factor each time azure_multiply_parts_count_threshold parts were uploaded from a single write to Azure blob storage."}, - {"azure_upload_part_size_multiply_parts_count_threshold", 500, 500, "Each time this number of parts was uploaded to Azure blob storage, azure_min_upload_part_size is multiplied by azure_upload_part_size_multiply_factor."}, - {"output_format_csv_serialize_tuple_into_separate_columns", true, true, "A new way of how interpret tuples in CSV format was added."}, - {"input_format_csv_deserialize_separate_columns_into_tuple", true, true, "A new way of how interpret tuples in CSV format was added."}, - {"input_format_csv_try_infer_strings_from_quoted_tuples", true, true, "A new way of how interpret tuples in CSV format was added."}, - }}, - {"24.2", {{"allow_suspicious_variant_types", true, false, "Don't allow creating Variant type with suspicious variants by default"}, - {"validate_experimental_and_suspicious_types_inside_nested_types", false, true, "Validate usage of experimental and suspicious types inside nested types"}, - {"output_format_values_escape_quote_with_quote", false, false, "If true escape ' with '', otherwise quoted with \\'"}, - {"output_format_pretty_single_large_number_tip_threshold", 0, 1'000'000, "Print a readable number tip on the right side of the table if the block consists of a single number which exceeds this value (except 0)"}, - {"input_format_try_infer_exponent_floats", true, false, "Don't infer floats in exponential notation by default"}, - {"query_plan_optimize_prewhere", true, true, "Allow to push down filter to PREWHERE expression for supported storages"}, - {"async_insert_max_data_size", 1000000, 10485760, "The previous value appeared to be too small."}, - {"async_insert_poll_timeout_ms", 10, 10, "Timeout in milliseconds for polling data from asynchronous insert queue"}, - {"async_insert_use_adaptive_busy_timeout", false, true, "Use adaptive asynchronous insert timeout"}, - {"async_insert_busy_timeout_min_ms", 50, 50, "The minimum value of the asynchronous insert timeout in milliseconds; it also serves as the initial value, which may be increased later by the adaptive algorithm"}, - {"async_insert_busy_timeout_max_ms", 200, 200, "The minimum value of the asynchronous insert timeout in milliseconds; async_insert_busy_timeout_ms is aliased to async_insert_busy_timeout_max_ms"}, - {"async_insert_busy_timeout_increase_rate", 0.2, 0.2, "The 
exponential growth rate at which the adaptive asynchronous insert timeout increases"}, - {"async_insert_busy_timeout_decrease_rate", 0.2, 0.2, "The exponential growth rate at which the adaptive asynchronous insert timeout decreases"}, - {"format_template_row_format", "", "", "Template row format string can be set directly in query"}, - {"format_template_resultset_format", "", "", "Template result set format string can be set in query"}, - {"split_parts_ranges_into_intersecting_and_non_intersecting_final", true, true, "Allow to split parts ranges into intersecting and non intersecting during FINAL optimization"}, - {"split_intersecting_parts_ranges_into_layers_final", true, true, "Allow to split intersecting parts ranges into layers during FINAL optimization"}, - {"azure_max_single_part_copy_size", 256*1024*1024, 256*1024*1024, "The maximum size of object to copy using single part copy to Azure blob storage."}, - {"min_external_table_block_size_rows", DEFAULT_INSERT_BLOCK_SIZE, DEFAULT_INSERT_BLOCK_SIZE, "Squash blocks passed to external table to specified size in rows, if blocks are not big enough"}, - {"min_external_table_block_size_bytes", DEFAULT_INSERT_BLOCK_SIZE * 256, DEFAULT_INSERT_BLOCK_SIZE * 256, "Squash blocks passed to external table to specified size in bytes, if blocks are not big enough."}, - {"parallel_replicas_prefer_local_join", true, true, "If true, and JOIN can be executed with parallel replicas algorithm, and all storages of right JOIN part are *MergeTree, local JOIN will be used instead of GLOBAL JOIN."}, - {"optimize_time_filter_with_preimage", true, true, "Optimize Date and DateTime predicates by converting functions into equivalent comparisons without conversions (e.g. toYear(col) = 2023 -> col >= '2023-01-01' AND col <= '2023-12-31')"}, - {"extract_key_value_pairs_max_pairs_per_row", 0, 0, "Max number of pairs that can be produced by the `extractKeyValuePairs` function. 
Used as a safeguard against consuming too much memory."}, - {"default_view_definer", "CURRENT_USER", "CURRENT_USER", "Allows to set default `DEFINER` option while creating a view"}, - {"default_materialized_view_sql_security", "DEFINER", "DEFINER", "Allows to set a default value for SQL SECURITY option when creating a materialized view"}, - {"default_normal_view_sql_security", "INVOKER", "INVOKER", "Allows to set default `SQL SECURITY` option while creating a normal view"}, - {"mysql_map_string_to_text_in_show_columns", false, true, "Reduce the configuration effort to connect ClickHouse with BI tools."}, - {"mysql_map_fixed_string_to_text_in_show_columns", false, true, "Reduce the configuration effort to connect ClickHouse with BI tools."}, - }}, - {"24.1", {{"print_pretty_type_names", false, true, "Better user experience."}, - {"input_format_json_read_bools_as_strings", false, true, "Allow to read bools as strings in JSON formats by default"}, - {"output_format_arrow_use_signed_indexes_for_dictionary", false, true, "Use signed indexes type for Arrow dictionaries by default as it's recommended"}, - {"allow_experimental_variant_type", false, false, "Add new experimental Variant type"}, - {"use_variant_as_common_type", false, false, "Allow to use Variant in if/multiIf if there is no common type"}, - {"output_format_arrow_use_64_bit_indexes_for_dictionary", false, false, "Allow to use 64 bit indexes type in Arrow dictionaries"}, - {"parallel_replicas_mark_segment_size", 128, 128, "Add new setting to control segment size in new parallel replicas coordinator implementation"}, - {"ignore_materialized_views_with_dropped_target_table", false, false, "Add new setting to allow to ignore materialized views with dropped target table"}, - {"output_format_compression_level", 3, 3, "Allow to change compression level in the query output"}, - {"output_format_compression_zstd_window_log", 0, 0, "Allow to change zstd window log in the query output when zstd compression is used"}, - {"enable_zstd_qat_codec", false, false, "Add new ZSTD_QAT codec"}, - {"enable_vertical_final", false, true, "Use vertical final by default"}, - {"output_format_arrow_use_64_bit_indexes_for_dictionary", false, false, "Allow to use 64 bit indexes type in Arrow dictionaries"}, - {"max_rows_in_set_to_optimize_join", 100000, 0, "Disable join optimization as it prevents from read in order optimization"}, - {"output_format_pretty_color", true, "auto", "Setting is changed to allow also for auto value, disabling ANSI escapes if output is not a tty"}, - {"function_visible_width_behavior", 0, 1, "We changed the default behavior of `visibleWidth` to be more precise"}, - {"max_estimated_execution_time", 0, 0, "Separate max_execution_time and max_estimated_execution_time"}, - {"iceberg_engine_ignore_schema_evolution", false, false, "Allow to ignore schema evolution in Iceberg table engine"}, - {"optimize_injective_functions_in_group_by", false, true, "Replace injective functions by it's arguments in GROUP BY section in analyzer"}, - {"update_insert_deduplication_token_in_dependent_materialized_views", false, false, "Allow to update insert deduplication token with table identifier during insert in dependent materialized views"}, - {"azure_max_unexpected_write_error_retries", 4, 4, "The maximum number of retries in case of unexpected errors during Azure blob storage write"}, - {"split_parts_ranges_into_intersecting_and_non_intersecting_final", false, true, "Allow to split parts ranges into intersecting and non intersecting during FINAL 
optimization"}, - {"split_intersecting_parts_ranges_into_layers_final", true, true, "Allow to split intersecting parts ranges into layers during FINAL optimization"}}}, - {"23.12", {{"allow_suspicious_ttl_expressions", true, false, "It is a new setting, and in previous versions the behavior was equivalent to allowing."}, - {"input_format_parquet_allow_missing_columns", false, true, "Allow missing columns in Parquet files by default"}, - {"input_format_orc_allow_missing_columns", false, true, "Allow missing columns in ORC files by default"}, - {"input_format_arrow_allow_missing_columns", false, true, "Allow missing columns in Arrow files by default"}}}, - {"23.11", {{"parsedatetime_parse_without_leading_zeros", false, true, "Improved compatibility with MySQL DATE_FORMAT/STR_TO_DATE"}}}, - {"23.9", {{"optimize_group_by_constant_keys", false, true, "Optimize group by constant keys by default"}, - {"input_format_json_try_infer_named_tuples_from_objects", false, true, "Try to infer named Tuples from JSON objects by default"}, - {"input_format_json_read_numbers_as_strings", false, true, "Allow to read numbers as strings in JSON formats by default"}, - {"input_format_json_read_arrays_as_strings", false, true, "Allow to read arrays as strings in JSON formats by default"}, - {"input_format_json_infer_incomplete_types_as_strings", false, true, "Allow to infer incomplete types as Strings in JSON formats by default"}, - {"input_format_json_try_infer_numbers_from_strings", true, false, "Don't infer numbers from strings in JSON formats by default to prevent possible parsing errors"}, - {"http_write_exception_in_output_format", false, true, "Output valid JSON/XML on exception in HTTP streaming."}}}, - {"23.8", {{"rewrite_count_distinct_if_with_count_distinct_implementation", false, true, "Rewrite countDistinctIf with count_distinct_implementation configuration"}}}, - {"23.7", {{"function_sleep_max_microseconds_per_block", 0, 3000000, "In previous versions, the maximum sleep time of 3 seconds was applied only for `sleep`, but not for `sleepEachRow` function. In the new version, we introduce this setting. If you set compatibility with the previous versions, we will disable the limit altogether."}}}, - {"23.6", {{"http_send_timeout", 180, 30, "3 minutes seems crazy long. Note that this is timeout for a single network write call, not for the whole upload operation."}, - {"http_receive_timeout", 180, 30, "See http_send_timeout."}}}, - {"23.5", {{"input_format_parquet_preserve_order", true, false, "Allow Parquet reader to reorder rows for better parallelism."}, - {"parallelize_output_from_storages", false, true, "Allow parallelism when executing queries that read from file/url/s3/etc. This may reorder rows."}, - {"use_with_fill_by_sorting_prefix", false, true, "Columns preceding WITH FILL columns in ORDER BY clause form sorting prefix. 
Rows with different values in sorting prefix are filled independently"}, - {"output_format_parquet_compliant_nested_types", false, true, "Change an internal field name in output Parquet file schema."}}}, - {"23.4", {{"allow_suspicious_indices", true, false, "If true, index can defined with identical expressions"}, - {"allow_nonconst_timezone_arguments", true, false, "Allow non-const timezone arguments in certain time-related functions like toTimeZone(), fromUnixTimestamp*(), snowflakeToDateTime*()."}, - {"connect_timeout_with_failover_ms", 50, 1000, "Increase default connect timeout because of async connect"}, - {"connect_timeout_with_failover_secure_ms", 100, 1000, "Increase default secure connect timeout because of async connect"}, - {"hedged_connection_timeout_ms", 100, 50, "Start new connection in hedged requests after 50 ms instead of 100 to correspond with previous connect timeout"}, - {"formatdatetime_f_prints_single_zero", true, false, "Improved compatibility with MySQL DATE_FORMAT()/STR_TO_DATE()"}, - {"formatdatetime_parsedatetime_m_is_month_name", false, true, "Improved compatibility with MySQL DATE_FORMAT/STR_TO_DATE"}}}, - {"23.3", {{"output_format_parquet_version", "1.0", "2.latest", "Use latest Parquet format version for output format"}, - {"input_format_json_ignore_unknown_keys_in_named_tuple", false, true, "Improve parsing JSON objects as named tuples"}, - {"input_format_native_allow_types_conversion", false, true, "Allow types conversion in Native input forma"}, - {"output_format_arrow_compression_method", "none", "lz4_frame", "Use lz4 compression in Arrow output format by default"}, - {"output_format_parquet_compression_method", "snappy", "lz4", "Use lz4 compression in Parquet output format by default"}, - {"output_format_orc_compression_method", "none", "lz4_frame", "Use lz4 compression in ORC output format by default"}, - {"async_query_sending_for_remote", false, true, "Create connections and send query async across shards"}}}, - {"23.2", {{"output_format_parquet_fixed_string_as_fixed_byte_array", false, true, "Use Parquet FIXED_LENGTH_BYTE_ARRAY type for FixedString by default"}, - {"output_format_arrow_fixed_string_as_fixed_byte_array", false, true, "Use Arrow FIXED_SIZE_BINARY type for FixedString by default"}, - {"query_plan_remove_redundant_distinct", false, true, "Remove redundant Distinct step in query plan"}, - {"optimize_duplicate_order_by_and_distinct", true, false, "Remove duplicate ORDER BY and DISTINCT if it's possible"}, - {"insert_keeper_max_retries", 0, 20, "Enable reconnections to Keeper on INSERT, improve reliability"}}}, - {"23.1", {{"input_format_json_read_objects_as_strings", 0, 1, "Enable reading nested json objects as strings while object type is experimental"}, - {"input_format_json_defaults_for_missing_elements_in_named_tuple", false, true, "Allow missing elements in JSON objects while reading named tuples by default"}, - {"input_format_csv_detect_header", false, true, "Detect header in CSV format by default"}, - {"input_format_tsv_detect_header", false, true, "Detect header in TSV format by default"}, - {"input_format_custom_detect_header", false, true, "Detect header in CustomSeparated format by default"}, - {"query_plan_remove_redundant_sorting", false, true, "Remove redundant sorting in query plan. 
For example, sorting steps related to ORDER BY clauses in subqueries"}}}, - {"22.12", {{"max_size_to_preallocate_for_aggregation", 10'000'000, 100'000'000, "This optimizes performance"}, - {"query_plan_aggregation_in_order", 0, 1, "Enable some refactoring around query plan"}, - {"format_binary_max_string_size", 0, 1_GiB, "Prevent allocating large amount of memory"}}}, - {"22.11", {{"use_structure_from_insertion_table_in_table_functions", 0, 2, "Improve using structure from insertion table in table functions"}}}, - {"22.9", {{"force_grouping_standard_compatibility", false, true, "Make GROUPING function output the same as in SQL standard and other DBMS"}}}, - {"22.7", {{"cross_to_inner_join_rewrite", 1, 2, "Force rewrite comma join to inner"}, - {"enable_positional_arguments", false, true, "Enable positional arguments feature by default"}, - {"format_csv_allow_single_quotes", true, false, "Most tools don't treat single quote in CSV specially, don't do it by default too"}}}, - {"22.6", {{"output_format_json_named_tuples_as_objects", false, true, "Allow to serialize named tuples as JSON objects in JSON formats by default"}, - {"input_format_skip_unknown_fields", false, true, "Optimize reading subset of columns for some input formats"}}}, - {"22.5", {{"memory_overcommit_ratio_denominator", 0, 1073741824, "Enable memory overcommit feature by default"}, - {"memory_overcommit_ratio_denominator_for_user", 0, 1073741824, "Enable memory overcommit feature by default"}}}, - {"22.4", {{"allow_settings_after_format_in_insert", true, false, "Do not allow SETTINGS after FORMAT for INSERT queries because ClickHouse interpret SETTINGS as some values, which is misleading"}}}, - {"22.3", {{"cast_ipv4_ipv6_default_on_conversion_error", true, false, "Make functions cast(value, 'IPv4') and cast(value, 'IPv6') behave same as toIPv4 and toIPv6 functions"}}}, - {"21.12", {{"stream_like_engine_allow_direct_select", true, false, "Do not allow direct select for Kafka/RabbitMQ/FileLog by default"}}}, - {"21.9", {{"output_format_decimal_trailing_zeros", true, false, "Do not output trailing zeros in text representation of Decimal types by default for better looking output"}, - {"use_hedged_requests", false, true, "Enable Hedged Requests feature by default"}}}, - {"21.7", {{"legacy_column_name_of_tuple_literal", true, false, "Add this setting only for compatibility reasons. It makes sense to set to 'true', while doing rolling update of cluster from version lower than 21.7 to higher"}}}, - {"21.5", {{"async_socket_for_remote", false, true, "Fix all problems and turn on asynchronous reads from socket for remote queries by default again"}}}, - {"21.3", {{"async_socket_for_remote", true, false, "Turn off asynchronous reads from socket for remote queries because of some problems"}, - {"optimize_normalize_count_variants", false, true, "Rewrite aggregate functions that semantically equals to count() as count() by default"}, - {"normalize_function_names", false, true, "Normalize function names to their canonical names, this was needed for projection query routing"}}}, - {"21.2", {{"enable_global_with_statement", false, true, "Propagate WITH statements to UNION queries and all subqueries by default"}}}, - {"21.1", {{"insert_quorum_parallel", false, true, "Use parallel quorum inserts by default. 
It is significantly more convenient to use than sequential quorum inserts"}, - {"input_format_null_as_default", false, true, "Allow to insert NULL as default for input formats by default"}, - {"optimize_on_insert", false, true, "Enable data optimization on INSERT by default for better user experience"}, - {"use_compact_format_in_distributed_parts_names", false, true, "Use compact format for async INSERT into Distributed tables by default"}}}, - {"20.10", {{"format_regexp_escaping_rule", "Escaped", "Raw", "Use Raw as default escaping rule for Regexp format to male the behaviour more like to what users expect"}}}, - {"20.7", {{"show_table_uuid_in_table_create_query_if_not_nil", true, false, "Stop showing UID of the table in its CREATE query for Engine=Atomic"}}}, - {"20.5", {{"input_format_with_names_use_header", false, true, "Enable using header with names for formats with WithNames/WithNamesAndTypes suffixes"}, - {"allow_suspicious_codecs", true, false, "Don't allow to specify meaningless compression codecs"}}}, - {"20.4", {{"validate_polygons", false, true, "Throw exception if polygon is invalid in function pointInPolygon by default instead of returning possibly wrong results"}}}, - {"19.18", {{"enable_scalar_subquery_optimization", false, true, "Prevent scalar subqueries from (de)serializing large scalar values and possibly avoid running the same subquery more than once"}}}, - {"19.14", {{"any_join_distinct_right_table_keys", true, false, "Disable ANY RIGHT and ANY FULL JOINs by default to avoid inconsistency"}}}, - {"19.12", {{"input_format_defaults_for_omitted_fields", false, true, "Enable calculation of complex default expressions for omitted fields for some input formats, because it should be the expected behaviour"}}}, - {"19.5", {{"max_partitions_per_insert_block", 0, 100, "Add a limit for the number of partitions in one block"}}}, - {"18.12.17", {{"enable_optimize_predicate_expression", 0, 1, "Optimize predicates to subqueries by default"}}}, {"24.12", { } @@ -335,7 +76,9 @@ static std::initializer_listgetSettingsRef().database_replicated_allow_replicated_engine_arguments == 0) + /// We will replace it with default arguments if the setting is 2 + if (query_context->getSettingsRef().database_replicated_allow_replicated_engine_arguments != 2) throw Exception(ErrorCodes::INCORRECT_QUERY, "Explicit zookeeper_path and replica_name are specified in ReplicatedMergeTree arguments. 
" "If you really want to specify it explicitly, then you should use some macros " diff --git a/src/Interpreters/InterpreterCreateQuery.cpp b/src/Interpreters/InterpreterCreateQuery.cpp index d34ba4c267b..7efaac029c7 100644 --- a/src/Interpreters/InterpreterCreateQuery.cpp +++ b/src/Interpreters/InterpreterCreateQuery.cpp @@ -1239,7 +1239,7 @@ void InterpreterCreateQuery::assertOrSetUUID(ASTCreateQuery & create, const Data bool from_path = create.attach_from_path.has_value(); bool is_on_cluster = getContext()->getClientInfo().query_kind == ClientInfo::QueryKind::SECONDARY_QUERY; - if (database->getEngineName() == "Replicated" && create.uuid != UUIDHelpers::Nil && !is_replicated_database_internal && !create.attach) + if (database->getEngineName() == "Replicated" && create.uuid != UUIDHelpers::Nil && !is_replicated_database_internal && !is_on_cluster && !create.attach) { if (getContext()->getSettingsRef().database_replicated_allow_explicit_uuid == 0) { @@ -1253,7 +1253,8 @@ void InterpreterCreateQuery::assertOrSetUUID(ASTCreateQuery & create, const Data else if (getContext()->getSettingsRef().database_replicated_allow_explicit_uuid == 2) { UUID old_uuid = create.uuid; - create.generateRandomUUID(/*always_generate_new_uuid*/ true); + create.uuid = UUIDHelpers::Nil; + create.generateRandomUUIDs(); LOG_WARNING(&Poco::Logger::get("InterpreterCreateQuery"), "Replaced a user-provided UUID ({}) with a random one ({}) " "to make sure it's unique", old_uuid, create.uuid); } diff --git a/tests/integration/test_replicated_database/configs/settings.xml b/tests/integration/test_replicated_database/configs/settings.xml index 4b1122b1b5f..41799c5bed2 100644 --- a/tests/integration/test_replicated_database/configs/settings.xml +++ b/tests/integration/test_replicated_database/configs/settings.xml @@ -6,6 +6,7 @@ 0 0 3 + 3 diff --git a/tests/integration/test_replicated_database/configs/settings2.xml b/tests/integration/test_replicated_database/configs/settings2.xml index 9a3038871e1..fb7f0c8d4d3 100644 --- a/tests/integration/test_replicated_database/configs/settings2.xml +++ b/tests/integration/test_replicated_database/configs/settings2.xml @@ -6,6 +6,7 @@ 0 0 3 + 3 0 From 6eef6a7759c8cb1c3da7c113f18572f248a30210 Mon Sep 17 00:00:00 2001 From: Alexander Tokmakov Date: Wed, 4 Sep 2024 21:43:36 +0200 Subject: [PATCH 49/70] fix --- .../configs/settings.xml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tests/integration/test_replicated_database_alter_modify_order_by/configs/settings.xml b/tests/integration/test_replicated_database_alter_modify_order_by/configs/settings.xml index 16caee9ba20..a5e45ead44e 100644 --- a/tests/integration/test_replicated_database_alter_modify_order_by/configs/settings.xml +++ b/tests/integration/test_replicated_database_alter_modify_order_by/configs/settings.xml @@ -1,6 +1,8 @@ + 3 + 3 From a991a58766bb7b8480d9dff908b025186828bdd3 Mon Sep 17 00:00:00 2001 From: Michael Stetsyuk Date: Wed, 4 Sep 2024 12:11:58 +0000 Subject: [PATCH 50/70] rm test_database_creation_idempotency --- .../test_replicated_database/test.py | 35 ------------------- 1 file changed, 35 deletions(-) diff --git a/tests/integration/test_replicated_database/test.py b/tests/integration/test_replicated_database/test.py index 58e99d4c408..60a6e099b22 100644 --- a/tests/integration/test_replicated_database/test.py +++ b/tests/integration/test_replicated_database/test.py @@ -1,8 +1,5 @@ -import concurrent.futures import os -import random import shutil -import string import time import re import pytest @@ -1552,35 
+1549,3 @@ def test_all_groups_cluster(started_cluster): assert "bad_settings_node\ndummy_node\n" == bad_settings_node.query( "select host_name from system.clusters where name='all_groups.db_cluster' order by host_name" ) - - -def test_database_creation_idempotency(started_cluster): - def randomize_database_name(database_name, random_suffix_length=20): - letters = string.ascii_letters + string.digits - return f"{database_name}{''.join(random.choice(letters) for _ in range(random_suffix_length))}" - - databases = [randomize_database_name("rdb") for _ in range(100)] - - def create_database(name): - main_node.query( - f""" - CREATE DATABASE {name} - ENGINE = Replicated('/test/test_database_creation_idempotency/{name}', 'shard', 'replica') - """ - ) - - with concurrent.futures.ThreadPoolExecutor() as executor: - for name in databases: - executor.submit(create_database, name) - - main_node.restart_clickhouse(kill=True) - - with concurrent.futures.ThreadPoolExecutor() as executor: - futures = [executor.submit(create_database, name) for name in databases] - concurrent.futures.wait(futures) - - assert int( - main_node.query( - f"SELECT count() FROM system.databases WHERE name IN {databases}" - ).strip() - ) == len(databases) From df3c5c97914a62b3ab8dcdfd9f9980bc62094c47 Mon Sep 17 00:00:00 2001 From: Alexander Tokmakov Date: Thu, 5 Sep 2024 15:51:03 +0200 Subject: [PATCH 51/70] Update 00609_mv_index_in_in.sql --- tests/queries/0_stateless/00609_mv_index_in_in.sql | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/queries/0_stateless/00609_mv_index_in_in.sql b/tests/queries/0_stateless/00609_mv_index_in_in.sql index 89fc85651ad..848938780c2 100644 --- a/tests/queries/0_stateless/00609_mv_index_in_in.sql +++ b/tests/queries/0_stateless/00609_mv_index_in_in.sql @@ -1,4 +1,4 @@ --- Tags: no-ordinary-database +-- Tags: no-ordinary-database, no-parallel DROP TABLE IF EXISTS test_00609; DROP TABLE IF EXISTS test_mv_00609; From 364ad4315b1b5dff72a21f42e8b1a7a912373dbd Mon Sep 17 00:00:00 2001 From: kssenii Date: Thu, 5 Sep 2024 18:20:46 +0200 Subject: [PATCH 52/70] Add checks agains segfault in DeltaLakeMetadata --- .../DataLakes/DeltaLakeMetadata.cpp | 22 ++++++++++++++++++- 1 file changed, 21 insertions(+), 1 deletion(-) diff --git a/src/Storages/ObjectStorage/DataLakes/DeltaLakeMetadata.cpp b/src/Storages/ObjectStorage/DataLakes/DeltaLakeMetadata.cpp index 667a925d11e..f04e868ee5a 100644 --- a/src/Storages/ObjectStorage/DataLakes/DeltaLakeMetadata.cpp +++ b/src/Storages/ObjectStorage/DataLakes/DeltaLakeMetadata.cpp @@ -207,18 +207,28 @@ struct DeltaLakeMetadataImpl Poco::Dynamic::Var json = parser.parse(json_str); Poco::JSON::Object::Ptr object = json.extract(); + if (!object) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Failed to parse metadata file"); + +#ifdef ABORT_ON_LOGICAL_ERROR std::ostringstream oss; // STYLE_CHECK_ALLOW_STD_STRING_STREAM object->stringify(oss); LOG_TEST(log, "Metadata: {}", oss.str()); +#endif if (object->has("metaData")) { const auto metadata_object = object->get("metaData").extract(); + if (!metadata_object) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Failed to extract `metaData` field"); + const auto schema_object = metadata_object->getValue("schemaString"); Poco::JSON::Parser p; Poco::Dynamic::Var fields_json = parser.parse(schema_object); const Poco::JSON::Object::Ptr & fields_object = fields_json.extract(); + if (!fields_object) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Failed to extract `fields` field"); auto current_schema = 
parseMetadata(fields_object); if (file_schema.empty()) @@ -237,6 +247,9 @@ struct DeltaLakeMetadataImpl if (object->has("add")) { auto add_object = object->get("add").extract(); + if (!add_object) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Failed to extract `add` field"); + auto path = add_object->getValue("path"); result.insert(fs::path(configuration->getPath()) / path); @@ -247,6 +260,9 @@ struct DeltaLakeMetadataImpl if (add_object->has("partitionValues")) { auto partition_values = add_object->get("partitionValues").extract(); + if (!partition_values) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Failed to extract `partitionValues` field"); + if (partition_values->size()) { auto & current_partition_columns = file_partition_columns[filename]; @@ -274,7 +290,11 @@ struct DeltaLakeMetadataImpl } else if (object->has("remove")) { - auto path = object->get("remove").extract()->getValue("path"); + auto remove_object = object->get("remove").extract(); + if (!remove_object) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Failed to extract `remove` field"); + + auto path = remove_object->getValue("path"); result.erase(fs::path(configuration->getPath()) / path); } } From c6f0e29ecad8385954fc26f5578c6c666ff6e07d Mon Sep 17 00:00:00 2001 From: Alexander Tokmakov Date: Thu, 5 Sep 2024 18:36:13 +0200 Subject: [PATCH 53/70] Update StackTrace.cpp --- src/Common/StackTrace.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Common/StackTrace.cpp b/src/Common/StackTrace.cpp index bd01b639913..dc5434677ee 100644 --- a/src/Common/StackTrace.cpp +++ b/src/Common/StackTrace.cpp @@ -70,7 +70,7 @@ std::string SigsegvErrorString(const siginfo_t & info, [[maybe_unused]] const uc #if defined(__x86_64__) && !defined(OS_FREEBSD) && !defined(OS_DARWIN) && !defined(__arm__) && !defined(__powerpc__) (context.uc_mcontext.gregs[REG_ERR] & 0x02) ? 
"write" : "read"; #else - ""; + ""; #endif std::string_view message; From f22876302e2c284aa29f0e5e2733860fe4f9c253 Mon Sep 17 00:00:00 2001 From: avogar Date: Thu, 5 Sep 2024 23:37:29 +0000 Subject: [PATCH 54/70] Mark Dynamic type as not safe primary key type to avoid issues with Fields --- src/Processors/QueryPlan/PartsSplitter.cpp | 13 ++----- .../queries/0_stateless/00000_test.reference | 37 +++++++++++++++++++ tests/queries/0_stateless/00000_test.sql | 12 ++++++ ...231_dynamic_not_safe_primary_key.reference | 0 .../03231_dynamic_not_safe_primary_key.sql | 10 +++++ 5 files changed, 62 insertions(+), 10 deletions(-) create mode 100644 tests/queries/0_stateless/00000_test.reference create mode 100644 tests/queries/0_stateless/00000_test.sql create mode 100644 tests/queries/0_stateless/03231_dynamic_not_safe_primary_key.reference create mode 100644 tests/queries/0_stateless/03231_dynamic_not_safe_primary_key.sql diff --git a/src/Processors/QueryPlan/PartsSplitter.cpp b/src/Processors/QueryPlan/PartsSplitter.cpp index 63c10a11913..935af914827 100644 --- a/src/Processors/QueryPlan/PartsSplitter.cpp +++ b/src/Processors/QueryPlan/PartsSplitter.cpp @@ -50,6 +50,9 @@ bool isSafePrimaryDataKeyType(const IDataType & data_type) case TypeIndex::Float64: case TypeIndex::Nullable: case TypeIndex::ObjectDeprecated: + case TypeIndex::Object: + case TypeIndex::Variant: + case TypeIndex::Dynamic: return false; case TypeIndex::Array: { @@ -76,16 +79,6 @@ bool isSafePrimaryDataKeyType(const IDataType & data_type) const auto & data_type_map = static_cast(data_type); return isSafePrimaryDataKeyType(*data_type_map.getKeyType()) && isSafePrimaryDataKeyType(*data_type_map.getValueType()); } - case TypeIndex::Variant: - { - const auto & data_type_variant = static_cast(data_type); - const auto & data_type_variant_elements = data_type_variant.getVariants(); - for (const auto & data_type_variant_element : data_type_variant_elements) - if (!isSafePrimaryDataKeyType(*data_type_variant_element)) - return false; - - return false; - } default: { break; diff --git a/tests/queries/0_stateless/00000_test.reference b/tests/queries/0_stateless/00000_test.reference new file mode 100644 index 00000000000..5374971d501 --- /dev/null +++ b/tests/queries/0_stateless/00000_test.reference @@ -0,0 +1,37 @@ +a +a +a +b +xx +xx +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +--- + 4 + xx + xx +a 2020-01-01 00:00:00.000 +a 2020-01-01 10:10:10.000 +a 2020-01-01 10:10:10.110 +a 1 +a 1 +a 1 +a a +a a +a b +a \N +a \N +a \N +a \N +a \N +z a diff --git a/tests/queries/0_stateless/00000_test.sql b/tests/queries/0_stateless/00000_test.sql new file mode 100644 index 00000000000..d380f6f69de --- /dev/null +++ b/tests/queries/0_stateless/00000_test.sql @@ -0,0 +1,12 @@ +set max_threads = 1, read_in_order_two_level_merge_threshold = 1; + +CREATE OR REPLACE TABLE test( + key String, + val Map(String, String) +) engine = MergeTree +order by key settings min_bytes_for_wide_part = 0; +insert into test VALUES ('', {'x':'xx'}); +insert into test VALUES ('', {}); +SELECT key, arrayJoin(mapValues(val)) as v FROM test ORDER BY key, v; +DROP TABLE test; + diff --git a/tests/queries/0_stateless/03231_dynamic_not_safe_primary_key.reference b/tests/queries/0_stateless/03231_dynamic_not_safe_primary_key.reference new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/queries/0_stateless/03231_dynamic_not_safe_primary_key.sql b/tests/queries/0_stateless/03231_dynamic_not_safe_primary_key.sql new file mode 100644 index 00000000000..f207581f482 
--- /dev/null +++ b/tests/queries/0_stateless/03231_dynamic_not_safe_primary_key.sql @@ -0,0 +1,10 @@ +SET allow_experimental_dynamic_type = 1; +DROP TABLE IF EXISTS t0; +DROP TABLE IF EXISTS t1; +CREATE TABLE t0 (c0 Int) ENGINE = AggregatingMergeTree() ORDER BY (c0); +CREATE TABLE t1 (c0 Array(Dynamic), c1 Int) ENGINE = MergeTree() ORDER BY (c0); +INSERT INTO t1 (c0, c1) VALUES ([18446717433683171873], 13623876564923702671), ([-4], 6111684076076982207); +SELECT 1 FROM t0 FINAL JOIN t1 ON TRUE; +DROP TABLE t0; +DROP TABLE t1; + From ff86b1872f7b0e11367d67dbdaee46a93fa00684 Mon Sep 17 00:00:00 2001 From: avogar Date: Thu, 5 Sep 2024 23:38:31 +0000 Subject: [PATCH 55/70] Remove extra files --- .../queries/0_stateless/00000_test.reference | 37 ------------------- tests/queries/0_stateless/00000_test.sql | 12 ------ 2 files changed, 49 deletions(-) delete mode 100644 tests/queries/0_stateless/00000_test.reference delete mode 100644 tests/queries/0_stateless/00000_test.sql diff --git a/tests/queries/0_stateless/00000_test.reference b/tests/queries/0_stateless/00000_test.reference deleted file mode 100644 index 5374971d501..00000000000 --- a/tests/queries/0_stateless/00000_test.reference +++ /dev/null @@ -1,37 +0,0 @@ -a -a -a -b -xx -xx -\N -\N -\N -\N -\N -\N -\N -\N -\N -\N -\N -\N ---- - 4 - xx - xx -a 2020-01-01 00:00:00.000 -a 2020-01-01 10:10:10.000 -a 2020-01-01 10:10:10.110 -a 1 -a 1 -a 1 -a a -a a -a b -a \N -a \N -a \N -a \N -a \N -z a diff --git a/tests/queries/0_stateless/00000_test.sql b/tests/queries/0_stateless/00000_test.sql deleted file mode 100644 index d380f6f69de..00000000000 --- a/tests/queries/0_stateless/00000_test.sql +++ /dev/null @@ -1,12 +0,0 @@ -set max_threads = 1, read_in_order_two_level_merge_threshold = 1; - -CREATE OR REPLACE TABLE test( - key String, - val Map(String, String) -) engine = MergeTree -order by key settings min_bytes_for_wide_part = 0; -insert into test VALUES ('', {'x':'xx'}); -insert into test VALUES ('', {}); -SELECT key, arrayJoin(mapValues(val)) as v FROM test ORDER BY key, v; -DROP TABLE test; - From e2a7a1f66292366be35f2e1d61d01937befb180d Mon Sep 17 00:00:00 2001 From: Alexander Tokmakov Date: Fri, 6 Sep 2024 12:08:14 +0200 Subject: [PATCH 56/70] Update StackTrace.cpp --- src/Common/StackTrace.cpp | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) diff --git a/src/Common/StackTrace.cpp b/src/Common/StackTrace.cpp index dc5434677ee..3dce34803b2 100644 --- a/src/Common/StackTrace.cpp +++ b/src/Common/StackTrace.cpp @@ -67,10 +67,18 @@ std::string SigsegvErrorString(const siginfo_t & info, [[maybe_unused]] const uc = info.si_addr == nullptr ? "NULL pointer"s : (shouldShowAddress(info.si_addr) ? fmt::format("{}", info.si_addr) : ""s); const std::string_view access = -#if defined(__x86_64__) && !defined(OS_FREEBSD) && !defined(OS_DARWIN) && !defined(__arm__) && !defined(__powerpc__) - (context.uc_mcontext.gregs[REG_ERR] & 0x02) ? "write" : "read"; -#else +#if defined(__arm__) ""; +#elif defined(__powerpc__) + ""; +#elif defined(OS_DARWIN) + ""; +#elif defined(OS_FREEBSD) + ""; +#elif !defined(__x86_64__) + ""; +#else + (context.uc_mcontext.gregs[REG_ERR] & 0x02) ? 
"write" : "read"; #endif std::string_view message; From 238c9b5789cf9eb03dc688211d0e5f9633b46b9c Mon Sep 17 00:00:00 2001 From: Alexander Tokmakov Date: Fri, 6 Sep 2024 13:51:06 +0200 Subject: [PATCH 57/70] Update 01153_attach_mv_uuid.sql --- tests/queries/0_stateless/01153_attach_mv_uuid.sql | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/queries/0_stateless/01153_attach_mv_uuid.sql b/tests/queries/0_stateless/01153_attach_mv_uuid.sql index 6b167253d8f..0ef16449096 100644 --- a/tests/queries/0_stateless/01153_attach_mv_uuid.sql +++ b/tests/queries/0_stateless/01153_attach_mv_uuid.sql @@ -15,6 +15,7 @@ SELECT * FROM mv ORDER BY n; DROP TABLE mv SYNC; SET database_replicated_allow_explicit_uuid=3; + SET show_table_uuid_in_table_create_query_if_not_nil=1; CREATE TABLE ".inner_id.e15f3ab5-6cae-4df3-b879-f40deafd82c2" (n Int32, n2 Int64) ENGINE = MergeTree PARTITION BY n % 10 ORDER BY n; ATTACH MATERIALIZED VIEW mv UUID 'e15f3ab5-6cae-4df3-b879-f40deafd82c2' (n Int32, n2 Int64) ENGINE = MergeTree PARTITION BY n % 10 ORDER BY n AS SELECT n, n * n AS n2 FROM src; From 06632b9bcd5f6cb0b18848cea806012e9ea6baf9 Mon Sep 17 00:00:00 2001 From: Michael Stetsyuk Date: Fri, 6 Sep 2024 15:58:46 +0000 Subject: [PATCH 58/70] fxs --- src/Databases/DatabaseReplicated.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Databases/DatabaseReplicated.cpp b/src/Databases/DatabaseReplicated.cpp index 26a834eea4c..0a79fc45d0f 100644 --- a/src/Databases/DatabaseReplicated.cpp +++ b/src/Databases/DatabaseReplicated.cpp @@ -649,7 +649,7 @@ void DatabaseReplicated::createReplicaNodesInZooKeeper(const zkutil::ZooKeeperPt if (check_responses[i].data != expected_data[i]) { throw Exception( - ErrorCodes::LOGICAL_ERROR, + ErrorCodes::REPLICA_ALREADY_EXISTS, "Replica node {} in ZooKeeper already exists and contains unexpected value: {}", quoteString(check_paths[i]), quoteString(check_responses[i].data)); } From 8544718d329ff772af2e06b19d949a676fb84af0 Mon Sep 17 00:00:00 2001 From: Michael Stetsyuk Date: Fri, 6 Sep 2024 16:31:39 +0000 Subject: [PATCH 59/70] initialize max_log_ptr_at_creation on attach --- src/Databases/DatabaseReplicated.cpp | 20 +++++++++----------- 1 file changed, 9 insertions(+), 11 deletions(-) diff --git a/src/Databases/DatabaseReplicated.cpp b/src/Databases/DatabaseReplicated.cpp index 0a79fc45d0f..fd886d37f0e 100644 --- a/src/Databases/DatabaseReplicated.cpp +++ b/src/Databases/DatabaseReplicated.cpp @@ -485,6 +485,12 @@ void DatabaseReplicated::tryConnectToZooKeeperAndInitDatabase(LoadingStrictnessL current_zookeeper->set(replica_path + "/replica_group", replica_group_name, -1); createEmptyLogEntry(current_zookeeper); } + + /// Needed to mark all the queries + /// in the range (max log ptr at replica ZooKeeper nodes creation, max log ptr after replica recovery] as successful. + String max_log_ptr_at_creation_str; + if (current_zookeeper->tryGet(replica_path + "/max_log_ptr_at_creation", max_log_ptr_at_creation_str)) + max_log_ptr_at_creation = parse(max_log_ptr_at_creation_str); } if (is_create_query) @@ -614,12 +620,6 @@ void DatabaseReplicated::createReplicaNodesInZooKeeper(const zkutil::ZooKeeperPt replica_path, replica_path + "/replica_group", replica_path + "/digest", - - /// Needed to mark all the queries - /// in the range (max log ptr at replica ZooKeeper nodes creation, max log ptr after replica recovery] as successful. - /// Previously, this method was not idempotent and max_log_ptr_at_creation could be stored in memory. 
- /// we need to store max_log_ptr_at_creation in ZooKeeper to make this method idempotent during replica creation. - replica_path + "/max_log_ptr_at_creation", }; bool nodes_exist = true; auto check_responses = current_zookeeper->tryGet(check_paths); @@ -656,7 +656,6 @@ void DatabaseReplicated::createReplicaNodesInZooKeeper(const zkutil::ZooKeeperPt } LOG_DEBUG(log, "Newly initialized replica nodes found in ZooKeeper, reusing them"); - max_log_ptr_at_creation = parse(check_responses[check_responses.size() - 1].data); createEmptyLogEntry(current_zookeeper); return; } @@ -672,11 +671,10 @@ void DatabaseReplicated::createReplicaNodesInZooKeeper(const zkutil::ZooKeeperPt zkutil::makeCreateRequest(replica_path + "/digest", "0", zkutil::CreateMode::Persistent), zkutil::makeCreateRequest(replica_path + "/replica_group", replica_group_name, zkutil::CreateMode::Persistent), - /// In addition to creating the replica nodes, we record the max_log_ptr at the instant where - /// we declared ourself as an existing replica. We'll need this during recoverLostReplica to - /// notify other nodes that issued new queries while this node was recovering. - zkutil::makeCheckRequest(zookeeper_path + "/max_log_ptr", stat.version), + /// Previously, this method was not idempotent and max_log_ptr_at_creation could be stored in memory. + /// we need to store max_log_ptr_at_creation in ZooKeeper to make this method idempotent during replica creation. zkutil::makeCreateRequest(replica_path + "/max_log_ptr_at_creation", max_log_ptr_str, zkutil::CreateMode::Persistent), + zkutil::makeCheckRequest(zookeeper_path + "/max_log_ptr", stat.version), }; Coordination::Responses ops_responses; From 5362b117bfbb17a79d31e3e40152c1cb0d56fb34 Mon Sep 17 00:00:00 2001 From: Robert Schulze Date: Sat, 7 Sep 2024 16:20:20 +0000 Subject: [PATCH 60/70] Remove superfluous --multiquery/-n --- tests/docker_scripts/stateful_runner.sh | 2 +- tests/docker_scripts/stress_runner.sh | 2 +- tests/integration/helpers/client.py | 1 - tests/performance/scripts/compare.sh | 12 +-- ...006_client_test_hint_no_such_error_name.sh | 2 +- .../0_stateless/02030_rocksdb_race_long.sh | 6 +- .../02151_hash_table_sizes_stats_joins.sh | 2 +- .../02229_client_stop_multiquery_in_SIGINT.sh | 4 +- ...41_filesystem_cache_on_write_operations.sh | 8 +- .../0_stateless/02242_delete_user_race.sh | 2 +- .../0_stateless/02243_drop_user_grant_race.sh | 4 +- .../02266_protobuf_format_google_wrappers.sh | 16 ++-- .../02286_drop_filesystem_cache.sh | 6 +- .../02313_filesystem_cache_seeks.sh | 2 +- .../02340_parts_refcnt_mergetree.sh | 6 +- .../0_stateless/02344_describe_cache.sh | 4 +- tests/queries/0_stateless/02352_rwlock.sh | 4 +- .../0_stateless/02380_insert_mv_race.sh | 4 +- ...entelemetry_insert_on_distributed_table.sh | 8 +- .../02419_keeper_map_primary_key.sh | 2 +- .../02443_detach_attach_partition.sh | 2 +- .../0_stateless/02473_optimize_old_parts.sh | 8 +- .../0_stateless/02477_s3_request_throttler.sh | 2 +- ...02494_zero_copy_projection_cancel_fetch.sh | 4 +- .../02504_regexp_dictionary_ua_parser.sh | 8 +- .../02504_regexp_dictionary_yaml_source.sh | 18 ++-- .../02515_cleanup_async_insert_block_ids.sh | 6 +- .../02585_query_status_deadlock.sh | 2 +- ...de_client_info_initial_query_start_time.sh | 14 ++-- ...rofile_events_from_query_log_and_client.sh | 16 ++-- .../02676_optimize_old_parts_replicated.sh | 8 +- .../02686_postgres_protocol_decimal_256.sh | 2 +- .../0_stateless/02700_s3_part_INT_MAX.sh | 2 +- .../0_stateless/02725_start_stop_fetches.sh | 2 +- 
.../02731_zero_objects_in_metadata.sh | 6 +- tests/queries/0_stateless/02766_prql.sh | 2 +- .../0_stateless/02770_async_buffer_ignore.sh | 4 +- .../02789_filesystem_cache_alignment.sh | 18 ++-- ...89_reading_from_s3_with_connection_pool.sh | 8 +- .../0_stateless/02801_backup_native_copy.sh | 10 +-- .../0_stateless/02803_backup_tmp_files.sh | 2 +- ...2808_custom_disk_with_user_defined_name.sh | 8 +- .../02808_filesystem_cache_drop_query.sh | 22 ++--- ...ackup_use_same_password_for_base_backup.sh | 2 +- ...use_same_s3_credentials_for_base_backup.sh | 2 +- .../02844_max_backup_bandwidth_s3.sh | 6 +- .../0_stateless/02867_storage_set_tsan.sh | 4 +- .../02900_union_schema_inference_mode.sh | 10 +-- .../02908_many_requests_to_system_replicas.sh | 2 +- .../02922_deduplication_with_zero_copy.sh | 10 +-- .../02932_refreshable_materialized_views_1.sh | 72 ++++++++-------- .../02932_refreshable_materialized_views_2.sh | 82 +++++++++---------- .../0_stateless/02941_variant_type_1.sh | 10 +-- .../0_stateless/02941_variant_type_2.sh | 4 +- .../0_stateless/02941_variant_type_3.sh | 4 +- .../0_stateless/02941_variant_type_4.sh | 2 +- ...ynamically_change_filesystem_cache_size.sh | 10 +-- .../0_stateless/02960_polygon_bound_bug.sh | 2 +- .../02961_storage_config_volume_priority.sh | 4 +- ...02963_remote_read_small_buffer_size_bug.sh | 6 +- ...allel_replicas_join_algo_and_analyzer_1.sh | 2 +- ...allel_replicas_join_algo_and_analyzer_2.sh | 2 +- ...allel_replicas_join_algo_and_analyzer_3.sh | 2 +- .../02969_auto_format_detection.sh | 6 +- .../02980_s3_plain_DROP_TABLE_MergeTree.sh | 4 +- ...s3_plain_DROP_TABLE_ReplicatedMergeTree.sh | 6 +- ...ion_insert_several_blocks_nonreplicated.sh | 2 +- ...cation_insert_several_blocks_replicated.sh | 2 +- ..._generates_several_blocks_nonreplicated.sh | 2 +- ..._mv_generates_several_blocks_replicated.sh | 2 +- ...several_mv_into_one_table_nonreplicated.sh | 2 +- ...on_several_mv_into_one_table_replicated.sh | 2 +- .../0_stateless/03008_s3_plain_rewritable.sh | 8 +- ...2_dynamically_resize_filesystem_cache_2.sh | 6 +- .../03039_dynamic_aggregating_merge_tree.sh | 2 +- .../03039_dynamic_collapsing_merge_tree.sh | 2 +- .../03039_dynamic_replacing_merge_tree.sh | 2 +- .../03039_dynamic_summing_merge_tree.sh | 2 +- ...dynamic_versioned_collapsing_merge_tree.sh | 2 +- .../0_stateless/03151_unload_index_race.sh | 6 +- .../03172_error_log_table_not_empty.sh | 8 +- .../03203_hive_style_partitioning.sh | 18 ++-- ...kup_and_clear_old_temporary_directories.sh | 4 +- 83 files changed, 298 insertions(+), 299 deletions(-) diff --git a/tests/docker_scripts/stateful_runner.sh b/tests/docker_scripts/stateful_runner.sh index 86f6a299ad3..defc24781a1 100755 --- a/tests/docker_scripts/stateful_runner.sh +++ b/tests/docker_scripts/stateful_runner.sh @@ -105,7 +105,7 @@ setup_logs_replication clickhouse-client --query "SHOW DATABASES" clickhouse-client --query "CREATE DATABASE datasets" -clickhouse-client --multiquery < /repo/tests/docker_scripts/create.sql +clickhouse-client < /repo/tests/docker_scripts/create.sql clickhouse-client --query "SHOW TABLES FROM datasets" if [[ -n "$USE_DATABASE_REPLICATED" ]] && [[ "$USE_DATABASE_REPLICATED" -eq 1 ]]; then diff --git a/tests/docker_scripts/stress_runner.sh b/tests/docker_scripts/stress_runner.sh index 039c60c8e4e..af93c93f202 100755 --- a/tests/docker_scripts/stress_runner.sh +++ b/tests/docker_scripts/stress_runner.sh @@ -62,7 +62,7 @@ start_server setup_logs_replication clickhouse-client --query "CREATE DATABASE datasets" -clickhouse-client 
--multiquery < /repo/tests/docker_scripts/create.sql +clickhouse-client < /repo/tests/docker_scripts/create.sql clickhouse-client --query "SHOW TABLES FROM datasets" clickhouse-client --query "CREATE DATABASE IF NOT EXISTS test" diff --git a/tests/integration/helpers/client.py b/tests/integration/helpers/client.py index 8ba7b342020..97405860784 100644 --- a/tests/integration/helpers/client.py +++ b/tests/integration/helpers/client.py @@ -89,7 +89,6 @@ class Client: command = self.command[:] if stdin is None: - command += ["--multiquery"] stdin = sql else: command += ["--query", sql] diff --git a/tests/performance/scripts/compare.sh b/tests/performance/scripts/compare.sh index da7bbf77a28..c3566c51a16 100755 --- a/tests/performance/scripts/compare.sh +++ b/tests/performance/scripts/compare.sh @@ -427,7 +427,7 @@ do done # for each query run, prepare array of metrics from query log -clickhouse-local --multiquery --query " +clickhouse-local --query " create view query_runs as select * from file('analyze/query-runs.tsv', TSV, 'test text, query_index int, query_id text, version UInt8, time float'); @@ -582,7 +582,7 @@ numactl --cpunodebind=all --membind=all numactl --show # If the available memory falls below 2 * size, GNU parallel will suspend some of the running jobs. numactl --cpunodebind=all --membind=all parallel -v --joblog analyze/parallel-log.txt --memsuspend 15G --null < analyze/commands.txt 2>> analyze/errors.log -clickhouse-local --multiquery --query " +clickhouse-local --query " -- Join the metric names back to the metric statistics we've calculated, and make -- a denormalized table of them -- statistics for all metrics for all queries. -- The WITH, ARRAY JOIN and CROSS JOIN do not like each other: @@ -680,7 +680,7 @@ rm ./*.{rep,svg} test-times.tsv test-dump.tsv unstable.tsv unstable-query-ids.ts cat analyze/errors.log >> report/errors.log ||: cat profile-errors.log >> report/errors.log ||: -clickhouse-local --multiquery --query " +clickhouse-local --query " create view query_display_names as select * from file('analyze/query-display-names.tsv', TSV, 'test text, query_index int, query_display_name text') @@ -981,7 +981,7 @@ create table all_query_metrics_tsv engine File(TSV, 'report/all-query-metrics.ts for version in {right,left} do rm -rf data - clickhouse-local --multiquery --query " + clickhouse-local --query " create view query_profiles as with 0 as left, 1 as right select * from file('analyze/query-profiles.tsv', TSV, @@ -1151,7 +1151,7 @@ function report_metrics rm -rf metrics ||: mkdir metrics -clickhouse-local --multiquery --query " +clickhouse-local --query " create view right_async_metric_log as select * from file('right-async-metric-log.tsv', TSVWithNamesAndTypes) ; @@ -1211,7 +1211,7 @@ function upload_results # Prepare info for the CI checks table. rm -f ci-checks.tsv - clickhouse-local --multiquery --query " + clickhouse-local --query " create view queries as select * from file('report/queries.tsv', TSVWithNamesAndTypes); create table ci_checks engine File(TSVWithNamesAndTypes, 'ci-checks.tsv') diff --git a/tests/queries/0_stateless/02006_client_test_hint_no_such_error_name.sh b/tests/queries/0_stateless/02006_client_test_hint_no_such_error_name.sh index 972ff3ba73f..7bf8100a53d 100755 --- a/tests/queries/0_stateless/02006_client_test_hint_no_such_error_name.sh +++ b/tests/queries/0_stateless/02006_client_test_hint_no_such_error_name.sh @@ -5,4 +5,4 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . 
"$CUR_DIR"/../shell_config.sh -$CLICKHOUSE_CLIENT -n -q 'select 1 -- { clientError FOOBAR }' |& grep -o 'No error code with name:.*' +$CLICKHOUSE_CLIENT -q 'select 1 -- { clientError FOOBAR }' |& grep -o 'No error code with name:.*' diff --git a/tests/queries/0_stateless/02030_rocksdb_race_long.sh b/tests/queries/0_stateless/02030_rocksdb_race_long.sh index da31861991c..7d329f78d98 100755 --- a/tests/queries/0_stateless/02030_rocksdb_race_long.sh +++ b/tests/queries/0_stateless/02030_rocksdb_race_long.sh @@ -12,14 +12,14 @@ echo " DROP TABLE IF EXISTS rocksdb_race; CREATE TABLE rocksdb_race (key String, value UInt32) Engine=EmbeddedRocksDB PRIMARY KEY(key); INSERT INTO rocksdb_race SELECT '1_' || toString(number), number FROM numbers(100000); -" | $CLICKHOUSE_CLIENT -n +" | $CLICKHOUSE_CLIENT function read_stat_thread() { while true; do echo " SELECT * FROM system.rocksdb FORMAT Null; - " | $CLICKHOUSE_CLIENT -n + " | $CLICKHOUSE_CLIENT done } @@ -29,7 +29,7 @@ function truncate_thread() sleep 3s; echo " TRUNCATE TABLE rocksdb_race; - " | $CLICKHOUSE_CLIENT -n + " | $CLICKHOUSE_CLIENT done } diff --git a/tests/queries/0_stateless/02151_hash_table_sizes_stats_joins.sh b/tests/queries/0_stateless/02151_hash_table_sizes_stats_joins.sh index 007dae6e427..a9b5c461c5a 100755 --- a/tests/queries/0_stateless/02151_hash_table_sizes_stats_joins.sh +++ b/tests/queries/0_stateless/02151_hash_table_sizes_stats_joins.sh @@ -12,7 +12,7 @@ opts=( --join_algorithm='parallel_hash' ) -$CLICKHOUSE_CLIENT -nq " +$CLICKHOUSE_CLIENT -q " CREATE TABLE t1(a UInt32, b UInt32) ENGINE=MergeTree ORDER BY (); INSERT INTO t1 SELECT number, number FROM numbers_mt(1e6); diff --git a/tests/queries/0_stateless/02229_client_stop_multiquery_in_SIGINT.sh b/tests/queries/0_stateless/02229_client_stop_multiquery_in_SIGINT.sh index e5d00bc1a1c..b23164f8354 100755 --- a/tests/queries/0_stateless/02229_client_stop_multiquery_in_SIGINT.sh +++ b/tests/queries/0_stateless/02229_client_stop_multiquery_in_SIGINT.sh @@ -5,12 +5,12 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . 
"$CUR_DIR"/../shell_config.sh -timeout -s INT 3s $CLICKHOUSE_CLIENT --max_block_size 1 -nm -q " +timeout -s INT 3s $CLICKHOUSE_CLIENT --max_block_size 1 -m -q " SELECT sleep(1) FROM numbers(100) FORMAT Null; SELECT 'FAIL'; " -timeout -s INT 3s $CLICKHOUSE_LOCAL --max_block_size 1 -nm -q " +timeout -s INT 3s $CLICKHOUSE_LOCAL --max_block_size 1 -m -q " SELECT sleep(1) FROM numbers(100) FORMAT Null; SELECT 'FAIL'; " diff --git a/tests/queries/0_stateless/02241_filesystem_cache_on_write_operations.sh b/tests/queries/0_stateless/02241_filesystem_cache_on_write_operations.sh index f8e7b7e7e72..8a44b65e6d1 100755 --- a/tests/queries/0_stateless/02241_filesystem_cache_on_write_operations.sh +++ b/tests/queries/0_stateless/02241_filesystem_cache_on_write_operations.sh @@ -16,7 +16,7 @@ for STORAGE_POLICY in 's3_cache' 'local_cache' 'azure_cache'; do $CLICKHOUSE_CLIENT --echo --query "SYSTEM DROP FILESYSTEM CACHE" - $CLICKHOUSE_CLIENT --echo -n --query "SELECT file_segment_range_begin, file_segment_range_end, size, state + $CLICKHOUSE_CLIENT --echo --query "SELECT file_segment_range_begin, file_segment_range_end, size, state FROM ( SELECT file_segment_range_begin, file_segment_range_end, size, state, local_path @@ -37,7 +37,7 @@ for STORAGE_POLICY in 's3_cache' 'local_cache' 'azure_cache'; do $CLICKHOUSE_CLIENT --echo --enable_filesystem_cache_on_write_operations=1 --query "INSERT INTO test_02241 SELECT number, toString(number) FROM numbers(100)" - $CLICKHOUSE_CLIENT --echo -n --query "SELECT file_segment_range_begin, file_segment_range_end, size, state + $CLICKHOUSE_CLIENT --echo --query "SELECT file_segment_range_begin, file_segment_range_end, size, state FROM ( SELECT file_segment_range_begin, file_segment_range_end, size, state, local_path @@ -70,7 +70,7 @@ for STORAGE_POLICY in 's3_cache' 'local_cache' 'azure_cache'; do $CLICKHOUSE_CLIENT --echo --enable_filesystem_cache_on_write_operations=1 --query "INSERT INTO test_02241 SELECT number, toString(number) FROM numbers(100, 200)" - $CLICKHOUSE_CLIENT --echo -n --query "SELECT file_segment_range_begin, file_segment_range_end, size, state + $CLICKHOUSE_CLIENT --echo --query "SELECT file_segment_range_begin, file_segment_range_end, size, state FROM ( SELECT file_segment_range_begin, file_segment_range_end, size, state, local_path @@ -109,7 +109,7 @@ for STORAGE_POLICY in 's3_cache' 'local_cache' 'azure_cache'; do $CLICKHOUSE_CLIENT --echo --query "SYSTEM FLUSH LOGS" - $CLICKHOUSE_CLIENT -n --query "SELECT + $CLICKHOUSE_CLIENT --query "SELECT query, ProfileEvents['RemoteFSReadBytes'] > 0 as remote_fs_read FROM system.query_log diff --git a/tests/queries/0_stateless/02242_delete_user_race.sh b/tests/queries/0_stateless/02242_delete_user_race.sh index 2af54276469..3cb79d69955 100755 --- a/tests/queries/0_stateless/02242_delete_user_race.sh +++ b/tests/queries/0_stateless/02242_delete_user_race.sh @@ -15,7 +15,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh -$CLICKHOUSE_CLIENT -nm -q " +$CLICKHOUSE_CLIENT -m -q " DROP ROLE IF EXISTS test_role_02242; CREATE ROLE test_role_02242; " diff --git a/tests/queries/0_stateless/02243_drop_user_grant_race.sh b/tests/queries/0_stateless/02243_drop_user_grant_race.sh index 4dce8e8124c..1972ecade06 100755 --- a/tests/queries/0_stateless/02243_drop_user_grant_race.sh +++ b/tests/queries/0_stateless/02243_drop_user_grant_race.sh @@ -7,7 +7,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . 
"$CURDIR"/../shell_config.sh -$CLICKHOUSE_CLIENT -nm -q " +$CLICKHOUSE_CLIENT -m -q " DROP ROLE IF EXISTS test_role_02244; CREATE ROLE test_role_02244; DROP USER IF EXISTS kek_02243; @@ -37,4 +37,4 @@ $CLICKHOUSE_CLIENT --user kek_02243 -q "SELECT * FROM test" 2>&1| grep -Fa "Exce $CLICKHOUSE_CLIENT -q "DROP ROLE IF EXISTS test_role_02243" $CLICKHOUSE_CLIENT -q "DROP USER IF EXISTS test_user_02243" -$CLICKHOUSE_CLIENT -q "DROP USER IF EXISTS kek_02243" \ No newline at end of file +$CLICKHOUSE_CLIENT -q "DROP USER IF EXISTS kek_02243" diff --git a/tests/queries/0_stateless/02266_protobuf_format_google_wrappers.sh b/tests/queries/0_stateless/02266_protobuf_format_google_wrappers.sh index ae2a2351c6b..eb1109aaac1 100755 --- a/tests/queries/0_stateless/02266_protobuf_format_google_wrappers.sh +++ b/tests/queries/0_stateless/02266_protobuf_format_google_wrappers.sh @@ -44,7 +44,7 @@ protobuf_info() { fi } -$CLICKHOUSE_CLIENT -n --query " +$CLICKHOUSE_CLIENT --query " DROP TABLE IF EXISTS $MAIN_TABLE; DROP TABLE IF EXISTS $ROUNDTRIP_TABLE; DROP TABLE IF EXISTS $COMPATIBILITY_TABLE; @@ -78,14 +78,14 @@ echo $SET_OUTPUT echo echo "Insert $INITIAL_INSERT_VALUES into table (Nullable(String), Int32):" -$CLICKHOUSE_CLIENT -n --query " +$CLICKHOUSE_CLIENT --query " INSERT INTO $MAIN_TABLE VALUES $INITIAL_INSERT_VALUES; SELECT * FROM $MAIN_TABLE; " echo echo "Protobuf representation of the second row:" -$CLICKHOUSE_CLIENT -n --query "$SET_OUTPUT SELECT * FROM $MAIN_TABLE WHERE ref = 2 LIMIT 1 $(protobuf_info output ProtobufSingle Message)" > "$BINARY_FILE_PATH" +$CLICKHOUSE_CLIENT --query "$SET_OUTPUT SELECT * FROM $MAIN_TABLE WHERE ref = 2 LIMIT 1 $(protobuf_info output ProtobufSingle Message)" > "$BINARY_FILE_PATH" hexdump -C $BINARY_FILE_PATH echo @@ -101,12 +101,12 @@ hexdump -C $MESSAGE_FILE_PATH echo echo "Insert proto message into table (Nullable(String), Int32):" -$CLICKHOUSE_CLIENT -n --query "$SET_INPUT INSERT INTO $ROUNDTRIP_TABLE $(protobuf_info input Protobuf Message)" < "$MESSAGE_FILE_PATH" +$CLICKHOUSE_CLIENT --query "$SET_INPUT INSERT INTO $ROUNDTRIP_TABLE $(protobuf_info input Protobuf Message)" < "$MESSAGE_FILE_PATH" $CLICKHOUSE_CLIENT --query "SELECT * FROM $ROUNDTRIP_TABLE" echo echo "Proto output of the table using Google wrapper:" -$CLICKHOUSE_CLIENT -n --query "$SET_OUTPUT SELECT * FROM $ROUNDTRIP_TABLE $(protobuf_info output Protobuf Message)" > "$BINARY_FILE_PATH" +$CLICKHOUSE_CLIENT --query "$SET_OUTPUT SELECT * FROM $ROUNDTRIP_TABLE $(protobuf_info output Protobuf Message)" > "$BINARY_FILE_PATH" hexdump -C $BINARY_FILE_PATH echo @@ -124,14 +124,14 @@ echo echo "Insert $MULTI_WRAPPER_VALUES and reinsert using Google wrappers into:" echo "Table (Nullable(Int32), Nullable(Int32), Int32):" $CLICKHOUSE_CLIENT --query "INSERT INTO $MULTI_TABLE VALUES $MULTI_WRAPPER_VALUES" -$CLICKHOUSE_CLIENT -n --query "$SET_OUTPUT SELECT * FROM $MULTI_TABLE $(protobuf_info output Protobuf MessageMultiWrapper)" > "$BINARY_FILE_PATH" -$CLICKHOUSE_CLIENT -n --query "$SET_INPUT INSERT INTO $MULTI_TABLE $(protobuf_info input Protobuf MessageMultiWrapper)" < "$BINARY_FILE_PATH" +$CLICKHOUSE_CLIENT --query "$SET_OUTPUT SELECT * FROM $MULTI_TABLE $(protobuf_info output Protobuf MessageMultiWrapper)" > "$BINARY_FILE_PATH" +$CLICKHOUSE_CLIENT --query "$SET_INPUT INSERT INTO $MULTI_TABLE $(protobuf_info input Protobuf MessageMultiWrapper)" < "$BINARY_FILE_PATH" $CLICKHOUSE_CLIENT --query "SELECT * FROM $MULTI_TABLE" rm "$BINARY_FILE_PATH" rm "$MESSAGE_FILE_PATH" -$CLICKHOUSE_CLIENT -n --query " 
+$CLICKHOUSE_CLIENT --query " DROP TABLE $MAIN_TABLE; DROP TABLE $ROUNDTRIP_TABLE; DROP TABLE $COMPATIBILITY_TABLE; diff --git a/tests/queries/0_stateless/02286_drop_filesystem_cache.sh b/tests/queries/0_stateless/02286_drop_filesystem_cache.sh index dab777fcc31..672bf4d068b 100755 --- a/tests/queries/0_stateless/02286_drop_filesystem_cache.sh +++ b/tests/queries/0_stateless/02286_drop_filesystem_cache.sh @@ -11,7 +11,7 @@ for STORAGE_POLICY in 's3_cache' 'local_cache' 'azure_cache'; do echo "Using storage policy: $STORAGE_POLICY" $CLICKHOUSE_CLIENT --query "DROP TABLE IF EXISTS test_02286" - $CLICKHOUSE_CLIENT -n --query "CREATE TABLE test_02286 (key UInt32, value String) + $CLICKHOUSE_CLIENT --query "CREATE TABLE test_02286 (key UInt32, value String) Engine=MergeTree() ORDER BY key SETTINGS storage_policy='$STORAGE_POLICY', min_bytes_for_wide_part = 10485760" @@ -38,7 +38,7 @@ for STORAGE_POLICY in 's3_cache' 'local_cache' 'azure_cache'; do $CLICKHOUSE_CLIENT --query "SELECT * FROM test_02286 FORMAT Null" $CLICKHOUSE_CLIENT --query "SELECT count() FROM system.filesystem_cache" - $CLICKHOUSE_CLIENT -n --query "SELECT count() + $CLICKHOUSE_CLIENT --query "SELECT count() FROM ( SELECT arrayJoin(cache_paths) AS cache_path, @@ -54,7 +54,7 @@ for STORAGE_POLICY in 's3_cache' 'local_cache' 'azure_cache'; do $CLICKHOUSE_CLIENT --query "SELECT count() FROM system.filesystem_cache" $CLICKHOUSE_CLIENT --query "SELECT cache_path FROM system.filesystem_cache" - $CLICKHOUSE_CLIENT -n --query "SELECT cache_path, local_path + $CLICKHOUSE_CLIENT --query "SELECT cache_path, local_path FROM ( SELECT arrayJoin(cache_paths) AS cache_path, diff --git a/tests/queries/0_stateless/02313_filesystem_cache_seeks.sh b/tests/queries/0_stateless/02313_filesystem_cache_seeks.sh index b7adde6fcbb..64fae06f220 100755 --- a/tests/queries/0_stateless/02313_filesystem_cache_seeks.sh +++ b/tests/queries/0_stateless/02313_filesystem_cache_seeks.sh @@ -23,7 +23,7 @@ for STORAGE_POLICY in 's3_cache' 'local_cache' 's3_cache_multi' 'azure_cache'; d ORDER BY tuple() SETTINGS storage_policy = '$STORAGE_POLICY'" > /dev/null - $CLICKHOUSE_CLIENT --enable_filesystem_cache_on_write_operations=0 -n --query "INSERT INTO test_02313 + $CLICKHOUSE_CLIENT --enable_filesystem_cache_on_write_operations=0 --query "INSERT INTO test_02313 SELECT * FROM generateRandom('id Int32, val String') LIMIT 100000" diff --git a/tests/queries/0_stateless/02340_parts_refcnt_mergetree.sh b/tests/queries/0_stateless/02340_parts_refcnt_mergetree.sh index bd018018789..f252b9304cd 100755 --- a/tests/queries/0_stateless/02340_parts_refcnt_mergetree.sh +++ b/tests/queries/0_stateless/02340_parts_refcnt_mergetree.sh @@ -9,7 +9,7 @@ function check_refcnt_for_table() { local table=$1 && shift - $CLICKHOUSE_CLIENT -nm -q " + $CLICKHOUSE_CLIENT -m -q " system stop merges $table; -- cleanup thread may hold the parts lock system stop cleanup $table; @@ -66,14 +66,14 @@ function check_refcnt_for_table() # NOTE: index_granularity=1 to cancel ASAP -$CLICKHOUSE_CLIENT -nmq " +$CLICKHOUSE_CLIENT -mq " drop table if exists data_02340; create table data_02340 (key Int, part Int) engine=MergeTree() partition by part order by key settings index_granularity=1; " || exit 1 check_refcnt_for_table data_02340 $CLICKHOUSE_CLIENT -q "drop table data_02340 sync" -$CLICKHOUSE_CLIENT -nmq " +$CLICKHOUSE_CLIENT -mq " drop table if exists data_02340_rep sync; create table data_02340_rep (key Int, part Int) engine=ReplicatedMergeTree('/clickhouse/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX', '1') 
partition by part order by key settings index_granularity=1; " || exit 1 diff --git a/tests/queries/0_stateless/02344_describe_cache.sh b/tests/queries/0_stateless/02344_describe_cache.sh index c5373b4d7e3..4cc4c415f3f 100755 --- a/tests/queries/0_stateless/02344_describe_cache.sh +++ b/tests/queries/0_stateless/02344_describe_cache.sh @@ -7,14 +7,14 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) disk_name="02344_describe_cache_test" -$CLICKHOUSE_CLIENT -nm --query """ +$CLICKHOUSE_CLIENT -m --query """ DROP TABLE IF EXISTS test; CREATE TABLE test (a Int32, b String) ENGINE = MergeTree() ORDER BY tuple() SETTINGS disk = disk(name = '$disk_name', type = cache, max_size = '100Ki', path = '$disk_name', disk = 's3_disk', load_metadata_asynchronously = 0); """ -$CLICKHOUSE_CLIENT -nm --query """ +$CLICKHOUSE_CLIENT -m --query """ SELECT count() FROM system.disks WHERE name = '$disk_name' """ diff --git a/tests/queries/0_stateless/02352_rwlock.sh b/tests/queries/0_stateless/02352_rwlock.sh index b4a77e0b08a..7f02b3ee935 100755 --- a/tests/queries/0_stateless/02352_rwlock.sh +++ b/tests/queries/0_stateless/02352_rwlock.sh @@ -24,7 +24,7 @@ function wait_query_by_id_started() # wait for query to be started while [ "$($CLICKHOUSE_CLIENT "$@" -q "select count() from system.processes where query_id = '$query_id'")" -ne 1 ]; do if [ "$( - $CLICKHOUSE_CLIENT --max_bytes_before_external_group_by 0 -nm -q " + $CLICKHOUSE_CLIENT --max_bytes_before_external_group_by 0 -m -q " system flush logs; select count() from system.query_log @@ -52,7 +52,7 @@ $CLICKHOUSE_CLIENT -q "CREATE DATABASE ${CLICKHOUSE_DATABASE}_ordinary Engine=Or # debug build on CI, so if this will happen, then DROP query will be # finished instantly, and to avoid flakiness we will retry in this case while :; do - $CLICKHOUSE_CLIENT -nm -q " + $CLICKHOUSE_CLIENT -m -q " DROP TABLE IF EXISTS ${CLICKHOUSE_DATABASE}_ordinary.data_02352; CREATE TABLE ${CLICKHOUSE_DATABASE}_ordinary.data_02352 (key Int) Engine=Null(); " diff --git a/tests/queries/0_stateless/02380_insert_mv_race.sh b/tests/queries/0_stateless/02380_insert_mv_race.sh index 725c7eacce6..a5f05365180 100755 --- a/tests/queries/0_stateless/02380_insert_mv_race.sh +++ b/tests/queries/0_stateless/02380_insert_mv_race.sh @@ -9,13 +9,13 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . 
"$CUR_DIR"/../shell_config.sh -$CLICKHOUSE_CLIENT -nm -q "ATTACH TABLE mv" |& { +$CLICKHOUSE_CLIENT -m -q "ATTACH TABLE mv" |& { # CANNOT_GET_CREATE_TABLE_QUERY -- ATTACH TABLE IF EXISTS # TABLE_ALREADY_EXISTS -- ATTACH TABLE IF NOT EXISTS grep -F -m1 Exception | grep -v -e CANNOT_GET_CREATE_TABLE_QUERY -e TABLE_ALREADY_EXISTS } -$CLICKHOUSE_CLIENT -nm -q " +$CLICKHOUSE_CLIENT -m -q " DROP TABLE IF EXISTS null; CREATE TABLE null (key Int) ENGINE = Null; DROP TABLE IF EXISTS mv; diff --git a/tests/queries/0_stateless/02417_opentelemetry_insert_on_distributed_table.sh b/tests/queries/0_stateless/02417_opentelemetry_insert_on_distributed_table.sh index c692c37f27f..9e8b854375b 100755 --- a/tests/queries/0_stateless/02417_opentelemetry_insert_on_distributed_table.sh +++ b/tests/queries/0_stateless/02417_opentelemetry_insert_on_distributed_table.sh @@ -27,7 +27,7 @@ function insert() function check_span() { -${CLICKHOUSE_CLIENT} -nq " +${CLICKHOUSE_CLIENT} -q " SYSTEM FLUSH LOGS; SELECT operation_name, @@ -50,7 +50,7 @@ ${CLICKHOUSE_CLIENT} -nq " # $2 - value of distributed_foreground_insert function check_span_kind() { -${CLICKHOUSE_CLIENT} -nq " +${CLICKHOUSE_CLIENT} -q " SYSTEM FLUSH LOGS; SELECT count() @@ -65,7 +65,7 @@ ${CLICKHOUSE_CLIENT} -nq " # # Prepare tables for tests # -${CLICKHOUSE_CLIENT} -nq " +${CLICKHOUSE_CLIENT} -q " DROP TABLE IF EXISTS ${CLICKHOUSE_DATABASE}.dist_opentelemetry; DROP TABLE IF EXISTS ${CLICKHOUSE_DATABASE}.local_opentelemetry; @@ -122,7 +122,7 @@ check_span_kind $trace_id 'CLIENT' # # Cleanup # -${CLICKHOUSE_CLIENT} -nq " +${CLICKHOUSE_CLIENT} -q " DROP TABLE ${CLICKHOUSE_DATABASE}.dist_opentelemetry; DROP TABLE ${CLICKHOUSE_DATABASE}.local_opentelemetry; " diff --git a/tests/queries/0_stateless/02419_keeper_map_primary_key.sh b/tests/queries/0_stateless/02419_keeper_map_primary_key.sh index c43c5bb6408..6a23f666f36 100755 --- a/tests/queries/0_stateless/02419_keeper_map_primary_key.sh +++ b/tests/queries/0_stateless/02419_keeper_map_primary_key.sh @@ -9,7 +9,7 @@ $CLICKHOUSE_CLIENT -q "DROP TABLE IF EXISTS 02419_test SYNC;" test_primary_key() { - $CLICKHOUSE_CLIENT -nm -q " + $CLICKHOUSE_CLIENT -m -q " CREATE TABLE 02419_test (key UInt64, value Float64) Engine=KeeperMap('/' || currentDatabase() || '/test2418', 3) PRIMARY KEY($1); INSERT INTO 02419_test VALUES (1, 1.1), (2, 2.2); SELECT value FROM 02419_test WHERE key = 1; diff --git a/tests/queries/0_stateless/02443_detach_attach_partition.sh b/tests/queries/0_stateless/02443_detach_attach_partition.sh index 6a47b7d8d61..4a5377a952a 100755 --- a/tests/queries/0_stateless/02443_detach_attach_partition.sh +++ b/tests/queries/0_stateless/02443_detach_attach_partition.sh @@ -8,7 +8,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) . 
"$CURDIR"/replication.lib -$CLICKHOUSE_CLIENT -n -q " +$CLICKHOUSE_CLIENT -q " DROP TABLE IF EXISTS alter_table0; DROP TABLE IF EXISTS alter_table1; diff --git a/tests/queries/0_stateless/02473_optimize_old_parts.sh b/tests/queries/0_stateless/02473_optimize_old_parts.sh index b563bc31b39..3a4e6145f12 100755 --- a/tests/queries/0_stateless/02473_optimize_old_parts.sh +++ b/tests/queries/0_stateless/02473_optimize_old_parts.sh @@ -21,7 +21,7 @@ wait_for_number_of_parts() { echo "$res" } -$CLICKHOUSE_CLIENT -nmq " +$CLICKHOUSE_CLIENT -mq " DROP TABLE IF EXISTS test_without_merge; DROP TABLE IF EXISTS test_with_merge; @@ -34,7 +34,7 @@ INSERT INTO test_without_merge SELECT 3;" wait_for_number_of_parts 'test_without_merge' 1 10 -$CLICKHOUSE_CLIENT -nmq " +$CLICKHOUSE_CLIENT -mq " DROP TABLE test_without_merge; SELECT 'With merge any part range'; @@ -47,7 +47,7 @@ INSERT INTO test_with_merge SELECT 3;" wait_for_number_of_parts 'test_with_merge' 1 100 -$CLICKHOUSE_CLIENT -nmq " +$CLICKHOUSE_CLIENT -mq " DROP TABLE test_with_merge; SELECT 'With merge partition only'; @@ -60,7 +60,7 @@ INSERT INTO test_with_merge SELECT 3;" wait_for_number_of_parts 'test_with_merge' 1 100 -$CLICKHOUSE_CLIENT -nmq " +$CLICKHOUSE_CLIENT -mq " SELECT sleepEachRow(1) FROM numbers(9) SETTINGS function_sleep_max_microseconds_per_block = 10000000 FORMAT Null; -- Sleep for 9 seconds and verify that we keep the old part because it's the only one SELECT (now() - modification_time) > 5 FROM system.parts WHERE database = currentDatabase() AND table='test_with_merge' AND active; diff --git a/tests/queries/0_stateless/02477_s3_request_throttler.sh b/tests/queries/0_stateless/02477_s3_request_throttler.sh index c74cb598d42..5e6185e7304 100755 --- a/tests/queries/0_stateless/02477_s3_request_throttler.sh +++ b/tests/queries/0_stateless/02477_s3_request_throttler.sh @@ -6,7 +6,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh -$CLICKHOUSE_CLIENT -nq " +$CLICKHOUSE_CLIENT -q " -- Limit S3 PUT request per second rate SET s3_max_put_rps = 2; SET s3_max_put_burst = 1; diff --git a/tests/queries/0_stateless/02494_zero_copy_projection_cancel_fetch.sh b/tests/queries/0_stateless/02494_zero_copy_projection_cancel_fetch.sh index b72c3eb56c7..122684abe9c 100755 --- a/tests/queries/0_stateless/02494_zero_copy_projection_cancel_fetch.sh +++ b/tests/queries/0_stateless/02494_zero_copy_projection_cancel_fetch.sh @@ -5,7 +5,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . 
"$CUR_DIR"/../shell_config.sh -$CLICKHOUSE_CLIENT -n --query " +$CLICKHOUSE_CLIENT --query " DROP TABLE IF EXISTS wikistat1 SYNC; DROP TABLE IF EXISTS wikistat2 SYNC; " @@ -60,7 +60,7 @@ wait $CLICKHOUSE_CLIENT --query "SELECT count() FROM wikistat1 WHERE NOT ignore(*)" $CLICKHOUSE_CLIENT --query "SELECT count() FROM wikistat2 WHERE NOT ignore(*)" -$CLICKHOUSE_CLIENT -n --query " +$CLICKHOUSE_CLIENT --query " DROP TABLE IF EXISTS wikistat1 SYNC; DROP TABLE IF EXISTS wikistat2 SYNC; " diff --git a/tests/queries/0_stateless/02504_regexp_dictionary_ua_parser.sh b/tests/queries/0_stateless/02504_regexp_dictionary_ua_parser.sh index e389cf410e8..77c8049bd58 100755 --- a/tests/queries/0_stateless/02504_regexp_dictionary_ua_parser.sh +++ b/tests/queries/0_stateless/02504_regexp_dictionary_ua_parser.sh @@ -11,7 +11,7 @@ cp $CURDIR/data_ua_parser/os.yaml ${USER_FILES_PATH}/${CLICKHOUSE_DATABASE}/ cp $CURDIR/data_ua_parser/browser.yaml ${USER_FILES_PATH}/${CLICKHOUSE_DATABASE}/ cp $CURDIR/data_ua_parser/device.yaml ${USER_FILES_PATH}/${CLICKHOUSE_DATABASE}/ -$CLICKHOUSE_CLIENT -n --query=" +$CLICKHOUSE_CLIENT --query=" drop dictionary if exists regexp_os; drop dictionary if exists regexp_browser; drop dictionary if exists regexp_device; @@ -61,10 +61,10 @@ create table user_agents Engine = Log(); " -$CLICKHOUSE_CLIENT -n --query=" +$CLICKHOUSE_CLIENT --query=" insert into user_agents select ua from input('ua String') FORMAT LineAsString" < $CURDIR/data_ua_parser/useragents.txt -$CLICKHOUSE_CLIENT -n --query=" +$CLICKHOUSE_CLIENT --query=" select ua, device, concat(tupleElement(browser, 1), ' ', tupleElement(browser, 2), '.', tupleElement(browser, 3)) as browser , concat(tupleElement(os, 1), ' ', tupleElement(os, 2), '.', tupleElement(os, 3), '.', tupleElement(os, 4)) as os @@ -74,7 +74,7 @@ from ( dictGet('regexp_device', 'device_replacement', ua) device from user_agents) order by ua; " -$CLICKHOUSE_CLIENT -n --query=" +$CLICKHOUSE_CLIENT --query=" drop dictionary if exists regexp_os; drop dictionary if exists regexp_browser; drop dictionary if exists regexp_device; diff --git a/tests/queries/0_stateless/02504_regexp_dictionary_yaml_source.sh b/tests/queries/0_stateless/02504_regexp_dictionary_yaml_source.sh index 68a87a14320..dec03bcaaa0 100755 --- a/tests/queries/0_stateless/02504_regexp_dictionary_yaml_source.sh +++ b/tests/queries/0_stateless/02504_regexp_dictionary_yaml_source.sh @@ -27,7 +27,7 @@ cat > "$yaml" < "$yaml" < "$yaml" < "$yaml" < "$yaml" < "$yaml" < "$yaml" < "$yaml" < /dev/null & diff --git a/tests/queries/0_stateless/02590_interserver_mode_client_info_initial_query_start_time.sh b/tests/queries/0_stateless/02590_interserver_mode_client_info_initial_query_start_time.sh index 3b0d2309784..9f743581e1a 100755 --- a/tests/queries/0_stateless/02590_interserver_mode_client_info_initial_query_start_time.sh +++ b/tests/queries/0_stateless/02590_interserver_mode_client_info_initial_query_start_time.sh @@ -13,7 +13,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) function get_query_id() { random_str 10; } -$CLICKHOUSE_CLIENT -nm -q " +$CLICKHOUSE_CLIENT -m -q " drop table if exists buf; drop table if exists dist; drop table if exists data; @@ -31,7 +31,7 @@ query_id="$(get_query_id)" # test, since we care about the difference between NOW() and there should # not be any significant difference. 
$CLICKHOUSE_CLIENT --prefer_localhost_replica=0 --query_id "$query_id" -q "select * from dist" -$CLICKHOUSE_CLIENT -nm --param_query_id "$query_id" -q " +$CLICKHOUSE_CLIENT -m --param_query_id "$query_id" -q " system flush logs; select count(), count(distinct initial_query_start_time_microseconds) from system.query_log where type = 'QueryFinish' and initial_query_id = {query_id:String}; " @@ -42,25 +42,25 @@ query_id="$(get_query_id)" # this query (and all subsequent) should reuse the previous connection (at least most of the time) $CLICKHOUSE_CLIENT --prefer_localhost_replica=0 --query_id "$query_id" -q "select * from dist" -$CLICKHOUSE_CLIENT -nm --param_query_id "$query_id" -q " +$CLICKHOUSE_CLIENT -m --param_query_id "$query_id" -q " system flush logs; select count(), count(distinct initial_query_start_time_microseconds) from system.query_log where type = 'QueryFinish' and initial_query_id = {query_id:String}; " echo "INSERT" query_id="$(get_query_id)" -$CLICKHOUSE_CLIENT --prefer_localhost_replica=0 --query_id "$query_id" -nm -q " +$CLICKHOUSE_CLIENT --prefer_localhost_replica=0 --query_id "$query_id" -m -q " insert into dist_dist values (1),(2); select * from data; " sleep 1 -$CLICKHOUSE_CLIENT -nm --param_query_id "$query_id" -q "system flush distributed dist_dist" +$CLICKHOUSE_CLIENT -m --param_query_id "$query_id" -q "system flush distributed dist_dist" sleep 1 -$CLICKHOUSE_CLIENT -nm --param_query_id "$query_id" -q "system flush distributed dist" +$CLICKHOUSE_CLIENT -m --param_query_id "$query_id" -q "system flush distributed dist" echo "CHECK" -$CLICKHOUSE_CLIENT -nm --param_query_id "$query_id" -q " +$CLICKHOUSE_CLIENT -m --param_query_id "$query_id" -q " select * from data order by key; system flush logs; select count(), count(distinct initial_query_start_time_microseconds) from system.query_log where type = 'QueryFinish' and initial_query_id = {query_id:String}; diff --git a/tests/queries/0_stateless/02675_profile_events_from_query_log_and_client.sh b/tests/queries/0_stateless/02675_profile_events_from_query_log_and_client.sh index ff534a6a2e6..5015521ca4d 100755 --- a/tests/queries/0_stateless/02675_profile_events_from_query_log_and_client.sh +++ b/tests/queries/0_stateless/02675_profile_events_from_query_log_and_client.sh @@ -7,7 +7,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) . 
"$CURDIR"/../shell_config.sh echo "INSERT TO S3" -$CLICKHOUSE_CLIENT --print-profile-events --profile-events-delay-ms=-1 -nq " +$CLICKHOUSE_CLIENT --print-profile-events --profile-events-delay-ms=-1 -q " INSERT INTO TABLE FUNCTION s3('http://localhost:11111/test/profile_events.csv', 'test', 'testtest', 'CSV', 'number UInt64') SELECT number FROM numbers(1000000) SETTINGS s3_max_single_part_upload_size = 10, s3_truncate_on_insert = 1; " 2>&1 | $CLICKHOUSE_LOCAL -q " WITH '(\\w+): (\\d+)' AS pattern, @@ -30,7 +30,7 @@ SELECT * FROM ( " echo "CHECK WITH query_log" -$CLICKHOUSE_CLIENT -nq " +$CLICKHOUSE_CLIENT -q " SYSTEM FLUSH LOGS; SELECT type, 'S3CreateMultipartUpload', ProfileEvents['S3CreateMultipartUpload'], @@ -45,7 +45,7 @@ ORDER BY query_start_time DESC; " echo "CREATE" -$CLICKHOUSE_CLIENT -nq " +$CLICKHOUSE_CLIENT -q " DROP TABLE IF EXISTS times; CREATE TABLE times (t DateTime) ENGINE MergeTree ORDER BY t SETTINGS @@ -56,29 +56,29 @@ CREATE TABLE times (t DateTime) ENGINE MergeTree ORDER BY t " echo "INSERT" -$CLICKHOUSE_CLIENT --print-profile-events --profile-events-delay-ms=-1 -nq " +$CLICKHOUSE_CLIENT --print-profile-events --profile-events-delay-ms=-1 -q " INSERT INTO times SELECT now() + INTERVAL 1 day SETTINGS optimize_on_insert = 0; " 2>&1 | grep -o -e ' \[ .* \] FileOpen: .* ' echo "READ" -$CLICKHOUSE_CLIENT --print-profile-events --profile-events-delay-ms=-1 -nq " +$CLICKHOUSE_CLIENT --print-profile-events --profile-events-delay-ms=-1 -q " SELECT '1', min(t) FROM times SETTINGS optimize_use_implicit_projections = 1; " 2>&1 | grep -o -e ' \[ .* \] FileOpen: .* ' echo "INSERT and READ INSERT" -$CLICKHOUSE_CLIENT --print-profile-events --profile-events-delay-ms=-1 -nq " +$CLICKHOUSE_CLIENT --print-profile-events --profile-events-delay-ms=-1 -q " INSERT INTO times SELECT now() + INTERVAL 2 day SETTINGS optimize_on_insert = 0; SELECT '2', min(t) FROM times SETTINGS optimize_use_implicit_projections = 1; INSERT INTO times SELECT now() + INTERVAL 3 day SETTINGS optimize_on_insert = 0; " 2>&1 | grep -o -e ' \[ .* \] FileOpen: .* ' echo "DROP" -$CLICKHOUSE_CLIENT -nq " +$CLICKHOUSE_CLIENT -q " DROP TABLE times; " echo "CHECK with query_log" -$CLICKHOUSE_CLIENT -nq " +$CLICKHOUSE_CLIENT -q " SYSTEM FLUSH LOGS; SELECT type, query, diff --git a/tests/queries/0_stateless/02676_optimize_old_parts_replicated.sh b/tests/queries/0_stateless/02676_optimize_old_parts_replicated.sh index c1f28f9f079..84be5959344 100755 --- a/tests/queries/0_stateless/02676_optimize_old_parts_replicated.sh +++ b/tests/queries/0_stateless/02676_optimize_old_parts_replicated.sh @@ -21,7 +21,7 @@ wait_for_number_of_parts() { echo "$res" } -$CLICKHOUSE_CLIENT -nmq " +$CLICKHOUSE_CLIENT -mq " DROP TABLE IF EXISTS test_without_merge; DROP TABLE IF EXISTS test_replicated; @@ -34,7 +34,7 @@ INSERT INTO test_without_merge SELECT 3;" wait_for_number_of_parts 'test_without_merge' 1 10 -$CLICKHOUSE_CLIENT -nmq " +$CLICKHOUSE_CLIENT -mq " DROP TABLE test_without_merge; SELECT 'With merge replicated any part range'; @@ -47,7 +47,7 @@ INSERT INTO test_replicated SELECT 3;" wait_for_number_of_parts 'test_replicated' 1 100 -$CLICKHOUSE_CLIENT -nmq " +$CLICKHOUSE_CLIENT -mq " DROP TABLE test_replicated; SELECT 'With merge replicated partition only'; @@ -60,7 +60,7 @@ INSERT INTO test_replicated SELECT 3;" wait_for_number_of_parts 'test_replicated' 1 100 -$CLICKHOUSE_CLIENT -nmq " +$CLICKHOUSE_CLIENT -mq " SELECT sleepEachRow(1) FROM numbers(9) SETTINGS function_sleep_max_microseconds_per_block = 10000000 FORMAT Null; -- Sleep for 
9 seconds and verify that we keep the old part because it's the only one SELECT (now() - modification_time) > 5 FROM system.parts WHERE database = currentDatabase() AND table='test_replicated' AND active; diff --git a/tests/queries/0_stateless/02686_postgres_protocol_decimal_256.sh b/tests/queries/0_stateless/02686_postgres_protocol_decimal_256.sh index 2a94f940327..a540a0981b3 100755 --- a/tests/queries/0_stateless/02686_postgres_protocol_decimal_256.sh +++ b/tests/queries/0_stateless/02686_postgres_protocol_decimal_256.sh @@ -9,6 +9,6 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) echo " DROP USER IF EXISTS postgresql_user; CREATE USER postgresql_user HOST IP '127.0.0.1' IDENTIFIED WITH no_password; -" | $CLICKHOUSE_CLIENT -n +" | $CLICKHOUSE_CLIENT psql --host localhost --port ${CLICKHOUSE_PORT_POSTGRESQL} ${CLICKHOUSE_DATABASE} --user postgresql_user -c "SELECT 1.23::Decimal256(70) AS test;" diff --git a/tests/queries/0_stateless/02700_s3_part_INT_MAX.sh b/tests/queries/0_stateless/02700_s3_part_INT_MAX.sh index cfb38c60615..dcd83f9fec3 100755 --- a/tests/queries/0_stateless/02700_s3_part_INT_MAX.sh +++ b/tests/queries/0_stateless/02700_s3_part_INT_MAX.sh @@ -12,7 +12,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # too slow with this. # # Unfortunately, the test has to buffer it in memory. -$CLICKHOUSE_CLIENT --max_memory_usage 16G -nm -q " +$CLICKHOUSE_CLIENT --max_memory_usage 16G -m -q " INSERT INTO FUNCTION s3('http://localhost:11111/test/$CLICKHOUSE_DATABASE/test_INT_MAX.tsv', '', '', 'TSV') SELECT repeat('a', 1024) FROM numbers((pow(2, 30) * 2) / 1024) SETTINGS s3_max_single_part_upload_size = '5Gi'; diff --git a/tests/queries/0_stateless/02725_start_stop_fetches.sh b/tests/queries/0_stateless/02725_start_stop_fetches.sh index c9922455d94..604d0774b83 100755 --- a/tests/queries/0_stateless/02725_start_stop_fetches.sh +++ b/tests/queries/0_stateless/02725_start_stop_fetches.sh @@ -10,7 +10,7 @@ set -e NUM_REPLICAS=5 for i in $(seq 1 $NUM_REPLICAS); do - $CLICKHOUSE_CLIENT -n -q " + $CLICKHOUSE_CLIENT -q " DROP TABLE IF EXISTS r$i SYNC; CREATE TABLE r$i (x UInt64) ENGINE = ReplicatedMergeTree('/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/r', 'r$i') ORDER BY x SETTINGS replicated_deduplication_window = 1, allow_remote_fs_zero_copy_replication = 1; " diff --git a/tests/queries/0_stateless/02731_zero_objects_in_metadata.sh b/tests/queries/0_stateless/02731_zero_objects_in_metadata.sh index 78659b70129..6e40495157b 100755 --- a/tests/queries/0_stateless/02731_zero_objects_in_metadata.sh +++ b/tests/queries/0_stateless/02731_zero_objects_in_metadata.sh @@ -7,7 +7,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) for DISK in s3_disk s3_cache do - ${CLICKHOUSE_CLIENT} -n --query " + ${CLICKHOUSE_CLIENT} --query " DROP TABLE IF EXISTS test; CREATE TABLE test (id Int32, empty Array(Int32)) ENGINE=MergeTree ORDER BY id @@ -17,13 +17,13 @@ do SELECT * FROM test; " - ${CLICKHOUSE_CLIENT} -n --query " + ${CLICKHOUSE_CLIENT} --query " BACKUP TABLE test TO Disk('backups', 'test_s3_backup'); DROP TABLE test; RESTORE TABLE test FROM Disk('backups', 'test_s3_backup'); " &>/dev/null - ${CLICKHOUSE_CLIENT} -n --query " + ${CLICKHOUSE_CLIENT} --query " SELECT * FROM test; SELECT empty FROM test; " diff --git a/tests/queries/0_stateless/02766_prql.sh b/tests/queries/0_stateless/02766_prql.sh index 85b1167027c..d1e4dca070c 100755 --- a/tests/queries/0_stateless/02766_prql.sh +++ b/tests/queries/0_stateless/02766_prql.sh @@ -5,7 +5,7 @@ CUR_DIR=$(cd "$(dirname 
"${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . "$CUR_DIR"/../shell_config.sh -$CLICKHOUSE_CLIENT -n -q " +$CLICKHOUSE_CLIENT -q " CREATE TEMPORARY TABLE IF NOT EXISTS aboba ( user_id UInt32, diff --git a/tests/queries/0_stateless/02770_async_buffer_ignore.sh b/tests/queries/0_stateless/02770_async_buffer_ignore.sh index 37f002767d6..af9f18b232f 100755 --- a/tests/queries/0_stateless/02770_async_buffer_ignore.sh +++ b/tests/queries/0_stateless/02770_async_buffer_ignore.sh @@ -5,7 +5,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . "$CUR_DIR"/../shell_config.sh -${CLICKHOUSE_CLIENT} -nm --query " +${CLICKHOUSE_CLIENT} -m --query " DROP TABLE IF EXISTS test_s3; CREATE TABLE test_s3 (a UInt64, b UInt64) @@ -17,7 +17,7 @@ INSERT INTO test_s3 SELECT number, number FROM numbers(1000000); query="SELECT sum(b) FROM test_s3 WHERE a >= 100000 AND a <= 102000" query_id=$(${CLICKHOUSE_CLIENT} --query "select queryID() from ($query) limit 1" 2>&1) ${CLICKHOUSE_CLIENT} --query "SYSTEM FLUSH LOGS" -${CLICKHOUSE_CLIENT} -nm --query " +${CLICKHOUSE_CLIENT} -m --query " SELECT ProfileEvents['S3ReadRequestsCount'], ProfileEvents['ReadBufferFromS3Bytes'], diff --git a/tests/queries/0_stateless/02789_filesystem_cache_alignment.sh b/tests/queries/0_stateless/02789_filesystem_cache_alignment.sh index c69c635f6ed..53d2832c589 100755 --- a/tests/queries/0_stateless/02789_filesystem_cache_alignment.sh +++ b/tests/queries/0_stateless/02789_filesystem_cache_alignment.sh @@ -5,7 +5,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh -$CLICKHOUSE_CLIENT -nm -q " +$CLICKHOUSE_CLIENT -m -q " DROP TABLE IF EXISTS test; CREATE TABLE test (a Int32, b String) ENGINE = MergeTree() @@ -22,7 +22,7 @@ INSERT INTO test SELECT number, randomString(100) FROM numbers(1000000); " QUERY_ID=$RANDOM -$CLICKHOUSE_CLIENT --query_id "$QUERY_ID" -nm -q " +$CLICKHOUSE_CLIENT --query_id "$QUERY_ID" -m -q " SET enable_filesystem_cache_log = 1; SYSTEM DROP FILESYSTEM CACHE; SELECT * FROM test WHERE NOT ignore() LIMIT 1 FORMAT Null; @@ -49,14 +49,14 @@ WHERE query_id = '$QUERY_ID' " # File segments cannot be less that 20Mi, # except for last file segment in a file or if file size is less. 
-$CLICKHOUSE_CLIENT -nm -q " +$CLICKHOUSE_CLIENT -m -q " SELECT count() FROM ($query) WHERE file_segment_size < file_size AND end_offset + 1 != file_size AND file_segment_size < 20 * 1024 * 1024; " -all=$($CLICKHOUSE_CLIENT -nm -q " +all=$($CLICKHOUSE_CLIENT -m -q " SELECT count() FROM ($query) WHERE file_segment_size < file_size AND end_offset + 1 != file_size; ") @@ -68,7 +68,7 @@ else echo "FAIL" fi -count=$($CLICKHOUSE_CLIENT -nm -q " +count=$($CLICKHOUSE_CLIENT -m -q " SELECT count() FROM ($query) WHERE file_segment_size < file_size AND end_offset + 1 != file_size @@ -87,21 +87,21 @@ FROM (SELECT * FROM ($query)) AS cache_log INNER JOIN system.filesystem_cache AS cache ON cache_log.cache_path = cache.cache_path " -$CLICKHOUSE_CLIENT -nm -q " +$CLICKHOUSE_CLIENT -m -q " SELECT count() FROM ($query2) WHERE file_segment_range_begin - file_segment_range_end + 1 < file_size AND file_segment_range_end + 1 != file_size AND downloaded_size < 20 * 1024 * 1024; " -$CLICKHOUSE_CLIENT -nm -q " +$CLICKHOUSE_CLIENT -m -q " SELECT count() FROM ($query2) WHERE file_segment_range_begin - file_segment_range_end + 1 < file_size AND file_segment_range_end + 1 != file_size AND formatReadableSize(downloaded_size) not in ('20.00 MiB', '40.00 MiB'); " -all=$($CLICKHOUSE_CLIENT -nm -q " +all=$($CLICKHOUSE_CLIENT -m -q " SELECT count() FROM ($query2) WHERE file_segment_size < file_size AND file_segment_range_end + 1 != file_size; ") @@ -112,7 +112,7 @@ else echo "FAIL" fi -count2=$($CLICKHOUSE_CLIENT -nm -q " +count2=$($CLICKHOUSE_CLIENT -m -q " SELECT count() FROM ($query2) WHERE file_segment_range_begin - file_segment_range_end + 1 < file_size AND file_segment_range_end + 1 != file_size diff --git a/tests/queries/0_stateless/02789_reading_from_s3_with_connection_pool.sh b/tests/queries/0_stateless/02789_reading_from_s3_with_connection_pool.sh index 5a37d51233d..239485ab8dd 100755 --- a/tests/queries/0_stateless/02789_reading_from_s3_with_connection_pool.sh +++ b/tests/queries/0_stateless/02789_reading_from_s3_with_connection_pool.sh @@ -5,7 +5,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . 
"$CUR_DIR"/../shell_config.sh -${CLICKHOUSE_CLIENT} -nm --query " +${CLICKHOUSE_CLIENT} -m --query " DROP TABLE IF EXISTS test_s3; CREATE TABLE test_s3 (a UInt64, b UInt64) @@ -25,7 +25,7 @@ do query_id=$(${CLICKHOUSE_CLIENT} --query "select queryID() from ($query) limit 1" 2>&1) ${CLICKHOUSE_CLIENT} --query "SYSTEM FLUSH LOGS" - RES=$(${CLICKHOUSE_CLIENT} -nm --query " + RES=$(${CLICKHOUSE_CLIENT} -m --query " SELECT ProfileEvents['DiskConnectionsPreserved'] > 0 FROM system.query_log WHERE type = 'QueryFinish' @@ -41,7 +41,7 @@ done while true do - query_id=$(${CLICKHOUSE_CLIENT} -nq " + query_id=$(${CLICKHOUSE_CLIENT} -q " create table mut (n int, m int, k int) engine=ReplicatedMergeTree('/test/02441/{database}/mut', '1') order by n; set insert_keeper_fault_injection_probability=0; insert into mut values (1, 2, 3), (10, 20, 30); @@ -60,7 +60,7 @@ do ) limit 1 settings max_threads=1; " 2>&1) ${CLICKHOUSE_CLIENT} --query "SYSTEM FLUSH LOGS" - RES=$(${CLICKHOUSE_CLIENT} -nm --query " + RES=$(${CLICKHOUSE_CLIENT} -m --query " SELECT ProfileEvents['StorageConnectionsPreserved'] > 0 FROM system.query_log WHERE type = 'QueryFinish' diff --git a/tests/queries/0_stateless/02801_backup_native_copy.sh b/tests/queries/0_stateless/02801_backup_native_copy.sh index b8ee97a7c7d..bc485f903bc 100755 --- a/tests/queries/0_stateless/02801_backup_native_copy.sh +++ b/tests/queries/0_stateless/02801_backup_native_copy.sh @@ -8,7 +8,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) set -e -$CLICKHOUSE_CLIENT -nm -q " +$CLICKHOUSE_CLIENT -m -q " drop table if exists data; create table data (key Int) engine=MergeTree() order by tuple() settings disk='s3_disk'; insert into data select * from numbers(10); @@ -16,28 +16,28 @@ $CLICKHOUSE_CLIENT -nm -q " query_id=$(random_str 10) $CLICKHOUSE_CLIENT --format Null --query_id $query_id -q "BACKUP TABLE data TO S3(s3_conn, 'backups/$CLICKHOUSE_DATABASE/data_native_copy') SETTINGS allow_s3_native_copy=true" -$CLICKHOUSE_CLIENT -nm -q " +$CLICKHOUSE_CLIENT -m -q " SYSTEM FLUSH LOGS; SELECT query, ProfileEvents['S3CopyObject']>0 FROM system.query_log WHERE type = 'QueryFinish' AND event_date >= yesterday() AND current_database = '$CLICKHOUSE_DATABASE' AND query_id = '$query_id' " query_id=$(random_str 10) $CLICKHOUSE_CLIENT --format Null --query_id $query_id -q "BACKUP TABLE data TO S3(s3_conn, 'backups/$CLICKHOUSE_DATABASE/data_no_native_copy') SETTINGS allow_s3_native_copy=false" -$CLICKHOUSE_CLIENT -nm -q " +$CLICKHOUSE_CLIENT -m -q " SYSTEM FLUSH LOGS; SELECT query, ProfileEvents['S3CopyObject']>0 FROM system.query_log WHERE type = 'QueryFinish' AND event_date >= yesterday() AND current_database = '$CLICKHOUSE_DATABASE' AND query_id = '$query_id' " query_id=$(random_str 10) $CLICKHOUSE_CLIENT --format Null --query_id $query_id -q "RESTORE TABLE data AS data_native_copy FROM S3(s3_conn, 'backups/$CLICKHOUSE_DATABASE/data_native_copy') SETTINGS allow_s3_native_copy=true" -$CLICKHOUSE_CLIENT -nm -q " +$CLICKHOUSE_CLIENT -m -q " SYSTEM FLUSH LOGS; SELECT query, ProfileEvents['S3CopyObject']>0 FROM system.query_log WHERE type = 'QueryFinish' AND event_date >= yesterday() AND current_database = '$CLICKHOUSE_DATABASE' AND query_id = '$query_id' " query_id=$(random_str 10) $CLICKHOUSE_CLIENT --format Null --query_id $query_id -q "RESTORE TABLE data AS data_no_native_copy FROM S3(s3_conn, 'backups/$CLICKHOUSE_DATABASE/data_no_native_copy') SETTINGS allow_s3_native_copy=false" -$CLICKHOUSE_CLIENT -nm -q " +$CLICKHOUSE_CLIENT -m -q " SYSTEM FLUSH LOGS; SELECT query, 
ProfileEvents['S3CopyObject']>0 FROM system.query_log WHERE type = 'QueryFinish' AND event_date >= yesterday() AND current_database = '$CLICKHOUSE_DATABASE' AND query_id = '$query_id' " diff --git a/tests/queries/0_stateless/02803_backup_tmp_files.sh b/tests/queries/0_stateless/02803_backup_tmp_files.sh index d86beae4923..2208d3d32ef 100755 --- a/tests/queries/0_stateless/02803_backup_tmp_files.sh +++ b/tests/queries/0_stateless/02803_backup_tmp_files.sh @@ -8,7 +8,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) set -e -$CLICKHOUSE_CLIENT -nm -q " +$CLICKHOUSE_CLIENT -m -q " drop table if exists data; create table data (key Int) engine=MergeTree() order by tuple() settings disk='s3_disk'; insert into data select * from numbers(10); diff --git a/tests/queries/0_stateless/02808_custom_disk_with_user_defined_name.sh b/tests/queries/0_stateless/02808_custom_disk_with_user_defined_name.sh index 63fa60bd548..4d7ab4063be 100755 --- a/tests/queries/0_stateless/02808_custom_disk_with_user_defined_name.sh +++ b/tests/queries/0_stateless/02808_custom_disk_with_user_defined_name.sh @@ -8,7 +8,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) . "$CUR_DIR"/../shell_config.sh -$CLICKHOUSE_CLIENT -nm --query """ +$CLICKHOUSE_CLIENT -m --query """ DROP TABLE IF EXISTS test; CREATE TABLE test (a Int32, b String) ENGINE = MergeTree() ORDER BY tuple() @@ -17,17 +17,17 @@ SETTINGS disk = disk(name = 's3_disk', type = cache, max_size = '100Ki', path = disk_name="${CLICKHOUSE_TEST_UNIQUE_NAME}" -$CLICKHOUSE_CLIENT -nm --query """ +$CLICKHOUSE_CLIENT -m --query """ SELECT count() FROM system.disks WHERE name = '$disk_name' """ -$CLICKHOUSE_CLIENT -nm --query """ +$CLICKHOUSE_CLIENT -m --query """ DROP TABLE IF EXISTS test; CREATE TABLE test (a Int32, b String) ENGINE = MergeTree() ORDER BY tuple() SETTINGS disk = disk(name = '$disk_name', type = cache, max_size = '100Ki', path = ${CLICKHOUSE_TEST_UNIQUE_NAME}, disk = s3_disk); """ -$CLICKHOUSE_CLIENT -nm --query """ +$CLICKHOUSE_CLIENT -m --query """ SELECT count() FROM system.disks WHERE name = '$disk_name' """ diff --git a/tests/queries/0_stateless/02808_filesystem_cache_drop_query.sh b/tests/queries/0_stateless/02808_filesystem_cache_drop_query.sh index 8a4a2e906b0..a7dfa035a22 100755 --- a/tests/queries/0_stateless/02808_filesystem_cache_drop_query.sh +++ b/tests/queries/0_stateless/02808_filesystem_cache_drop_query.sh @@ -9,7 +9,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) disk_name="${CLICKHOUSE_TEST_UNIQUE_NAME}" -$CLICKHOUSE_CLIENT -nm --query """ +$CLICKHOUSE_CLIENT -m --query """ DROP TABLE IF EXISTS test; CREATE TABLE test (a Int32, b String) ENGINE = MergeTree() ORDER BY tuple() @@ -22,29 +22,29 @@ query_id=$RANDOM $CLICKHOUSE_CLIENT --query_id "$query_id" --query "SELECT * FROM test FORMAT Null SETTINGS enable_filesystem_cache_log = 1" -$CLICKHOUSE_CLIENT -nm --query """ +$CLICKHOUSE_CLIENT -m --query """ SYSTEM DROP FILESYSTEM CACHE '$disk_name' KEY kek; """ 2>&1 | grep -q "Invalid cache key hex: kek" && echo "OK" || echo "FAIL" ${CLICKHOUSE_CLIENT} -q " system flush logs" -key=$($CLICKHOUSE_CLIENT -nm --query """ +key=$($CLICKHOUSE_CLIENT -m --query """ SELECT key FROM system.filesystem_cache_log WHERE query_id = '$query_id' ORDER BY size DESC LIMIT 1; """) -offset=$($CLICKHOUSE_CLIENT -nm --query """ +offset=$($CLICKHOUSE_CLIENT -m --query """ SELECT offset FROM system.filesystem_cache_log WHERE query_id = '$query_id' ORDER BY size DESC LIMIT 1; """) -$CLICKHOUSE_CLIENT -nm --query """ +$CLICKHOUSE_CLIENT -m 
--query """ SELECT count() FROM system.filesystem_cache WHERE key = '$key' AND file_segment_range_begin = $offset; """ -$CLICKHOUSE_CLIENT -nm --query """ +$CLICKHOUSE_CLIENT -m --query """ SYSTEM DROP FILESYSTEM CACHE '$disk_name' KEY $key OFFSET $offset; """ -$CLICKHOUSE_CLIENT -nm --query """ +$CLICKHOUSE_CLIENT -m --query """ SELECT count() FROM system.filesystem_cache WHERE key = '$key' AND file_segment_range_begin = $offset; """ @@ -54,18 +54,18 @@ $CLICKHOUSE_CLIENT --query_id "$query_id" --query "SELECT * FROM test FORMAT Nul ${CLICKHOUSE_CLIENT} -q " system flush logs" -key=$($CLICKHOUSE_CLIENT -nm --query """ +key=$($CLICKHOUSE_CLIENT -m --query """ SELECT key FROM system.filesystem_cache_log WHERE query_id = '$query_id' ORDER BY size DESC LIMIT 1; """) -$CLICKHOUSE_CLIENT -nm --query """ +$CLICKHOUSE_CLIENT -m --query """ SELECT count() FROM system.filesystem_cache WHERE key = '$key'; """ -$CLICKHOUSE_CLIENT -nm --query """ +$CLICKHOUSE_CLIENT -m --query """ SYSTEM DROP FILESYSTEM CACHE '$disk_name' KEY $key """ -$CLICKHOUSE_CLIENT -nm --query """ +$CLICKHOUSE_CLIENT -m --query """ SELECT count() FROM system.filesystem_cache WHERE key = '$key'; """ diff --git a/tests/queries/0_stateless/02843_backup_use_same_password_for_base_backup.sh b/tests/queries/0_stateless/02843_backup_use_same_password_for_base_backup.sh index a2b1a953e24..ecd73bcc8a0 100755 --- a/tests/queries/0_stateless/02843_backup_use_same_password_for_base_backup.sh +++ b/tests/queries/0_stateless/02843_backup_use_same_password_for_base_backup.sh @@ -5,7 +5,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . "$CUR_DIR"/../shell_config.sh -$CLICKHOUSE_CLIENT -nm -q " +$CLICKHOUSE_CLIENT -m -q " DROP TABLE IF EXISTS data; DROP TABLE IF EXISTS data_1; DROP TABLE IF EXISTS data_2; diff --git a/tests/queries/0_stateless/02843_backup_use_same_s3_credentials_for_base_backup.sh b/tests/queries/0_stateless/02843_backup_use_same_s3_credentials_for_base_backup.sh index 16ac095312c..e6a1d547945 100755 --- a/tests/queries/0_stateless/02843_backup_use_same_s3_credentials_for_base_backup.sh +++ b/tests/queries/0_stateless/02843_backup_use_same_s3_credentials_for_base_backup.sh @@ -6,7 +6,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . "$CUR_DIR"/../shell_config.sh -$CLICKHOUSE_CLIENT -nm -q " +$CLICKHOUSE_CLIENT -m -q " drop table if exists data; create table data (key Int) engine=MergeTree() order by tuple(); insert into data select * from numbers(10); diff --git a/tests/queries/0_stateless/02844_max_backup_bandwidth_s3.sh b/tests/queries/0_stateless/02844_max_backup_bandwidth_s3.sh index 4650415c202..830e78e1e9f 100755 --- a/tests/queries/0_stateless/02844_max_backup_bandwidth_s3.sh +++ b/tests/queries/0_stateless/02844_max_backup_bandwidth_s3.sh @@ -6,7 +6,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . 
"$CUR_DIR"/../shell_config.sh -$CLICKHOUSE_CLIENT -nm -q " +$CLICKHOUSE_CLIENT -m -q " drop table if exists data; create table data (key UInt64 CODEC(NONE)) engine=MergeTree() order by tuple() settings min_bytes_for_wide_part=1e9, disk='s3_disk'; -- reading 1e6*8 bytes with 1M bandwith it should take (8-1)/1=7 seconds @@ -15,7 +15,7 @@ $CLICKHOUSE_CLIENT -nm -q " query_id=$(random_str 10) $CLICKHOUSE_CLIENT --query_id "$query_id" -q "backup table data to S3(s3_conn, 'backups/$CLICKHOUSE_DATABASE/data/backup2') SETTINGS allow_s3_native_copy=1" --max_backup_bandwidth=1M > /dev/null -$CLICKHOUSE_CLIENT -nm -q " +$CLICKHOUSE_CLIENT -m -q " SYSTEM FLUSH LOGS; SELECT 'native_copy', @@ -26,7 +26,7 @@ $CLICKHOUSE_CLIENT -nm -q " query_id=$(random_str 10) $CLICKHOUSE_CLIENT --query_id "$query_id" -q "backup table data to S3(s3_conn, 'backups/$CLICKHOUSE_DATABASE/data/backup3') SETTINGS allow_s3_native_copy=0" --max_backup_bandwidth=1M > /dev/null -$CLICKHOUSE_CLIENT -nm -q " +$CLICKHOUSE_CLIENT -m -q " SYSTEM FLUSH LOGS; SELECT 'no_native_copy', diff --git a/tests/queries/0_stateless/02867_storage_set_tsan.sh b/tests/queries/0_stateless/02867_storage_set_tsan.sh index 81ae5f0bda8..f6b684c0a55 100755 --- a/tests/queries/0_stateless/02867_storage_set_tsan.sh +++ b/tests/queries/0_stateless/02867_storage_set_tsan.sh @@ -5,7 +5,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh -$CLICKHOUSE_CLIENT -mn -q """ +$CLICKHOUSE_CLIENT -m -q """ DROP TABLE IF EXISTS t1_02867; CREATE TABLE t1_02867 (x UInt64) ENGINE=Set(); """ @@ -39,4 +39,4 @@ repeat_truncate_insert & sleep 10 -$CLICKHOUSE_CLIENT -mn -q "DROP TABLE IF EXISTS t1_02867;" +$CLICKHOUSE_CLIENT -m -q "DROP TABLE IF EXISTS t1_02867;" diff --git a/tests/queries/0_stateless/02900_union_schema_inference_mode.sh b/tests/queries/0_stateless/02900_union_schema_inference_mode.sh index a0fdb5276e0..a091645d1e3 100755 --- a/tests/queries/0_stateless/02900_union_schema_inference_mode.sh +++ b/tests/queries/0_stateless/02900_union_schema_inference_mode.sh @@ -10,14 +10,14 @@ echo '{"a" : 1, "obj" : {"f1" : 1, "f2" : "2020-01-01"}}' > $CLICKHOUSE_TEST_UNI echo '{"b" : 2, "obj" : {"f3" : 2, "f2" : "Some string"}}' > $CLICKHOUSE_TEST_UNIQUE_NAME/data2.jsonl echo '{"c" : "hello"}' > $CLICKHOUSE_TEST_UNIQUE_NAME/data3.jsonl -$CLICKHOUSE_LOCAL -nm -q " +$CLICKHOUSE_LOCAL -m -q " set schema_inference_mode = 'union'; desc file('$CLICKHOUSE_TEST_UNIQUE_NAME/data{1,2,3}.jsonl'); select * from file('$CLICKHOUSE_TEST_UNIQUE_NAME/data{1,2,3}.jsonl') order by tuple(*) format JSONEachRow; select schema_inference_mode, splitByChar('/', source)[-1] as file, schema from system.schema_inference_cache order by file; " -$CLICKHOUSE_LOCAL -nm -q " +$CLICKHOUSE_LOCAL -m -q " set schema_inference_mode = 'union'; desc file('$CLICKHOUSE_TEST_UNIQUE_NAME/data3.jsonl'); desc file('$CLICKHOUSE_TEST_UNIQUE_NAME/data{1,2,3}.jsonl'); @@ -25,14 +25,14 @@ desc file('$CLICKHOUSE_TEST_UNIQUE_NAME/data{1,2,3}.jsonl'); cd $CLICKHOUSE_TEST_UNIQUE_NAME/ && tar -cf archive.tar data1.jsonl data2.jsonl data3.jsonl && cd .. 
-$CLICKHOUSE_LOCAL -nm -q " +$CLICKHOUSE_LOCAL -m -q " set schema_inference_mode = 'union'; desc file('$CLICKHOUSE_TEST_UNIQUE_NAME/archive.tar :: data{1,2,3}.jsonl'); select * from file('$CLICKHOUSE_TEST_UNIQUE_NAME/archive.tar :: data{1,2,3}.jsonl') order by tuple(*) format JSONEachRow; select schema_inference_mode, splitByChar('/', source)[-1] as file, schema from system.schema_inference_cache order by file; " -$CLICKHOUSE_LOCAL -nm -q " +$CLICKHOUSE_LOCAL -m -q " set schema_inference_mode = 'union'; desc file('$CLICKHOUSE_TEST_UNIQUE_NAME/archive.tar :: data3.jsonl'); desc file('$CLICKHOUSE_TEST_UNIQUE_NAME/archive.tar :: data{1,2,3}.jsonl'); @@ -41,7 +41,7 @@ desc file('$CLICKHOUSE_TEST_UNIQUE_NAME/archive.tar :: data{1,2,3}.jsonl'); echo 'Error' > $CLICKHOUSE_TEST_UNIQUE_NAME/data4.jsonl $CLICKHOUSE_LOCAL -q "desc file('$CLICKHOUSE_TEST_UNIQUE_NAME/data{1,2,3,4}.jsonl') settings schema_inference_mode='union'" 2>&1 | grep -c -F "CANNOT_EXTRACT_TABLE_STRUCTURE" -$CLICKHOUSE_LOCAL -nm -q " +$CLICKHOUSE_LOCAL -m -q " set schema_inference_mode = 'union'; desc file('$CLICKHOUSE_TEST_UNIQUE_NAME/data{2,3}.jsonl'); desc file('$CLICKHOUSE_TEST_UNIQUE_NAME/data{1,2,3,4}.jsonl'); diff --git a/tests/queries/0_stateless/02908_many_requests_to_system_replicas.sh b/tests/queries/0_stateless/02908_many_requests_to_system_replicas.sh index 81ba59fc591..91eeb22c19e 100755 --- a/tests/queries/0_stateless/02908_many_requests_to_system_replicas.sh +++ b/tests/queries/0_stateless/02908_many_requests_to_system_replicas.sh @@ -67,7 +67,7 @@ curl "$CLICKHOUSE_URL" --silent --fail --show-error --data "SELECT sum(is_leader wait; -$CLICKHOUSE_CLIENT -nq " +$CLICKHOUSE_CLIENT -q " SYSTEM FLUSH LOGS; -- Check that number of ZK request is less then a half of (total replicas * concurrency) diff --git a/tests/queries/0_stateless/02922_deduplication_with_zero_copy.sh b/tests/queries/0_stateless/02922_deduplication_with_zero_copy.sh index d1cbc54d294..2eccade5c81 100755 --- a/tests/queries/0_stateless/02922_deduplication_with_zero_copy.sh +++ b/tests/queries/0_stateless/02922_deduplication_with_zero_copy.sh @@ -8,7 +8,7 @@ CURDIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" . "$CURDIR"/../shell_config.sh -$CLICKHOUSE_CLIENT -nm -q " +$CLICKHOUSE_CLIENT -m -q " drop table if exists r1; drop table if exists r2; @@ -64,7 +64,7 @@ function insert_duplicates() { wait - $CLICKHOUSE_CLIENT -nm -q " + $CLICKHOUSE_CLIENT -m -q " system sync replica r1; system sync replica r2; " @@ -84,7 +84,7 @@ function loop() do while ! 
insert_duplicates do - $CLICKHOUSE_CLIENT -nm -q " + $CLICKHOUSE_CLIENT -m -q " truncate table r1; truncate table r2; system sync replica r1; @@ -137,8 +137,8 @@ function list_keeper_nodes() { list_keeper_nodes "${table_shared_id}" -$CLICKHOUSE_CLIENT -nm -q "drop table r1;" --allow_repeated_settings --send_logs_level="error" & -$CLICKHOUSE_CLIENT -nm -q "drop table r2;" --allow_repeated_settings --send_logs_level="error" & +$CLICKHOUSE_CLIENT -m -q "drop table r1;" --allow_repeated_settings --send_logs_level="error" & +$CLICKHOUSE_CLIENT -m -q "drop table r2;" --allow_repeated_settings --send_logs_level="error" & wait list_keeper_nodes "${table_shared_id}" diff --git a/tests/queries/0_stateless/02932_refreshable_materialized_views_1.sh b/tests/queries/0_stateless/02932_refreshable_materialized_views_1.sh index 2b92a113e91..057f76e63d0 100755 --- a/tests/queries/0_stateless/02932_refreshable_materialized_views_1.sh +++ b/tests/queries/0_stateless/02932_refreshable_materialized_views_1.sh @@ -10,11 +10,11 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) CLICKHOUSE_CLIENT="`echo "$CLICKHOUSE_CLIENT" | sed 's/--session_timezone[= ][^ ]*//g'`" CLICKHOUSE_CLIENT="`echo "$CLICKHOUSE_CLIENT --allow_experimental_refreshable_materialized_view=1 --session_timezone Etc/UTC"`" -$CLICKHOUSE_CLIENT -nq "create view refreshes as select * from system.view_refreshes where database = '$CLICKHOUSE_DATABASE' order by view" +$CLICKHOUSE_CLIENT -q "create view refreshes as select * from system.view_refreshes where database = '$CLICKHOUSE_DATABASE' order by view" # Basic refreshing. -$CLICKHOUSE_CLIENT -nq " +$CLICKHOUSE_CLIENT -q " create materialized view a refresh after 2 second engine Memory @@ -23,41 +23,41 @@ $CLICKHOUSE_CLIENT -nq " select '<1: created view>', view, remaining_dependencies, exception, last_refresh_result in ('Unknown', 'Finished') from refreshes; show create a;" # Wait for any refresh. (xargs trims the string and turns \t and \n into spaces) -while [ "`$CLICKHOUSE_CLIENT -nq "select last_refresh_result from refreshes -- $LINENO" | xargs`" == 'Unknown' ] +while [ "`$CLICKHOUSE_CLIENT -q "select last_refresh_result from refreshes -- $LINENO" | xargs`" == 'Unknown' ] do sleep 0.5 done -start_time="`$CLICKHOUSE_CLIENT -nq "select reinterpret(now64(), 'Int64')"`" +start_time="`$CLICKHOUSE_CLIENT -q "select reinterpret(now64(), 'Int64')"`" # Check table contents. -$CLICKHOUSE_CLIENT -nq "select '<2: refreshed>', count(), sum(x=0), sum(x=1) from a" +$CLICKHOUSE_CLIENT -q "select '<2: refreshed>', count(), sum(x=0), sum(x=1) from a" # Wait for table contents to change. -res1="`$CLICKHOUSE_CLIENT -nq 'select * from a order by x format Values'`" +res1="`$CLICKHOUSE_CLIENT -q 'select * from a order by x format Values'`" while : do - res2="`$CLICKHOUSE_CLIENT -nq 'select * from a order by x format Values -- $LINENO'`" + res2="`$CLICKHOUSE_CLIENT -q 'select * from a order by x format Values -- $LINENO'`" [ "$res2" == "$res1" ] || break sleep 0.5 done # Wait for another change. while : do - res3="`$CLICKHOUSE_CLIENT -nq 'select * from a order by x format Values -- $LINENO'`" + res3="`$CLICKHOUSE_CLIENT -q 'select * from a order by x format Values -- $LINENO'`" [ "$res3" == "$res2" ] || break sleep 0.5 done # Check that the two changes were at least 1 second apart, in particular that we're not refreshing # like crazy. This is potentially flaky, but we need at least one test that uses non-mocked timer # to make sure the clock+timer code works at all. 
If it turns out flaky, increase refresh period above. -$CLICKHOUSE_CLIENT -nq " +$CLICKHOUSE_CLIENT -q " select '<3: time difference at least>', min2(reinterpret(now64(), 'Int64') - $start_time, 1000); select '<4: next refresh in>', next_refresh_time-last_refresh_time from refreshes;" # Create a source table from which views will read. -$CLICKHOUSE_CLIENT -nq " +$CLICKHOUSE_CLIENT -q " create table src (x Int8) engine Memory as select 1;" # Switch to fake clock, change refresh schedule, change query. -$CLICKHOUSE_CLIENT -nq " +$CLICKHOUSE_CLIENT -q " system test view a set fake time '2050-01-01 00:00:01'; system wait view a; system refresh view a; @@ -68,19 +68,19 @@ $CLICKHOUSE_CLIENT -nq " select '<4.5: altered>', status, last_refresh_result, next_refresh_time from refreshes; show create a;" # Advance time to trigger the refresh. -$CLICKHOUSE_CLIENT -nq " +$CLICKHOUSE_CLIENT -q " select '<5: no refresh>', count() from a; system test view a set fake time '2052-02-03 04:05:06';" -while [ "`$CLICKHOUSE_CLIENT -nq "select last_refresh_time from refreshes -- $LINENO" | xargs`" != '2052-02-03 04:05:06' ] +while [ "`$CLICKHOUSE_CLIENT -q "select last_refresh_time from refreshes -- $LINENO" | xargs`" != '2052-02-03 04:05:06' ] do sleep 0.5 done -$CLICKHOUSE_CLIENT -nq " +$CLICKHOUSE_CLIENT -q " select '<6: refreshed>', * from a; select '<7: refreshed>', status, last_refresh_result, next_refresh_time from refreshes;" # Create a dependent view, refresh it once. -$CLICKHOUSE_CLIENT -nq " +$CLICKHOUSE_CLIENT -q " create materialized view b refresh every 2 year depends on a (y Int32) engine MergeTree order by y empty as select x*10 as y from a; show create b; system test view b set fake time '2052-11-11 11:11:11'; @@ -88,89 +88,89 @@ $CLICKHOUSE_CLIENT -nq " system wait view b; select '<7.5: created dependent>', last_refresh_time from refreshes where view = 'b';" # Next refresh shouldn't start until the dependency refreshes. -$CLICKHOUSE_CLIENT -nq " +$CLICKHOUSE_CLIENT -q " select '<8: refreshed>', * from b; select '<9: refreshed>', view, status, last_refresh_result, next_refresh_time from refreshes; system test view b set fake time '2054-01-24 23:22:21';" -while [ "`$CLICKHOUSE_CLIENT -nq "select status, next_refresh_time from refreshes where view = 'b' -- $LINENO" | xargs`" != 'WaitingForDependencies 2054-01-01 00:00:00' ] +while [ "`$CLICKHOUSE_CLIENT -q "select status, next_refresh_time from refreshes where view = 'b' -- $LINENO" | xargs`" != 'WaitingForDependencies 2054-01-01 00:00:00' ] do sleep 0.5 done # Drop the source table, check that refresh fails and doesn't leave a temp table behind. -$CLICKHOUSE_CLIENT -nq " +$CLICKHOUSE_CLIENT -q " select '<9.2: dropping>', countIf(name like '%tmp%'), countIf(name like '%.inner%') from system.tables where database = currentDatabase(); drop table src; system refresh view a;" -$CLICKHOUSE_CLIENT -nq "system wait view a;" 2>/dev/null && echo "SYSTEM WAIT VIEW failed to fail at $LINENO" -$CLICKHOUSE_CLIENT -nq " +$CLICKHOUSE_CLIENT -q "system wait view a;" 2>/dev/null && echo "SYSTEM WAIT VIEW failed to fail at $LINENO" +$CLICKHOUSE_CLIENT -q " select '<9.4: dropped>', countIf(name like '%tmp%'), countIf(name like '%.inner%') from system.tables where database = currentDatabase();" # Create the source table again, check that refresh succeeds (in particular that tables are looked # up by name rather than uuid). 
-$CLICKHOUSE_CLIENT -nq " +$CLICKHOUSE_CLIENT -q " select '<10: creating>', view, status, remaining_dependencies, next_refresh_time from refreshes; create table src (x Int16) engine Memory as select 2; system test view a set fake time '2054-01-01 00:00:01';" -while [ "`$CLICKHOUSE_CLIENT -nq "select status from refreshes where view = 'b' -- $LINENO" | xargs`" != 'Scheduled' ] +while [ "`$CLICKHOUSE_CLIENT -q "select status from refreshes where view = 'b' -- $LINENO" | xargs`" != 'Scheduled' ] do sleep 0.5 done # Both tables should've refreshed. -$CLICKHOUSE_CLIENT -nq " +$CLICKHOUSE_CLIENT -q " select '<11: chain-refreshed a>', * from a; select '<12: chain-refreshed b>', * from b; select '<13: chain-refreshed>', view, status, remaining_dependencies, last_refresh_result, last_refresh_time, next_refresh_time, exception == '' from refreshes;" # Make the dependent table run ahead by one refresh cycle, make sure it waits for the dependency to # catch up to the same cycle. -$CLICKHOUSE_CLIENT -nq " +$CLICKHOUSE_CLIENT -q " system test view b set fake time '2059-01-01 00:00:00'; system refresh view b;" -while [ "`$CLICKHOUSE_CLIENT -nq "select next_refresh_time from refreshes where view = 'b' -- $LINENO" | xargs`" != '2060-01-01 00:00:00' ] +while [ "`$CLICKHOUSE_CLIENT -q "select next_refresh_time from refreshes where view = 'b' -- $LINENO" | xargs`" != '2060-01-01 00:00:00' ] do sleep 0.5 done -$CLICKHOUSE_CLIENT -nq " +$CLICKHOUSE_CLIENT -q " system test view b set fake time '2061-01-01 00:00:00'; system test view a set fake time '2057-01-01 00:00:00';" -while [ "`$CLICKHOUSE_CLIENT -nq "select status, next_refresh_time from refreshes -- $LINENO" | xargs`" != 'Scheduled 2058-01-01 00:00:00 WaitingForDependencies 2060-01-01 00:00:00' ] +while [ "`$CLICKHOUSE_CLIENT -q "select status, next_refresh_time from refreshes -- $LINENO" | xargs`" != 'Scheduled 2058-01-01 00:00:00 WaitingForDependencies 2060-01-01 00:00:00' ] do sleep 0.5 done -$CLICKHOUSE_CLIENT -nq " +$CLICKHOUSE_CLIENT -q " select '<14: waiting for next cycle>', view, status, remaining_dependencies, next_refresh_time from refreshes; truncate src; insert into src values (3); system test view a set fake time '2060-02-02 02:02:02';" -while [ "`$CLICKHOUSE_CLIENT -nq "select next_refresh_time from refreshes where view = 'b' -- $LINENO" | xargs`" != '2062-01-01 00:00:00' ] +while [ "`$CLICKHOUSE_CLIENT -q "select next_refresh_time from refreshes where view = 'b' -- $LINENO" | xargs`" != '2062-01-01 00:00:00' ] do sleep 0.5 done -$CLICKHOUSE_CLIENT -nq " +$CLICKHOUSE_CLIENT -q " select '<15: chain-refreshed a>', * from a; select '<16: chain-refreshed b>', * from b; select '<17: chain-refreshed>', view, status, next_refresh_time from refreshes;" # Get to WaitingForDependencies state and remove the depencency. 
-$CLICKHOUSE_CLIENT -nq " +$CLICKHOUSE_CLIENT -q " system test view b set fake time '2062-03-03 03:03:03'" -while [ "`$CLICKHOUSE_CLIENT -nq "select status from refreshes where view = 'b' -- $LINENO" | xargs`" != 'WaitingForDependencies' ] +while [ "`$CLICKHOUSE_CLIENT -q "select status from refreshes where view = 'b' -- $LINENO" | xargs`" != 'WaitingForDependencies' ] do sleep 0.5 done -$CLICKHOUSE_CLIENT -nq " +$CLICKHOUSE_CLIENT -q " alter table b modify refresh every 2 year" -while [ "`$CLICKHOUSE_CLIENT -nq "select status, last_refresh_time from refreshes where view = 'b' -- $LINENO" | xargs`" != 'Scheduled 2062-03-03 03:03:03' ] +while [ "`$CLICKHOUSE_CLIENT -q "select status, last_refresh_time from refreshes where view = 'b' -- $LINENO" | xargs`" != 'Scheduled 2062-03-03 03:03:03' ] do sleep 0.5 done -$CLICKHOUSE_CLIENT -nq " +$CLICKHOUSE_CLIENT -q " select '<18: removed dependency>', view, status, remaining_dependencies, last_refresh_time,next_refresh_time, refresh_count from refreshes where view = 'b'; show create b;" -$CLICKHOUSE_CLIENT -nq " +$CLICKHOUSE_CLIENT -q " drop table src; drop table a; drop table b; diff --git a/tests/queries/0_stateless/02932_refreshable_materialized_views_2.sh b/tests/queries/0_stateless/02932_refreshable_materialized_views_2.sh index 50a905576d5..2d00d61f253 100755 --- a/tests/queries/0_stateless/02932_refreshable_materialized_views_2.sh +++ b/tests/queries/0_stateless/02932_refreshable_materialized_views_2.sh @@ -12,29 +12,29 @@ CLICKHOUSE_LOG_COMMENT= CLICKHOUSE_CLIENT="`echo "$CLICKHOUSE_CLIENT" | sed 's/--session_timezone[= ][^ ]*//g'`" CLICKHOUSE_CLIENT="`echo "$CLICKHOUSE_CLIENT --allow_experimental_refreshable_materialized_view=1 --allow_materialized_view_with_bad_select=0 --session_timezone Etc/UTC"`" -$CLICKHOUSE_CLIENT -nq "create view refreshes as select * from system.view_refreshes where database = '$CLICKHOUSE_DATABASE' order by view" +$CLICKHOUSE_CLIENT -q "create view refreshes as select * from system.view_refreshes where database = '$CLICKHOUSE_DATABASE' order by view" # Select from a table that doesn't exist, get an exception. -$CLICKHOUSE_CLIENT -nq " +$CLICKHOUSE_CLIENT -q " create table src (x Int8) engine Memory as select 1; create materialized view c refresh every 1 second (x Int64) engine Memory empty as select * from src; drop table src;" -while [ "`$CLICKHOUSE_CLIENT -nq "select last_refresh_result from refreshes where view = 'c' -- $LINENO" | xargs`" != 'Error' ] +while [ "`$CLICKHOUSE_CLIENT -q "select last_refresh_result from refreshes where view = 'c' -- $LINENO" | xargs`" != 'Error' ] do sleep 0.5 done # Check exception, create src, expect successful refresh. -$CLICKHOUSE_CLIENT -nq " +$CLICKHOUSE_CLIENT -q " select '<19: exception>', exception ilike '%UNKNOWN_TABLE%' ? '1' : exception from refreshes where view = 'c'; create table src (x Int64) engine Memory as select 1; system refresh view c;" -while [ "`$CLICKHOUSE_CLIENT -nq "select last_refresh_result from refreshes -- $LINENO" | xargs`" != 'Finished' ] +while [ "`$CLICKHOUSE_CLIENT -q "select last_refresh_result from refreshes -- $LINENO" | xargs`" != 'Finished' ] do sleep 0.5 done # Rename table. -$CLICKHOUSE_CLIENT -nq " +$CLICKHOUSE_CLIENT -q " select '<20: unexception>', * from c; rename table c to d; select '<21: rename>', * from d; @@ -42,130 +42,130 @@ $CLICKHOUSE_CLIENT -nq " # Do various things during a refresh. # First make a nonempty view. 
-$CLICKHOUSE_CLIENT -nq " +$CLICKHOUSE_CLIENT -q " drop table d; truncate src; insert into src values (1); create materialized view e refresh every 1 second (x Int64) engine MergeTree order by x empty as select x + sleepEachRow(1) as x from src settings max_block_size = 1;" -while [ "`$CLICKHOUSE_CLIENT -nq "select last_refresh_result from refreshes -- $LINENO" | xargs`" != 'Finished' ] +while [ "`$CLICKHOUSE_CLIENT -q "select last_refresh_result from refreshes -- $LINENO" | xargs`" != 'Finished' ] do sleep 0.5 done # Stop refreshes. -$CLICKHOUSE_CLIENT -nq " +$CLICKHOUSE_CLIENT -q " select '<23: simple refresh>', * from e; system stop view e;" -while [ "`$CLICKHOUSE_CLIENT -nq "select status from refreshes -- $LINENO" | xargs`" != 'Disabled' ] +while [ "`$CLICKHOUSE_CLIENT -q "select status from refreshes -- $LINENO" | xargs`" != 'Disabled' ] do sleep 0.5 done # Make refreshes slow, wait for a slow refresh to start. (We stopped refreshes first to make sure # we wait for a slow refresh, not a previous fast one.) -$CLICKHOUSE_CLIENT -nq " +$CLICKHOUSE_CLIENT -q " insert into src select * from numbers(1000) settings max_block_size=1; system start view e;" -while [ "`$CLICKHOUSE_CLIENT -nq "select status from refreshes -- $LINENO" | xargs`" != 'Running' ] +while [ "`$CLICKHOUSE_CLIENT -q "select status from refreshes -- $LINENO" | xargs`" != 'Running' ] do sleep 0.5 done # Rename. -$CLICKHOUSE_CLIENT -nq " +$CLICKHOUSE_CLIENT -q " rename table e to f; select '<24: rename during refresh>', * from f; select '<25: rename during refresh>', view, status from refreshes where view = 'f'; alter table f modify refresh after 10 year;" # Cancel. -$CLICKHOUSE_CLIENT -nq " +$CLICKHOUSE_CLIENT -q " system cancel view f;" -while [ "`$CLICKHOUSE_CLIENT -nq "select status from refreshes where view = 'f' -- $LINENO" | xargs`" != 'Scheduled' ] +while [ "`$CLICKHOUSE_CLIENT -q "select status from refreshes where view = 'f' -- $LINENO" | xargs`" != 'Scheduled' ] do sleep 0.5 done # Check that another refresh doesn't immediately start after the cancelled one. -$CLICKHOUSE_CLIENT -nq " +$CLICKHOUSE_CLIENT -q " select '<27: cancelled>', view, status, last_refresh_result from refreshes where view = 'f'; system refresh view f;" -while [ "`$CLICKHOUSE_CLIENT -nq "select status from refreshes where view = 'f' -- $LINENO" | xargs`" != 'Running' ] +while [ "`$CLICKHOUSE_CLIENT -q "select status from refreshes where view = 'f' -- $LINENO" | xargs`" != 'Running' ] do sleep 0.5 done # Drop. -$CLICKHOUSE_CLIENT -nq " +$CLICKHOUSE_CLIENT -q " drop table f; select '<28: drop during refresh>', view, status from refreshes; select '<28: drop during refresh>', countIf(name like '%tmp%'), countIf(name like '%.inner%') from system.tables where database = currentDatabase()" # Try OFFSET and RANDOMIZE FOR. 
-$CLICKHOUSE_CLIENT -nq " +$CLICKHOUSE_CLIENT -q " create materialized view g refresh every 1 week offset 3 day 4 hour randomize for 4 day 1 hour (x Int64) engine Memory empty as select 42 as x; show create g; system test view g set fake time '2050-02-03 15:30:13';" -while [ "`$CLICKHOUSE_CLIENT -nq "select next_refresh_time > '2049-01-01' from refreshes -- $LINENO" | xargs`" != '1' ] +while [ "`$CLICKHOUSE_CLIENT -q "select next_refresh_time > '2049-01-01' from refreshes -- $LINENO" | xargs`" != '1' ] do sleep 0.5 done -$CLICKHOUSE_CLIENT -nq " +$CLICKHOUSE_CLIENT -q " with '2050-02-10 04:00:00'::DateTime as expected select '<29: randomize>', abs(next_refresh_time::Int64 - expected::Int64) <= 3600*(24*4+1), next_refresh_time != expected from refreshes;" # Send data 'TO' an existing table. -$CLICKHOUSE_CLIENT -nq " +$CLICKHOUSE_CLIENT -q " drop table g; create table dest (x Int64) engine MergeTree order by x; truncate src; insert into src values (1); create materialized view h refresh every 1 second to dest empty as select x*10 as x from src; show create h;" -while [ "`$CLICKHOUSE_CLIENT -nq "select last_refresh_result from refreshes -- $LINENO" | xargs`" != 'Finished' ] +while [ "`$CLICKHOUSE_CLIENT -q "select last_refresh_result from refreshes -- $LINENO" | xargs`" != 'Finished' ] do sleep 0.5 done -$CLICKHOUSE_CLIENT -nq " +$CLICKHOUSE_CLIENT -q " select '<30: to existing table>', * from dest; insert into src values (2);" -while [ "`$CLICKHOUSE_CLIENT -nq "select count() from dest -- $LINENO" | xargs`" != '2' ] +while [ "`$CLICKHOUSE_CLIENT -q "select count() from dest -- $LINENO" | xargs`" != '2' ] do sleep 0.5 done -$CLICKHOUSE_CLIENT -nq " +$CLICKHOUSE_CLIENT -q " select '<31: to existing table>', * from dest; drop table dest; drop table h;" # Retries. 
-$CLICKHOUSE_CLIENT -nq " +$CLICKHOUSE_CLIENT -q " create materialized view h2 refresh after 1 year settings refresh_retries = 10 (x Int64) engine Memory as select x*10 + throwIf(x % 2 == 0) as x from src;" -$CLICKHOUSE_CLIENT -nq "system wait view h2;" 2>/dev/null && echo "SYSTEM WAIT VIEW failed to fail at $LINENO" -$CLICKHOUSE_CLIENT -nq " +$CLICKHOUSE_CLIENT -q "system wait view h2;" 2>/dev/null && echo "SYSTEM WAIT VIEW failed to fail at $LINENO" +$CLICKHOUSE_CLIENT -q " select '<31.5: will retry>', last_refresh_result, retry > 0 from refreshes; create table src2 (x Int8) engine Memory; insert into src2 values (1); exchange tables src and src2; drop table src2;" -while [ "`$CLICKHOUSE_CLIENT -nq "select last_refresh_result, retry from refreshes -- $LINENO" | xargs`" != 'Finished 0' ] +while [ "`$CLICKHOUSE_CLIENT -q "select last_refresh_result, retry from refreshes -- $LINENO" | xargs`" != 'Finished 0' ] do sleep 0.5 done -$CLICKHOUSE_CLIENT -nq " +$CLICKHOUSE_CLIENT -q " select '<31.6: did retry>', x from h2; drop table h2" # EMPTY -$CLICKHOUSE_CLIENT -nq " +$CLICKHOUSE_CLIENT -q " create materialized view i refresh after 1 year engine Memory empty as select number as x from numbers(2); create materialized view j refresh after 1 year engine Memory as select number as x from numbers(2);" -while [ "`$CLICKHOUSE_CLIENT -nq "select sum(last_success_time is null) from refreshes -- $LINENO" | xargs`" == '2' ] +while [ "`$CLICKHOUSE_CLIENT -q "select sum(last_success_time is null) from refreshes -- $LINENO" | xargs`" == '2' ] do sleep 0.5 done -$CLICKHOUSE_CLIENT -nq " +$CLICKHOUSE_CLIENT -q " select '<32: empty>', view, status, last_refresh_result, retry from refreshes order by view; drop table i; drop table j;" # APPEND -$CLICKHOUSE_CLIENT -nq " +$CLICKHOUSE_CLIENT -q " create materialized view k refresh every 10 year append (x Int64) engine Memory empty as select x*10 as x from src; select '<33: append>', * from k; system refresh view k; @@ -177,7 +177,7 @@ $CLICKHOUSE_CLIENT -nq " system wait view k; select '<35: append>', * from k order by x;" # ALTER to non-APPEND -$CLICKHOUSE_CLIENT -nq " +$CLICKHOUSE_CLIENT -q " alter table k modify refresh every 10 year; system wait view k; system refresh view k; @@ -187,7 +187,7 @@ $CLICKHOUSE_CLIENT -nq " truncate table src;" # APPEND + TO + regular materialized view reading from it. -$CLICKHOUSE_CLIENT -nq " +$CLICKHOUSE_CLIENT -q " create table mid (x Int64) engine MergeTree order by x; create materialized view l refresh every 10 year append to mid empty as select x*10 as x from src; create materialized view m (x Int64) engine Memory as select x*10 as x from mid; @@ -204,19 +204,19 @@ $CLICKHOUSE_CLIENT -nq " drop table mid;" # Failing to create inner table. -$CLICKHOUSE_CLIENT -nq " +$CLICKHOUSE_CLIENT -q " create materialized view n refresh every 1 second (x Int64) engine MergeTree as select 1 as x from numbers(2);" 2>/dev/null || echo "creating MergeTree without ORDER BY failed, as expected" -$CLICKHOUSE_CLIENT -nq " +$CLICKHOUSE_CLIENT -q " create materialized view n refresh every 1 second (x Int64) engine MergeTree order by x as select 1 as x from numbers(2); drop table n;" # Reading from table that doesn't exist yet. 
-$CLICKHOUSE_CLIENT -nq " +$CLICKHOUSE_CLIENT -q " create materialized view o refresh every 1 second (x Int64) engine Memory as select x from nonexist; -- { serverError UNKNOWN_TABLE } create materialized view o (x Int64) engine Memory as select x from nonexist; -- { serverError UNKNOWN_TABLE } create materialized view o (x Int64) engine Memory as select x from nope.nonexist; -- { serverError UNKNOWN_DATABASE } create materialized view o refresh every 1 second (x Int64) engine Memory as select x from nope.nonexist settings allow_materialized_view_with_bad_select = 1; drop table o;" -$CLICKHOUSE_CLIENT -nq " +$CLICKHOUSE_CLIENT -q " drop table refreshes;" diff --git a/tests/queries/0_stateless/02941_variant_type_1.sh b/tests/queries/0_stateless/02941_variant_type_1.sh index c12bece6d54..ef1d1deff4e 100755 --- a/tests/queries/0_stateless/02941_variant_type_1.sh +++ b/tests/queries/0_stateless/02941_variant_type_1.sh @@ -10,7 +10,7 @@ CH_CLIENT="$CLICKHOUSE_CLIENT --allow_experimental_variant_type=1 --allow_suspic function test1_insert() { echo "test1 insert" - $CH_CLIENT -nmq "insert into test select number, NULL from numbers(3); + $CH_CLIENT -mq "insert into test select number, NULL from numbers(3); insert into test select number + 3, number from numbers(3); insert into test select number + 6, ('str_' || toString(number))::Variant(String) from numbers(3); insert into test select number + 9, ('lc_str_' || toString(number))::LowCardinality(String) from numbers(3); @@ -21,7 +21,7 @@ insert into test select number + 15, range(number + 1)::Array(UInt64) from numbe function test1_select() { echo "test1 select" - $CH_CLIENT -nmq "select v from test order by id; + $CH_CLIENT -mq "select v from test order by id; select v.String from test order by id; select v.UInt64 from test order by id; select v.\`LowCardinality(String)\` from test order by id; @@ -36,7 +36,7 @@ select v.\`Array(UInt64)\`.size0 from test order by id;" function test2_insert() { echo "test2 insert" - $CH_CLIENT -nmq "insert into test select number, NULL from numbers(3); + $CH_CLIENT -mq "insert into test select number, NULL from numbers(3); insert into test select number + 3, number % 2 ? NULL : number from numbers(3); insert into test select number + 6, number % 2 ? NULL : ('str_' || toString(number))::Variant(String) from numbers(3); insert into test select number + 9, number % 2 ? CAST(NULL, 'Variant(String, UInt64, LowCardinality(String), Tuple(a UInt32, b UInt32), Array(UInt64))') : CAST(('lc_str_' || toString(number))::LowCardinality(String), 'Variant(String, UInt64, LowCardinality(String), Tuple(a UInt32, b UInt32), Array(UInt64))') from numbers(3); @@ -47,7 +47,7 @@ insert into test select number + 15, number % 2 ? 
CAST(NULL, 'Variant(String, UI function test2_select() { echo "test2 select" - $CH_CLIENT -nmq "select v from test order by id; + $CH_CLIENT -mq "select v from test order by id; select v.String from test order by id; select v.UInt64 from test order by id; select v.\`LowCardinality(String)\` from test order by id; @@ -68,7 +68,7 @@ function test3_insert() function test3_select() { echo "test3 select" - $CH_CLIENT -nmq "select v from test order by id; + $CH_CLIENT -mq "select v from test order by id; select v.String from test order by id; select v.UInt64 from test order by id; select v.\`LowCardinality(String)\` from test order by id; diff --git a/tests/queries/0_stateless/02941_variant_type_2.sh b/tests/queries/0_stateless/02941_variant_type_2.sh index e93dfac8510..7017edac525 100755 --- a/tests/queries/0_stateless/02941_variant_type_2.sh +++ b/tests/queries/0_stateless/02941_variant_type_2.sh @@ -10,7 +10,7 @@ CH_CLIENT="$CLICKHOUSE_CLIENT --allow_experimental_variant_type=1 --allow_suspic function test4_insert() { echo "test4 insert" - $CH_CLIENT -nmq "insert into test select number, NULL from numbers(100000); + $CH_CLIENT -mq "insert into test select number, NULL from numbers(100000); insert into test select number + 100000, number from numbers(100000); insert into test select number + 200000, ('str_' || toString(number))::Variant(String) from numbers(100000); insert into test select number + 300000, ('lc_str_' || toString(number))::LowCardinality(String) from numbers(100000); @@ -21,7 +21,7 @@ insert into test select number + 500000, range(number % 20 + 1)::Array(UInt64) f function test4_select { echo "test4 select" - $CH_CLIENT -nmq "select v from test format Null; + $CH_CLIENT -mq "select v from test format Null; select count() from test where isNotNull(v); select v.String from test format Null; select count() from test where isNotNull(v.String); diff --git a/tests/queries/0_stateless/02941_variant_type_3.sh b/tests/queries/0_stateless/02941_variant_type_3.sh index cc0fde5b689..5d923a47e9b 100755 --- a/tests/queries/0_stateless/02941_variant_type_3.sh +++ b/tests/queries/0_stateless/02941_variant_type_3.sh @@ -10,7 +10,7 @@ CH_CLIENT="$CLICKHOUSE_CLIENT --allow_experimental_variant_type=1 --allow_suspic function test5_insert() { echo "test5 insert" - $CH_CLIENT -nmq " + $CH_CLIENT -mq " insert into test select number, NULL from numbers(200000); insert into test select number + 200000, number % 2 ? NULL : number from numbers(200000); insert into test select number + 400000, number % 2 ? NULL : ('str_' || toString(number))::Variant(String) from numbers(200000); @@ -22,7 +22,7 @@ insert into test select number + 1000000, number % 2 ? 
CAST(NULL, 'Variant(Strin function test5_select() { echo "test5 select" - $CH_CLIENT -nmq " + $CH_CLIENT -mq " select v from test format Null; select count() from test where isNotNull(v); select v.String from test format Null; diff --git a/tests/queries/0_stateless/02941_variant_type_4.sh b/tests/queries/0_stateless/02941_variant_type_4.sh index 93a1770d05e..88f6a475d46 100755 --- a/tests/queries/0_stateless/02941_variant_type_4.sh +++ b/tests/queries/0_stateless/02941_variant_type_4.sh @@ -17,7 +17,7 @@ function test6_insert() function test6_select() { echo "test6 select" - $CH_CLIENT -nmq "select v from test format Null; + $CH_CLIENT -mq "select v from test format Null; select count() from test where isNotNull(v); select v.String from test format Null; select count() from test where isNotNull(v.String); diff --git a/tests/queries/0_stateless/02944_dynamically_change_filesystem_cache_size.sh b/tests/queries/0_stateless/02944_dynamically_change_filesystem_cache_size.sh index cb099bb59ae..f9e84dc2b50 100755 --- a/tests/queries/0_stateless/02944_dynamically_change_filesystem_cache_size.sh +++ b/tests/queries/0_stateless/02944_dynamically_change_filesystem_cache_size.sh @@ -10,7 +10,7 @@ disk_name="s3_cache_02944" $CLICKHOUSE_CLIENT --query "SYSTEM DROP FILESYSTEM CACHE" $CLICKHOUSE_CLIENT --query "DESCRIBE FILESYSTEM CACHE '${disk_name}'" -$CLICKHOUSE_CLIENT -nm --query " +$CLICKHOUSE_CLIENT -m --query " DROP TABLE IF EXISTS test; CREATE TABLE test (a String) engine=MergeTree() ORDER BY tuple() SETTINGS disk = '$disk_name'; INSERT INTO test SELECT randomString(100); @@ -33,7 +33,7 @@ cat $config_path \ > $config_path_tmp mv $config_path_tmp $config_path -$CLICKHOUSE_CLIENT -nm --query " +$CLICKHOUSE_CLIENT -m --query " set send_logs_level='fatal'; SYSTEM RELOAD CONFIG" $CLICKHOUSE_CLIENT --query "DESCRIBE FILESYSTEM CACHE '${disk_name}'" @@ -47,7 +47,7 @@ cat $config_path \ > $config_path_tmp mv $config_path_tmp $config_path -$CLICKHOUSE_CLIENT -nm --query " +$CLICKHOUSE_CLIENT -m --query " set send_logs_level='fatal'; SYSTEM RELOAD CONFIG" $CLICKHOUSE_CLIENT --query "DESCRIBE FILESYSTEM CACHE '${disk_name}'" @@ -63,7 +63,7 @@ cat $config_path \ > $config_path_tmp mv $config_path_tmp $config_path -$CLICKHOUSE_CLIENT -nm --query " +$CLICKHOUSE_CLIENT -m --query " set send_logs_level='fatal'; SYSTEM RELOAD CONFIG" $CLICKHOUSE_CLIENT --query "DESCRIBE FILESYSTEM CACHE '${disk_name}'" @@ -77,7 +77,7 @@ cat $config_path \ > $config_path_tmp mv $config_path_tmp $config_path -$CLICKHOUSE_CLIENT -nm --query " +$CLICKHOUSE_CLIENT -m --query " set send_logs_level='fatal'; SYSTEM RELOAD CONFIG" $CLICKHOUSE_CLIENT --query "DESCRIBE FILESYSTEM CACHE '${disk_name}'" diff --git a/tests/queries/0_stateless/02960_polygon_bound_bug.sh b/tests/queries/0_stateless/02960_polygon_bound_bug.sh index 0c3db01a77c..1e9be36da55 100755 --- a/tests/queries/0_stateless/02960_polygon_bound_bug.sh +++ b/tests/queries/0_stateless/02960_polygon_bound_bug.sh @@ -5,7 +5,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . 
"$CURDIR"/../shell_config.sh -$CLICKHOUSE_LOCAL -nm -q "CREATE TABLE test_table (geom MultiPolygon) engine=MergeTree ORDER BY geom; +$CLICKHOUSE_LOCAL -m -q "CREATE TABLE test_table (geom MultiPolygon) engine=MergeTree ORDER BY geom; INSERT INTO test_table SELECT * FROM file('$CURDIR/data_parquet/02960_polygon_bound_bug.parquet', Parquet); CREATE DICTIONARY test_dict (geom MultiPolygon) PRIMARY KEY geom SOURCE (CLICKHOUSE(TABLE 'test_table')) LIFETIME(MIN 0 MAX 0) LAYOUT(POLYGON(STORE_POLYGON_KEY_COLUMN 1)); SELECT dictHas(test_dict,(174.84729269276494,-36.99524960275426));" diff --git a/tests/queries/0_stateless/02961_storage_config_volume_priority.sh b/tests/queries/0_stateless/02961_storage_config_volume_priority.sh index 145b921a750..1bd86ee0c75 100755 --- a/tests/queries/0_stateless/02961_storage_config_volume_priority.sh +++ b/tests/queries/0_stateless/02961_storage_config_volume_priority.sh @@ -24,7 +24,7 @@ cat $config_path \ > $config_path_tmp mv $config_path_tmp $config_path -$CLICKHOUSE_CLIENT -nm --query " +$CLICKHOUSE_CLIENT -m --query " set send_logs_level='error'; SYSTEM RELOAD CONFIG" 2>&1 | grep -c 'volume_priority values must be unique across the policy' @@ -40,7 +40,7 @@ cat $config_path \ > $config_path_tmp mv $config_path_tmp $config_path -$CLICKHOUSE_CLIENT -nm --query " +$CLICKHOUSE_CLIENT -m --query " set send_logs_level='error'; SYSTEM RELOAD CONFIG" 2>&1 | grep -c 'volume_priority values must cover the range from 1 to N (lowest priority specified) without gaps' diff --git a/tests/queries/0_stateless/02963_remote_read_small_buffer_size_bug.sh b/tests/queries/0_stateless/02963_remote_read_small_buffer_size_bug.sh index 24fe964b824..3b57941452d 100755 --- a/tests/queries/0_stateless/02963_remote_read_small_buffer_size_bug.sh +++ b/tests/queries/0_stateless/02963_remote_read_small_buffer_size_bug.sh @@ -7,7 +7,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) disk_name="02963_remote_read_bug" -$CLICKHOUSE_CLIENT -nm --query " +$CLICKHOUSE_CLIENT -m --query " DROP TABLE IF EXISTS test; CREATE TABLE test (a Int32, s String) @@ -22,7 +22,7 @@ OPTIMIZE TABLE test FINAL; query_id=$(random_str 10) -$CLICKHOUSE_CLIENT -nm --query_id "$query_id" --query " +$CLICKHOUSE_CLIENT -m --query_id "$query_id" --query " WITH RANDOM_SET AS ( SELECT rand32() % 10000 FROM numbers(100) ) @@ -37,7 +37,7 @@ SETTINGS merge_tree_min_bytes_for_concurrent_read_for_remote_filesystem = 1, merge_tree_min_rows_for_concurrent_read_for_remote_filesystem = 1; " -$CLICKHOUSE_CLIENT -nm --query " +$CLICKHOUSE_CLIENT -m --query " SYSTEM FLUSH LOGS; -- This threshold was determined experimentally - before the fix this ratio had values around 50K diff --git a/tests/queries/0_stateless/02967_parallel_replicas_join_algo_and_analyzer_1.sh b/tests/queries/0_stateless/02967_parallel_replicas_join_algo_and_analyzer_1.sh index 1089eb4051f..b4271c3d29b 100755 --- a/tests/queries/0_stateless/02967_parallel_replicas_join_algo_and_analyzer_1.sh +++ b/tests/queries/0_stateless/02967_parallel_replicas_join_algo_and_analyzer_1.sh @@ -6,7 +6,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) . 
"$CURDIR"/../shell_config.sh -$CLICKHOUSE_CLIENT -nm -q " +$CLICKHOUSE_CLIENT -m -q " drop table if exists num_1; drop table if exists num_2; diff --git a/tests/queries/0_stateless/02967_parallel_replicas_join_algo_and_analyzer_2.sh b/tests/queries/0_stateless/02967_parallel_replicas_join_algo_and_analyzer_2.sh index 7a0e2d9bfdb..ed13bf3321b 100755 --- a/tests/queries/0_stateless/02967_parallel_replicas_join_algo_and_analyzer_2.sh +++ b/tests/queries/0_stateless/02967_parallel_replicas_join_algo_and_analyzer_2.sh @@ -6,7 +6,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) . "$CURDIR"/../shell_config.sh -$CLICKHOUSE_CLIENT -nm -q " +$CLICKHOUSE_CLIENT -m -q " drop table if exists num_1; drop table if exists num_2; diff --git a/tests/queries/0_stateless/02967_parallel_replicas_join_algo_and_analyzer_3.sh b/tests/queries/0_stateless/02967_parallel_replicas_join_algo_and_analyzer_3.sh index c4b44ce11c5..9abe870f4e4 100755 --- a/tests/queries/0_stateless/02967_parallel_replicas_join_algo_and_analyzer_3.sh +++ b/tests/queries/0_stateless/02967_parallel_replicas_join_algo_and_analyzer_3.sh @@ -6,7 +6,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) . "$CURDIR"/../shell_config.sh -$CLICKHOUSE_CLIENT -nm -q " +$CLICKHOUSE_CLIENT -m -q " drop table if exists num_1; drop table if exists num_2; diff --git a/tests/queries/0_stateless/02969_auto_format_detection.sh b/tests/queries/0_stateless/02969_auto_format_detection.sh index 88d6575e499..ab21dbb3df8 100755 --- a/tests/queries/0_stateless/02969_auto_format_detection.sh +++ b/tests/queries/0_stateless/02969_auto_format_detection.sh @@ -24,12 +24,12 @@ $CLICKHOUSE_LOCAL -q "select * from generateRandom('a UInt64, b String, c Array( $CLICKHOUSE_LOCAL -q "desc file('$DATA_FILE', auto, 'a UInt64, b String, c Array(UInt64), d Tuple(a UInt64, b String)')" -$CLICKHOUSE_LOCAL -nmq " +$CLICKHOUSE_LOCAL -mq " desc file('$DATA_FILE'); desc file('$DATA_FILE'); " -$CLICKHOUSE_LOCAL -nmq " +$CLICKHOUSE_LOCAL -mq " desc file('$DATA_FILE', JSONEachRow); desc file('$DATA_FILE'); " @@ -39,7 +39,7 @@ $CLICKHOUSE_LOCAL -q "select * from generateRandom('a UInt64, b String, c Array( $CLICKHOUSE_LOCAL -q "desc file('$DATA_FILE.{1,2}')" $CLICKHOUSE_LOCAL -q "desc file('$DATA_FILE.{1,2}') settings schema_inference_mode='union'" 2>&1 | grep -c "CANNOT_DETECT_FORMAT" -$CLICKHOUSE_LOCAL -nmq " +$CLICKHOUSE_LOCAL -mq " desc file('$DATA_FILE.2'); desc file('$DATA_FILE.{1,2}'); " diff --git a/tests/queries/0_stateless/02980_s3_plain_DROP_TABLE_MergeTree.sh b/tests/queries/0_stateless/02980_s3_plain_DROP_TABLE_MergeTree.sh index d543f7195a9..a46f0018ad3 100755 --- a/tests/queries/0_stateless/02980_s3_plain_DROP_TABLE_MergeTree.sh +++ b/tests/queries/0_stateless/02980_s3_plain_DROP_TABLE_MergeTree.sh @@ -19,7 +19,7 @@ $CLICKHOUSE_CLIENT --allow_deprecated_database_ordinary=1 -q "create database $n CLICKHOUSE_CLIENT=${CLICKHOUSE_CLIENT/--database=$CLICKHOUSE_DATABASE/--database=$new_database} CLICKHOUSE_DATABASE="$new_database" -$CLICKHOUSE_CLIENT -nm -q " +$CLICKHOUSE_CLIENT -m -q " drop table if exists data; create table data (key Int) engine=MergeTree() order by key; insert into data values (1); @@ -29,7 +29,7 @@ $CLICKHOUSE_CLIENT -nm -q " # suppress output $CLICKHOUSE_CLIENT -q "backup table data to S3('http://localhost:11111/test/s3_plain/backups/$CLICKHOUSE_DATABASE', 'test', 'testtest')" > /dev/null -$CLICKHOUSE_CLIENT -nm -q " +$CLICKHOUSE_CLIENT -m -q " drop table data; attach table data (key Int) engine=MergeTree() order by key settings diff --git 
a/tests/queries/0_stateless/02980_s3_plain_DROP_TABLE_ReplicatedMergeTree.sh b/tests/queries/0_stateless/02980_s3_plain_DROP_TABLE_ReplicatedMergeTree.sh index eec05c81344..522fdd6096a 100755 --- a/tests/queries/0_stateless/02980_s3_plain_DROP_TABLE_ReplicatedMergeTree.sh +++ b/tests/queries/0_stateless/02980_s3_plain_DROP_TABLE_ReplicatedMergeTree.sh @@ -18,7 +18,7 @@ $CLICKHOUSE_CLIENT --allow_deprecated_database_ordinary=1 -q "create database $n CLICKHOUSE_CLIENT=${CLICKHOUSE_CLIENT/--database=$CLICKHOUSE_DATABASE/--database=$new_database} CLICKHOUSE_DATABASE="$new_database" -$CLICKHOUSE_CLIENT -nm -q " +$CLICKHOUSE_CLIENT -m -q " drop table if exists data_read; drop table if exists data_write; @@ -33,7 +33,7 @@ $CLICKHOUSE_CLIENT -nm -q " # suppress output $CLICKHOUSE_CLIENT -q "backup table data_read to S3('http://localhost:11111/test/s3_plain/backups/$CLICKHOUSE_DATABASE', 'test', 'testtest')" > /dev/null -$CLICKHOUSE_CLIENT -nm -q " +$CLICKHOUSE_CLIENT -m -q " drop table data_read; attach table data_read (key Int) engine=ReplicatedMergeTree('/tables/{database}/data', 'read') order by key settings @@ -57,7 +57,7 @@ echo "Files before DETACH TABLE" # sed to match any part, since in case of fault injection part name may not be all_0_0_0 but all_1_1_0 clickhouse-disks -C "$config" --disk s3_plain_disk --query "list --recursive $path" | tail -n+2 | sed 's/all_[^_]*_[^_]*_0/all_X_X_X/g' -$CLICKHOUSE_CLIENT -nm -q " +$CLICKHOUSE_CLIENT -m -q " detach table data_read; detach table data_write; " diff --git a/tests/queries/0_stateless/03008_deduplication_insert_several_blocks_nonreplicated.sh b/tests/queries/0_stateless/03008_deduplication_insert_several_blocks_nonreplicated.sh index ef4866cb69e..fca8b7a5e97 100755 --- a/tests/queries/0_stateless/03008_deduplication_insert_several_blocks_nonreplicated.sh +++ b/tests/queries/0_stateless/03008_deduplication_insert_several_blocks_nonreplicated.sh @@ -36,7 +36,7 @@ for insert_method in "InsertSelect" "InsertValues"; do fi echo "$THIS_RUN" - $CLICKHOUSE_CLIENT --max_insert_block_size 1 -nmq " + $CLICKHOUSE_CLIENT --max_insert_block_size 1 -mq " $(python3 $CURDIR/03008_deduplication.python insert_several_blocks_into_table \ --insert-method $insert_method \ --table-engine $ENGINE \ diff --git a/tests/queries/0_stateless/03008_deduplication_insert_several_blocks_replicated.sh b/tests/queries/0_stateless/03008_deduplication_insert_several_blocks_replicated.sh index dd4b1f7cec0..08d5ae3a6b2 100755 --- a/tests/queries/0_stateless/03008_deduplication_insert_several_blocks_replicated.sh +++ b/tests/queries/0_stateless/03008_deduplication_insert_several_blocks_replicated.sh @@ -36,7 +36,7 @@ for insert_method in "InsertSelect" "InsertValues"; do fi echo "$THIS_RUN" - $CLICKHOUSE_CLIENT --max_insert_block_size 1 -nmq " + $CLICKHOUSE_CLIENT --max_insert_block_size 1 -mq " $(python3 $CURDIR/03008_deduplication.python insert_several_blocks_into_table \ --insert-method $insert_method \ --table-engine $ENGINE \ diff --git a/tests/queries/0_stateless/03008_deduplication_mv_generates_several_blocks_nonreplicated.sh b/tests/queries/0_stateless/03008_deduplication_mv_generates_several_blocks_nonreplicated.sh index 33386c76edb..678b1db4d68 100755 --- a/tests/queries/0_stateless/03008_deduplication_mv_generates_several_blocks_nonreplicated.sh +++ b/tests/queries/0_stateless/03008_deduplication_mv_generates_several_blocks_nonreplicated.sh @@ -36,7 +36,7 @@ for insert_method in "InsertSelect" "InsertValues"; do fi echo "$THIS_RUN" - $CLICKHOUSE_CLIENT 
--max_insert_block_size 1 -nmq " + $CLICKHOUSE_CLIENT --max_insert_block_size 1 -mq " $(python3 $CURDIR/03008_deduplication.python mv_generates_several_blocks \ --insert-method $insert_method \ --table-engine $ENGINE \ diff --git a/tests/queries/0_stateless/03008_deduplication_mv_generates_several_blocks_replicated.sh b/tests/queries/0_stateless/03008_deduplication_mv_generates_several_blocks_replicated.sh index b66ef83abf2..606fda6d4fd 100755 --- a/tests/queries/0_stateless/03008_deduplication_mv_generates_several_blocks_replicated.sh +++ b/tests/queries/0_stateless/03008_deduplication_mv_generates_several_blocks_replicated.sh @@ -36,7 +36,7 @@ for insert_method in "InsertSelect" "InsertValues"; do fi echo "$THIS_RUN" - $CLICKHOUSE_CLIENT --max_insert_block_size 1 -nmq " + $CLICKHOUSE_CLIENT --max_insert_block_size 1 -mq " $(python3 $CURDIR/03008_deduplication.python mv_generates_several_blocks \ --insert-method $insert_method \ --table-engine $ENGINE \ diff --git a/tests/queries/0_stateless/03008_deduplication_several_mv_into_one_table_nonreplicated.sh b/tests/queries/0_stateless/03008_deduplication_several_mv_into_one_table_nonreplicated.sh index f9e1838f491..b571631dc8f 100755 --- a/tests/queries/0_stateless/03008_deduplication_several_mv_into_one_table_nonreplicated.sh +++ b/tests/queries/0_stateless/03008_deduplication_several_mv_into_one_table_nonreplicated.sh @@ -36,7 +36,7 @@ for insert_method in "InsertSelect" "InsertValues"; do fi echo "$THIS_RUN" - $CLICKHOUSE_CLIENT --max_insert_block_size 1 -nmq " + $CLICKHOUSE_CLIENT --max_insert_block_size 1 -mq " $(python3 $CURDIR/03008_deduplication.python several_mv_into_one_table \ --insert-method $insert_method \ --table-engine $ENGINE \ diff --git a/tests/queries/0_stateless/03008_deduplication_several_mv_into_one_table_replicated.sh b/tests/queries/0_stateless/03008_deduplication_several_mv_into_one_table_replicated.sh index 698e70d4064..e4358011e50 100755 --- a/tests/queries/0_stateless/03008_deduplication_several_mv_into_one_table_replicated.sh +++ b/tests/queries/0_stateless/03008_deduplication_several_mv_into_one_table_replicated.sh @@ -36,7 +36,7 @@ for insert_method in "InsertSelect" "InsertValues"; do fi echo "$THIS_RUN" - $CLICKHOUSE_CLIENT --max_insert_block_size 1 -nmq " + $CLICKHOUSE_CLIENT --max_insert_block_size 1 -mq " $(python3 $CURDIR/03008_deduplication.python several_mv_into_one_table \ --insert-method $insert_method \ --table-engine $ENGINE \ diff --git a/tests/queries/0_stateless/03008_s3_plain_rewritable.sh b/tests/queries/0_stateless/03008_s3_plain_rewritable.sh index 8eea7940774..c13674b9bd1 100755 --- a/tests/queries/0_stateless/03008_s3_plain_rewritable.sh +++ b/tests/queries/0_stateless/03008_s3_plain_rewritable.sh @@ -9,7 +9,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) ${CLICKHOUSE_CLIENT} --query "drop table if exists test_s3_mt" -${CLICKHOUSE_CLIENT} -nm --query " +${CLICKHOUSE_CLIENT} -m --query " create table test_s3_mt (a Int32, b Int64, c Int64) engine = MergeTree() partition by intDiv(a, 1000) order by tuple(a, b) settings disk = disk( name = 03008_s3_plain_rewritable, @@ -19,7 +19,7 @@ settings disk = disk( secret_access_key = clickhouse); " -${CLICKHOUSE_CLIENT} -nm --query " +${CLICKHOUSE_CLIENT} -m --query " insert into test_s3_mt (*) values (1, 2, 0), (2, 2, 2), (3, 1, 9), (4, 7, 7), (5, 10, 2), (6, 12, 5); insert into test_s3_mt (*) select number, number, number from numbers_mt(10000); select count(*) from test_s3_mt; @@ -31,13 +31,13 @@ ${CLICKHOUSE_CLIENT} --query "optimize 
table test_s3_mt final" ${CLICKHOUSE_CLIENT} -m --query " alter table test_s3_mt add projection test_s3_mt_projection (select * order by b)" 2>&1 | grep -Fq "SUPPORT_IS_DISABLED" -${CLICKHOUSE_CLIENT} -nm --query " +${CLICKHOUSE_CLIENT} -m --query " alter table test_s3_mt update c = 0 where a % 2 = 1; alter table test_s3_mt add column d Int64 after c; alter table test_s3_mt drop column c; " 2>&1 | grep -Fq "SUPPORT_IS_DISABLED" -${CLICKHOUSE_CLIENT} -nm --query " +${CLICKHOUSE_CLIENT} -m --query " detach table test_s3_mt; attach table test_s3_mt; " diff --git a/tests/queries/0_stateless/03032_dynamically_resize_filesystem_cache_2.sh b/tests/queries/0_stateless/03032_dynamically_resize_filesystem_cache_2.sh index cba5317fcfa..278ad6f0654 100755 --- a/tests/queries/0_stateless/03032_dynamically_resize_filesystem_cache_2.sh +++ b/tests/queries/0_stateless/03032_dynamically_resize_filesystem_cache_2.sh @@ -7,7 +7,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) disk_name="s3_cache" -$CLICKHOUSE_CLIENT -nm --query " +$CLICKHOUSE_CLIENT -m --query " DROP TABLE IF EXISTS test; CREATE TABLE test (a String) engine=MergeTree() ORDER BY tuple() SETTINGS disk = '$disk_name'; INSERT INTO test SELECT randomString(1000); @@ -26,7 +26,7 @@ sed -i "s|$prev_max_size<\/max_size>|$new_max_size<\/max_siz # echo $prev_max_size # echo $new_max_size -$CLICKHOUSE_CLIENT -nm --query " +$CLICKHOUSE_CLIENT -m --query " set send_logs_level='fatal'; SYSTEM RELOAD CONFIG" @@ -36,7 +36,7 @@ $CLICKHOUSE_CLIENT --query "SELECT current_size <= max_size FROM system.filesyst sed -i "s|$new_max_size<\/max_size>|$prev_max_size<\/max_size>|" $config_path -$CLICKHOUSE_CLIENT -nm --query " +$CLICKHOUSE_CLIENT -m --query " set send_logs_level='fatal'; SYSTEM RELOAD CONFIG" diff --git a/tests/queries/0_stateless/03039_dynamic_aggregating_merge_tree.sh b/tests/queries/0_stateless/03039_dynamic_aggregating_merge_tree.sh index 9ea86105a3a..8183b636549 100755 --- a/tests/queries/0_stateless/03039_dynamic_aggregating_merge_tree.sh +++ b/tests/queries/0_stateless/03039_dynamic_aggregating_merge_tree.sh @@ -17,7 +17,7 @@ function test() $CH_CLIENT -q "select count(), dynamicType(d) from test group by dynamicType(d) order by count(), dynamicType(d)" $CH_CLIENT -q "select count(), sum from (select sumMerge(sum) as sum from test group by id, _part) group by sum order by sum, count()" - $CH_CLIENT -nm -q "system start merges test; optimize table test final" + $CH_CLIENT -m -q "system start merges test; optimize table test final" $CH_CLIENT -q "select count(), dynamicType(d) from test group by dynamicType(d) order by count(), dynamicType(d)" $CH_CLIENT -q "select count(), sum from (select sumMerge(sum) as sum from test group by id, _part) group by sum order by sum, count()" $CH_CLIENT -q "drop table test" diff --git a/tests/queries/0_stateless/03039_dynamic_collapsing_merge_tree.sh b/tests/queries/0_stateless/03039_dynamic_collapsing_merge_tree.sh index 9a2a6dd957c..ddafa9db210 100755 --- a/tests/queries/0_stateless/03039_dynamic_collapsing_merge_tree.sh +++ b/tests/queries/0_stateless/03039_dynamic_collapsing_merge_tree.sh @@ -16,7 +16,7 @@ function test() $CH_CLIENT -q "insert into test select number, -1, 'str_' || toString(number) from numbers(50000, 100000)" $CH_CLIENT -q "select count(), dynamicType(d) from test group by dynamicType(d) order by count(), dynamicType(d)" - $CH_CLIENT -nm -q "system start merges test; optimize table test final" + $CH_CLIENT -m -q "system start merges test; optimize table test final" $CH_CLIENT -q 
"select count(), dynamicType(d) from test group by dynamicType(d) order by count(), dynamicType(d)" $CH_CLIENT -q "drop table test" } diff --git a/tests/queries/0_stateless/03039_dynamic_replacing_merge_tree.sh b/tests/queries/0_stateless/03039_dynamic_replacing_merge_tree.sh index 0199035a3df..b1046ca08ba 100755 --- a/tests/queries/0_stateless/03039_dynamic_replacing_merge_tree.sh +++ b/tests/queries/0_stateless/03039_dynamic_replacing_merge_tree.sh @@ -17,7 +17,7 @@ function test() $CH_CLIENT -q "insert into test select number, 'str_' || toString(number) from numbers(50000, 100000)" $CH_CLIENT -q "select count(), dynamicType(d) from test group by dynamicType(d) order by count(), dynamicType(d)" - $CH_CLIENT -nm -q "system start merges test; optimize table test final" + $CH_CLIENT -m -q "system start merges test; optimize table test final" $CH_CLIENT -q "select count(), dynamicType(d) from test group by dynamicType(d) order by count(), dynamicType(d)" $CH_CLIENT -q "drop table test" } diff --git a/tests/queries/0_stateless/03039_dynamic_summing_merge_tree.sh b/tests/queries/0_stateless/03039_dynamic_summing_merge_tree.sh index e2ea5bc3466..f3693a2e7b1 100755 --- a/tests/queries/0_stateless/03039_dynamic_summing_merge_tree.sh +++ b/tests/queries/0_stateless/03039_dynamic_summing_merge_tree.sh @@ -17,7 +17,7 @@ function test() $CH_CLIENT -q "select count(), dynamicType(d) from test group by dynamicType(d) order by count(), dynamicType(d)" $CH_CLIENT -q "select count(), sum from test group by sum order by sum, count()" - $CH_CLIENT -nm -q "system start merges test; optimize table test final" + $CH_CLIENT -m -q "system start merges test; optimize table test final" $CH_CLIENT -q "select count(), dynamicType(d) from test group by dynamicType(d) order by count(), dynamicType(d)" $CH_CLIENT -q "select count(), sum from test group by sum order by sum, count()" $CH_CLIENT -q "drop table test" diff --git a/tests/queries/0_stateless/03039_dynamic_versioned_collapsing_merge_tree.sh b/tests/queries/0_stateless/03039_dynamic_versioned_collapsing_merge_tree.sh index 43607cf95a8..bba92a2ee06 100755 --- a/tests/queries/0_stateless/03039_dynamic_versioned_collapsing_merge_tree.sh +++ b/tests/queries/0_stateless/03039_dynamic_versioned_collapsing_merge_tree.sh @@ -17,7 +17,7 @@ function test() $CH_CLIENT -q "insert into test select number, -1, number >= 75000 ? 
2 : 1, 'str_' || toString(number) from numbers(50000, 100000)" $CH_CLIENT -q "select count(), dynamicType(d) from test group by dynamicType(d) order by count(), dynamicType(d)" - $CH_CLIENT -nm -q "system start merges test; optimize table test final" + $CH_CLIENT -m -q "system start merges test; optimize table test final" $CH_CLIENT -q "select count(), dynamicType(d) from test group by dynamicType(d) order by count(), dynamicType(d)" $CH_CLIENT -q "drop table test" } diff --git a/tests/queries/0_stateless/03151_unload_index_race.sh b/tests/queries/0_stateless/03151_unload_index_race.sh index 7e9dfa7cddc..4cdf06abae2 100755 --- a/tests/queries/0_stateless/03151_unload_index_race.sh +++ b/tests/queries/0_stateless/03151_unload_index_race.sh @@ -42,8 +42,8 @@ function thread_alter_settings() { local TIMELIMIT=$((SECONDS+$1)) while [ $SECONDS -lt "$TIMELIMIT" ]; do - $CLICKHOUSE_CLIENT -n --query "ALTER TABLE t MODIFY SETTING primary_key_ratio_of_unique_prefix_values_to_skip_suffix_columns=0.$RANDOM" - $CLICKHOUSE_CLIENT -n --query "SYSTEM UNLOAD PRIMARY KEY t" + $CLICKHOUSE_CLIENT --query "ALTER TABLE t MODIFY SETTING primary_key_ratio_of_unique_prefix_values_to_skip_suffix_columns=0.$RANDOM" + $CLICKHOUSE_CLIENT --query "SYSTEM UNLOAD PRIMARY KEY t" sleep 0.0$RANDOM done } @@ -52,7 +52,7 @@ function thread_query_table() { local TIMELIMIT=$((SECONDS+$1)) while [ $SECONDS -lt "$TIMELIMIT" ]; do - COUNT=$($CLICKHOUSE_CLIENT -n --query "SELECT count() FROM t where not ignore(*);") + COUNT=$($CLICKHOUSE_CLIENT --query "SELECT count() FROM t where not ignore(*);") if [ "$COUNT" -ne "2000" ]; then echo "$COUNT" fi diff --git a/tests/queries/0_stateless/03172_error_log_table_not_empty.sh b/tests/queries/0_stateless/03172_error_log_table_not_empty.sh index 22a2fd82c64..d1afafa77a5 100755 --- a/tests/queries/0_stateless/03172_error_log_table_not_empty.sh +++ b/tests/queries/0_stateless/03172_error_log_table_not_empty.sh @@ -15,7 +15,7 @@ errors_222=$($CLICKHOUSE_CLIENT -q "SELECT sum(value) FROM system.error_log WHER errors_333=$($CLICKHOUSE_CLIENT -q "SELECT sum(value) FROM system.error_log WHERE code = 333") # Throw three random errors: 111, 222 and 333 and wait for more than collect_interval_milliseconds to ensure system.error_log is flushed -$CLICKHOUSE_CLIENT -mn -q " +$CLICKHOUSE_CLIENT -m -q " SELECT throwIf(true, 'error_log', toInt16(111)) SETTINGS allow_custom_error_code_in_throwif=1; -- { serverError 111 } SELECT throwIf(true, 'error_log', toInt16(222)) SETTINGS allow_custom_error_code_in_throwif=1; -- { serverError 222 } SELECT throwIf(true, 'error_log', toInt16(333)) SETTINGS allow_custom_error_code_in_throwif=1; -- { serverError 333 } @@ -24,14 +24,14 @@ SYSTEM FLUSH LOGS; " # Check that the three random errors are propagated -$CLICKHOUSE_CLIENT -mn -q " +$CLICKHOUSE_CLIENT -m -q " SELECT sum(value) > $errors_111 FROM system.error_log WHERE code = 111; SELECT sum(value) > $errors_222 FROM system.error_log WHERE code = 222; SELECT sum(value) > $errors_333 FROM system.error_log WHERE code = 333; " # Ensure that if we throw them again, they're still propagated -$CLICKHOUSE_CLIENT -mn -q " +$CLICKHOUSE_CLIENT -m -q " SELECT throwIf(true, 'error_log', toInt16(111)) SETTINGS allow_custom_error_code_in_throwif=1; -- { serverError 111 } SELECT throwIf(true, 'error_log', toInt16(222)) SETTINGS allow_custom_error_code_in_throwif=1; -- { serverError 222 } SELECT throwIf(true, 'error_log', toInt16(333)) SETTINGS allow_custom_error_code_in_throwif=1; -- { serverError 333 } @@ -39,7 +39,7 @@ SELECT 
sleep(2) format NULL; SYSTEM FLUSH LOGS; " -$CLICKHOUSE_CLIENT -mn -q " +$CLICKHOUSE_CLIENT -m -q " SELECT sum(value) > $(($errors_111+1)) FROM system.error_log WHERE code = 111; SELECT sum(value) > $(($errors_222+1)) FROM system.error_log WHERE code = 222; SELECT sum(value) > $(($errors_333+1)) FROM system.error_log WHERE code = 333; diff --git a/tests/queries/0_stateless/03203_hive_style_partitioning.sh b/tests/queries/0_stateless/03203_hive_style_partitioning.sh index 60e8a6e9faa..fdd0fcd8ec5 100755 --- a/tests/queries/0_stateless/03203_hive_style_partitioning.sh +++ b/tests/queries/0_stateless/03203_hive_style_partitioning.sh @@ -8,7 +8,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) $CLICKHOUSE_LOCAL -q "SELECT 'TESTING THE FILE HIVE PARTITIONING'" -$CLICKHOUSE_LOCAL -n -q """ +$CLICKHOUSE_LOCAL -q """ set use_hive_partitioning = 1; SELECT *, column0 FROM file('$CURDIR/data_hive/partitioning/column0=Elizabeth/sample.parquet') LIMIT 10; @@ -22,20 +22,20 @@ SELECT toTypeName(array), toTypeName(float) FROM file('$CURDIR/data_hive/partiti SELECT count(*) FROM file('$CURDIR/data_hive/partitioning/number=42/date=2020-01-01/sample.parquet') WHERE number = 42; """ -$CLICKHOUSE_LOCAL -n -q """ +$CLICKHOUSE_LOCAL -q """ set use_hive_partitioning = 1; SELECT identifier FROM file('$CURDIR/data_hive/partitioning/identifier=*/email.csv') LIMIT 2; SELECT a FROM file('$CURDIR/data_hive/partitioning/a=b/a=b/sample.parquet') LIMIT 1; """ -$CLICKHOUSE_LOCAL -n -q """ +$CLICKHOUSE_LOCAL -q """ set use_hive_partitioning = 1; SELECT *, column0 FROM file('$CURDIR/data_hive/partitioning/column0=Elizabeth/column0=Elizabeth1/sample.parquet') LIMIT 10; """ 2>&1 | grep -c "INCORRECT_DATA" -$CLICKHOUSE_LOCAL -n -q """ +$CLICKHOUSE_LOCAL -q """ set use_hive_partitioning = 0; SELECT *, non_existing_column FROM file('$CURDIR/data_hive/partitioning/non_existing_column=Elizabeth/sample.parquet') LIMIT 10; @@ -45,14 +45,14 @@ SELECT *, non_existing_column FROM file('$CURDIR/data_hive/partitioning/non_exis $CLICKHOUSE_LOCAL -q "SELECT 'TESTING THE URL PARTITIONING'" -$CLICKHOUSE_LOCAL -n -q """ +$CLICKHOUSE_LOCAL -q """ set use_hive_partitioning = 1; SELECT *, column0 FROM url('http://localhost:11111/test/hive_partitioning/column0=Elizabeth/sample.parquet') LIMIT 10; SELECT *, non_existing_column FROM url('http://localhost:11111/test/hive_partitioning/non_existing_column=Elizabeth/sample.parquet') LIMIT 10;""" -$CLICKHOUSE_LOCAL -n -q """ +$CLICKHOUSE_LOCAL -q """ set use_hive_partitioning = 0; SELECT *, _column0 FROM url('http://localhost:11111/test/hive_partitioning/column0=Elizabeth/sample.parquet') LIMIT 10; @@ -62,7 +62,7 @@ SELECT *, _column0 FROM url('http://localhost:11111/test/hive_partitioning/colum $CLICKHOUSE_LOCAL -q "SELECT 'TESTING THE S3 PARTITIONING'" -$CLICKHOUSE_CLIENT -n -q """ +$CLICKHOUSE_CLIENT -q """ set use_hive_partitioning = 1; SELECT *, column0 FROM s3('http://localhost:11111/test/hive_partitioning/column0=Elizabeth/sample.parquet') LIMIT 10; @@ -71,7 +71,7 @@ SELECT *, non_existing_column FROM s3('http://localhost:11111/test/hive_partitio SELECT *, column0 FROM s3('http://localhost:11111/test/hive_partitioning/column0=*/sample.parquet') WHERE column0 = 'Elizabeth' LIMIT 10; """ -$CLICKHOUSE_CLIENT -n -q """ +$CLICKHOUSE_CLIENT -q """ set use_hive_partitioning = 0; SELECT *, _column0 FROM s3('http://localhost:11111/test/hive_partitioning/column0=Elizabeth/sample.parquet') LIMIT 10; @@ -79,7 +79,7 @@ SELECT *, _column0 FROM s3('http://localhost:11111/test/hive_partitioning/column 
$CLICKHOUSE_LOCAL -q "SELECT 'TESTING THE S3CLUSTER PARTITIONING'" -$CLICKHOUSE_CLIENT -n -q """ +$CLICKHOUSE_CLIENT -q """ set use_hive_partitioning = 1; SELECT *, column0 FROM s3Cluster(test_cluster_one_shard_three_replicas_localhost, 'http://localhost:11111/test/hive_partitioning/column0=Elizabeth/sample.parquet') LIMIT 10; diff --git a/tests/queries/0_stateless/03214_backup_and_clear_old_temporary_directories.sh b/tests/queries/0_stateless/03214_backup_and_clear_old_temporary_directories.sh index e0c8f08e695..6bfe82a91a6 100755 --- a/tests/queries/0_stateless/03214_backup_and_clear_old_temporary_directories.sh +++ b/tests/queries/0_stateless/03214_backup_and_clear_old_temporary_directories.sh @@ -8,7 +8,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # In this test we restore from "/tests/queries/0_stateless/backups/mt_250_parts.zip" backup_name="$($CURDIR/helpers/install_predefined_backup.sh mt_250_parts.zip)" -${CLICKHOUSE_CLIENT} -nm --query " +${CLICKHOUSE_CLIENT} -m --query " DROP TABLE IF EXISTS manyparts; CREATE TABLE manyparts (x Int64) ENGINE=MergeTree ORDER BY tuple() SETTINGS merge_tree_clear_old_temporary_directories_interval_seconds=1, temporary_directories_lifetime=1; " @@ -16,7 +16,7 @@ CREATE TABLE manyparts (x Int64) ENGINE=MergeTree ORDER BY tuple() SETTINGS merg # RESTORE must protect its temporary directories from removing. ${CLICKHOUSE_CLIENT} --query "RESTORE TABLE default.mt_250_parts AS manyparts FROM Disk('backups', '${backup_name}') SETTINGS allow_different_table_def=true" | grep -o "RESTORED" -${CLICKHOUSE_CLIENT} -nm --query " +${CLICKHOUSE_CLIENT} -m --query " SELECT count(), sum(x) FROM manyparts; DROP TABLE manyparts; " From 2bbede2d939c6929cfa8dcb9e75c323269d56fff Mon Sep 17 00:00:00 2001 From: Robert Schulze Date: Sat, 7 Sep 2024 16:21:06 +0000 Subject: [PATCH 61/70] Remove superfluous --multiquery/-n --- .../00115_shard_in_incomplete_result.sh | 2 +- ...ard_memory_tracker_and_exception_safety.sh | 2 +- ...rage_file_and_clickhouse-local_app_long.sh | 6 ++-- tests/queries/0_stateless/00505_secure.sh | 2 +- .../0_stateless/00531_client_ignore_error.sh | 6 ++-- .../0_stateless/00534_client_ignore_error.sh | 6 ++-- .../0_stateless/00686_client_exit_code.sh | 2 +- .../00705_drop_create_merge_tree.sh | 4 +-- ...731_long_merge_tree_select_opened_files.sh | 2 +- .../00838_system_tables_drop_table_race.sh | 4 +-- ...ong_concurrent_select_and_drop_deadlock.sh | 2 +- .../queries/0_stateless/00900_long_parquet.sh | 28 +++++++++---------- .../0_stateless/00900_long_parquet_decimal.sh | 16 +++++------ .../00921_datetime64_compatibility_long.sh | 2 +- ...ices_mutation_replicated_zookeeper_long.sh | 2 +- .../00991_system_parts_race_condition_long.sh | 2 +- ...tem_parts_race_condition_zookeeper_long.sh | 8 +++--- .../0_stateless/01014_lazy_database_basic.sh | 12 ++++---- ...rrent_recreate_reattach_and_show_tables.sh | 2 +- ...8_ddl_dictionaries_concurrent_requrests.sh | 8 +++--- .../01019_alter_materialized_view_atomic.sh | 2 +- .../0_stateless/01035_avg_weighted_long.sh | 2 +- .../0_stateless/01053_ssd_dictionary.sh | 2 +- .../01079_bad_alters_zookeeper_long.sh | 2 +- .../01111_create_drop_replicated_db_stress.sh | 2 +- .../0_stateless/01114_database_atomic.sh | 6 ++-- .../queries/0_stateless/01119_session_log.sh | 2 +- ...1213_alter_rename_column_zookeeper_long.sh | 2 +- .../0_stateless/01238_http_memory_tracking.sh | 2 +- .../01280_ssd_complex_key_dictionary.sh | 12 ++++---- ..._recreate_reattach_and_show_tables_long.sh | 2 +- 
.../01305_replica_create_drop_zookeeper.sh | 2 +- ...20_create_sync_race_condition_zookeeper.sh | 2 +- .../0_stateless/01395_limit_more_cases.sh | 2 +- .../01395_limit_more_cases_random.sh | 2 +- .../01412_cache_dictionary_race.sh | 4 +-- ...01454_storagememory_data_race_challenge.sh | 2 +- ...check_many_parallel_quorum_inserts_long.sh | 6 ++-- ...1509_check_parallel_quorum_inserts_long.sh | 8 +++--- .../01563_distributed_query_finish.sh | 4 +-- .../01732_race_condition_storage_join_long.sh | 10 +++---- ...est_postgresql_protocol_with_row_policy.sh | 6 ++-- .../01810_max_part_removal_threads_long.sh | 8 +++--- .../01889_postgresql_protocol_null_fields.sh | 2 +- .../01900_kill_mutation_parallel_long.sh | 10 +++---- ...nt_ttl_and_normal_merges_zookeeper_long.sh | 2 +- .../1_stateful/00177_memory_bound_merging.sh | 8 +++--- ...o_seek_avoiding_when_reading_from_cache.sh | 2 +- 48 files changed, 117 insertions(+), 117 deletions(-) diff --git a/tests/queries/0_stateless/00115_shard_in_incomplete_result.sh b/tests/queries/0_stateless/00115_shard_in_incomplete_result.sh index 4916721764c..1efe529ac24 100755 --- a/tests/queries/0_stateless/00115_shard_in_incomplete_result.sh +++ b/tests/queries/0_stateless/00115_shard_in_incomplete_result.sh @@ -15,6 +15,6 @@ $CLICKHOUSE_CLIENT --query=" INSERT INTO users VALUES (1321770221388956068); "; -for _ in {1..10}; do seq 1 10 | sed "s/.*/SELECT count() FROM (SELECT * FROM remote('127.0.0.{2,3}', ${CLICKHOUSE_DATABASE}, users) WHERE UserID IN (SELECT arrayJoin([1468013291393583084, 1321770221388956068])));/" | $CLICKHOUSE_CLIENT -n | grep -vE '^4$' && echo 'Fail!' && break; echo -n '.'; done; echo +for _ in {1..10}; do seq 1 10 | sed "s/.*/SELECT count() FROM (SELECT * FROM remote('127.0.0.{2,3}', ${CLICKHOUSE_DATABASE}, users) WHERE UserID IN (SELECT arrayJoin([1468013291393583084, 1321770221388956068])));/" | $CLICKHOUSE_CLIENT | grep -vE '^4$' && echo 'Fail!' && break; echo -n '.'; done; echo $CLICKHOUSE_CLIENT --query="DROP TABLE users;"; diff --git a/tests/queries/0_stateless/00133_long_shard_memory_tracker_and_exception_safety.sh b/tests/queries/0_stateless/00133_long_shard_memory_tracker_and_exception_safety.sh index d57efaa1f0e..f0c0354ab33 100755 --- a/tests/queries/0_stateless/00133_long_shard_memory_tracker_and_exception_safety.sh +++ b/tests/queries/0_stateless/00133_long_shard_memory_tracker_and_exception_safety.sh @@ -5,7 +5,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . 
"$CURDIR"/../shell_config.sh -$CLICKHOUSE_CLIENT -n --query=" +$CLICKHOUSE_CLIENT --query=" DROP TABLE IF EXISTS numbers_100k; CREATE VIEW numbers_100k AS SELECT * FROM system.numbers LIMIT 100000; "; diff --git a/tests/queries/0_stateless/00385_storage_file_and_clickhouse-local_app_long.sh b/tests/queries/0_stateless/00385_storage_file_and_clickhouse-local_app_long.sh index b59aae83e81..b6cc270994f 100755 --- a/tests/queries/0_stateless/00385_storage_file_and_clickhouse-local_app_long.sh +++ b/tests/queries/0_stateless/00385_storage_file_and_clickhouse-local_app_long.sh @@ -48,13 +48,13 @@ pack_unpack_compare "SELECT name, is_aggregate FROM system.functions" "name Stri echo # Check settings are passed correctly ${CLICKHOUSE_LOCAL} --max_rows_in_distinct=33 -q "SELECT name, value FROM system.settings WHERE name = 'max_rows_in_distinct'" -${CLICKHOUSE_LOCAL} -n -q "SET max_rows_in_distinct=33; SELECT name, value FROM system.settings WHERE name = 'max_rows_in_distinct'" +${CLICKHOUSE_LOCAL} -q "SET max_rows_in_distinct=33; SELECT name, value FROM system.settings WHERE name = 'max_rows_in_distinct'" ${CLICKHOUSE_LOCAL} --max_bytes_before_external_group_by=1 --max_block_size=10 -q "SELECT sum(ignore(*)) FROM (SELECT number, count() FROM numbers(1000) GROUP BY number)" echo # Check exta options, we expect zero exit code and no stderr output -(${CLICKHOUSE_LOCAL} --ignore-error -n --echo -q "SELECT nothing_to_do();SELECT 42;" 2>/dev/null || echo "Wrong RC") +(${CLICKHOUSE_LOCAL} --ignore-error --echo -q "SELECT nothing_to_do();SELECT 42;" 2>/dev/null || echo "Wrong RC") echo -${CLICKHOUSE_LOCAL} -n -q "CREATE TABLE sophisticated_default +${CLICKHOUSE_LOCAL} -q "CREATE TABLE sophisticated_default ( a UInt8 DEFAULT 3, b UInt8 ALIAS a + 5, diff --git a/tests/queries/0_stateless/00505_secure.sh b/tests/queries/0_stateless/00505_secure.sh index eaa50bce6b1..eed0d3bf5c6 100755 --- a/tests/queries/0_stateless/00505_secure.sh +++ b/tests/queries/0_stateless/00505_secure.sh @@ -23,7 +23,7 @@ $CLICKHOUSE_CLIENT_SECURE -q "SELECT 4;" # TODO: can test only on unchanged port. Possible solutions: generate config or pass shard port via command line if [[ "$CLICKHOUSE_PORT_TCP_SECURE" = "$CLICKHOUSE_PORT_TCP_SECURE" ]]; then - cat "$CURDIR"/00505_distributed_secure.data | $CLICKHOUSE_CLIENT_SECURE -n -m + cat "$CURDIR"/00505_distributed_secure.data | $CLICKHOUSE_CLIENT_SECURE -m else tail -n 13 "$CURDIR"/00505_secure.reference fi diff --git a/tests/queries/0_stateless/00531_client_ignore_error.sh b/tests/queries/0_stateless/00531_client_ignore_error.sh index d3215e1beac..553cb9fa897 100755 --- a/tests/queries/0_stateless/00531_client_ignore_error.sh +++ b/tests/queries/0_stateless/00531_client_ignore_error.sh @@ -5,8 +5,8 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . 
"$CURDIR"/../shell_config.sh -echo "SELECT 1; SELECT 2; SELECT CAST(); SELECT ';'; SELECT 3;SELECT CAST();SELECT 4;" | $CLICKHOUSE_CLIENT -n --ignore-error 2>/dev/null -echo "SELECT CAST();" | $CLICKHOUSE_CLIENT -n --ignore-error 2>/dev/null -echo "SELECT 5;" | $CLICKHOUSE_CLIENT -n --ignore-error +echo "SELECT 1; SELECT 2; SELECT CAST(); SELECT ';'; SELECT 3;SELECT CAST();SELECT 4;" | $CLICKHOUSE_CLIENT --ignore-error 2>/dev/null +echo "SELECT CAST();" | $CLICKHOUSE_CLIENT --ignore-error 2>/dev/null +echo "SELECT 5;" | $CLICKHOUSE_CLIENT --ignore-error #$CLICKHOUSE_CLIENT -q "SELECT 'Still alive'" diff --git a/tests/queries/0_stateless/00534_client_ignore_error.sh b/tests/queries/0_stateless/00534_client_ignore_error.sh index d3215e1beac..553cb9fa897 100755 --- a/tests/queries/0_stateless/00534_client_ignore_error.sh +++ b/tests/queries/0_stateless/00534_client_ignore_error.sh @@ -5,8 +5,8 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh -echo "SELECT 1; SELECT 2; SELECT CAST(); SELECT ';'; SELECT 3;SELECT CAST();SELECT 4;" | $CLICKHOUSE_CLIENT -n --ignore-error 2>/dev/null -echo "SELECT CAST();" | $CLICKHOUSE_CLIENT -n --ignore-error 2>/dev/null -echo "SELECT 5;" | $CLICKHOUSE_CLIENT -n --ignore-error +echo "SELECT 1; SELECT 2; SELECT CAST(); SELECT ';'; SELECT 3;SELECT CAST();SELECT 4;" | $CLICKHOUSE_CLIENT --ignore-error 2>/dev/null +echo "SELECT CAST();" | $CLICKHOUSE_CLIENT --ignore-error 2>/dev/null +echo "SELECT 5;" | $CLICKHOUSE_CLIENT --ignore-error #$CLICKHOUSE_CLIENT -q "SELECT 'Still alive'" diff --git a/tests/queries/0_stateless/00686_client_exit_code.sh b/tests/queries/0_stateless/00686_client_exit_code.sh index 9586ddbd0a3..e348f93f30d 100755 --- a/tests/queries/0_stateless/00686_client_exit_code.sh +++ b/tests/queries/0_stateless/00686_client_exit_code.sh @@ -8,5 +8,5 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=./mergetree_mutations.lib . "$CURDIR"/mergetree_mutations.lib -echo "INSERT INTO test FORMAT CSV" | ${CLICKHOUSE_CLIENT} -n 2>/dev/null +echo "INSERT INTO test FORMAT CSV" | ${CLICKHOUSE_CLIENT} 2>/dev/null echo $? diff --git a/tests/queries/0_stateless/00705_drop_create_merge_tree.sh b/tests/queries/0_stateless/00705_drop_create_merge_tree.sh index ea8b9d02e49..fd002668696 100755 --- a/tests/queries/0_stateless/00705_drop_create_merge_tree.sh +++ b/tests/queries/0_stateless/00705_drop_create_merge_tree.sh @@ -5,8 +5,8 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . 
"$CURDIR"/../shell_config.sh -yes 'CREATE TABLE IF NOT EXISTS table (x UInt8) ENGINE = MergeTree ORDER BY tuple();' | head -n 1000 | $CLICKHOUSE_CLIENT --multiquery & -yes 'DROP TABLE IF EXISTS table;' | head -n 1000 | $CLICKHOUSE_CLIENT --multiquery & +yes 'CREATE TABLE IF NOT EXISTS table (x UInt8) ENGINE = MergeTree ORDER BY tuple();' | head -n 1000 | $CLICKHOUSE_CLIENT & +yes 'DROP TABLE IF EXISTS table;' | head -n 1000 | $CLICKHOUSE_CLIENT & wait ${CLICKHOUSE_CLIENT} --query "DROP TABLE IF EXISTS table" diff --git a/tests/queries/0_stateless/00731_long_merge_tree_select_opened_files.sh b/tests/queries/0_stateless/00731_long_merge_tree_select_opened_files.sh index 5a4fd901f8d..285fd3945f9 100755 --- a/tests/queries/0_stateless/00731_long_merge_tree_select_opened_files.sh +++ b/tests/queries/0_stateless/00731_long_merge_tree_select_opened_files.sh @@ -12,7 +12,7 @@ settings="--log_queries=1 --log_query_threads=1 --log_profile_events=1 --log_que # Test insert logging on each block and checkPacket() method -$CLICKHOUSE_CLIENT $settings -n -q " +$CLICKHOUSE_CLIENT $settings -q " DROP TABLE IF EXISTS merge_tree_table; CREATE TABLE merge_tree_table (id UInt64, date Date, uid UInt32) ENGINE = MergeTree(date, id, 8192);" diff --git a/tests/queries/0_stateless/00838_system_tables_drop_table_race.sh b/tests/queries/0_stateless/00838_system_tables_drop_table_race.sh index da4d3b05987..a29d2e5bc71 100755 --- a/tests/queries/0_stateless/00838_system_tables_drop_table_race.sh +++ b/tests/queries/0_stateless/00838_system_tables_drop_table_race.sh @@ -9,7 +9,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) $CLICKHOUSE_CLIENT -q "DROP TABLE IF EXISTS table" -seq 1 100 | sed -r -e "s/.+/CREATE TABLE table (x UInt8) ENGINE = MergeTree ORDER BY x; DROP TABLE table;/" | $CLICKHOUSE_CLIENT -n & -seq 1 100 | sed -r -e "s/.+/SELECT * FROM system.tables WHERE database = '${CLICKHOUSE_DATABASE}' LIMIT 1000000, 1;/" | $CLICKHOUSE_CLIENT -n 2>/dev/null & +seq 1 100 | sed -r -e "s/.+/CREATE TABLE table (x UInt8) ENGINE = MergeTree ORDER BY x; DROP TABLE table;/" | $CLICKHOUSE_CLIENT & +seq 1 100 | sed -r -e "s/.+/SELECT * FROM system.tables WHERE database = '${CLICKHOUSE_DATABASE}' LIMIT 1000000, 1;/" | $CLICKHOUSE_CLIENT 2>/dev/null & wait diff --git a/tests/queries/0_stateless/00840_long_concurrent_select_and_drop_deadlock.sh b/tests/queries/0_stateless/00840_long_concurrent_select_and_drop_deadlock.sh index f0bc52ee356..cb774116356 100755 --- a/tests/queries/0_stateless/00840_long_concurrent_select_and_drop_deadlock.sh +++ b/tests/queries/0_stateless/00840_long_concurrent_select_and_drop_deadlock.sh @@ -27,7 +27,7 @@ function thread_drop_create() while [ $SECONDS -lt "$TIMELIMIT" ] && [ $it -lt 100 ]; do it=$((it+1)) - $CLICKHOUSE_CLIENT -nm -q " + $CLICKHOUSE_CLIENT -m -q " drop table if exists view_00840; create view view_00840 as select count(*),database,table from system.columns group by database,table; " diff --git a/tests/queries/0_stateless/00900_long_parquet.sh b/tests/queries/0_stateless/00900_long_parquet.sh index 07d2f24e446..86a0d013078 100755 --- a/tests/queries/0_stateless/00900_long_parquet.sh +++ b/tests/queries/0_stateless/00900_long_parquet.sh @@ -8,11 +8,11 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) . 
"$CUR_DIR"/../shell_config.sh -${CLICKHOUSE_CLIENT} -n --query=" +${CLICKHOUSE_CLIENT} --query=" DROP TABLE IF EXISTS contributors; CREATE TABLE contributors (name String) ENGINE = Memory;" ${CLICKHOUSE_CLIENT} --query="SELECT * FROM system.contributors ORDER BY name DESC FORMAT Parquet" | ${CLICKHOUSE_CLIENT} --query="INSERT INTO contributors FORMAT Parquet" -${CLICKHOUSE_CLIENT} -n --query=" +${CLICKHOUSE_CLIENT} --query=" -- random results SELECT * FROM contributors LIMIT 10 FORMAT Null; DROP TABLE contributors; @@ -21,30 +21,30 @@ ${CLICKHOUSE_CLIENT} -n --query=" CREATE TABLE parquet_numbers (number UInt64) ENGINE = Memory;" # less than default block size (65k) ${CLICKHOUSE_CLIENT} --query="SELECT * FROM system.numbers LIMIT 10000 FORMAT Parquet" | ${CLICKHOUSE_CLIENT} --query="INSERT INTO parquet_numbers FORMAT Parquet" -${CLICKHOUSE_CLIENT} -n --query=" +${CLICKHOUSE_CLIENT} --query=" SELECT * FROM parquet_numbers ORDER BY number DESC LIMIT 10; TRUNCATE TABLE parquet_numbers;" # More than default block size ${CLICKHOUSE_CLIENT} --query="SELECT * FROM system.numbers LIMIT 100000 FORMAT Parquet" | ${CLICKHOUSE_CLIENT} --query="INSERT INTO parquet_numbers FORMAT Parquet" -${CLICKHOUSE_CLIENT} -n --query=" +${CLICKHOUSE_CLIENT} --query=" SELECT * FROM parquet_numbers ORDER BY number DESC LIMIT 10; TRUNCATE TABLE parquet_numbers;" ${CLICKHOUSE_CLIENT} --max_block_size=2 --query="SELECT * FROM system.numbers LIMIT 3 FORMAT Parquet" | ${CLICKHOUSE_CLIENT} --query="INSERT INTO parquet_numbers FORMAT Parquet" -${CLICKHOUSE_CLIENT} -n --query=" +${CLICKHOUSE_CLIENT} --query=" SELECT * FROM parquet_numbers ORDER BY number DESC LIMIT 10; TRUNCATE TABLE parquet_numbers;" ${CLICKHOUSE_CLIENT} --max_block_size=1 --query="SELECT * FROM system.numbers LIMIT 1000 FORMAT Parquet" | ${CLICKHOUSE_CLIENT} --query="INSERT INTO parquet_numbers FORMAT Parquet" -${CLICKHOUSE_CLIENT} -n --query=" +${CLICKHOUSE_CLIENT} --query=" SELECT * FROM parquet_numbers ORDER BY number DESC LIMIT 10; DROP TABLE parquet_numbers; DROP TABLE IF EXISTS parquet_events; CREATE TABLE parquet_events (event String, value UInt64, description String) ENGINE = Memory;" ${CLICKHOUSE_CLIENT} --query="SELECT * FROM system.events FORMAT Parquet" | ${CLICKHOUSE_CLIENT} --query="INSERT INTO parquet_events FORMAT Parquet" -${CLICKHOUSE_CLIENT} -n --query=" +${CLICKHOUSE_CLIENT} --query=" SELECT event, description FROM parquet_events WHERE event IN ('ContextLock', 'Query') ORDER BY event; DROP TABLE parquet_events; @@ -78,7 +78,7 @@ ${CLICKHOUSE_CLIENT} --query="SELECT * FROM parquet_types2 ORDER BY int8 FORMAT echo diff: diff "${CLICKHOUSE_TMP}"/parquet_all_types_1.dump "${CLICKHOUSE_TMP}"/parquet_all_types_2.dump -${CLICKHOUSE_CLIENT} -n --query=" +${CLICKHOUSE_CLIENT} --query=" TRUNCATE TABLE parquet_types2; INSERT INTO parquet_types3 values ( 79, 81, 82, 83, 84, 85, 86, 87, 88, 89, 'str01', 'fstr1', '2003-03-04', '2004-05-06', toDateTime64('2004-05-06 07:08:09.012', 9));" ${CLICKHOUSE_CLIENT} --query="SELECT * FROM parquet_types3 ORDER BY int8 FORMAT Parquet" | ${CLICKHOUSE_CLIENT} --query="INSERT INTO parquet_types2 FORMAT Parquet" @@ -88,7 +88,7 @@ ${CLICKHOUSE_CLIENT} --query="INSERT INTO parquet_types4 values ( 80, ${CLICKHOUSE_CLIENT} --query="SELECT * FROM parquet_types4 ORDER BY int8 FORMAT Parquet" | ${CLICKHOUSE_CLIENT} --query="INSERT INTO parquet_types2 FORMAT Parquet" ${CLICKHOUSE_CLIENT} --query="SELECT * FROM parquet_types1 ORDER BY int8 FORMAT Parquet" | ${CLICKHOUSE_CLIENT} --query="INSERT INTO parquet_types4 FORMAT 
Parquet" -${CLICKHOUSE_CLIENT} -n --query=" +${CLICKHOUSE_CLIENT} --query=" SELECT 'dest:'; SELECT * FROM parquet_types2 ORDER BY int8; SELECT 'min:'; @@ -106,7 +106,7 @@ ${CLICKHOUSE_CLIENT} --query="SELECT * FROM parquet_types5 ORDER BY int8 FORMAT ${CLICKHOUSE_CLIENT} --query="SELECT * FROM parquet_types5 ORDER BY int8 FORMAT Parquet" | ${CLICKHOUSE_CLIENT} --query="INSERT INTO parquet_types6 FORMAT Parquet" ${CLICKHOUSE_CLIENT} --query="SELECT * FROM parquet_types1 ORDER BY int8 FORMAT Parquet" | ${CLICKHOUSE_CLIENT} --query="INSERT INTO parquet_types6 FORMAT Parquet" echo dest from null: -${CLICKHOUSE_CLIENT} -n --query=" +${CLICKHOUSE_CLIENT} --query=" SELECT * FROM parquet_types6 ORDER BY int8; DROP TABLE parquet_types5; @@ -126,7 +126,7 @@ ${CLICKHOUSE_CLIENT} -n --query=" INSERT INTO parquet_arrays VALUES (2, [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], []);" ${CLICKHOUSE_CLIENT} --query="SELECT * FROM parquet_arrays FORMAT Parquet" | ${CLICKHOUSE_CLIENT} --query="INSERT INTO parquet_arrays FORMAT Parquet" -${CLICKHOUSE_CLIENT} -n --query=" +${CLICKHOUSE_CLIENT} --query=" SELECT * FROM parquet_arrays ORDER BY id; DROP TABLE parquet_arrays; @@ -135,7 +135,7 @@ ${CLICKHOUSE_CLIENT} -n --query=" CREATE TABLE parquet_nullable_arrays (id UInt32, a1 Array(Nullable(UInt32)), a2 Array(Nullable(String)), a3 Array(Nullable(Decimal(4, 2)))) engine=Memory(); INSERT INTO parquet_nullable_arrays VALUES (1, [1, Null, 2], [Null, 'Some string', Null], [0.001, Null, 42.42]), (2, [Null], [Null], [Null]), (3, [], [], []);" ${CLICKHOUSE_CLIENT} --query="SELECT * FROM parquet_nullable_arrays FORMAT Parquet" | ${CLICKHOUSE_CLIENT} --query="INSERT INTO parquet_nullable_arrays FORMAT Parquet" -${CLICKHOUSE_CLIENT} -n --query=" +${CLICKHOUSE_CLIENT} --query=" SELECT * FROM parquet_nullable_arrays ORDER BY id; DROP TABLE parquet_nullable_arrays; @@ -143,7 +143,7 @@ ${CLICKHOUSE_CLIENT} -n --query=" CREATE TABLE parquet_nested_arrays (a1 Array(Array(Array(UInt32))), a2 Array(Array(Array(String))), a3 Array(Array(Nullable(UInt32))), a4 Array(Array(Nullable(String)))) engine=Memory(); INSERT INTO parquet_nested_arrays VALUES ([[[1,2,3], [1,2,3]], [[1,2,3]], [[], [1,2,3]]], [[['Some string', 'Some string'], []], [['Some string']], [[]]], [[Null, 1, 2], [Null], [1, 2], []], [['Some string', Null, 'Some string'], [Null], []]);" ${CLICKHOUSE_CLIENT} --query="SELECT * FROM parquet_nested_arrays FORMAT Parquet" | ${CLICKHOUSE_CLIENT} --query="INSERT INTO parquet_nested_arrays FORMAT Parquet" -${CLICKHOUSE_CLIENT} -n --query=" +${CLICKHOUSE_CLIENT} --query=" SELECT * FROM parquet_nested_arrays; DROP TABLE parquet_nested_arrays; @@ -151,6 +151,6 @@ ${CLICKHOUSE_CLIENT} -n --query=" CREATE TABLE parquet_decimal (d1 Decimal32(4), d2 Decimal64(8), d3 Decimal128(16), d4 Decimal256(32)) ENGINE = Memory; INSERT INTO TABLE parquet_decimal VALUES (0.123, 0.123123123, 0.123123123123, 0.123123123123123123);" ${CLICKHOUSE_CLIENT} --query="SELECT * FROM parquet_decimal FORMAT Arrow" | ${CLICKHOUSE_CLIENT} --query="INSERT INTO parquet_decimal FORMAT Arrow" -${CLICKHOUSE_CLIENT} -n --query=" +${CLICKHOUSE_CLIENT} --query=" SELECT * FROM parquet_decimal; DROP TABLE parquet_decimal;" diff --git a/tests/queries/0_stateless/00900_long_parquet_decimal.sh b/tests/queries/0_stateless/00900_long_parquet_decimal.sh index a819dcbcdc3..14e8fdcc038 100755 --- a/tests/queries/0_stateless/00900_long_parquet_decimal.sh +++ b/tests/queries/0_stateless/00900_long_parquet_decimal.sh @@ -7,7 +7,7 @@ CUR_DIR=$(cd "$(dirname 
"${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . "$CUR_DIR"/../shell_config.sh -${CLICKHOUSE_CLIENT} -n --query=" +${CLICKHOUSE_CLIENT} --query=" DROP TABLE IF EXISTS decimal; DROP TABLE IF EXISTS decimal2; @@ -26,7 +26,7 @@ ${CLICKHOUSE_CLIENT} --query="SELECT * FROM decimal ORDER BY a, b, c, d, e, f, g ${CLICKHOUSE_CLIENT} --query="SELECT * FROM decimal2 ORDER BY a, b, c, d, e, f, g, h, i, j;" > "${CLICKHOUSE_TMP}"/parquet_decimal0_2.dump echo diff0: diff "${CLICKHOUSE_TMP}"/parquet_decimal0_1.dump "${CLICKHOUSE_TMP}"/parquet_decimal0_2.dump -${CLICKHOUSE_CLIENT} -n --query=" +${CLICKHOUSE_CLIENT} --query=" DROP TABLE IF EXISTS decimal; DROP TABLE IF EXISTS decimal2; @@ -61,7 +61,7 @@ ${CLICKHOUSE_CLIENT} --query="SELECT * FROM decimal ORDER BY a, b, c, d, e, f, g ${CLICKHOUSE_CLIENT} --query="SELECT * FROM decimal2 ORDER BY a, b, c, d, e, f, g, h, i, j;" > "${CLICKHOUSE_TMP}"/parquet_decimal1_2.dump echo diff1: diff "${CLICKHOUSE_TMP}"/parquet_decimal1_1.dump "${CLICKHOUSE_TMP}"/parquet_decimal1_2.dump -${CLICKHOUSE_CLIENT} -n --query=" +${CLICKHOUSE_CLIENT} --query=" DROP TABLE IF EXISTS decimal; DROP TABLE IF EXISTS decimal2; @@ -75,7 +75,7 @@ ${CLICKHOUSE_CLIENT} --query="SELECT * FROM decimal ORDER BY a, b, c, d, e, f, g ${CLICKHOUSE_CLIENT} --query="SELECT * FROM decimal2 ORDER BY a, b, c, d, e, f, g, h, i, j;" > "${CLICKHOUSE_TMP}"/parquet_decimal2_2.dump echo diff2: diff "${CLICKHOUSE_TMP}"/parquet_decimal2_1.dump "${CLICKHOUSE_TMP}"/parquet_decimal2_2.dump -${CLICKHOUSE_CLIENT} -n --query=" +${CLICKHOUSE_CLIENT} --query=" DROP TABLE IF EXISTS decimal; DROP TABLE IF EXISTS decimal2; @@ -86,7 +86,7 @@ ${CLICKHOUSE_CLIENT} -n --query=" ${CLICKHOUSE_CLIENT} --query="SELECT * FROM decimal ORDER BY a, b, c, d FORMAT Parquet;" > "${CLICKHOUSE_TMP}"/parquet_decimal3_1.parquet ${CLICKHOUSE_CLIENT} --query="SELECT * FROM decimal ORDER BY a, b, c, d FORMAT Parquet;" | ${CLICKHOUSE_CLIENT} --query="INSERT INTO decimal2 FORMAT Parquet" 2> /dev/null echo nothing: -${CLICKHOUSE_CLIENT} -n --query=" +${CLICKHOUSE_CLIENT} --query=" SELECT * FROM decimal2 ORDER BY a, b, c, d; TRUNCATE TABLE decimal2; @@ -94,7 +94,7 @@ ${CLICKHOUSE_CLIENT} -n --query=" ${CLICKHOUSE_CLIENT} --query="SELECT * FROM decimal ORDER BY a, b, c, d FORMAT Parquet;" > "${CLICKHOUSE_TMP}"/parquet_decimal3_2.parquet ${CLICKHOUSE_CLIENT} --query="SELECT * FROM decimal ORDER BY a, b, c, d FORMAT Parquet;" | ${CLICKHOUSE_CLIENT} --query="INSERT INTO decimal2 FORMAT Parquet" echo nulls: -${CLICKHOUSE_CLIENT} -n --query=" +${CLICKHOUSE_CLIENT} --query=" SELECT * FROM decimal2 ORDER BY a, b, c, d; TRUNCATE TABLE decimal2; @@ -104,7 +104,7 @@ ${CLICKHOUSE_CLIENT} -n --query=" ${CLICKHOUSE_CLIENT} --query="SELECT * FROM decimal ORDER BY a, b, c, d FORMAT Parquet;" > "${CLICKHOUSE_TMP}"/parquet_decimal3_3.parquet ${CLICKHOUSE_CLIENT} --query="SELECT * FROM decimal ORDER BY a, b, c, d FORMAT Parquet;" | ${CLICKHOUSE_CLIENT} --query="INSERT INTO decimal2 FORMAT Parquet" -${CLICKHOUSE_CLIENT} -n --query=" +${CLICKHOUSE_CLIENT} --query=" SELECT 'full orig:'; SELECT * FROM decimal ORDER BY a, b, c, d; SELECT 'full inserted:'; @@ -115,6 +115,6 @@ ${CLICKHOUSE_CLIENT} --query="SELECT * FROM decimal2 ORDER BY a, b, c, d;" > "${ echo diff3: diff "${CLICKHOUSE_TMP}"/parquet_decimal3_1.dump "${CLICKHOUSE_TMP}"/parquet_decimal3_2.dump -${CLICKHOUSE_CLIENT} -n --query=" +${CLICKHOUSE_CLIENT} --query=" DROP TABLE IF EXISTS decimal; DROP TABLE IF EXISTS decimal2;" diff --git 
a/tests/queries/0_stateless/00921_datetime64_compatibility_long.sh b/tests/queries/0_stateless/00921_datetime64_compatibility_long.sh index d310a2c3612..152d5a847b7 100755 --- a/tests/queries/0_stateless/00921_datetime64_compatibility_long.sh +++ b/tests/queries/0_stateless/00921_datetime64_compatibility_long.sh @@ -13,5 +13,5 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # ${CURDIR}/00921_datetime64_compatibility.python python3 "${CURDIR}"/00921_datetime64_compatibility_long.python \ - | ${CLICKHOUSE_CLIENT} --ignore-error -nm --calculate_text_stack_trace 0 --log-level 'error' 2>&1 \ + | ${CLICKHOUSE_CLIENT} --ignore-error -m --calculate_text_stack_trace 0 --log-level 'error' 2>&1 \ | grep -v -e 'Received exception .*$' -e '^(query: ' | sed 's/^\(Code: [0-9]\+\).*$/\1/g' diff --git a/tests/queries/0_stateless/00975_indices_mutation_replicated_zookeeper_long.sh b/tests/queries/0_stateless/00975_indices_mutation_replicated_zookeeper_long.sh index 686dd7f6df0..1d35daf9f2e 100755 --- a/tests/queries/0_stateless/00975_indices_mutation_replicated_zookeeper_long.sh +++ b/tests/queries/0_stateless/00975_indices_mutation_replicated_zookeeper_long.sh @@ -11,7 +11,7 @@ $CLICKHOUSE_CLIENT --query="DROP TABLE IF EXISTS indices_mutaions1;" $CLICKHOUSE_CLIENT --query="DROP TABLE IF EXISTS indices_mutaions2;" -$CLICKHOUSE_CLIENT -n --query=" +$CLICKHOUSE_CLIENT --query=" CREATE TABLE indices_mutaions1 ( u64 UInt64, diff --git a/tests/queries/0_stateless/00991_system_parts_race_condition_long.sh b/tests/queries/0_stateless/00991_system_parts_race_condition_long.sh index 8243c6bde62..df330b82c80 100755 --- a/tests/queries/0_stateless/00991_system_parts_race_condition_long.sh +++ b/tests/queries/0_stateless/00991_system_parts_race_condition_long.sh @@ -22,7 +22,7 @@ function thread1() function thread2() { - while true; do $CLICKHOUSE_CLIENT -n --query "ALTER TABLE alter_table ADD COLUMN h String '0'; ALTER TABLE alter_table MODIFY COLUMN h UInt64; ALTER TABLE alter_table DROP COLUMN h;"; done + while true; do $CLICKHOUSE_CLIENT --query "ALTER TABLE alter_table ADD COLUMN h String '0'; ALTER TABLE alter_table MODIFY COLUMN h UInt64; ALTER TABLE alter_table DROP COLUMN h;"; done } function thread3() diff --git a/tests/queries/0_stateless/00992_system_parts_race_condition_zookeeper_long.sh b/tests/queries/0_stateless/00992_system_parts_race_condition_zookeeper_long.sh index 02a739ece4a..d5c0248e2b3 100755 --- a/tests/queries/0_stateless/00992_system_parts_race_condition_zookeeper_long.sh +++ b/tests/queries/0_stateless/00992_system_parts_race_condition_zookeeper_long.sh @@ -9,7 +9,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) set -e -$CLICKHOUSE_CLIENT -n -q " +$CLICKHOUSE_CLIENT -q " DROP TABLE IF EXISTS alter_table0; DROP TABLE IF EXISTS alter_table1; @@ -31,7 +31,7 @@ function thread1() function thread2() { - while true; do $CLICKHOUSE_CLIENT -n --query "ALTER TABLE alter_table0 ADD COLUMN h String DEFAULT '0'; ALTER TABLE alter_table0 MODIFY COLUMN h UInt64; ALTER TABLE alter_table0 DROP COLUMN h;"; done + while true; do $CLICKHOUSE_CLIENT --query "ALTER TABLE alter_table0 ADD COLUMN h String DEFAULT '0'; ALTER TABLE alter_table0 MODIFY COLUMN h UInt64; ALTER TABLE alter_table0 DROP COLUMN h;"; done } function thread3() @@ -87,6 +87,6 @@ check_replication_consistency "alter_table" "count(), sum(a), sum(b), round(sum( $CLICKHOUSE_CLIENT -q "SELECT table, lost_part_count FROM system.replicas WHERE database=currentDatabase() AND lost_part_count!=0"; -$CLICKHOUSE_CLIENT -n -q "DROP TABLE 
alter_table0;" 2> >(grep -F -v 'is already started to be removing by another replica right now') & -$CLICKHOUSE_CLIENT -n -q "DROP TABLE alter_table1;" 2> >(grep -F -v 'is already started to be removing by another replica right now') & +$CLICKHOUSE_CLIENT -q "DROP TABLE alter_table0;" 2> >(grep -F -v 'is already started to be removing by another replica right now') & +$CLICKHOUSE_CLIENT -q "DROP TABLE alter_table1;" 2> >(grep -F -v 'is already started to be removing by another replica right now') & wait diff --git a/tests/queries/0_stateless/01014_lazy_database_basic.sh b/tests/queries/0_stateless/01014_lazy_database_basic.sh index ea7603b2519..55d18a7c527 100755 --- a/tests/queries/0_stateless/01014_lazy_database_basic.sh +++ b/tests/queries/0_stateless/01014_lazy_database_basic.sh @@ -5,9 +5,9 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh -${CLICKHOUSE_CLIENT} -n -q "DROP DATABASE IF EXISTS testlazy" +${CLICKHOUSE_CLIENT} -q "DROP DATABASE IF EXISTS testlazy" -${CLICKHOUSE_CLIENT} -n -q " +${CLICKHOUSE_CLIENT} -q " CREATE DATABASE testlazy ENGINE = Lazy(1); CREATE TABLE testlazy.log (a UInt64, b UInt64) ENGINE = Log; CREATE TABLE testlazy.slog (a UInt64, b UInt64) ENGINE = StripeLog; @@ -30,7 +30,7 @@ ${CLICKHOUSE_CLIENT} -q " sleep 1.5 -${CLICKHOUSE_CLIENT} -n -q " +${CLICKHOUSE_CLIENT} -q " SELECT * FROM testlazy.log LIMIT 0; -- drop testlazy.log from cache RENAME TABLE testlazy.log TO testlazy.log2; SELECT database, name FROM system.tables WHERE database = 'testlazy'; @@ -44,7 +44,7 @@ ${CLICKHOUSE_CLIENT} -q " sleep 1.5 -${CLICKHOUSE_CLIENT} -n -q " +${CLICKHOUSE_CLIENT} -q " INSERT INTO testlazy.log2 VALUES (1, 1); INSERT INTO testlazy.slog VALUES (2, 2); INSERT INTO testlazy.tlog VALUES (3, 3); @@ -55,14 +55,14 @@ ${CLICKHOUSE_CLIENT} -n -q " sleep 1.5 -${CLICKHOUSE_CLIENT} -n -q " +${CLICKHOUSE_CLIENT} -q " SELECT * FROM testlazy.log2 LIMIT 0; -- drop testlazy.log2 from cache DROP TABLE testlazy.log2; " sleep 1.5 -${CLICKHOUSE_CLIENT} -n -q " +${CLICKHOUSE_CLIENT} -q " SELECT * FROM testlazy.slog; SELECT * FROM testlazy.tlog; " diff --git a/tests/queries/0_stateless/01014_lazy_database_concurrent_recreate_reattach_and_show_tables.sh b/tests/queries/0_stateless/01014_lazy_database_concurrent_recreate_reattach_and_show_tables.sh index e4b3a31b13f..ff2c0b8821e 100755 --- a/tests/queries/0_stateless/01014_lazy_database_concurrent_recreate_reattach_and_show_tables.sh +++ b/tests/queries/0_stateless/01014_lazy_database_concurrent_recreate_reattach_and_show_tables.sh @@ -83,7 +83,7 @@ export -f recreate_lazy_func4; export -f show_tables_func; -${CLICKHOUSE_CLIENT} -n -q " +${CLICKHOUSE_CLIENT} -q " DROP DATABASE IF EXISTS $CURR_DATABASE; CREATE DATABASE $CURR_DATABASE ENGINE = Lazy(1); " diff --git a/tests/queries/0_stateless/01018_ddl_dictionaries_concurrent_requrests.sh b/tests/queries/0_stateless/01018_ddl_dictionaries_concurrent_requrests.sh index 872b0a7c1a1..82221eb06e9 100755 --- a/tests/queries/0_stateless/01018_ddl_dictionaries_concurrent_requrests.sh +++ b/tests/queries/0_stateless/01018_ddl_dictionaries_concurrent_requrests.sh @@ -7,7 +7,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) set -e -$CLICKHOUSE_CLIENT -n -q " +$CLICKHOUSE_CLIENT -q " DROP DATABASE IF EXISTS database_for_dict; DROP TABLE IF EXISTS table_for_dict1; DROP TABLE IF EXISTS table_for_dict2; @@ -44,7 +44,7 @@ function thread3() function thread4() { - while true; do $CLICKHOUSE_CLIENT -n -q " + while true; do 
$CLICKHOUSE_CLIENT -q " SELECT * FROM database_for_dict.dict1 FORMAT Null; SELECT * FROM database_for_dict.dict2 FORMAT Null; " ||: ; done @@ -52,7 +52,7 @@ function thread4() function thread5() { - while true; do $CLICKHOUSE_CLIENT -n -q " + while true; do $CLICKHOUSE_CLIENT -q " SELECT dictGetString('database_for_dict.dict1', 'value_column', toUInt64(number)) from numbers(1000) FROM FORMAT Null; SELECT dictGetString('database_for_dict.dict2', 'value_column', toUInt64(number)) from numbers(1000) FROM FORMAT Null; " ||: ; done @@ -117,7 +117,7 @@ $CLICKHOUSE_CLIENT -q "SELECT 'Still alive'" $CLICKHOUSE_CLIENT -q "ATTACH DICTIONARY IF NOT EXISTS database_for_dict.dict1" $CLICKHOUSE_CLIENT -q "ATTACH DICTIONARY IF NOT EXISTS database_for_dict.dict2" -$CLICKHOUSE_CLIENT -n -q " +$CLICKHOUSE_CLIENT -q " DROP DATABASE database_for_dict; DROP TABLE table_for_dict1; DROP TABLE table_for_dict2; diff --git a/tests/queries/0_stateless/01019_alter_materialized_view_atomic.sh b/tests/queries/0_stateless/01019_alter_materialized_view_atomic.sh index 4bd21fcee02..eb12a76eb62 100755 --- a/tests/queries/0_stateless/01019_alter_materialized_view_atomic.sh +++ b/tests/queries/0_stateless/01019_alter_materialized_view_atomic.sh @@ -7,7 +7,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh -$CLICKHOUSE_CLIENT --multiquery <&1 \ | grep -c 'Code: 43. DB::Exception: .* DB::Exception:.* Types .* are non-conforming as arguments for aggregate function avgWeighted' diff --git a/tests/queries/0_stateless/01053_ssd_dictionary.sh b/tests/queries/0_stateless/01053_ssd_dictionary.sh index fdd40940ab5..7dd3fa4657a 100755 --- a/tests/queries/0_stateless/01053_ssd_dictionary.sh +++ b/tests/queries/0_stateless/01053_ssd_dictionary.sh @@ -6,7 +6,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . 
"$CURDIR"/../shell_config.sh -$CLICKHOUSE_CLIENT --allow_deprecated_database_ordinary=1 -n --query=" +$CLICKHOUSE_CLIENT --allow_deprecated_database_ordinary=1 --query=" DROP DATABASE IF EXISTS 01053_db; CREATE DATABASE 01053_db; diff --git a/tests/queries/0_stateless/01079_bad_alters_zookeeper_long.sh b/tests/queries/0_stateless/01079_bad_alters_zookeeper_long.sh index 39e65af039b..22f8e5269bd 100755 --- a/tests/queries/0_stateless/01079_bad_alters_zookeeper_long.sh +++ b/tests/queries/0_stateless/01079_bad_alters_zookeeper_long.sh @@ -7,7 +7,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) $CLICKHOUSE_CLIENT --query "DROP TABLE IF EXISTS table_for_bad_alters"; -$CLICKHOUSE_CLIENT -n --query "CREATE TABLE table_for_bad_alters ( +$CLICKHOUSE_CLIENT --query "CREATE TABLE table_for_bad_alters ( key UInt64, value1 UInt8, value2 String diff --git a/tests/queries/0_stateless/01111_create_drop_replicated_db_stress.sh b/tests/queries/0_stateless/01111_create_drop_replicated_db_stress.sh index 619b6e91d11..9101b9faa3d 100755 --- a/tests/queries/0_stateless/01111_create_drop_replicated_db_stress.sh +++ b/tests/queries/0_stateless/01111_create_drop_replicated_db_stress.sh @@ -30,7 +30,7 @@ function drop_db() database=$($CLICKHOUSE_CLIENT -q "select name from system.databases where name like '${CLICKHOUSE_DATABASE}%' order by rand() limit 1") if [[ "$database" == "$CLICKHOUSE_DATABASE" ]]; then continue; fi if [ -z "$database" ]; then continue; fi - $CLICKHOUSE_CLIENT -n --query \ + $CLICKHOUSE_CLIENT --query \ "drop database if exists $database" 2>&1| grep -Fa "Exception: " sleep 0.$RANDOM done diff --git a/tests/queries/0_stateless/01114_database_atomic.sh b/tests/queries/0_stateless/01114_database_atomic.sh index 5eebb558575..5fe85136d05 100755 --- a/tests/queries/0_stateless/01114_database_atomic.sh +++ b/tests/queries/0_stateless/01114_database_atomic.sh @@ -31,7 +31,7 @@ $CLICKHOUSE_CLIENT -q "SELECT name, splitByChar('/', metadata_path)[-2] as uuid_path, ((splitByChar('/', metadata_path)[-3] as metadata) = substr(uuid_path, 1, 3)) OR metadata='metadata' FROM system.databases WHERE name LIKE '${CLICKHOUSE_DATABASE}_%'" | sed "s/$uuid_db_1/00001114-1000-4000-8000-000000000001/g" | sed "s/$uuid_db_2/00001114-1000-4000-8000-000000000002/g" -$CLICKHOUSE_CLIENT -nm -q " +$CLICKHOUSE_CLIENT -m -q " CREATE TABLE ${DATABASE_1}.mt_tmp (n UInt64) ENGINE=MergeTree() ORDER BY tuple(); INSERT INTO ${DATABASE_1}.mt_tmp SELECT * FROM numbers(100); CREATE TABLE ${DATABASE_3}.mt (n UInt64) ENGINE=MergeTree() ORDER BY tuple() PARTITION BY (n % 5); @@ -65,7 +65,7 @@ while [[ $($CLICKHOUSE_CLIENT -q "SELECT count() FROM system.processes WHERE que sleep 0.1 done -$CLICKHOUSE_CLIENT -nm -q " +$CLICKHOUSE_CLIENT -m -q " RENAME TABLE ${DATABASE_1}.mt TO ${DATABASE_1}.mt_tmp; RENAME TABLE ${DATABASE_1}.mt_tmp TO ${DATABASE_2}.mt_tmp; EXCHANGE TABLES ${DATABASE_2}.mt AND ${DATABASE_2}.mt_tmp; @@ -79,7 +79,7 @@ uuid_mt1=$($CLICKHOUSE_CLIENT -q "SELECT uuid FROM system.tables WHERE database= $CLICKHOUSE_CLIENT --show_table_uuid_in_table_create_query_if_not_nil=1 -q "SHOW CREATE TABLE ${DATABASE_1}.mt" | sed "s/$uuid_mt1/00001114-0000-4000-8000-000000000001/g" $CLICKHOUSE_CLIENT --show_table_uuid_in_table_create_query_if_not_nil=1 -q "SHOW CREATE TABLE ${DATABASE_2}.mt" | sed "s/$explicit_uuid/00001114-0000-4000-8000-000000000002/g" -$CLICKHOUSE_CLIENT -nm -q " +$CLICKHOUSE_CLIENT -m -q " DROP TABLE ${DATABASE_1}.mt SETTINGS database_atomic_wait_for_drop_and_detach_synchronously=0; CREATE TABLE ${DATABASE_1}.mt (s 
String) ENGINE=Log(); INSERT INTO ${DATABASE_1}.mt SELECT 's' || toString(number) FROM numbers(5); diff --git a/tests/queries/0_stateless/01119_session_log.sh b/tests/queries/0_stateless/01119_session_log.sh index 2d17b545276..61bb7cf3ea8 100755 --- a/tests/queries/0_stateless/01119_session_log.sh +++ b/tests/queries/0_stateless/01119_session_log.sh @@ -14,7 +14,7 @@ and interface in ('HTTP', 'TCP', 'TCP_Interserver') and (user != 'default' or (a=1 and b=1)) -- FIXME: we should not write uninitialized address and port (but we do sometimes) and event_time >= now() - interval 5 minute" -$CLICKHOUSE_CLIENT -nm -q " +$CLICKHOUSE_CLIENT -m -q " select * from remote('127.0.0.2', system, one, 'default', ''); select * from remote('127.0.0.2', system, one, 'default', 'wrong password'); -- { serverError AUTHENTICATION_FAILED } select * from remote('127.0.0.2', system, one, 'nonexistsnt_user_1119', ''); -- { serverError AUTHENTICATION_FAILED } diff --git a/tests/queries/0_stateless/01213_alter_rename_column_zookeeper_long.sh b/tests/queries/0_stateless/01213_alter_rename_column_zookeeper_long.sh index a255c1db30e..8afb0c18462 100755 --- a/tests/queries/0_stateless/01213_alter_rename_column_zookeeper_long.sh +++ b/tests/queries/0_stateless/01213_alter_rename_column_zookeeper_long.sh @@ -7,7 +7,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) $CLICKHOUSE_CLIENT --query "DROP TABLE IF EXISTS table_for_rename_replicated" -$CLICKHOUSE_CLIENT -n --query " +$CLICKHOUSE_CLIENT --query " CREATE TABLE table_for_rename_replicated ( date Date, diff --git a/tests/queries/0_stateless/01238_http_memory_tracking.sh b/tests/queries/0_stateless/01238_http_memory_tracking.sh index ce1310cf302..f88c8fb47c6 100755 --- a/tests/queries/0_stateless/01238_http_memory_tracking.sh +++ b/tests/queries/0_stateless/01238_http_memory_tracking.sh @@ -14,7 +14,7 @@ ${CLICKHOUSE_CLIENT} --format Null -q "CREATE USER $MISTER_USER" # This is needed to keep at least one running query for user for the time of test. # (1k http queries takes ~1 second, let's run for 5x more to avoid flaps) -${CLICKHOUSE_CLIENT} --user ${MISTER_USER} --function_sleep_max_microseconds_per_block 5000000 --format Null -n <<<'SELECT sleepEachRow(1) FROM numbers(5)' & +${CLICKHOUSE_CLIENT} --user ${MISTER_USER} --function_sleep_max_microseconds_per_block 5000000 --format Null <<<'SELECT sleepEachRow(1) FROM numbers(5)' & # ignore "yes: standard output: Broken pipe" yes 'SELECT 1' 2>/dev/null | { diff --git a/tests/queries/0_stateless/01280_ssd_complex_key_dictionary.sh b/tests/queries/0_stateless/01280_ssd_complex_key_dictionary.sh index 60a65b9a253..c1ec812875c 100755 --- a/tests/queries/0_stateless/01280_ssd_complex_key_dictionary.sh +++ b/tests/queries/0_stateless/01280_ssd_complex_key_dictionary.sh @@ -5,7 +5,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . 
"$CURDIR"/../shell_config.sh -$CLICKHOUSE_CLIENT -n --query=" +$CLICKHOUSE_CLIENT --query=" DROP DATABASE IF EXISTS 01280_db; CREATE DATABASE 01280_db; DROP TABLE IF EXISTS 01280_db.table_for_dict; @@ -39,9 +39,9 @@ $CLICKHOUSE_CLIENT -n --query=" LIFETIME(MIN 1000 MAX 2000) LAYOUT(COMPLEX_KEY_SSD_CACHE(FILE_SIZE 8192 PATH '$USER_FILES_PATH/0d'));" -$CLICKHOUSE_CLIENT -nq "SELECT dictHas('01280_db.ssd_dict', 'a', tuple('1')); -- { serverError 43 }" +$CLICKHOUSE_CLIENT -q "SELECT dictHas('01280_db.ssd_dict', 'a', tuple('1')); -- { serverError 43 }" -$CLICKHOUSE_CLIENT -n --query=" +$CLICKHOUSE_CLIENT --query=" SELECT 'TEST_SMALL'; SELECT 'VALUE FROM RAM BUFFER'; SELECT dictGetUInt64('01280_db.ssd_dict', 'a', tuple('1', toInt32(3))); @@ -63,9 +63,9 @@ $CLICKHOUSE_CLIENT -n --query=" SELECT dictGetInt32('01280_db.ssd_dict', 'b', tuple('10', toInt32(-20))); SELECT dictGetString('01280_db.ssd_dict', 'c', tuple('10', toInt32(-20)));" -$CLICKHOUSE_CLIENT -nq "SELECT dictGetUInt64('01280_db.ssd_dict', 'a', tuple(toInt32(3))); -- { serverError 53 }" +$CLICKHOUSE_CLIENT -q "SELECT dictGetUInt64('01280_db.ssd_dict', 'a', tuple(toInt32(3))); -- { serverError 53 }" -$CLICKHOUSE_CLIENT -n --query="DROP DICTIONARY 01280_db.ssd_dict; +$CLICKHOUSE_CLIENT --query="DROP DICTIONARY 01280_db.ssd_dict; DROP TABLE IF EXISTS 01280_db.keys_table; CREATE TABLE 01280_db.keys_table ( @@ -122,4 +122,4 @@ $CLICKHOUSE_CLIENT -n --query="DROP DICTIONARY 01280_db.ssd_dict; DROP DICTIONARY IF EXISTS database_for_dict.ssd_dict; DROP TABLE IF EXISTS database_for_dict.keys_table;" -$CLICKHOUSE_CLIENT -n --query="DROP DATABASE IF EXISTS 01280_db;" +$CLICKHOUSE_CLIENT --query="DROP DATABASE IF EXISTS 01280_db;" diff --git a/tests/queries/0_stateless/01294_lazy_database_concurrent_recreate_reattach_and_show_tables_long.sh b/tests/queries/0_stateless/01294_lazy_database_concurrent_recreate_reattach_and_show_tables_long.sh index 21f46a34514..00619f13173 100755 --- a/tests/queries/0_stateless/01294_lazy_database_concurrent_recreate_reattach_and_show_tables_long.sh +++ b/tests/queries/0_stateless/01294_lazy_database_concurrent_recreate_reattach_and_show_tables_long.sh @@ -85,7 +85,7 @@ export -f recreate_lazy_func4; export -f test_func; -${CLICKHOUSE_CLIENT} -n -q " +${CLICKHOUSE_CLIENT} -q " DROP DATABASE IF EXISTS $CURR_DATABASE; CREATE DATABASE $CURR_DATABASE ENGINE = Lazy(1); " diff --git a/tests/queries/0_stateless/01305_replica_create_drop_zookeeper.sh b/tests/queries/0_stateless/01305_replica_create_drop_zookeeper.sh index 1d2d4516b9c..6ff6644f11e 100755 --- a/tests/queries/0_stateless/01305_replica_create_drop_zookeeper.sh +++ b/tests/queries/0_stateless/01305_replica_create_drop_zookeeper.sh @@ -10,7 +10,7 @@ set -e function thread() { while true; do - $CLICKHOUSE_CLIENT -n -q "DROP TABLE IF EXISTS test_table_$1 SYNC; + $CLICKHOUSE_CLIENT -q "DROP TABLE IF EXISTS test_table_$1 SYNC; CREATE TABLE test_table_$1 (a UInt8) ENGINE = ReplicatedMergeTree('/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/alter_table', 'r_$1') ORDER BY tuple();" 2>&1 | grep -vP '(^$)|(^Received exception from server)|(^\d+\. 
)|because the last replica of the table was dropped right now|is already started to be removing by another replica right now| were removed by another replica|Removing leftovers from table|Another replica was suddenly created|was created by another server at the same moment|was suddenly removed|some other replicas were created at the same time|^\(query: ' done diff --git a/tests/queries/0_stateless/01320_create_sync_race_condition_zookeeper.sh b/tests/queries/0_stateless/01320_create_sync_race_condition_zookeeper.sh index 1d5f5d54853..cc96a37a0ce 100755 --- a/tests/queries/0_stateless/01320_create_sync_race_condition_zookeeper.sh +++ b/tests/queries/0_stateless/01320_create_sync_race_condition_zookeeper.sh @@ -17,7 +17,7 @@ function thread1() { local TIMELIMIT=$((SECONDS+$1)) while [ $SECONDS -lt "$TIMELIMIT" ]; do - $CLICKHOUSE_CLIENT -n --query "CREATE TABLE test_01320.r (x UInt64) ENGINE = ReplicatedMergeTree('/test/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/table', 'r') ORDER BY x; DROP TABLE test_01320.r;" + $CLICKHOUSE_CLIENT --query "CREATE TABLE test_01320.r (x UInt64) ENGINE = ReplicatedMergeTree('/test/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/table', 'r') ORDER BY x; DROP TABLE test_01320.r;" done } diff --git a/tests/queries/0_stateless/01395_limit_more_cases.sh b/tests/queries/0_stateless/01395_limit_more_cases.sh index 9709bd74f26..6be8a91f0c7 100755 --- a/tests/queries/0_stateless/01395_limit_more_cases.sh +++ b/tests/queries/0_stateless/01395_limit_more_cases.sh @@ -20,4 +20,4 @@ for OFFSET in {0..15}; do FROM (SELECT * FROM numbers($SIZE) LIMIT $OFFSET, $LIMIT); " done -done | $CLICKHOUSE_CLIENT -n --max_block_size 5 +done | $CLICKHOUSE_CLIENT --max_block_size 5 diff --git a/tests/queries/0_stateless/01395_limit_more_cases_random.sh b/tests/queries/0_stateless/01395_limit_more_cases_random.sh index c2f6b060aab..bb942906e63 100755 --- a/tests/queries/0_stateless/01395_limit_more_cases_random.sh +++ b/tests/queries/0_stateless/01395_limit_more_cases_random.sh @@ -19,4 +19,4 @@ for _ in $(seq $ITERATIONS); do throwIf((c != 0 OR first != 0 OR last != 0) AND (c != last - first + 1)) FROM (SELECT * FROM numbers($SIZE) LIMIT $OFFSET, $LIMIT); " -done | $CLICKHOUSE_CLIENT -n --max_block_size $(($RANDOM % 20 + 1)) | uniq +done | $CLICKHOUSE_CLIENT --max_block_size $(($RANDOM % 20 + 1)) | uniq diff --git a/tests/queries/0_stateless/01412_cache_dictionary_race.sh b/tests/queries/0_stateless/01412_cache_dictionary_race.sh index 36295ca01ea..b0c73cf742f 100755 --- a/tests/queries/0_stateless/01412_cache_dictionary_race.sh +++ b/tests/queries/0_stateless/01412_cache_dictionary_race.sh @@ -10,7 +10,7 @@ $CLICKHOUSE_CLIENT --query "DROP DATABASE IF EXISTS ordinary_db" $CLICKHOUSE_CLIENT --query "CREATE DATABASE ordinary_db" -$CLICKHOUSE_CLIENT -n -q " +$CLICKHOUSE_CLIENT -q " CREATE DICTIONARY ordinary_db.dict1 ( @@ -35,7 +35,7 @@ function dict_get_thread() function drop_create_table_thread() { while true; do - $CLICKHOUSE_CLIENT -n --query "CREATE TABLE ordinary_db.table_for_dict_real ( + $CLICKHOUSE_CLIENT --query "CREATE TABLE ordinary_db.table_for_dict_real ( key_column UInt64, second_column UInt8, third_column String diff --git a/tests/queries/0_stateless/01454_storagememory_data_race_challenge.sh b/tests/queries/0_stateless/01454_storagememory_data_race_challenge.sh index ec9c5134059..950afea9086 100755 --- a/tests/queries/0_stateless/01454_storagememory_data_race_challenge.sh +++ b/tests/queries/0_stateless/01454_storagememory_data_race_challenge.sh @@ -23,7 +23,7 @@ function f { function g { local 
TIMELIMIT=$((SECONDS+$1)) for _ in $(seq 1 100); do - $CLICKHOUSE_CLIENT -n -q " + $CLICKHOUSE_CLIENT -q " INSERT INTO mem SELECT number FROM numbers(1000000); INSERT INTO mem SELECT number FROM numbers(1000000); INSERT INTO mem SELECT number FROM numbers(1000000); diff --git a/tests/queries/0_stateless/01509_check_many_parallel_quorum_inserts_long.sh b/tests/queries/0_stateless/01509_check_many_parallel_quorum_inserts_long.sh index 22cd6fb8127..0e6ab287146 100755 --- a/tests/queries/0_stateless/01509_check_many_parallel_quorum_inserts_long.sh +++ b/tests/queries/0_stateless/01509_check_many_parallel_quorum_inserts_long.sh @@ -11,7 +11,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) NUM_REPLICAS=6 for i in $(seq 1 $NUM_REPLICAS); do - $CLICKHOUSE_CLIENT -n -q " + $CLICKHOUSE_CLIENT -q " DROP TABLE IF EXISTS r$i SYNC; CREATE TABLE r$i (x UInt64) ENGINE = ReplicatedMergeTree('/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/parallel_quorum_many', 'r$i') ORDER BY x; " @@ -39,12 +39,12 @@ done wait for i in $(seq 1 $NUM_REPLICAS); do - $CLICKHOUSE_CLIENT -n -q " + $CLICKHOUSE_CLIENT -q " SYSTEM SYNC REPLICA r$i; SELECT count(), min(x), max(x), sum(x) FROM r$i; " done for i in $(seq 1 $NUM_REPLICAS); do - $CLICKHOUSE_CLIENT -n -q "DROP TABLE IF EXISTS r$i SYNC;" + $CLICKHOUSE_CLIENT -q "DROP TABLE IF EXISTS r$i SYNC;" done diff --git a/tests/queries/0_stateless/01509_check_parallel_quorum_inserts_long.sh b/tests/queries/0_stateless/01509_check_parallel_quorum_inserts_long.sh index 1589f17c752..3f4210f9bb0 100755 --- a/tests/queries/0_stateless/01509_check_parallel_quorum_inserts_long.sh +++ b/tests/queries/0_stateless/01509_check_parallel_quorum_inserts_long.sh @@ -12,13 +12,13 @@ NUM_REPLICAS=2 NUM_INSERTS=5 for i in $(seq 1 $NUM_REPLICAS); do - $CLICKHOUSE_CLIENT -n -q " + $CLICKHOUSE_CLIENT -q " DROP TABLE IF EXISTS r$i; CREATE TABLE r$i (x UInt64) ENGINE = ReplicatedMergeTree('/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/parallel_quorum', 'r$i') ORDER BY x; " done -$CLICKHOUSE_CLIENT -n -q "SYSTEM STOP REPLICATION QUEUES r2;" +$CLICKHOUSE_CLIENT -q "SYSTEM STOP REPLICATION QUEUES r2;" function thread { $CLICKHOUSE_CLIENT --insert_quorum 2 --insert_quorum_parallel 1 --query "INSERT INTO r1 SELECT $1" @@ -28,12 +28,12 @@ for i in $(seq 1 $NUM_INSERTS); do thread $i & done -$CLICKHOUSE_CLIENT -n -q "SYSTEM START REPLICATION QUEUES r2;" +$CLICKHOUSE_CLIENT -q "SYSTEM START REPLICATION QUEUES r2;" wait for i in $(seq 1 $NUM_REPLICAS); do - $CLICKHOUSE_CLIENT -n -q " + $CLICKHOUSE_CLIENT -q " SELECT count(), min(x), max(x), sum(x) FROM r$i; DROP TABLE IF EXISTS r$i; " diff --git a/tests/queries/0_stateless/01563_distributed_query_finish.sh b/tests/queries/0_stateless/01563_distributed_query_finish.sh index e3c5928f108..50d194b1f15 100755 --- a/tests/queries/0_stateless/01563_distributed_query_finish.sh +++ b/tests/queries/0_stateless/01563_distributed_query_finish.sh @@ -9,7 +9,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . 
"$CURDIR"/../shell_config.sh -$CLICKHOUSE_CLIENT -nm < 0 @@ -22,7 +22,7 @@ check_replicas_read_in_order() { # at some point we had a bug in this logic (see https://github.com/ClickHouse/ClickHouse/pull/45892#issue-1566140414) test1() { query_id="query_id_memory_bound_merging_$RANDOM$RANDOM" - $CLICKHOUSE_CLIENT --query_id="$query_id" -nq " + $CLICKHOUSE_CLIENT --query_id="$query_id" -q " SET cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost'; SELECT URL, EventDate, max(URL) @@ -39,7 +39,7 @@ test1() { # at some point we had a bug in this logic (see https://github.com/ClickHouse/ClickHouse/pull/45892#issue-1566140414) test2() { query_id="query_id_memory_bound_merging_$RANDOM$RANDOM" - $CLICKHOUSE_CLIENT --query_id="$query_id" -nq " + $CLICKHOUSE_CLIENT --query_id="$query_id" -q " SET cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost'; SELECT URL, EventDate, max(URL) @@ -53,7 +53,7 @@ test2() { } test3() { - $CLICKHOUSE_CLIENT -nq " + $CLICKHOUSE_CLIENT -q " SET cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost'; SET max_threads = 16, prefer_localhost_replica = 1, read_in_order_two_level_merge_threshold = 1000, query_plan_aggregation_in_order = 1, distributed_aggregation_memory_efficient = 1; diff --git a/tests/queries/1_stateful/00180_no_seek_avoiding_when_reading_from_cache.sh b/tests/queries/1_stateful/00180_no_seek_avoiding_when_reading_from_cache.sh index f9fea2c1dad..bf44f2d7ce7 100755 --- a/tests/queries/1_stateful/00180_no_seek_avoiding_when_reading_from_cache.sh +++ b/tests/queries/1_stateful/00180_no_seek_avoiding_when_reading_from_cache.sh @@ -23,7 +23,7 @@ $CLICKHOUSE_CLIENT -q "SELECT * FROM hits_s3_sampled WHERE URL LIKE '%google%' O query_id=02906_read_from_cache_$RANDOM $CLICKHOUSE_CLIENT --query_id ${query_id} -q "SELECT * FROM hits_s3_sampled WHERE URL LIKE '%google%' ORDER BY EventTime LIMIT 10 FORMAT Null SETTINGS filesystem_cache_reserve_space_wait_lock_timeout_milliseconds=2000" -$CLICKHOUSE_CLIENT -nq " +$CLICKHOUSE_CLIENT -q " SYSTEM FLUSH LOGS; -- AsynchronousReaderIgnoredBytes = 0: no seek-avoiding happened From f1c43472d80bb1f04f0d0dfac87d985a4f33bfd5 Mon Sep 17 00:00:00 2001 From: taiyang-li <654010905@qq.com> Date: Mon, 9 Sep 2024 11:35:54 +0800 Subject: [PATCH 62/70] change as request --- src/Functions/array/array.cpp | 44 ++++++++++++++++++----------------- src/Functions/map.cpp | 4 ++-- 2 files changed, 25 insertions(+), 23 deletions(-) diff --git a/src/Functions/array/array.cpp b/src/Functions/array/array.cpp index f193acafe06..57d24b185e4 100644 --- a/src/Functions/array/array.cpp +++ b/src/Functions/array/array.cpp @@ -100,14 +100,14 @@ public: if (num_elements == 1) out_data.insertRangeFrom(*column_ptrs[0], 0, input_rows_count); else - executeAny(column_ptrs, out_data, input_rows_count); + execute(column_ptrs, out_data, input_rows_count); return out; } + private: - bool executeAny(const ColumnRawPtrs & columns, IColumn & out_data, size_t input_rows_count) const + bool execute(const ColumnRawPtrs & columns, IColumn & out_data, size_t input_rows_count) const { - return false // NOLINT - || executeNumber(columns, out_data, input_rows_count) || executeNumber(columns, out_data, input_rows_count) + return executeNumber(columns, out_data, input_rows_count) || executeNumber(columns, out_data, input_rows_count) || executeNumber(columns, out_data, input_rows_count) || executeNumber(columns, out_data, input_rows_count) || executeNumber(columns, out_data, input_rows_count) || 
executeNumber(columns, out_data, input_rows_count) || executeNumber(columns, out_data, input_rows_count) || executeNumber(columns, out_data, input_rows_count) @@ -123,14 +123,7 @@ private: || executeFixedString(columns, out_data, input_rows_count) || executeGeneric(columns, out_data, input_rows_count); } - bool executeGeneric(const ColumnRawPtrs & columns, IColumn & out_data, size_t input_rows_count) const - { - for (size_t i = 0; i < input_rows_count; ++i) - for (const auto * column : columns) - out_data.insertFrom(*column, i); - return true; - } - + template bool executeNumber(const ColumnRawPtrs & columns, IColumn & out_data, size_t input_rows_count) const { @@ -178,18 +171,18 @@ private: out_chars.resize_exact(total_bytes); out_offsets.resize_exact(input_rows_count * columns.size()); - size_t curr_out_offset = 0; + size_t cur_out_offset = 0; for (size_t row_i = 0; row_i < input_rows_count; ++row_i) { const size_t base = row_i * columns.size(); for (size_t col_i = 0; col_i < columns.size(); ++col_i) { StringRef ref = concrete_columns[col_i]->getDataAt(row_i); - memcpySmallAllowReadWriteOverflow15(&out_chars[curr_out_offset], ref.data, ref.size); - out_chars[curr_out_offset + ref.size] = 0; + memcpySmallAllowReadWriteOverflow15(&out_chars[cur_out_offset], ref.data, ref.size); + out_chars[cur_out_offset + ref.size] = 0; - curr_out_offset += ref.size + 1; - out_offsets[base + col_i] = curr_out_offset; + cur_out_offset += ref.size + 1; + out_offsets[base + col_i] = cur_out_offset; } } return true; @@ -211,7 +204,7 @@ private: auto & out_chars = concrete_out_data.getChars(); const size_t n = concrete_out_data.getN(); - size_t total_bytes = concrete_out_data.getN() * columns.size() * input_rows_count; + size_t total_bytes = n * columns.size() * input_rows_count; out_chars.resize_exact(total_bytes); size_t curr_out_offset = 0; @@ -244,8 +237,8 @@ private: ColumnNullable & concrete_out_data = assert_cast(out_data); auto & out_null_map = concrete_out_data.getNullMapColumn(); auto & out_nested_column = concrete_out_data.getNestedColumn(); - executeAny(null_maps, out_null_map, input_rows_count); - executeAny(nested_columns, out_nested_column, input_rows_count); + execute(null_maps, out_null_map, input_rows_count); + execute(nested_columns, out_nested_column, input_rows_count); return true; } @@ -264,11 +257,20 @@ private: const ColumnTuple * concrete_column = assert_cast(columns[j]); elem_columns[j] = &concrete_column->getColumn(i); } - executeAny(elem_columns, concrete_out_data->getColumn(i), input_rows_count); + execute(elem_columns, concrete_out_data->getColumn(i), input_rows_count); } return true; } + bool executeGeneric(const ColumnRawPtrs & columns, IColumn & out_data, size_t input_rows_count) const + { + for (size_t i = 0; i < input_rows_count; ++i) + for (const auto * column : columns) + out_data.insertFrom(*column, i); + return true; + } + + String getName() const override { return name; diff --git a/src/Functions/map.cpp b/src/Functions/map.cpp index d9643087b11..14672cfd568 100644 --- a/src/Functions/map.cpp +++ b/src/Functions/map.cpp @@ -38,7 +38,7 @@ public: static constexpr auto name = "map"; explicit FunctionMap(ContextPtr context_) - : context(std::move(context_)) + : context(context_) , use_variant_as_common_type( context->getSettingsRef().allow_experimental_variant_type && context->getSettingsRef().use_variant_as_common_type) , function_array(FunctionFactory::instance().get("array", context)) @@ -48,7 +48,7 @@ public: static FunctionPtr create(ContextPtr context) { - return 
std::make_shared(std::move(context)); + return std::make_shared(context); } String getName() const override From 774ec08fafc851f6c71a922dd0dde4a3f1f0d41d Mon Sep 17 00:00:00 2001 From: taiyang-li <654010905@qq.com> Date: Mon, 9 Sep 2024 11:37:27 +0800 Subject: [PATCH 63/70] fix style --- src/Functions/array/array.cpp | 1 - 1 file changed, 1 deletion(-) diff --git a/src/Functions/array/array.cpp b/src/Functions/array/array.cpp index 57d24b185e4..d2aedd57f99 100644 --- a/src/Functions/array/array.cpp +++ b/src/Functions/array/array.cpp @@ -123,7 +123,6 @@ private: || executeFixedString(columns, out_data, input_rows_count) || executeGeneric(columns, out_data, input_rows_count); } - template bool executeNumber(const ColumnRawPtrs & columns, IColumn & out_data, size_t input_rows_count) const { From eb800d9e3918387c243c0480ac9df37190a91dc7 Mon Sep 17 00:00:00 2001 From: JackyWoo Date: Mon, 9 Sep 2024 11:45:39 +0800 Subject: [PATCH 64/70] Rename statistics of type count_min to countmin --- .../engines/table-engines/mergetree-family/mergetree.md | 8 ++++---- src/Storages/Statistics/Statistics.cpp | 2 +- src/Storages/Statistics/StatisticsCountMinSketch.cpp | 4 ++-- src/Storages/StatisticsDescription.cpp | 8 ++++---- 4 files changed, 11 insertions(+), 11 deletions(-) diff --git a/docs/en/engines/table-engines/mergetree-family/mergetree.md b/docs/en/engines/table-engines/mergetree-family/mergetree.md index 74f1295ef96..317d1cca61e 100644 --- a/docs/en/engines/table-engines/mergetree-family/mergetree.md +++ b/docs/en/engines/table-engines/mergetree-family/mergetree.md @@ -1003,16 +1003,16 @@ They can be used for prewhere optimization only if we enable `set allow_statisti [HyperLogLog](https://en.wikipedia.org/wiki/HyperLogLog) sketches which provide an estimation how many distinct values a column contains. -- `count_min` +- `CountMin` - [Count-min](https://en.wikipedia.org/wiki/Count%E2%80%93min_sketch) sketches which provide an approximate count of the frequency of each value in a column. + [CountMin](https://en.wikipedia.org/wiki/Count%E2%80%93min_sketch) sketches which provide an approximate count of the frequency of each value in a column. ### Supported Data Types {#supported-data-types} | | (U)Int* | Float* | Decimal(*) | Date* | Boolean | Enum* | (Fixed)String | |-----------|---------|--------|------------|-------|---------|-------|------------------| -| count_min | ✔ | ✔ | ✔ | ✔ | ✔ | ✔ | ✔ | +| CountMin | ✔ | ✔ | ✔ | ✔ | ✔ | ✔ | ✔ | | MinMax | ✔ | ✔ | ✔ | ✔ | ✔ | ✔ | ✗ | | TDigest | ✔ | ✔ | ✔ | ✔ | ✔ | ✔ | ✗ | | Uniq | ✔ | ✔ | ✔ | ✔ | ✔ | ✔ | ✔ | @@ -1022,7 +1022,7 @@ They can be used for prewhere optimization only if we enable `set allow_statisti | | Equality filters (==) | Range filters (>, >=, <, <=) | |-----------|-----------------------|------------------------------| -| count_min | ✔ | ✗ | +| CountMin | ✔ | ✗ | | MinMax | ✗ | ✔ | | TDigest | ✗ | ✔ | | Uniq | ✔ | ✗ | diff --git a/src/Storages/Statistics/Statistics.cpp b/src/Storages/Statistics/Statistics.cpp index 795963bd55d..0557530515f 100644 --- a/src/Storages/Statistics/Statistics.cpp +++ b/src/Storages/Statistics/Statistics.cpp @@ -248,7 +248,7 @@ ColumnStatisticsPtr MergeTreeStatisticsFactory::get(const ColumnDescription & co { auto it = creators.find(type); if (it == creators.end()) - throw Exception(ErrorCodes::INCORRECT_QUERY, "Unknown statistic type '{}'. Available types: 'count_min', 'minmax', 'tdigest' and 'uniq'", type); + throw Exception(ErrorCodes::INCORRECT_QUERY, "Unknown statistic type '{}'. 
Available types: 'countmin', 'minmax', 'tdigest' and 'uniq'", type); auto stat_ptr = (it->second)(desc, column_desc.type); column_stat->stats[type] = stat_ptr; } diff --git a/src/Storages/Statistics/StatisticsCountMinSketch.cpp b/src/Storages/Statistics/StatisticsCountMinSketch.cpp index 1a2459c230d..f477181ec2d 100644 --- a/src/Storages/Statistics/StatisticsCountMinSketch.cpp +++ b/src/Storages/Statistics/StatisticsCountMinSketch.cpp @@ -49,7 +49,7 @@ Float64 StatisticsCountMinSketch::estimateEqual(const Field & val) const if (isStringOrFixedString(data_type)) return sketch.get_estimate(val.safeGet()); - throw Exception(ErrorCodes::LOGICAL_ERROR, "Statistics 'count_min' does not support estimate data type of {}", data_type->getName()); + throw Exception(ErrorCodes::LOGICAL_ERROR, "Statistics 'countmin' does not support estimate data type of {}", data_type->getName()); } void StatisticsCountMinSketch::update(const ColumnPtr & column) @@ -88,7 +88,7 @@ void countMinSketchStatisticsValidator(const SingleStatisticsDescription & /*des DataTypePtr inner_data_type = removeNullable(data_type); inner_data_type = removeLowCardinalityAndNullable(inner_data_type); if (!inner_data_type->isValueRepresentedByNumber() && !isStringOrFixedString(inner_data_type)) - throw Exception(ErrorCodes::ILLEGAL_STATISTICS, "Statistics of type 'count_min' does not support type {}", data_type->getName()); + throw Exception(ErrorCodes::ILLEGAL_STATISTICS, "Statistics of type 'countmin' does not support type {}", data_type->getName()); } StatisticsPtr countMinSketchStatisticsCreator(const SingleStatisticsDescription & description, const DataTypePtr & data_type) diff --git a/src/Storages/StatisticsDescription.cpp b/src/Storages/StatisticsDescription.cpp index 4e0d901d0c7..ac7fa8898de 100644 --- a/src/Storages/StatisticsDescription.cpp +++ b/src/Storages/StatisticsDescription.cpp @@ -48,11 +48,11 @@ static StatisticsType stringToStatisticsType(String type) return StatisticsType::TDigest; if (type == "uniq") return StatisticsType::Uniq; - if (type == "count_min") + if (type == "countmin") return StatisticsType::CountMinSketch; if (type == "minmax") return StatisticsType::MinMax; - throw Exception(ErrorCodes::INCORRECT_QUERY, "Unknown statistics type: {}. Supported statistics types are 'count_min', 'minmax', 'tdigest' and 'uniq'.", type); + throw Exception(ErrorCodes::INCORRECT_QUERY, "Unknown statistics type: {}. Supported statistics types are 'countmin', 'minmax', 'tdigest' and 'uniq'.", type); } String SingleStatisticsDescription::getTypeName() const @@ -64,11 +64,11 @@ String SingleStatisticsDescription::getTypeName() const case StatisticsType::Uniq: return "Uniq"; case StatisticsType::CountMinSketch: - return "count_min"; + return "countmin"; case StatisticsType::MinMax: return "minmax"; default: - throw Exception(ErrorCodes::LOGICAL_ERROR, "Unknown statistics type: {}. Supported statistics types are 'count_min', 'minmax', 'tdigest' and 'uniq'.", type); + throw Exception(ErrorCodes::LOGICAL_ERROR, "Unknown statistics type: {}. 
Supported statistics types are 'countmin', 'minmax', 'tdigest' and 'uniq'.", type); } } From 077b8239f55163ae25f901924f5a93997d084256 Mon Sep 17 00:00:00 2001 From: JackyWoo Date: Mon, 9 Sep 2024 11:52:58 +0800 Subject: [PATCH 65/70] Rename in tests --- ...atistics_create_materialize_drop.reference | 4 +- ...864_statistics_create_materialize_drop.sql | 10 +-- .../0_stateless/02864_statistics_ddl.sql | 56 ++++++++-------- .../02864_statistics_predicates.sql | 64 +++++++++---------- 4 files changed, 67 insertions(+), 67 deletions(-) diff --git a/tests/queries/0_stateless/02864_statistics_create_materialize_drop.reference b/tests/queries/0_stateless/02864_statistics_create_materialize_drop.reference index 4ca2c5e5f9b..1843964377d 100644 --- a/tests/queries/0_stateless/02864_statistics_create_materialize_drop.reference +++ b/tests/queries/0_stateless/02864_statistics_create_materialize_drop.reference @@ -1,5 +1,5 @@ Test create statistics: -CREATE TABLE default.tab\n(\n `a` LowCardinality(Int64) STATISTICS(tdigest, uniq, count_min, minmax),\n `b` LowCardinality(Nullable(String)) STATISTICS(uniq, count_min),\n `c` LowCardinality(Nullable(Int64)) STATISTICS(tdigest, uniq, count_min, minmax),\n `d` DateTime STATISTICS(tdigest, uniq, count_min, minmax),\n `pk` String\n)\nENGINE = MergeTree\nORDER BY pk\nSETTINGS index_granularity = 8192 +CREATE TABLE default.tab\n(\n `a` LowCardinality(Int64) STATISTICS(tdigest, uniq, countmin, minmax),\n `b` LowCardinality(Nullable(String)) STATISTICS(uniq, countmin),\n `c` LowCardinality(Nullable(Int64)) STATISTICS(tdigest, uniq, countmin, minmax),\n `d` DateTime STATISTICS(tdigest, uniq, countmin, minmax),\n `pk` String\n)\nENGINE = MergeTree\nORDER BY pk\nSETTINGS index_granularity = 8192 Test materialize and drop statistics: -CREATE TABLE default.tab\n(\n `a` LowCardinality(Int64),\n `b` LowCardinality(Nullable(String)) STATISTICS(uniq, count_min),\n `c` LowCardinality(Nullable(Int64)),\n `d` DateTime,\n `pk` String\n)\nENGINE = MergeTree\nORDER BY pk\nSETTINGS index_granularity = 8192 +CREATE TABLE default.tab\n(\n `a` LowCardinality(Int64),\n `b` LowCardinality(Nullable(String)) STATISTICS(uniq, countmin),\n `c` LowCardinality(Nullable(Int64)),\n `d` DateTime,\n `pk` String\n)\nENGINE = MergeTree\nORDER BY pk\nSETTINGS index_granularity = 8192 CREATE TABLE default.tab\n(\n `a` LowCardinality(Int64),\n `b` LowCardinality(Nullable(String)),\n `c` LowCardinality(Nullable(Int64)),\n `d` DateTime,\n `pk` String\n)\nENGINE = MergeTree\nORDER BY pk\nSETTINGS index_granularity = 8192 diff --git a/tests/queries/0_stateless/02864_statistics_create_materialize_drop.sql b/tests/queries/0_stateless/02864_statistics_create_materialize_drop.sql index 90a57c99624..249e3c84a51 100644 --- a/tests/queries/0_stateless/02864_statistics_create_materialize_drop.sql +++ b/tests/queries/0_stateless/02864_statistics_create_materialize_drop.sql @@ -12,10 +12,10 @@ SELECT 'Test create statistics:'; CREATE TABLE tab ( - a LowCardinality(Int64) STATISTICS(count_min, minmax, tdigest, uniq), - b LowCardinality(Nullable(String)) STATISTICS(count_min, uniq), - c LowCardinality(Nullable(Int64)) STATISTICS(count_min, minmax, tdigest, uniq), - d DateTime STATISTICS(count_min, minmax, tdigest, uniq), + a LowCardinality(Int64) STATISTICS(countmin, minmax, tdigest, uniq), + b LowCardinality(Nullable(String)) STATISTICS(countmin, uniq), + c LowCardinality(Nullable(Int64)) STATISTICS(countmin, minmax, tdigest, uniq), + d DateTime STATISTICS(countmin, minmax, tdigest, uniq), pk String, ) Engine = 
MergeTree() ORDER BY pk; @@ -25,7 +25,7 @@ SHOW CREATE TABLE tab; SELECT 'Test materialize and drop statistics:'; ALTER TABLE tab DROP STATISTICS a, b, c, d; -ALTER TABLE tab ADD STATISTICS b TYPE count_min, uniq; +ALTER TABLE tab ADD STATISTICS b TYPE countmin, uniq; ALTER TABLE tab MATERIALIZE STATISTICS b; SHOW CREATE TABLE tab; diff --git a/tests/queries/0_stateless/02864_statistics_ddl.sql b/tests/queries/0_stateless/02864_statistics_ddl.sql index bcaaa9e7b61..5b2c5cebc1d 100644 --- a/tests/queries/0_stateless/02864_statistics_ddl.sql +++ b/tests/queries/0_stateless/02864_statistics_ddl.sql @@ -1,5 +1,5 @@ -- Tags: no-fasttest --- no-fasttest: 'count_min' sketches need a 3rd party library +-- no-fasttest: 'countmin' sketches need a 3rd party library -- Tests that DDL statements which create / drop / materialize statistics @@ -71,29 +71,29 @@ CREATE TABLE tab (col Map(UInt64, UInt64) STATISTICS(uniq)) Engine = MergeTree() CREATE TABLE tab (col UUID STATISTICS(uniq)) Engine = MergeTree() ORDER BY tuple(); -- { serverError ILLEGAL_STATISTICS } CREATE TABLE tab (col IPv6 STATISTICS(uniq)) Engine = MergeTree() ORDER BY tuple(); -- { serverError ILLEGAL_STATISTICS } --- count_min requires data_type.isValueRepresentedByInteger or data_type = (Fixed)String +-- countmin requires data_type.isValueRepresentedByInteger or data_type = (Fixed)String -- These types work: -CREATE TABLE tab (col UInt8 STATISTICS(count_min)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; -CREATE TABLE tab (col UInt256 STATISTICS(count_min)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; -CREATE TABLE tab (col Float32 STATISTICS(count_min)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; -CREATE TABLE tab (col Decimal32(3) STATISTICS(count_min)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; -CREATE TABLE tab (col Date STATISTICS(count_min)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; -CREATE TABLE tab (col Date32 STATISTICS(count_min)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; -CREATE TABLE tab (col DateTime STATISTICS(count_min)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; -CREATE TABLE tab (col DateTime64 STATISTICS(count_min)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; -CREATE TABLE tab (col Enum('hello', 'world') STATISTICS(count_min)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; -CREATE TABLE tab (col IPv4 STATISTICS(count_min)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; -CREATE TABLE tab (col Nullable(UInt8) STATISTICS(count_min)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; -CREATE TABLE tab (col LowCardinality(UInt8) STATISTICS(count_min)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; -CREATE TABLE tab (col LowCardinality(Nullable(UInt8)) STATISTICS(count_min)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; -CREATE TABLE tab (col String STATISTICS(count_min)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; -CREATE TABLE tab (col FixedString(1) STATISTICS(count_min)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; +CREATE TABLE tab (col UInt8 STATISTICS(countmin)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; +CREATE TABLE tab (col UInt256 STATISTICS(countmin)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; +CREATE TABLE tab (col Float32 STATISTICS(countmin)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; +CREATE TABLE tab (col Decimal32(3) STATISTICS(countmin)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; +CREATE TABLE tab (col Date 
STATISTICS(countmin)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; +CREATE TABLE tab (col Date32 STATISTICS(countmin)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; +CREATE TABLE tab (col DateTime STATISTICS(countmin)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; +CREATE TABLE tab (col DateTime64 STATISTICS(countmin)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; +CREATE TABLE tab (col Enum('hello', 'world') STATISTICS(countmin)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; +CREATE TABLE tab (col IPv4 STATISTICS(countmin)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; +CREATE TABLE tab (col Nullable(UInt8) STATISTICS(countmin)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; +CREATE TABLE tab (col LowCardinality(UInt8) STATISTICS(countmin)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; +CREATE TABLE tab (col LowCardinality(Nullable(UInt8)) STATISTICS(countmin)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; +CREATE TABLE tab (col String STATISTICS(countmin)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; +CREATE TABLE tab (col FixedString(1) STATISTICS(countmin)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; -- These types don't work: -CREATE TABLE tab (col Array(Float64) STATISTICS(count_min)) Engine = MergeTree() ORDER BY tuple(); -- { serverError ILLEGAL_STATISTICS } -CREATE TABLE tab (col Tuple(Float64, Float64) STATISTICS(count_min)) Engine = MergeTree() ORDER BY tuple(); -- { serverError ILLEGAL_STATISTICS } -CREATE TABLE tab (col Map(UInt64, UInt64) STATISTICS(count_min)) Engine = MergeTree() ORDER BY tuple(); -- { serverError ILLEGAL_STATISTICS } -CREATE TABLE tab (col UUID STATISTICS(count_min)) Engine = MergeTree() ORDER BY tuple(); -- { serverError ILLEGAL_STATISTICS } -CREATE TABLE tab (col IPv6 STATISTICS(count_min)) Engine = MergeTree() ORDER BY tuple(); -- { serverError ILLEGAL_STATISTICS } +CREATE TABLE tab (col Array(Float64) STATISTICS(countmin)) Engine = MergeTree() ORDER BY tuple(); -- { serverError ILLEGAL_STATISTICS } +CREATE TABLE tab (col Tuple(Float64, Float64) STATISTICS(countmin)) Engine = MergeTree() ORDER BY tuple(); -- { serverError ILLEGAL_STATISTICS } +CREATE TABLE tab (col Map(UInt64, UInt64) STATISTICS(countmin)) Engine = MergeTree() ORDER BY tuple(); -- { serverError ILLEGAL_STATISTICS } +CREATE TABLE tab (col UUID STATISTICS(countmin)) Engine = MergeTree() ORDER BY tuple(); -- { serverError ILLEGAL_STATISTICS } +CREATE TABLE tab (col IPv6 STATISTICS(countmin)) Engine = MergeTree() ORDER BY tuple(); -- { serverError ILLEGAL_STATISTICS } -- minmax requires data_type.isValueRepresentedByInteger -- These types work: @@ -187,17 +187,17 @@ ALTER TABLE tab MODIFY STATISTICS a TYPE tdigest; -- { serverError ILLEGAL_STATI -- uniq -- Works: ALTER TABLE tab ADD STATISTICS f64 TYPE uniq; ALTER TABLE tab DROP STATISTICS f64; -ALTER TABLE tab MODIFY STATISTICS f64 TYPE count_min; ALTER TABLE tab DROP STATISTICS f64; +ALTER TABLE tab MODIFY STATISTICS f64 TYPE countmin; ALTER TABLE tab DROP STATISTICS f64; -- Doesn't work: ALTER TABLE tab ADD STATISTICS a TYPE uniq; -- { serverError ILLEGAL_STATISTICS } ALTER TABLE tab MODIFY STATISTICS a TYPE uniq; -- { serverError ILLEGAL_STATISTICS } --- count_min +-- countmin -- Works: -ALTER TABLE tab ADD STATISTICS f64 TYPE count_min; ALTER TABLE tab DROP STATISTICS f64; -ALTER TABLE tab MODIFY STATISTICS f64 TYPE count_min; ALTER TABLE tab DROP STATISTICS f64; +ALTER TABLE tab ADD STATISTICS f64 TYPE countmin; ALTER TABLE tab DROP 
STATISTICS f64; +ALTER TABLE tab MODIFY STATISTICS f64 TYPE countmin; ALTER TABLE tab DROP STATISTICS f64; -- Doesn't work: -ALTER TABLE tab ADD STATISTICS a TYPE count_min; -- { serverError ILLEGAL_STATISTICS } -ALTER TABLE tab MODIFY STATISTICS a TYPE count_min; -- { serverError ILLEGAL_STATISTICS } +ALTER TABLE tab ADD STATISTICS a TYPE countmin; -- { serverError ILLEGAL_STATISTICS } +ALTER TABLE tab MODIFY STATISTICS a TYPE countmin; -- { serverError ILLEGAL_STATISTICS } -- minmax -- Works: ALTER TABLE tab ADD STATISTICS f64 TYPE minmax; ALTER TABLE tab DROP STATISTICS f64; diff --git a/tests/queries/0_stateless/02864_statistics_predicates.sql b/tests/queries/0_stateless/02864_statistics_predicates.sql index 473a7bc95ad..d7afba12c1d 100644 --- a/tests/queries/0_stateless/02864_statistics_predicates.sql +++ b/tests/queries/0_stateless/02864_statistics_predicates.sql @@ -1,5 +1,5 @@ -- Tags: no-fasttest --- no-fasttest: 'count_min' sketches need a 3rd party library +-- no-fasttest: 'countmin' sketches need a 3rd party library -- Tests the cross product of all predicates with all right-hand sides on all data types and all statistics types. @@ -13,27 +13,27 @@ CREATE TABLE tab u64 UInt64, u64_tdigest UInt64 STATISTICS(tdigest), u64_minmax UInt64 STATISTICS(minmax), - u64_count_min UInt64 STATISTICS(count_min), + u64_countmin UInt64 STATISTICS(countmin), u64_uniq UInt64 STATISTICS(uniq), f64 Float64, f64_tdigest Float64 STATISTICS(tdigest), f64_minmax Float64 STATISTICS(minmax), - f64_count_min Float64 STATISTICS(count_min), + f64_countmin Float64 STATISTICS(countmin), f64_uniq Float64 STATISTICS(uniq), dt DateTime, dt_tdigest DateTime STATISTICS(tdigest), dt_minmax DateTime STATISTICS(minmax), - dt_count_min DateTime STATISTICS(count_min), + dt_countmin DateTime STATISTICS(countmin), dt_uniq DateTime STATISTICS(uniq), b Bool, b_tdigest Bool STATISTICS(tdigest), b_minmax Bool STATISTICS(minmax), - b_count_min Bool STATISTICS(count_min), + b_countmin Bool STATISTICS(countmin), b_uniq Bool STATISTICS(uniq), s String, -- s_tdigest String STATISTICS(tdigest), -- not supported by tdigest -- s_minmax String STATISTICS(minmax), -- not supported by minmax - s_count_min String STATISTICS(count_min), + s_countmin String STATISTICS(countmin), s_uniq String STATISTICS(uniq) ) Engine = MergeTree() ORDER BY tuple() SETTINGS min_bytes_for_wide_part = 0; @@ -72,25 +72,25 @@ SELECT 'u64 and ='; SELECT count(*) FROM tab WHERE u64 = 7; SELECT count(*) FROM tab WHERE u64_tdigest = 7; SELECT count(*) FROM tab WHERE u64_minmax = 7; -SELECT count(*) FROM tab WHERE u64_count_min = 7; +SELECT count(*) FROM tab WHERE u64_countmin = 7; SELECT count(*) FROM tab WHERE u64_uniq = 7; SELECT count(*) FROM tab WHERE u64 = 7.7; SELECT count(*) FROM tab WHERE u64_tdigest = 7.7; SELECT count(*) FROM tab WHERE u64_minmax = 7.7; -SELECT count(*) FROM tab WHERE u64_count_min = 7.7; +SELECT count(*) FROM tab WHERE u64_countmin = 7.7; SELECT count(*) FROM tab WHERE u64_uniq = 7.7; SELECT count(*) FROM tab WHERE u64 = '7'; SELECT count(*) FROM tab WHERE u64_tdigest = '7'; SELECT count(*) FROM tab WHERE u64_minmax = '7'; -SELECT count(*) FROM tab WHERE u64_count_min = '7'; +SELECT count(*) FROM tab WHERE u64_countmin = '7'; SELECT count(*) FROM tab WHERE u64_uniq = '7'; SELECT count(*) FROM tab WHERE u64 = '7.7'; -- { serverError TYPE_MISMATCH } SELECT count(*) FROM tab WHERE u64_tdigest = '7.7'; -- { serverError TYPE_MISMATCH } SELECT count(*) FROM tab WHERE u64_minmax = '7.7'; -- { serverError TYPE_MISMATCH } -SELECT count(*) FROM 
tab WHERE u64_count_min = '7.7'; -- { serverError TYPE_MISMATCH } +SELECT count(*) FROM tab WHERE u64_countmin = '7.7'; -- { serverError TYPE_MISMATCH } SELECT count(*) FROM tab WHERE u64_uniq = '7.7'; -- { serverError TYPE_MISMATCH } SELECT 'u64 and <'; @@ -98,25 +98,25 @@ SELECT 'u64 and <'; SELECT count(*) FROM tab WHERE u64 < 7; SELECT count(*) FROM tab WHERE u64_tdigest < 7; SELECT count(*) FROM tab WHERE u64_minmax < 7; -SELECT count(*) FROM tab WHERE u64_count_min < 7; +SELECT count(*) FROM tab WHERE u64_countmin < 7; SELECT count(*) FROM tab WHERE u64_uniq < 7; SELECT count(*) FROM tab WHERE u64 < 7.7; SELECT count(*) FROM tab WHERE u64_tdigest < 7.7; SELECT count(*) FROM tab WHERE u64_minmax < 7.7; -SELECT count(*) FROM tab WHERE u64_count_min < 7.7; +SELECT count(*) FROM tab WHERE u64_countmin < 7.7; SELECT count(*) FROM tab WHERE u64_uniq < 7.7; SELECT count(*) FROM tab WHERE u64 < '7'; SELECT count(*) FROM tab WHERE u64_tdigest < '7'; SELECT count(*) FROM tab WHERE u64_minmax < '7'; -SELECT count(*) FROM tab WHERE u64_count_min < '7'; +SELECT count(*) FROM tab WHERE u64_countmin < '7'; SELECT count(*) FROM tab WHERE u64_uniq < '7'; SELECT count(*) FROM tab WHERE u64 < '7.7'; -- { serverError TYPE_MISMATCH } SELECT count(*) FROM tab WHERE u64_tdigest < '7.7'; -- { serverError TYPE_MISMATCH } SELECT count(*) FROM tab WHERE u64_minmax < '7.7'; -- { serverError TYPE_MISMATCH } -SELECT count(*) FROM tab WHERE u64_count_min < '7.7'; -- { serverError TYPE_MISMATCH } +SELECT count(*) FROM tab WHERE u64_countmin < '7.7'; -- { serverError TYPE_MISMATCH } SELECT count(*) FROM tab WHERE u64_uniq < '7.7'; -- { serverError TYPE_MISMATCH } -- f64 ---------------------------------------------------- @@ -126,25 +126,25 @@ SELECT 'f64 and ='; SELECT count(*) FROM tab WHERE f64 = 7; SELECT count(*) FROM tab WHERE f64_tdigest = 7; SELECT count(*) FROM tab WHERE f64_minmax = 7; -SELECT count(*) FROM tab WHERE f64_count_min = 7; +SELECT count(*) FROM tab WHERE f64_countmin = 7; SELECT count(*) FROM tab WHERE f64_uniq = 7; SELECT count(*) FROM tab WHERE f64 = 7.7; SELECT count(*) FROM tab WHERE f64_tdigest = 7.7; SELECT count(*) FROM tab WHERE f64_minmax = 7.7; -SELECT count(*) FROM tab WHERE f64_count_min = 7.7; +SELECT count(*) FROM tab WHERE f64_countmin = 7.7; SELECT count(*) FROM tab WHERE f64_uniq = 7.7; SELECT count(*) FROM tab WHERE f64 = '7'; SELECT count(*) FROM tab WHERE f64_tdigest = '7'; SELECT count(*) FROM tab WHERE f64_minmax = '7'; -SELECT count(*) FROM tab WHERE f64_count_min = '7'; +SELECT count(*) FROM tab WHERE f64_countmin = '7'; SELECT count(*) FROM tab WHERE f64_uniq = '7'; SELECT count(*) FROM tab WHERE f64 = '7.7'; SELECT count(*) FROM tab WHERE f64_tdigest = '7.7'; SELECT count(*) FROM tab WHERE f64_minmax = '7.7'; -SELECT count(*) FROM tab WHERE f64_count_min = '7.7'; +SELECT count(*) FROM tab WHERE f64_countmin = '7.7'; SELECT count(*) FROM tab WHERE f64_uniq = '7.7'; SELECT 'f64 and <'; @@ -152,25 +152,25 @@ SELECT 'f64 and <'; SELECT count(*) FROM tab WHERE f64 < 7; SELECT count(*) FROM tab WHERE f64_tdigest < 7; SELECT count(*) FROM tab WHERE f64_minmax < 7; -SELECT count(*) FROM tab WHERE f64_count_min < 7; +SELECT count(*) FROM tab WHERE f64_countmin < 7; SELECT count(*) FROM tab WHERE f64_uniq < 7; SELECT count(*) FROM tab WHERE f64 < 7.7; SELECT count(*) FROM tab WHERE f64_tdigest < 7.7; SELECT count(*) FROM tab WHERE f64_minmax < 7.7; -SELECT count(*) FROM tab WHERE f64_count_min < 7.7; +SELECT count(*) FROM tab WHERE f64_countmin < 7.7; SELECT count(*) FROM tab 
WHERE f64_uniq < 7.7; SELECT count(*) FROM tab WHERE f64 < '7'; SELECT count(*) FROM tab WHERE f64_tdigest < '7'; SELECT count(*) FROM tab WHERE f64_minmax < '7'; -SELECT count(*) FROM tab WHERE f64_count_min < '7'; +SELECT count(*) FROM tab WHERE f64_countmin < '7'; SELECT count(*) FROM tab WHERE f64_uniq < '7'; SELECT count(*) FROM tab WHERE f64 < '7.7'; SELECT count(*) FROM tab WHERE f64_tdigest < '7.7'; SELECT count(*) FROM tab WHERE f64_minmax < '7.7'; -SELECT count(*) FROM tab WHERE f64_count_min < '7.7'; +SELECT count(*) FROM tab WHERE f64_countmin < '7.7'; SELECT count(*) FROM tab WHERE f64_uniq < '7.7'; -- dt ---------------------------------------------------- @@ -180,13 +180,13 @@ SELECT 'dt and ='; SELECT count(*) FROM tab WHERE dt = '2024-08-08 11:12:13'; SELECT count(*) FROM tab WHERE dt_tdigest = '2024-08-08 11:12:13'; SELECT count(*) FROM tab WHERE dt_minmax = '2024-08-08 11:12:13'; -SELECT count(*) FROM tab WHERE dt_count_min = '2024-08-08 11:12:13'; +SELECT count(*) FROM tab WHERE dt_countmin = '2024-08-08 11:12:13'; SELECT count(*) FROM tab WHERE dt_uniq = '2024-08-08 11:12:13'; SELECT count(*) FROM tab WHERE dt = 7; SELECT count(*) FROM tab WHERE dt_tdigest = 7; SELECT count(*) FROM tab WHERE dt_minmax = 7; -SELECT count(*) FROM tab WHERE dt_count_min = 7; +SELECT count(*) FROM tab WHERE dt_countmin = 7; SELECT count(*) FROM tab WHERE dt_uniq = 7; SELECT 'dt and <'; @@ -194,13 +194,13 @@ SELECT 'dt and <'; SELECT count(*) FROM tab WHERE dt < '2024-08-08 11:12:13'; SELECT count(*) FROM tab WHERE dt_tdigest < '2024-08-08 11:12:13'; SELECT count(*) FROM tab WHERE dt_minmax < '2024-08-08 11:12:13'; -SELECT count(*) FROM tab WHERE dt_count_min < '2024-08-08 11:12:13'; +SELECT count(*) FROM tab WHERE dt_countmin < '2024-08-08 11:12:13'; SELECT count(*) FROM tab WHERE dt_uniq < '2024-08-08 11:12:13'; SELECT count(*) FROM tab WHERE dt < 7; SELECT count(*) FROM tab WHERE dt_tdigest < 7; SELECT count(*) FROM tab WHERE dt_minmax < 7; -SELECT count(*) FROM tab WHERE dt_count_min < 7; +SELECT count(*) FROM tab WHERE dt_countmin < 7; SELECT count(*) FROM tab WHERE dt_uniq < 7; -- b ---------------------------------------------------- @@ -210,25 +210,25 @@ SELECT 'b and ='; SELECT count(*) FROM tab WHERE b = true; SELECT count(*) FROM tab WHERE b_tdigest = true; SELECT count(*) FROM tab WHERE b_minmax = true; -SELECT count(*) FROM tab WHERE b_count_min = true; +SELECT count(*) FROM tab WHERE b_countmin = true; SELECT count(*) FROM tab WHERE b_uniq = true; SELECT count(*) FROM tab WHERE b = 'true'; SELECT count(*) FROM tab WHERE b_tdigest = 'true'; SELECT count(*) FROM tab WHERE b_minmax = 'true'; -SELECT count(*) FROM tab WHERE b_count_min = 'true'; +SELECT count(*) FROM tab WHERE b_countmin = 'true'; SELECT count(*) FROM tab WHERE b_uniq = 'true'; SELECT count(*) FROM tab WHERE b = 1; SELECT count(*) FROM tab WHERE b_tdigest = 1; SELECT count(*) FROM tab WHERE b_minmax = 1; -SELECT count(*) FROM tab WHERE b_count_min = 1; +SELECT count(*) FROM tab WHERE b_countmin = 1; SELECT count(*) FROM tab WHERE b_uniq = 1; SELECT count(*) FROM tab WHERE b = 1.1; SELECT count(*) FROM tab WHERE b_tdigest = 1.1; SELECT count(*) FROM tab WHERE b_minmax = 1.1; -SELECT count(*) FROM tab WHERE b_count_min = 1.1; +SELECT count(*) FROM tab WHERE b_countmin = 1.1; SELECT count(*) FROM tab WHERE b_uniq = 1.1; -- s ---------------------------------------------------- @@ -238,13 +238,13 @@ SELECT 's and ='; SELECT count(*) FROM tab WHERE s = 7; -- { serverError NO_COMMON_TYPE } -- SELECT count(*) FROM tab 
WHERE s_tdigest = 7; -- not supported -- SELECT count(*) FROM tab WHERE s_minmax = 7; -- not supported -SELECT count(*) FROM tab WHERE s_count_min = 7; -- { serverError NO_COMMON_TYPE } +SELECT count(*) FROM tab WHERE s_countmin = 7; -- { serverError NO_COMMON_TYPE } SELECT count(*) FROM tab WHERE s_uniq = 7; -- { serverError NO_COMMON_TYPE } SELECT count(*) FROM tab WHERE s = '7'; -- SELECT count(*) FROM tab WHERE s_tdigest = '7'; -- not supported -- SELECT count(*) FROM tab WHERE s_minmax = '7'; -- not supported -SELECT count(*) FROM tab WHERE s_count_min = '7'; +SELECT count(*) FROM tab WHERE s_countmin = '7'; SELECT count(*) FROM tab WHERE s_uniq = '7'; DROP TABLE tab; From c5f8802ac8e43efd51441cf4dbbf1ccf2381af9a Mon Sep 17 00:00:00 2001 From: Robert Schulze Date: Mon, 9 Sep 2024 08:04:53 +0000 Subject: [PATCH 66/70] Docs: Mention FROM-before-SELECT --- docs/en/sql-reference/statements/select/from.md | 15 +++++++++++---- 1 file changed, 11 insertions(+), 4 deletions(-) diff --git a/docs/en/sql-reference/statements/select/from.md b/docs/en/sql-reference/statements/select/from.md index 7a6e2ab054c..f319e7b1357 100644 --- a/docs/en/sql-reference/statements/select/from.md +++ b/docs/en/sql-reference/statements/select/from.md @@ -15,7 +15,14 @@ The `FROM` clause specifies the source to read data from: Subquery is another `SELECT` query that may be specified in parenthesis inside `FROM` clause. -`FROM` clause can contain multiple data sources, separated by commas, which is equivalent of performing [CROSS JOIN](../../../sql-reference/statements/select/join.md) on them. +The `FROM` can contain multiple data sources, separated by commas, which is equivalent of performing [CROSS JOIN](../../../sql-reference/statements/select/join.md) on them. + +`FROM` can optionally appear before a `SELECT` clause. This is a ClickHouse-specific extension of standard SQL which makes `SELECT` statements easier to read. Example: + +```sql +FROM table +SELECT * +``` ## FINAL Modifier @@ -45,19 +52,19 @@ As an alternative to using `FINAL`, it is sometimes possible to use different qu ### Example Usage -**Using the `FINAL` keyword** +Using the `FINAL` keyword ```sql SELECT x, y FROM mytable FINAL WHERE x > 1; ``` -**Using `FINAL` as a query-level setting** +Using `FINAL` as a query-level setting ```sql SELECT x, y FROM mytable WHERE x > 1 SETTINGS final = 1; ``` -**Using `FINAL` as a session-level setting** +Using `FINAL` as a session-level setting ```sql SET final = 1; From 6539cbd1ce1997ca14c65131502cda2e0daf7676 Mon Sep 17 00:00:00 2001 From: JackyWoo Date: Mon, 9 Sep 2024 17:34:15 +0800 Subject: [PATCH 67/70] Add CountMin to dictionary file and a little fixup --- .../table-engines/mergetree-family/mergetree.md | 17 +++++++++++++++++ .../aspell-ignore/en/aspell-dict.txt | 1 + 2 files changed, 18 insertions(+) diff --git a/docs/en/engines/table-engines/mergetree-family/mergetree.md b/docs/en/engines/table-engines/mergetree-family/mergetree.md index 317d1cca61e..d17208ab099 100644 --- a/docs/en/engines/table-engines/mergetree-family/mergetree.md +++ b/docs/en/engines/table-engines/mergetree-family/mergetree.md @@ -991,6 +991,23 @@ They can be used for prewhere optimization only if we enable `set allow_statisti ### Available Types of Column Statistics {#available-types-of-column-statistics} +Names of statistics types are case-insensitive, i.e. both + +``` sql +CREATE TABLE tab +( +a LowCardinality(Int64) STATISTICS(minmax) +) [...] 
+``` +and +``` sql +CREATE TABLE tab +( +a LowCardinality(Int64) STATISTICS(MiNmAx) +) [...] +``` +work. + - `MinMax` The minimum and maximum column value which allows to estimate the selectivity of range filters on numeric columns. diff --git a/utils/check-style/aspell-ignore/en/aspell-dict.txt b/utils/check-style/aspell-ignore/en/aspell-dict.txt index d10db5f0d3d..a51132e6d7f 100644 --- a/utils/check-style/aspell-ignore/en/aspell-dict.txt +++ b/utils/check-style/aspell-ignore/en/aspell-dict.txt @@ -120,6 +120,7 @@ CMPLNT CMake CMakeLists CODECS +CountMin COVID CPUFrequencyMHz CPUs From 3363fe33888b2c1f994ea4157b7f323a518e89b6 Mon Sep 17 00:00:00 2001 From: Robert Schulze Date: Mon, 9 Sep 2024 10:49:53 +0000 Subject: [PATCH 68/70] Docs: Document per-logger level overriding --- .../settings.md | 71 ++++++++++++------- 1 file changed, 46 insertions(+), 25 deletions(-) diff --git a/docs/en/operations/server-configuration-parameters/settings.md b/docs/en/operations/server-configuration-parameters/settings.md index 9fce83a0dc4..ccc8cf017ca 100644 --- a/docs/en/operations/server-configuration-parameters/settings.md +++ b/docs/en/operations/server-configuration-parameters/settings.md @@ -1463,26 +1463,29 @@ Examples: ## logger {#logger} -Logging settings. +The location and format of log messages. Keys: -- `level` – Logging level. Acceptable values: `trace`, `debug`, `information`, `warning`, `error`. -- `log` – The log file. Contains all the entries according to `level`. -- `errorlog` – Error log file. -- `size` – Size of the file. Applies to `log` and `errorlog`. Once the file reaches `size`, ClickHouse archives and renames it, and creates a new log file in its place. -- `count` – The number of archived log files that ClickHouse stores. -- `console` – Send `log` and `errorlog` to the console instead of file. To enable, set to `1` or `true`. -- `console_log_level` – Logging level for console. Default to `level`. -- `use_syslog` - Log to syslog as well. -- `syslog_level` - Logging level for logging to syslog. -- `stream_compress` – Compress `log` and `errorlog` with `lz4` stream compression. To enable, set to `1` or `true`. -- `formatting` – Specify log format to be printed in console log (currently only `json` supported). +- `level` – Log level. Acceptable values: `none` (turn logging off), `fatal`, `critical`, `error`, `warning`, `notice`, `information`, + `debug`, `trace`, `test` +- `log` – The path to the log file. +- `errorlog` – The path to the error log file. +- `size` – Rotation policy: Maximum size of the log files in bytes. Once the log file size exceeds this threshold, it is renamed and archived, and a new log file is created. +- `count` – Rotation policy: How many historical log files Clickhouse are kept at most. +- `stream_compress` – Compress log messages using LZ4. Set to `1` or `true` to enable. +- `console` – Do not write log messages to log files, instead print them in the console. Set to `1` or `true` to enable. Default is + `1` if Clickhouse does not run in daemon mode, `0` otherwise. +- `console_log_level` – Log level for console output. Defaults to `level`. +- `formatting` – Log format for console output. Currently, only `json` is supported). +- `use_syslog` - Also forward log output to syslog. +- `syslog_level` - Log level for logging to syslog. -Both log and error log file names (only file names, not directories) support date and time format specifiers. 
+**Log format specifiers** -**Format specifiers** -Using the following format specifiers, you can define a pattern for the resulting file name. “Example” column shows possible results for `2023-07-06 18:32:07`. +File names in `log` and `errorLog` paths support below format specifiers for the resulting file name (the directory part does not support them). + +Column “Example” shows the output at `2023-07-06 18:32:07`. | Specifier | Description | Example | |-------------|---------------------------------------------------------------------------------------------------------------------|--------------------------| @@ -1537,18 +1540,37 @@ Using the following format specifiers, you can define a pattern for the resultin ``` -Writing to the console can be configured. Config example: +To print log messages only in the console: ``` xml information - 1 + true + +``` + +**Per-level Overrides** + +The log level of individual log names can be overridden. For example, to mute all messages of loggers "Backup" and "RBAC". + +```xml + + + + Backup + none + + + RBAC + none + + ``` ### syslog -Writing to the syslog is also supported. Config example: +To write log messages additionally to syslog: ``` xml @@ -1562,14 +1584,12 @@ Writing to the syslog is also supported. Config example: ``` -Keys for syslog: +Keys for ``: -- use_syslog — Required setting if you want to write to the syslog. -- address — The host\[:port\] of syslogd. If omitted, the local daemon is used. -- hostname — Optional. The name of the host that logs are sent from. -- facility — [The syslog facility keyword](https://en.wikipedia.org/wiki/Syslog#Facility) in uppercase letters with the “LOG_” prefix: (`LOG_USER`, `LOG_DAEMON`, `LOG_LOCAL3`, and so on). - Default value: `LOG_USER` if `address` is specified, `LOG_DAEMON` otherwise. -- format – Message format. Possible values: `bsd` and `syslog.` +- `address` — The address of syslog in format `host\[:port\]`. If omitted, the local daemon is used. +- `hostname` — The name of the host from which logs are send. Optional. +- `facility` — The syslog [facility keyword](https://en.wikipedia.org/wiki/Syslog#Facility). Must be specified uppercase with a “LOG_” prefix, e.g. `LOG_USER`, `LOG_DAEMON`, `LOG_LOCAL3`, etc. Default value: `LOG_USER` if `address` is specified, `LOG_DAEMON` otherwise. +- `format` – Log message format. Possible values: `bsd` and `syslog.` ### Log formats @@ -1588,6 +1608,7 @@ You can specify the log format that will be outputted in the console log. Curren "source_line": "192" } ``` + To enable JSON logging support, use the following snippet: ```xml From c4720d9728ba0e264da4834dea7fcaad6b2a8322 Mon Sep 17 00:00:00 2001 From: Robert Schulze Date: Mon, 9 Sep 2024 11:18:42 +0000 Subject: [PATCH 69/70] Minor doc fixups --- .../mergetree-family/mergetree.md | 37 +++++++------------ 1 file changed, 14 insertions(+), 23 deletions(-) diff --git a/docs/en/engines/table-engines/mergetree-family/mergetree.md b/docs/en/engines/table-engines/mergetree-family/mergetree.md index d17208ab099..0bbee5f86f3 100644 --- a/docs/en/engines/table-engines/mergetree-family/mergetree.md +++ b/docs/en/engines/table-engines/mergetree-family/mergetree.md @@ -991,48 +991,39 @@ They can be used for prewhere optimization only if we enable `set allow_statisti ### Available Types of Column Statistics {#available-types-of-column-statistics} -Names of statistics types are case-insensitive, i.e. both - -``` sql -CREATE TABLE tab -( -a LowCardinality(Int64) STATISTICS(minmax) -) [...] 
-```
-and
-``` sql
-CREATE TABLE tab
-(
-a LowCardinality(Int64) STATISTICS(MiNmAx)
-) [...]
-```
-work.
-
 - `MinMax`
 
     The minimum and maximum column value which allows to estimate the selectivity of range filters on numeric columns.
 
+    Syntax: `minmax`
+
 - `TDigest`
 
     [TDigest](https://github.com/tdunning/t-digest) sketches which allow to compute approximate percentiles (e.g. the 90th percentile) for numeric columns.
 
+    Syntax: `tdigest`
+
 - `Uniq`
 
     [HyperLogLog](https://en.wikipedia.org/wiki/HyperLogLog) sketches which provide an estimation how many distinct values a column contains.
 
+    Syntax: `uniq`
+
 - `CountMin`
 
     [CountMin](https://en.wikipedia.org/wiki/Count%E2%80%93min_sketch) sketches which provide an approximate count of the frequency of each value in a column.
 
+    Syntax: `countmin`
+
 ### Supported Data Types {#supported-data-types}
 
-| | (U)Int* | Float* | Decimal(*) | Date* | Boolean | Enum* | (Fixed)String |
-|-----------|---------|--------|------------|-------|---------|-------|------------------|
-| CountMin | ✔ | ✔ | ✔ | ✔ | ✔ | ✔ | ✔ |
-| MinMax | ✔ | ✔ | ✔ | ✔ | ✔ | ✔ | ✗ |
-| TDigest | ✔ | ✔ | ✔ | ✔ | ✔ | ✔ | ✗ |
-| Uniq | ✔ | ✔ | ✔ | ✔ | ✔ | ✔ | ✔ |
+| | (U)Int*, Float*, Decimal(*), Date*, Boolean, Enum* | String or FixedString |
+|-----------|----------------------------------------------------|-----------------------|
+| CountMin | ✔ | ✔ |
+| MinMax | ✔ | ✗ |
+| TDigest | ✔ | ✗ |
+| Uniq | ✔ | ✔ |
 
 ### Supported Operations {#supported-operations}
 
From d2ee805d8f9bf9d72ab6ddca536c6b180d7b6aa0 Mon Sep 17 00:00:00 2001
From: Alexander Tokmakov
Date: Mon, 9 Sep 2024 17:32:22 +0200
Subject: [PATCH 70/70] Revert "Remove obsolete `--multiquery` parameter (follow-up to #63898), pt. V"

---
 tests/docker_scripts/stateful_runner.sh | 2 +-
 tests/docker_scripts/stress_runner.sh | 2 +-
 tests/integration/helpers/client.py | 1 +
 tests/performance/scripts/compare.sh | 12 +--
 ...006_client_test_hint_no_such_error_name.sh | 2 +-
 .../0_stateless/02030_rocksdb_race_long.sh | 6 +-
 .../02151_hash_table_sizes_stats_joins.sh | 2 +-
 .../02229_client_stop_multiquery_in_SIGINT.sh | 4 +-
 ...41_filesystem_cache_on_write_operations.sh | 8 +-
 .../0_stateless/02242_delete_user_race.sh | 2 +-
 .../0_stateless/02243_drop_user_grant_race.sh | 4 +-
 .../02266_protobuf_format_google_wrappers.sh | 16 ++--
 .../02286_drop_filesystem_cache.sh | 6 +-
 .../02313_filesystem_cache_seeks.sh | 2 +-
 .../02340_parts_refcnt_mergetree.sh | 6 +-
 .../0_stateless/02344_describe_cache.sh | 4 +-
 tests/queries/0_stateless/02352_rwlock.sh | 4 +-
 .../0_stateless/02380_insert_mv_race.sh | 4 +-
 ...entelemetry_insert_on_distributed_table.sh | 8 +-
 .../02419_keeper_map_primary_key.sh | 2 +-
 .../02443_detach_attach_partition.sh | 2 +-
 .../0_stateless/02473_optimize_old_parts.sh | 8 +-
 .../0_stateless/02477_s3_request_throttler.sh | 2 +-
 ...02494_zero_copy_projection_cancel_fetch.sh | 4 +-
 .../02504_regexp_dictionary_ua_parser.sh | 8 +-
 .../02504_regexp_dictionary_yaml_source.sh | 18 ++--
 .../02515_cleanup_async_insert_block_ids.sh | 6 +-
 .../02585_query_status_deadlock.sh | 2 +-
 ...de_client_info_initial_query_start_time.sh | 14 ++--
 ...rofile_events_from_query_log_and_client.sh | 16 ++--
 .../02676_optimize_old_parts_replicated.sh | 8 +-
 .../02686_postgres_protocol_decimal_256.sh | 2 +-
 .../0_stateless/02700_s3_part_INT_MAX.sh | 2 +-
 .../0_stateless/02725_start_stop_fetches.sh | 2 +-
 .../02731_zero_objects_in_metadata.sh | 6 +-
 tests/queries/0_stateless/02766_prql.sh | 2 +-
 .../0_stateless/02770_async_buffer_ignore.sh | 4 +-
.../02789_filesystem_cache_alignment.sh | 18 ++-- ...89_reading_from_s3_with_connection_pool.sh | 8 +- .../0_stateless/02801_backup_native_copy.sh | 10 +-- .../0_stateless/02803_backup_tmp_files.sh | 2 +- ...2808_custom_disk_with_user_defined_name.sh | 8 +- .../02808_filesystem_cache_drop_query.sh | 22 ++--- ...ackup_use_same_password_for_base_backup.sh | 2 +- ...use_same_s3_credentials_for_base_backup.sh | 2 +- .../02844_max_backup_bandwidth_s3.sh | 6 +- .../0_stateless/02867_storage_set_tsan.sh | 4 +- .../02900_union_schema_inference_mode.sh | 10 +-- .../02908_many_requests_to_system_replicas.sh | 2 +- .../02922_deduplication_with_zero_copy.sh | 10 +-- .../02932_refreshable_materialized_views_1.sh | 72 ++++++++-------- .../02932_refreshable_materialized_views_2.sh | 82 +++++++++---------- .../0_stateless/02941_variant_type_1.sh | 10 +-- .../0_stateless/02941_variant_type_2.sh | 4 +- .../0_stateless/02941_variant_type_3.sh | 4 +- .../0_stateless/02941_variant_type_4.sh | 2 +- ...ynamically_change_filesystem_cache_size.sh | 10 +-- .../0_stateless/02960_polygon_bound_bug.sh | 2 +- .../02961_storage_config_volume_priority.sh | 4 +- ...02963_remote_read_small_buffer_size_bug.sh | 6 +- ...allel_replicas_join_algo_and_analyzer_1.sh | 2 +- ...allel_replicas_join_algo_and_analyzer_2.sh | 2 +- ...allel_replicas_join_algo_and_analyzer_3.sh | 2 +- .../02969_auto_format_detection.sh | 6 +- .../02980_s3_plain_DROP_TABLE_MergeTree.sh | 4 +- ...s3_plain_DROP_TABLE_ReplicatedMergeTree.sh | 6 +- ...ion_insert_several_blocks_nonreplicated.sh | 2 +- ...cation_insert_several_blocks_replicated.sh | 2 +- ..._generates_several_blocks_nonreplicated.sh | 2 +- ..._mv_generates_several_blocks_replicated.sh | 2 +- ...several_mv_into_one_table_nonreplicated.sh | 2 +- ...on_several_mv_into_one_table_replicated.sh | 2 +- .../0_stateless/03008_s3_plain_rewritable.sh | 8 +- ...2_dynamically_resize_filesystem_cache_2.sh | 6 +- .../03039_dynamic_aggregating_merge_tree.sh | 2 +- .../03039_dynamic_collapsing_merge_tree.sh | 2 +- .../03039_dynamic_replacing_merge_tree.sh | 2 +- .../03039_dynamic_summing_merge_tree.sh | 2 +- ...dynamic_versioned_collapsing_merge_tree.sh | 2 +- .../0_stateless/03151_unload_index_race.sh | 6 +- .../03172_error_log_table_not_empty.sh | 8 +- .../03203_hive_style_partitioning.sh | 18 ++-- ...kup_and_clear_old_temporary_directories.sh | 4 +- 83 files changed, 299 insertions(+), 298 deletions(-) diff --git a/tests/docker_scripts/stateful_runner.sh b/tests/docker_scripts/stateful_runner.sh index defc24781a1..86f6a299ad3 100755 --- a/tests/docker_scripts/stateful_runner.sh +++ b/tests/docker_scripts/stateful_runner.sh @@ -105,7 +105,7 @@ setup_logs_replication clickhouse-client --query "SHOW DATABASES" clickhouse-client --query "CREATE DATABASE datasets" -clickhouse-client < /repo/tests/docker_scripts/create.sql +clickhouse-client --multiquery < /repo/tests/docker_scripts/create.sql clickhouse-client --query "SHOW TABLES FROM datasets" if [[ -n "$USE_DATABASE_REPLICATED" ]] && [[ "$USE_DATABASE_REPLICATED" -eq 1 ]]; then diff --git a/tests/docker_scripts/stress_runner.sh b/tests/docker_scripts/stress_runner.sh index af93c93f202..039c60c8e4e 100755 --- a/tests/docker_scripts/stress_runner.sh +++ b/tests/docker_scripts/stress_runner.sh @@ -62,7 +62,7 @@ start_server setup_logs_replication clickhouse-client --query "CREATE DATABASE datasets" -clickhouse-client < /repo/tests/docker_scripts/create.sql +clickhouse-client --multiquery < /repo/tests/docker_scripts/create.sql clickhouse-client --query "SHOW 
TABLES FROM datasets" clickhouse-client --query "CREATE DATABASE IF NOT EXISTS test" diff --git a/tests/integration/helpers/client.py b/tests/integration/helpers/client.py index 97405860784..8ba7b342020 100644 --- a/tests/integration/helpers/client.py +++ b/tests/integration/helpers/client.py @@ -89,6 +89,7 @@ class Client: command = self.command[:] if stdin is None: + command += ["--multiquery"] stdin = sql else: command += ["--query", sql] diff --git a/tests/performance/scripts/compare.sh b/tests/performance/scripts/compare.sh index c3566c51a16..da7bbf77a28 100755 --- a/tests/performance/scripts/compare.sh +++ b/tests/performance/scripts/compare.sh @@ -427,7 +427,7 @@ do done # for each query run, prepare array of metrics from query log -clickhouse-local --query " +clickhouse-local --multiquery --query " create view query_runs as select * from file('analyze/query-runs.tsv', TSV, 'test text, query_index int, query_id text, version UInt8, time float'); @@ -582,7 +582,7 @@ numactl --cpunodebind=all --membind=all numactl --show # If the available memory falls below 2 * size, GNU parallel will suspend some of the running jobs. numactl --cpunodebind=all --membind=all parallel -v --joblog analyze/parallel-log.txt --memsuspend 15G --null < analyze/commands.txt 2>> analyze/errors.log -clickhouse-local --query " +clickhouse-local --multiquery --query " -- Join the metric names back to the metric statistics we've calculated, and make -- a denormalized table of them -- statistics for all metrics for all queries. -- The WITH, ARRAY JOIN and CROSS JOIN do not like each other: @@ -680,7 +680,7 @@ rm ./*.{rep,svg} test-times.tsv test-dump.tsv unstable.tsv unstable-query-ids.ts cat analyze/errors.log >> report/errors.log ||: cat profile-errors.log >> report/errors.log ||: -clickhouse-local --query " +clickhouse-local --multiquery --query " create view query_display_names as select * from file('analyze/query-display-names.tsv', TSV, 'test text, query_index int, query_display_name text') @@ -981,7 +981,7 @@ create table all_query_metrics_tsv engine File(TSV, 'report/all-query-metrics.ts for version in {right,left} do rm -rf data - clickhouse-local --query " + clickhouse-local --multiquery --query " create view query_profiles as with 0 as left, 1 as right select * from file('analyze/query-profiles.tsv', TSV, @@ -1151,7 +1151,7 @@ function report_metrics rm -rf metrics ||: mkdir metrics -clickhouse-local --query " +clickhouse-local --multiquery --query " create view right_async_metric_log as select * from file('right-async-metric-log.tsv', TSVWithNamesAndTypes) ; @@ -1211,7 +1211,7 @@ function upload_results # Prepare info for the CI checks table. rm -f ci-checks.tsv - clickhouse-local --query " + clickhouse-local --multiquery --query " create view queries as select * from file('report/queries.tsv', TSVWithNamesAndTypes); create table ci_checks engine File(TSVWithNamesAndTypes, 'ci-checks.tsv') diff --git a/tests/queries/0_stateless/02006_client_test_hint_no_such_error_name.sh b/tests/queries/0_stateless/02006_client_test_hint_no_such_error_name.sh index 7bf8100a53d..972ff3ba73f 100755 --- a/tests/queries/0_stateless/02006_client_test_hint_no_such_error_name.sh +++ b/tests/queries/0_stateless/02006_client_test_hint_no_such_error_name.sh @@ -5,4 +5,4 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . 
"$CUR_DIR"/../shell_config.sh -$CLICKHOUSE_CLIENT -q 'select 1 -- { clientError FOOBAR }' |& grep -o 'No error code with name:.*' +$CLICKHOUSE_CLIENT -n -q 'select 1 -- { clientError FOOBAR }' |& grep -o 'No error code with name:.*' diff --git a/tests/queries/0_stateless/02030_rocksdb_race_long.sh b/tests/queries/0_stateless/02030_rocksdb_race_long.sh index 7d329f78d98..da31861991c 100755 --- a/tests/queries/0_stateless/02030_rocksdb_race_long.sh +++ b/tests/queries/0_stateless/02030_rocksdb_race_long.sh @@ -12,14 +12,14 @@ echo " DROP TABLE IF EXISTS rocksdb_race; CREATE TABLE rocksdb_race (key String, value UInt32) Engine=EmbeddedRocksDB PRIMARY KEY(key); INSERT INTO rocksdb_race SELECT '1_' || toString(number), number FROM numbers(100000); -" | $CLICKHOUSE_CLIENT +" | $CLICKHOUSE_CLIENT -n function read_stat_thread() { while true; do echo " SELECT * FROM system.rocksdb FORMAT Null; - " | $CLICKHOUSE_CLIENT + " | $CLICKHOUSE_CLIENT -n done } @@ -29,7 +29,7 @@ function truncate_thread() sleep 3s; echo " TRUNCATE TABLE rocksdb_race; - " | $CLICKHOUSE_CLIENT + " | $CLICKHOUSE_CLIENT -n done } diff --git a/tests/queries/0_stateless/02151_hash_table_sizes_stats_joins.sh b/tests/queries/0_stateless/02151_hash_table_sizes_stats_joins.sh index a9b5c461c5a..007dae6e427 100755 --- a/tests/queries/0_stateless/02151_hash_table_sizes_stats_joins.sh +++ b/tests/queries/0_stateless/02151_hash_table_sizes_stats_joins.sh @@ -12,7 +12,7 @@ opts=( --join_algorithm='parallel_hash' ) -$CLICKHOUSE_CLIENT -q " +$CLICKHOUSE_CLIENT -nq " CREATE TABLE t1(a UInt32, b UInt32) ENGINE=MergeTree ORDER BY (); INSERT INTO t1 SELECT number, number FROM numbers_mt(1e6); diff --git a/tests/queries/0_stateless/02229_client_stop_multiquery_in_SIGINT.sh b/tests/queries/0_stateless/02229_client_stop_multiquery_in_SIGINT.sh index b23164f8354..e5d00bc1a1c 100755 --- a/tests/queries/0_stateless/02229_client_stop_multiquery_in_SIGINT.sh +++ b/tests/queries/0_stateless/02229_client_stop_multiquery_in_SIGINT.sh @@ -5,12 +5,12 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . 
"$CUR_DIR"/../shell_config.sh -timeout -s INT 3s $CLICKHOUSE_CLIENT --max_block_size 1 -m -q " +timeout -s INT 3s $CLICKHOUSE_CLIENT --max_block_size 1 -nm -q " SELECT sleep(1) FROM numbers(100) FORMAT Null; SELECT 'FAIL'; " -timeout -s INT 3s $CLICKHOUSE_LOCAL --max_block_size 1 -m -q " +timeout -s INT 3s $CLICKHOUSE_LOCAL --max_block_size 1 -nm -q " SELECT sleep(1) FROM numbers(100) FORMAT Null; SELECT 'FAIL'; " diff --git a/tests/queries/0_stateless/02241_filesystem_cache_on_write_operations.sh b/tests/queries/0_stateless/02241_filesystem_cache_on_write_operations.sh index 8a44b65e6d1..f8e7b7e7e72 100755 --- a/tests/queries/0_stateless/02241_filesystem_cache_on_write_operations.sh +++ b/tests/queries/0_stateless/02241_filesystem_cache_on_write_operations.sh @@ -16,7 +16,7 @@ for STORAGE_POLICY in 's3_cache' 'local_cache' 'azure_cache'; do $CLICKHOUSE_CLIENT --echo --query "SYSTEM DROP FILESYSTEM CACHE" - $CLICKHOUSE_CLIENT --echo --query "SELECT file_segment_range_begin, file_segment_range_end, size, state + $CLICKHOUSE_CLIENT --echo -n --query "SELECT file_segment_range_begin, file_segment_range_end, size, state FROM ( SELECT file_segment_range_begin, file_segment_range_end, size, state, local_path @@ -37,7 +37,7 @@ for STORAGE_POLICY in 's3_cache' 'local_cache' 'azure_cache'; do $CLICKHOUSE_CLIENT --echo --enable_filesystem_cache_on_write_operations=1 --query "INSERT INTO test_02241 SELECT number, toString(number) FROM numbers(100)" - $CLICKHOUSE_CLIENT --echo --query "SELECT file_segment_range_begin, file_segment_range_end, size, state + $CLICKHOUSE_CLIENT --echo -n --query "SELECT file_segment_range_begin, file_segment_range_end, size, state FROM ( SELECT file_segment_range_begin, file_segment_range_end, size, state, local_path @@ -70,7 +70,7 @@ for STORAGE_POLICY in 's3_cache' 'local_cache' 'azure_cache'; do $CLICKHOUSE_CLIENT --echo --enable_filesystem_cache_on_write_operations=1 --query "INSERT INTO test_02241 SELECT number, toString(number) FROM numbers(100, 200)" - $CLICKHOUSE_CLIENT --echo --query "SELECT file_segment_range_begin, file_segment_range_end, size, state + $CLICKHOUSE_CLIENT --echo -n --query "SELECT file_segment_range_begin, file_segment_range_end, size, state FROM ( SELECT file_segment_range_begin, file_segment_range_end, size, state, local_path @@ -109,7 +109,7 @@ for STORAGE_POLICY in 's3_cache' 'local_cache' 'azure_cache'; do $CLICKHOUSE_CLIENT --echo --query "SYSTEM FLUSH LOGS" - $CLICKHOUSE_CLIENT --query "SELECT + $CLICKHOUSE_CLIENT -n --query "SELECT query, ProfileEvents['RemoteFSReadBytes'] > 0 as remote_fs_read FROM system.query_log diff --git a/tests/queries/0_stateless/02242_delete_user_race.sh b/tests/queries/0_stateless/02242_delete_user_race.sh index 3cb79d69955..2af54276469 100755 --- a/tests/queries/0_stateless/02242_delete_user_race.sh +++ b/tests/queries/0_stateless/02242_delete_user_race.sh @@ -15,7 +15,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh -$CLICKHOUSE_CLIENT -m -q " +$CLICKHOUSE_CLIENT -nm -q " DROP ROLE IF EXISTS test_role_02242; CREATE ROLE test_role_02242; " diff --git a/tests/queries/0_stateless/02243_drop_user_grant_race.sh b/tests/queries/0_stateless/02243_drop_user_grant_race.sh index 1972ecade06..4dce8e8124c 100755 --- a/tests/queries/0_stateless/02243_drop_user_grant_race.sh +++ b/tests/queries/0_stateless/02243_drop_user_grant_race.sh @@ -7,7 +7,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . 
"$CURDIR"/../shell_config.sh -$CLICKHOUSE_CLIENT -m -q " +$CLICKHOUSE_CLIENT -nm -q " DROP ROLE IF EXISTS test_role_02244; CREATE ROLE test_role_02244; DROP USER IF EXISTS kek_02243; @@ -37,4 +37,4 @@ $CLICKHOUSE_CLIENT --user kek_02243 -q "SELECT * FROM test" 2>&1| grep -Fa "Exce $CLICKHOUSE_CLIENT -q "DROP ROLE IF EXISTS test_role_02243" $CLICKHOUSE_CLIENT -q "DROP USER IF EXISTS test_user_02243" -$CLICKHOUSE_CLIENT -q "DROP USER IF EXISTS kek_02243" +$CLICKHOUSE_CLIENT -q "DROP USER IF EXISTS kek_02243" \ No newline at end of file diff --git a/tests/queries/0_stateless/02266_protobuf_format_google_wrappers.sh b/tests/queries/0_stateless/02266_protobuf_format_google_wrappers.sh index eb1109aaac1..ae2a2351c6b 100755 --- a/tests/queries/0_stateless/02266_protobuf_format_google_wrappers.sh +++ b/tests/queries/0_stateless/02266_protobuf_format_google_wrappers.sh @@ -44,7 +44,7 @@ protobuf_info() { fi } -$CLICKHOUSE_CLIENT --query " +$CLICKHOUSE_CLIENT -n --query " DROP TABLE IF EXISTS $MAIN_TABLE; DROP TABLE IF EXISTS $ROUNDTRIP_TABLE; DROP TABLE IF EXISTS $COMPATIBILITY_TABLE; @@ -78,14 +78,14 @@ echo $SET_OUTPUT echo echo "Insert $INITIAL_INSERT_VALUES into table (Nullable(String), Int32):" -$CLICKHOUSE_CLIENT --query " +$CLICKHOUSE_CLIENT -n --query " INSERT INTO $MAIN_TABLE VALUES $INITIAL_INSERT_VALUES; SELECT * FROM $MAIN_TABLE; " echo echo "Protobuf representation of the second row:" -$CLICKHOUSE_CLIENT --query "$SET_OUTPUT SELECT * FROM $MAIN_TABLE WHERE ref = 2 LIMIT 1 $(protobuf_info output ProtobufSingle Message)" > "$BINARY_FILE_PATH" +$CLICKHOUSE_CLIENT -n --query "$SET_OUTPUT SELECT * FROM $MAIN_TABLE WHERE ref = 2 LIMIT 1 $(protobuf_info output ProtobufSingle Message)" > "$BINARY_FILE_PATH" hexdump -C $BINARY_FILE_PATH echo @@ -101,12 +101,12 @@ hexdump -C $MESSAGE_FILE_PATH echo echo "Insert proto message into table (Nullable(String), Int32):" -$CLICKHOUSE_CLIENT --query "$SET_INPUT INSERT INTO $ROUNDTRIP_TABLE $(protobuf_info input Protobuf Message)" < "$MESSAGE_FILE_PATH" +$CLICKHOUSE_CLIENT -n --query "$SET_INPUT INSERT INTO $ROUNDTRIP_TABLE $(protobuf_info input Protobuf Message)" < "$MESSAGE_FILE_PATH" $CLICKHOUSE_CLIENT --query "SELECT * FROM $ROUNDTRIP_TABLE" echo echo "Proto output of the table using Google wrapper:" -$CLICKHOUSE_CLIENT --query "$SET_OUTPUT SELECT * FROM $ROUNDTRIP_TABLE $(protobuf_info output Protobuf Message)" > "$BINARY_FILE_PATH" +$CLICKHOUSE_CLIENT -n --query "$SET_OUTPUT SELECT * FROM $ROUNDTRIP_TABLE $(protobuf_info output Protobuf Message)" > "$BINARY_FILE_PATH" hexdump -C $BINARY_FILE_PATH echo @@ -124,14 +124,14 @@ echo echo "Insert $MULTI_WRAPPER_VALUES and reinsert using Google wrappers into:" echo "Table (Nullable(Int32), Nullable(Int32), Int32):" $CLICKHOUSE_CLIENT --query "INSERT INTO $MULTI_TABLE VALUES $MULTI_WRAPPER_VALUES" -$CLICKHOUSE_CLIENT --query "$SET_OUTPUT SELECT * FROM $MULTI_TABLE $(protobuf_info output Protobuf MessageMultiWrapper)" > "$BINARY_FILE_PATH" -$CLICKHOUSE_CLIENT --query "$SET_INPUT INSERT INTO $MULTI_TABLE $(protobuf_info input Protobuf MessageMultiWrapper)" < "$BINARY_FILE_PATH" +$CLICKHOUSE_CLIENT -n --query "$SET_OUTPUT SELECT * FROM $MULTI_TABLE $(protobuf_info output Protobuf MessageMultiWrapper)" > "$BINARY_FILE_PATH" +$CLICKHOUSE_CLIENT -n --query "$SET_INPUT INSERT INTO $MULTI_TABLE $(protobuf_info input Protobuf MessageMultiWrapper)" < "$BINARY_FILE_PATH" $CLICKHOUSE_CLIENT --query "SELECT * FROM $MULTI_TABLE" rm "$BINARY_FILE_PATH" rm "$MESSAGE_FILE_PATH" -$CLICKHOUSE_CLIENT --query " 
+$CLICKHOUSE_CLIENT -n --query " DROP TABLE $MAIN_TABLE; DROP TABLE $ROUNDTRIP_TABLE; DROP TABLE $COMPATIBILITY_TABLE; diff --git a/tests/queries/0_stateless/02286_drop_filesystem_cache.sh b/tests/queries/0_stateless/02286_drop_filesystem_cache.sh index 672bf4d068b..dab777fcc31 100755 --- a/tests/queries/0_stateless/02286_drop_filesystem_cache.sh +++ b/tests/queries/0_stateless/02286_drop_filesystem_cache.sh @@ -11,7 +11,7 @@ for STORAGE_POLICY in 's3_cache' 'local_cache' 'azure_cache'; do echo "Using storage policy: $STORAGE_POLICY" $CLICKHOUSE_CLIENT --query "DROP TABLE IF EXISTS test_02286" - $CLICKHOUSE_CLIENT --query "CREATE TABLE test_02286 (key UInt32, value String) + $CLICKHOUSE_CLIENT -n --query "CREATE TABLE test_02286 (key UInt32, value String) Engine=MergeTree() ORDER BY key SETTINGS storage_policy='$STORAGE_POLICY', min_bytes_for_wide_part = 10485760" @@ -38,7 +38,7 @@ for STORAGE_POLICY in 's3_cache' 'local_cache' 'azure_cache'; do $CLICKHOUSE_CLIENT --query "SELECT * FROM test_02286 FORMAT Null" $CLICKHOUSE_CLIENT --query "SELECT count() FROM system.filesystem_cache" - $CLICKHOUSE_CLIENT --query "SELECT count() + $CLICKHOUSE_CLIENT -n --query "SELECT count() FROM ( SELECT arrayJoin(cache_paths) AS cache_path, @@ -54,7 +54,7 @@ for STORAGE_POLICY in 's3_cache' 'local_cache' 'azure_cache'; do $CLICKHOUSE_CLIENT --query "SELECT count() FROM system.filesystem_cache" $CLICKHOUSE_CLIENT --query "SELECT cache_path FROM system.filesystem_cache" - $CLICKHOUSE_CLIENT --query "SELECT cache_path, local_path + $CLICKHOUSE_CLIENT -n --query "SELECT cache_path, local_path FROM ( SELECT arrayJoin(cache_paths) AS cache_path, diff --git a/tests/queries/0_stateless/02313_filesystem_cache_seeks.sh b/tests/queries/0_stateless/02313_filesystem_cache_seeks.sh index 64fae06f220..b7adde6fcbb 100755 --- a/tests/queries/0_stateless/02313_filesystem_cache_seeks.sh +++ b/tests/queries/0_stateless/02313_filesystem_cache_seeks.sh @@ -23,7 +23,7 @@ for STORAGE_POLICY in 's3_cache' 'local_cache' 's3_cache_multi' 'azure_cache'; d ORDER BY tuple() SETTINGS storage_policy = '$STORAGE_POLICY'" > /dev/null - $CLICKHOUSE_CLIENT --enable_filesystem_cache_on_write_operations=0 --query "INSERT INTO test_02313 + $CLICKHOUSE_CLIENT --enable_filesystem_cache_on_write_operations=0 -n --query "INSERT INTO test_02313 SELECT * FROM generateRandom('id Int32, val String') LIMIT 100000" diff --git a/tests/queries/0_stateless/02340_parts_refcnt_mergetree.sh b/tests/queries/0_stateless/02340_parts_refcnt_mergetree.sh index f252b9304cd..bd018018789 100755 --- a/tests/queries/0_stateless/02340_parts_refcnt_mergetree.sh +++ b/tests/queries/0_stateless/02340_parts_refcnt_mergetree.sh @@ -9,7 +9,7 @@ function check_refcnt_for_table() { local table=$1 && shift - $CLICKHOUSE_CLIENT -m -q " + $CLICKHOUSE_CLIENT -nm -q " system stop merges $table; -- cleanup thread may hold the parts lock system stop cleanup $table; @@ -66,14 +66,14 @@ function check_refcnt_for_table() # NOTE: index_granularity=1 to cancel ASAP -$CLICKHOUSE_CLIENT -mq " +$CLICKHOUSE_CLIENT -nmq " drop table if exists data_02340; create table data_02340 (key Int, part Int) engine=MergeTree() partition by part order by key settings index_granularity=1; " || exit 1 check_refcnt_for_table data_02340 $CLICKHOUSE_CLIENT -q "drop table data_02340 sync" -$CLICKHOUSE_CLIENT -mq " +$CLICKHOUSE_CLIENT -nmq " drop table if exists data_02340_rep sync; create table data_02340_rep (key Int, part Int) engine=ReplicatedMergeTree('/clickhouse/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX', '1') 
partition by part order by key settings index_granularity=1; " || exit 1 diff --git a/tests/queries/0_stateless/02344_describe_cache.sh b/tests/queries/0_stateless/02344_describe_cache.sh index 4cc4c415f3f..c5373b4d7e3 100755 --- a/tests/queries/0_stateless/02344_describe_cache.sh +++ b/tests/queries/0_stateless/02344_describe_cache.sh @@ -7,14 +7,14 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) disk_name="02344_describe_cache_test" -$CLICKHOUSE_CLIENT -m --query """ +$CLICKHOUSE_CLIENT -nm --query """ DROP TABLE IF EXISTS test; CREATE TABLE test (a Int32, b String) ENGINE = MergeTree() ORDER BY tuple() SETTINGS disk = disk(name = '$disk_name', type = cache, max_size = '100Ki', path = '$disk_name', disk = 's3_disk', load_metadata_asynchronously = 0); """ -$CLICKHOUSE_CLIENT -m --query """ +$CLICKHOUSE_CLIENT -nm --query """ SELECT count() FROM system.disks WHERE name = '$disk_name' """ diff --git a/tests/queries/0_stateless/02352_rwlock.sh b/tests/queries/0_stateless/02352_rwlock.sh index 7f02b3ee935..b4a77e0b08a 100755 --- a/tests/queries/0_stateless/02352_rwlock.sh +++ b/tests/queries/0_stateless/02352_rwlock.sh @@ -24,7 +24,7 @@ function wait_query_by_id_started() # wait for query to be started while [ "$($CLICKHOUSE_CLIENT "$@" -q "select count() from system.processes where query_id = '$query_id'")" -ne 1 ]; do if [ "$( - $CLICKHOUSE_CLIENT --max_bytes_before_external_group_by 0 -m -q " + $CLICKHOUSE_CLIENT --max_bytes_before_external_group_by 0 -nm -q " system flush logs; select count() from system.query_log @@ -52,7 +52,7 @@ $CLICKHOUSE_CLIENT -q "CREATE DATABASE ${CLICKHOUSE_DATABASE}_ordinary Engine=Or # debug build on CI, so if this will happen, then DROP query will be # finished instantly, and to avoid flakiness we will retry in this case while :; do - $CLICKHOUSE_CLIENT -m -q " + $CLICKHOUSE_CLIENT -nm -q " DROP TABLE IF EXISTS ${CLICKHOUSE_DATABASE}_ordinary.data_02352; CREATE TABLE ${CLICKHOUSE_DATABASE}_ordinary.data_02352 (key Int) Engine=Null(); " diff --git a/tests/queries/0_stateless/02380_insert_mv_race.sh b/tests/queries/0_stateless/02380_insert_mv_race.sh index a5f05365180..725c7eacce6 100755 --- a/tests/queries/0_stateless/02380_insert_mv_race.sh +++ b/tests/queries/0_stateless/02380_insert_mv_race.sh @@ -9,13 +9,13 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . 
"$CUR_DIR"/../shell_config.sh -$CLICKHOUSE_CLIENT -m -q "ATTACH TABLE mv" |& { +$CLICKHOUSE_CLIENT -nm -q "ATTACH TABLE mv" |& { # CANNOT_GET_CREATE_TABLE_QUERY -- ATTACH TABLE IF EXISTS # TABLE_ALREADY_EXISTS -- ATTACH TABLE IF NOT EXISTS grep -F -m1 Exception | grep -v -e CANNOT_GET_CREATE_TABLE_QUERY -e TABLE_ALREADY_EXISTS } -$CLICKHOUSE_CLIENT -m -q " +$CLICKHOUSE_CLIENT -nm -q " DROP TABLE IF EXISTS null; CREATE TABLE null (key Int) ENGINE = Null; DROP TABLE IF EXISTS mv; diff --git a/tests/queries/0_stateless/02417_opentelemetry_insert_on_distributed_table.sh b/tests/queries/0_stateless/02417_opentelemetry_insert_on_distributed_table.sh index 9e8b854375b..c692c37f27f 100755 --- a/tests/queries/0_stateless/02417_opentelemetry_insert_on_distributed_table.sh +++ b/tests/queries/0_stateless/02417_opentelemetry_insert_on_distributed_table.sh @@ -27,7 +27,7 @@ function insert() function check_span() { -${CLICKHOUSE_CLIENT} -q " +${CLICKHOUSE_CLIENT} -nq " SYSTEM FLUSH LOGS; SELECT operation_name, @@ -50,7 +50,7 @@ ${CLICKHOUSE_CLIENT} -q " # $2 - value of distributed_foreground_insert function check_span_kind() { -${CLICKHOUSE_CLIENT} -q " +${CLICKHOUSE_CLIENT} -nq " SYSTEM FLUSH LOGS; SELECT count() @@ -65,7 +65,7 @@ ${CLICKHOUSE_CLIENT} -q " # # Prepare tables for tests # -${CLICKHOUSE_CLIENT} -q " +${CLICKHOUSE_CLIENT} -nq " DROP TABLE IF EXISTS ${CLICKHOUSE_DATABASE}.dist_opentelemetry; DROP TABLE IF EXISTS ${CLICKHOUSE_DATABASE}.local_opentelemetry; @@ -122,7 +122,7 @@ check_span_kind $trace_id 'CLIENT' # # Cleanup # -${CLICKHOUSE_CLIENT} -q " +${CLICKHOUSE_CLIENT} -nq " DROP TABLE ${CLICKHOUSE_DATABASE}.dist_opentelemetry; DROP TABLE ${CLICKHOUSE_DATABASE}.local_opentelemetry; " diff --git a/tests/queries/0_stateless/02419_keeper_map_primary_key.sh b/tests/queries/0_stateless/02419_keeper_map_primary_key.sh index 6a23f666f36..c43c5bb6408 100755 --- a/tests/queries/0_stateless/02419_keeper_map_primary_key.sh +++ b/tests/queries/0_stateless/02419_keeper_map_primary_key.sh @@ -9,7 +9,7 @@ $CLICKHOUSE_CLIENT -q "DROP TABLE IF EXISTS 02419_test SYNC;" test_primary_key() { - $CLICKHOUSE_CLIENT -m -q " + $CLICKHOUSE_CLIENT -nm -q " CREATE TABLE 02419_test (key UInt64, value Float64) Engine=KeeperMap('/' || currentDatabase() || '/test2418', 3) PRIMARY KEY($1); INSERT INTO 02419_test VALUES (1, 1.1), (2, 2.2); SELECT value FROM 02419_test WHERE key = 1; diff --git a/tests/queries/0_stateless/02443_detach_attach_partition.sh b/tests/queries/0_stateless/02443_detach_attach_partition.sh index 4a5377a952a..6a47b7d8d61 100755 --- a/tests/queries/0_stateless/02443_detach_attach_partition.sh +++ b/tests/queries/0_stateless/02443_detach_attach_partition.sh @@ -8,7 +8,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) . 
"$CURDIR"/replication.lib -$CLICKHOUSE_CLIENT -q " +$CLICKHOUSE_CLIENT -n -q " DROP TABLE IF EXISTS alter_table0; DROP TABLE IF EXISTS alter_table1; diff --git a/tests/queries/0_stateless/02473_optimize_old_parts.sh b/tests/queries/0_stateless/02473_optimize_old_parts.sh index 3a4e6145f12..b563bc31b39 100755 --- a/tests/queries/0_stateless/02473_optimize_old_parts.sh +++ b/tests/queries/0_stateless/02473_optimize_old_parts.sh @@ -21,7 +21,7 @@ wait_for_number_of_parts() { echo "$res" } -$CLICKHOUSE_CLIENT -mq " +$CLICKHOUSE_CLIENT -nmq " DROP TABLE IF EXISTS test_without_merge; DROP TABLE IF EXISTS test_with_merge; @@ -34,7 +34,7 @@ INSERT INTO test_without_merge SELECT 3;" wait_for_number_of_parts 'test_without_merge' 1 10 -$CLICKHOUSE_CLIENT -mq " +$CLICKHOUSE_CLIENT -nmq " DROP TABLE test_without_merge; SELECT 'With merge any part range'; @@ -47,7 +47,7 @@ INSERT INTO test_with_merge SELECT 3;" wait_for_number_of_parts 'test_with_merge' 1 100 -$CLICKHOUSE_CLIENT -mq " +$CLICKHOUSE_CLIENT -nmq " DROP TABLE test_with_merge; SELECT 'With merge partition only'; @@ -60,7 +60,7 @@ INSERT INTO test_with_merge SELECT 3;" wait_for_number_of_parts 'test_with_merge' 1 100 -$CLICKHOUSE_CLIENT -mq " +$CLICKHOUSE_CLIENT -nmq " SELECT sleepEachRow(1) FROM numbers(9) SETTINGS function_sleep_max_microseconds_per_block = 10000000 FORMAT Null; -- Sleep for 9 seconds and verify that we keep the old part because it's the only one SELECT (now() - modification_time) > 5 FROM system.parts WHERE database = currentDatabase() AND table='test_with_merge' AND active; diff --git a/tests/queries/0_stateless/02477_s3_request_throttler.sh b/tests/queries/0_stateless/02477_s3_request_throttler.sh index 5e6185e7304..c74cb598d42 100755 --- a/tests/queries/0_stateless/02477_s3_request_throttler.sh +++ b/tests/queries/0_stateless/02477_s3_request_throttler.sh @@ -6,7 +6,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh -$CLICKHOUSE_CLIENT -q " +$CLICKHOUSE_CLIENT -nq " -- Limit S3 PUT request per second rate SET s3_max_put_rps = 2; SET s3_max_put_burst = 1; diff --git a/tests/queries/0_stateless/02494_zero_copy_projection_cancel_fetch.sh b/tests/queries/0_stateless/02494_zero_copy_projection_cancel_fetch.sh index 122684abe9c..b72c3eb56c7 100755 --- a/tests/queries/0_stateless/02494_zero_copy_projection_cancel_fetch.sh +++ b/tests/queries/0_stateless/02494_zero_copy_projection_cancel_fetch.sh @@ -5,7 +5,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . 
"$CUR_DIR"/../shell_config.sh -$CLICKHOUSE_CLIENT --query " +$CLICKHOUSE_CLIENT -n --query " DROP TABLE IF EXISTS wikistat1 SYNC; DROP TABLE IF EXISTS wikistat2 SYNC; " @@ -60,7 +60,7 @@ wait $CLICKHOUSE_CLIENT --query "SELECT count() FROM wikistat1 WHERE NOT ignore(*)" $CLICKHOUSE_CLIENT --query "SELECT count() FROM wikistat2 WHERE NOT ignore(*)" -$CLICKHOUSE_CLIENT --query " +$CLICKHOUSE_CLIENT -n --query " DROP TABLE IF EXISTS wikistat1 SYNC; DROP TABLE IF EXISTS wikistat2 SYNC; " diff --git a/tests/queries/0_stateless/02504_regexp_dictionary_ua_parser.sh b/tests/queries/0_stateless/02504_regexp_dictionary_ua_parser.sh index 77c8049bd58..e389cf410e8 100755 --- a/tests/queries/0_stateless/02504_regexp_dictionary_ua_parser.sh +++ b/tests/queries/0_stateless/02504_regexp_dictionary_ua_parser.sh @@ -11,7 +11,7 @@ cp $CURDIR/data_ua_parser/os.yaml ${USER_FILES_PATH}/${CLICKHOUSE_DATABASE}/ cp $CURDIR/data_ua_parser/browser.yaml ${USER_FILES_PATH}/${CLICKHOUSE_DATABASE}/ cp $CURDIR/data_ua_parser/device.yaml ${USER_FILES_PATH}/${CLICKHOUSE_DATABASE}/ -$CLICKHOUSE_CLIENT --query=" +$CLICKHOUSE_CLIENT -n --query=" drop dictionary if exists regexp_os; drop dictionary if exists regexp_browser; drop dictionary if exists regexp_device; @@ -61,10 +61,10 @@ create table user_agents Engine = Log(); " -$CLICKHOUSE_CLIENT --query=" +$CLICKHOUSE_CLIENT -n --query=" insert into user_agents select ua from input('ua String') FORMAT LineAsString" < $CURDIR/data_ua_parser/useragents.txt -$CLICKHOUSE_CLIENT --query=" +$CLICKHOUSE_CLIENT -n --query=" select ua, device, concat(tupleElement(browser, 1), ' ', tupleElement(browser, 2), '.', tupleElement(browser, 3)) as browser , concat(tupleElement(os, 1), ' ', tupleElement(os, 2), '.', tupleElement(os, 3), '.', tupleElement(os, 4)) as os @@ -74,7 +74,7 @@ from ( dictGet('regexp_device', 'device_replacement', ua) device from user_agents) order by ua; " -$CLICKHOUSE_CLIENT --query=" +$CLICKHOUSE_CLIENT -n --query=" drop dictionary if exists regexp_os; drop dictionary if exists regexp_browser; drop dictionary if exists regexp_device; diff --git a/tests/queries/0_stateless/02504_regexp_dictionary_yaml_source.sh b/tests/queries/0_stateless/02504_regexp_dictionary_yaml_source.sh index dec03bcaaa0..68a87a14320 100755 --- a/tests/queries/0_stateless/02504_regexp_dictionary_yaml_source.sh +++ b/tests/queries/0_stateless/02504_regexp_dictionary_yaml_source.sh @@ -27,7 +27,7 @@ cat > "$yaml" < "$yaml" < "$yaml" < "$yaml" < "$yaml" < "$yaml" < "$yaml" < "$yaml" < /dev/null & diff --git a/tests/queries/0_stateless/02590_interserver_mode_client_info_initial_query_start_time.sh b/tests/queries/0_stateless/02590_interserver_mode_client_info_initial_query_start_time.sh index 9f743581e1a..3b0d2309784 100755 --- a/tests/queries/0_stateless/02590_interserver_mode_client_info_initial_query_start_time.sh +++ b/tests/queries/0_stateless/02590_interserver_mode_client_info_initial_query_start_time.sh @@ -13,7 +13,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) function get_query_id() { random_str 10; } -$CLICKHOUSE_CLIENT -m -q " +$CLICKHOUSE_CLIENT -nm -q " drop table if exists buf; drop table if exists dist; drop table if exists data; @@ -31,7 +31,7 @@ query_id="$(get_query_id)" # test, since we care about the difference between NOW() and there should # not be any significant difference. 
$CLICKHOUSE_CLIENT --prefer_localhost_replica=0 --query_id "$query_id" -q "select * from dist" -$CLICKHOUSE_CLIENT -m --param_query_id "$query_id" -q " +$CLICKHOUSE_CLIENT -nm --param_query_id "$query_id" -q " system flush logs; select count(), count(distinct initial_query_start_time_microseconds) from system.query_log where type = 'QueryFinish' and initial_query_id = {query_id:String}; " @@ -42,25 +42,25 @@ query_id="$(get_query_id)" # this query (and all subsequent) should reuse the previous connection (at least most of the time) $CLICKHOUSE_CLIENT --prefer_localhost_replica=0 --query_id "$query_id" -q "select * from dist" -$CLICKHOUSE_CLIENT -m --param_query_id "$query_id" -q " +$CLICKHOUSE_CLIENT -nm --param_query_id "$query_id" -q " system flush logs; select count(), count(distinct initial_query_start_time_microseconds) from system.query_log where type = 'QueryFinish' and initial_query_id = {query_id:String}; " echo "INSERT" query_id="$(get_query_id)" -$CLICKHOUSE_CLIENT --prefer_localhost_replica=0 --query_id "$query_id" -m -q " +$CLICKHOUSE_CLIENT --prefer_localhost_replica=0 --query_id "$query_id" -nm -q " insert into dist_dist values (1),(2); select * from data; " sleep 1 -$CLICKHOUSE_CLIENT -m --param_query_id "$query_id" -q "system flush distributed dist_dist" +$CLICKHOUSE_CLIENT -nm --param_query_id "$query_id" -q "system flush distributed dist_dist" sleep 1 -$CLICKHOUSE_CLIENT -m --param_query_id "$query_id" -q "system flush distributed dist" +$CLICKHOUSE_CLIENT -nm --param_query_id "$query_id" -q "system flush distributed dist" echo "CHECK" -$CLICKHOUSE_CLIENT -m --param_query_id "$query_id" -q " +$CLICKHOUSE_CLIENT -nm --param_query_id "$query_id" -q " select * from data order by key; system flush logs; select count(), count(distinct initial_query_start_time_microseconds) from system.query_log where type = 'QueryFinish' and initial_query_id = {query_id:String}; diff --git a/tests/queries/0_stateless/02675_profile_events_from_query_log_and_client.sh b/tests/queries/0_stateless/02675_profile_events_from_query_log_and_client.sh index 5015521ca4d..ff534a6a2e6 100755 --- a/tests/queries/0_stateless/02675_profile_events_from_query_log_and_client.sh +++ b/tests/queries/0_stateless/02675_profile_events_from_query_log_and_client.sh @@ -7,7 +7,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) . 
"$CURDIR"/../shell_config.sh echo "INSERT TO S3" -$CLICKHOUSE_CLIENT --print-profile-events --profile-events-delay-ms=-1 -q " +$CLICKHOUSE_CLIENT --print-profile-events --profile-events-delay-ms=-1 -nq " INSERT INTO TABLE FUNCTION s3('http://localhost:11111/test/profile_events.csv', 'test', 'testtest', 'CSV', 'number UInt64') SELECT number FROM numbers(1000000) SETTINGS s3_max_single_part_upload_size = 10, s3_truncate_on_insert = 1; " 2>&1 | $CLICKHOUSE_LOCAL -q " WITH '(\\w+): (\\d+)' AS pattern, @@ -30,7 +30,7 @@ SELECT * FROM ( " echo "CHECK WITH query_log" -$CLICKHOUSE_CLIENT -q " +$CLICKHOUSE_CLIENT -nq " SYSTEM FLUSH LOGS; SELECT type, 'S3CreateMultipartUpload', ProfileEvents['S3CreateMultipartUpload'], @@ -45,7 +45,7 @@ ORDER BY query_start_time DESC; " echo "CREATE" -$CLICKHOUSE_CLIENT -q " +$CLICKHOUSE_CLIENT -nq " DROP TABLE IF EXISTS times; CREATE TABLE times (t DateTime) ENGINE MergeTree ORDER BY t SETTINGS @@ -56,29 +56,29 @@ CREATE TABLE times (t DateTime) ENGINE MergeTree ORDER BY t " echo "INSERT" -$CLICKHOUSE_CLIENT --print-profile-events --profile-events-delay-ms=-1 -q " +$CLICKHOUSE_CLIENT --print-profile-events --profile-events-delay-ms=-1 -nq " INSERT INTO times SELECT now() + INTERVAL 1 day SETTINGS optimize_on_insert = 0; " 2>&1 | grep -o -e ' \[ .* \] FileOpen: .* ' echo "READ" -$CLICKHOUSE_CLIENT --print-profile-events --profile-events-delay-ms=-1 -q " +$CLICKHOUSE_CLIENT --print-profile-events --profile-events-delay-ms=-1 -nq " SELECT '1', min(t) FROM times SETTINGS optimize_use_implicit_projections = 1; " 2>&1 | grep -o -e ' \[ .* \] FileOpen: .* ' echo "INSERT and READ INSERT" -$CLICKHOUSE_CLIENT --print-profile-events --profile-events-delay-ms=-1 -q " +$CLICKHOUSE_CLIENT --print-profile-events --profile-events-delay-ms=-1 -nq " INSERT INTO times SELECT now() + INTERVAL 2 day SETTINGS optimize_on_insert = 0; SELECT '2', min(t) FROM times SETTINGS optimize_use_implicit_projections = 1; INSERT INTO times SELECT now() + INTERVAL 3 day SETTINGS optimize_on_insert = 0; " 2>&1 | grep -o -e ' \[ .* \] FileOpen: .* ' echo "DROP" -$CLICKHOUSE_CLIENT -q " +$CLICKHOUSE_CLIENT -nq " DROP TABLE times; " echo "CHECK with query_log" -$CLICKHOUSE_CLIENT -q " +$CLICKHOUSE_CLIENT -nq " SYSTEM FLUSH LOGS; SELECT type, query, diff --git a/tests/queries/0_stateless/02676_optimize_old_parts_replicated.sh b/tests/queries/0_stateless/02676_optimize_old_parts_replicated.sh index 84be5959344..c1f28f9f079 100755 --- a/tests/queries/0_stateless/02676_optimize_old_parts_replicated.sh +++ b/tests/queries/0_stateless/02676_optimize_old_parts_replicated.sh @@ -21,7 +21,7 @@ wait_for_number_of_parts() { echo "$res" } -$CLICKHOUSE_CLIENT -mq " +$CLICKHOUSE_CLIENT -nmq " DROP TABLE IF EXISTS test_without_merge; DROP TABLE IF EXISTS test_replicated; @@ -34,7 +34,7 @@ INSERT INTO test_without_merge SELECT 3;" wait_for_number_of_parts 'test_without_merge' 1 10 -$CLICKHOUSE_CLIENT -mq " +$CLICKHOUSE_CLIENT -nmq " DROP TABLE test_without_merge; SELECT 'With merge replicated any part range'; @@ -47,7 +47,7 @@ INSERT INTO test_replicated SELECT 3;" wait_for_number_of_parts 'test_replicated' 1 100 -$CLICKHOUSE_CLIENT -mq " +$CLICKHOUSE_CLIENT -nmq " DROP TABLE test_replicated; SELECT 'With merge replicated partition only'; @@ -60,7 +60,7 @@ INSERT INTO test_replicated SELECT 3;" wait_for_number_of_parts 'test_replicated' 1 100 -$CLICKHOUSE_CLIENT -mq " +$CLICKHOUSE_CLIENT -nmq " SELECT sleepEachRow(1) FROM numbers(9) SETTINGS function_sleep_max_microseconds_per_block = 10000000 FORMAT Null; -- Sleep for 
9 seconds and verify that we keep the old part because it's the only one SELECT (now() - modification_time) > 5 FROM system.parts WHERE database = currentDatabase() AND table='test_replicated' AND active; diff --git a/tests/queries/0_stateless/02686_postgres_protocol_decimal_256.sh b/tests/queries/0_stateless/02686_postgres_protocol_decimal_256.sh index a540a0981b3..2a94f940327 100755 --- a/tests/queries/0_stateless/02686_postgres_protocol_decimal_256.sh +++ b/tests/queries/0_stateless/02686_postgres_protocol_decimal_256.sh @@ -9,6 +9,6 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) echo " DROP USER IF EXISTS postgresql_user; CREATE USER postgresql_user HOST IP '127.0.0.1' IDENTIFIED WITH no_password; -" | $CLICKHOUSE_CLIENT +" | $CLICKHOUSE_CLIENT -n psql --host localhost --port ${CLICKHOUSE_PORT_POSTGRESQL} ${CLICKHOUSE_DATABASE} --user postgresql_user -c "SELECT 1.23::Decimal256(70) AS test;" diff --git a/tests/queries/0_stateless/02700_s3_part_INT_MAX.sh b/tests/queries/0_stateless/02700_s3_part_INT_MAX.sh index dcd83f9fec3..cfb38c60615 100755 --- a/tests/queries/0_stateless/02700_s3_part_INT_MAX.sh +++ b/tests/queries/0_stateless/02700_s3_part_INT_MAX.sh @@ -12,7 +12,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # too slow with this. # # Unfortunately, the test has to buffer it in memory. -$CLICKHOUSE_CLIENT --max_memory_usage 16G -m -q " +$CLICKHOUSE_CLIENT --max_memory_usage 16G -nm -q " INSERT INTO FUNCTION s3('http://localhost:11111/test/$CLICKHOUSE_DATABASE/test_INT_MAX.tsv', '', '', 'TSV') SELECT repeat('a', 1024) FROM numbers((pow(2, 30) * 2) / 1024) SETTINGS s3_max_single_part_upload_size = '5Gi'; diff --git a/tests/queries/0_stateless/02725_start_stop_fetches.sh b/tests/queries/0_stateless/02725_start_stop_fetches.sh index 604d0774b83..c9922455d94 100755 --- a/tests/queries/0_stateless/02725_start_stop_fetches.sh +++ b/tests/queries/0_stateless/02725_start_stop_fetches.sh @@ -10,7 +10,7 @@ set -e NUM_REPLICAS=5 for i in $(seq 1 $NUM_REPLICAS); do - $CLICKHOUSE_CLIENT -q " + $CLICKHOUSE_CLIENT -n -q " DROP TABLE IF EXISTS r$i SYNC; CREATE TABLE r$i (x UInt64) ENGINE = ReplicatedMergeTree('/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/r', 'r$i') ORDER BY x SETTINGS replicated_deduplication_window = 1, allow_remote_fs_zero_copy_replication = 1; " diff --git a/tests/queries/0_stateless/02731_zero_objects_in_metadata.sh b/tests/queries/0_stateless/02731_zero_objects_in_metadata.sh index 6e40495157b..78659b70129 100755 --- a/tests/queries/0_stateless/02731_zero_objects_in_metadata.sh +++ b/tests/queries/0_stateless/02731_zero_objects_in_metadata.sh @@ -7,7 +7,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) for DISK in s3_disk s3_cache do - ${CLICKHOUSE_CLIENT} --query " + ${CLICKHOUSE_CLIENT} -n --query " DROP TABLE IF EXISTS test; CREATE TABLE test (id Int32, empty Array(Int32)) ENGINE=MergeTree ORDER BY id @@ -17,13 +17,13 @@ do SELECT * FROM test; " - ${CLICKHOUSE_CLIENT} --query " + ${CLICKHOUSE_CLIENT} -n --query " BACKUP TABLE test TO Disk('backups', 'test_s3_backup'); DROP TABLE test; RESTORE TABLE test FROM Disk('backups', 'test_s3_backup'); " &>/dev/null - ${CLICKHOUSE_CLIENT} --query " + ${CLICKHOUSE_CLIENT} -n --query " SELECT * FROM test; SELECT empty FROM test; " diff --git a/tests/queries/0_stateless/02766_prql.sh b/tests/queries/0_stateless/02766_prql.sh index d1e4dca070c..85b1167027c 100755 --- a/tests/queries/0_stateless/02766_prql.sh +++ b/tests/queries/0_stateless/02766_prql.sh @@ -5,7 +5,7 @@ CUR_DIR=$(cd "$(dirname 
"${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . "$CUR_DIR"/../shell_config.sh -$CLICKHOUSE_CLIENT -q " +$CLICKHOUSE_CLIENT -n -q " CREATE TEMPORARY TABLE IF NOT EXISTS aboba ( user_id UInt32, diff --git a/tests/queries/0_stateless/02770_async_buffer_ignore.sh b/tests/queries/0_stateless/02770_async_buffer_ignore.sh index af9f18b232f..37f002767d6 100755 --- a/tests/queries/0_stateless/02770_async_buffer_ignore.sh +++ b/tests/queries/0_stateless/02770_async_buffer_ignore.sh @@ -5,7 +5,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . "$CUR_DIR"/../shell_config.sh -${CLICKHOUSE_CLIENT} -m --query " +${CLICKHOUSE_CLIENT} -nm --query " DROP TABLE IF EXISTS test_s3; CREATE TABLE test_s3 (a UInt64, b UInt64) @@ -17,7 +17,7 @@ INSERT INTO test_s3 SELECT number, number FROM numbers(1000000); query="SELECT sum(b) FROM test_s3 WHERE a >= 100000 AND a <= 102000" query_id=$(${CLICKHOUSE_CLIENT} --query "select queryID() from ($query) limit 1" 2>&1) ${CLICKHOUSE_CLIENT} --query "SYSTEM FLUSH LOGS" -${CLICKHOUSE_CLIENT} -m --query " +${CLICKHOUSE_CLIENT} -nm --query " SELECT ProfileEvents['S3ReadRequestsCount'], ProfileEvents['ReadBufferFromS3Bytes'], diff --git a/tests/queries/0_stateless/02789_filesystem_cache_alignment.sh b/tests/queries/0_stateless/02789_filesystem_cache_alignment.sh index 53d2832c589..c69c635f6ed 100755 --- a/tests/queries/0_stateless/02789_filesystem_cache_alignment.sh +++ b/tests/queries/0_stateless/02789_filesystem_cache_alignment.sh @@ -5,7 +5,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh -$CLICKHOUSE_CLIENT -m -q " +$CLICKHOUSE_CLIENT -nm -q " DROP TABLE IF EXISTS test; CREATE TABLE test (a Int32, b String) ENGINE = MergeTree() @@ -22,7 +22,7 @@ INSERT INTO test SELECT number, randomString(100) FROM numbers(1000000); " QUERY_ID=$RANDOM -$CLICKHOUSE_CLIENT --query_id "$QUERY_ID" -m -q " +$CLICKHOUSE_CLIENT --query_id "$QUERY_ID" -nm -q " SET enable_filesystem_cache_log = 1; SYSTEM DROP FILESYSTEM CACHE; SELECT * FROM test WHERE NOT ignore() LIMIT 1 FORMAT Null; @@ -49,14 +49,14 @@ WHERE query_id = '$QUERY_ID' " # File segments cannot be less that 20Mi, # except for last file segment in a file or if file size is less. 
-$CLICKHOUSE_CLIENT -m -q " +$CLICKHOUSE_CLIENT -nm -q " SELECT count() FROM ($query) WHERE file_segment_size < file_size AND end_offset + 1 != file_size AND file_segment_size < 20 * 1024 * 1024; " -all=$($CLICKHOUSE_CLIENT -m -q " +all=$($CLICKHOUSE_CLIENT -nm -q " SELECT count() FROM ($query) WHERE file_segment_size < file_size AND end_offset + 1 != file_size; ") @@ -68,7 +68,7 @@ else echo "FAIL" fi -count=$($CLICKHOUSE_CLIENT -m -q " +count=$($CLICKHOUSE_CLIENT -nm -q " SELECT count() FROM ($query) WHERE file_segment_size < file_size AND end_offset + 1 != file_size @@ -87,21 +87,21 @@ FROM (SELECT * FROM ($query)) AS cache_log INNER JOIN system.filesystem_cache AS cache ON cache_log.cache_path = cache.cache_path " -$CLICKHOUSE_CLIENT -m -q " +$CLICKHOUSE_CLIENT -nm -q " SELECT count() FROM ($query2) WHERE file_segment_range_begin - file_segment_range_end + 1 < file_size AND file_segment_range_end + 1 != file_size AND downloaded_size < 20 * 1024 * 1024; " -$CLICKHOUSE_CLIENT -m -q " +$CLICKHOUSE_CLIENT -nm -q " SELECT count() FROM ($query2) WHERE file_segment_range_begin - file_segment_range_end + 1 < file_size AND file_segment_range_end + 1 != file_size AND formatReadableSize(downloaded_size) not in ('20.00 MiB', '40.00 MiB'); " -all=$($CLICKHOUSE_CLIENT -m -q " +all=$($CLICKHOUSE_CLIENT -nm -q " SELECT count() FROM ($query2) WHERE file_segment_size < file_size AND file_segment_range_end + 1 != file_size; ") @@ -112,7 +112,7 @@ else echo "FAIL" fi -count2=$($CLICKHOUSE_CLIENT -m -q " +count2=$($CLICKHOUSE_CLIENT -nm -q " SELECT count() FROM ($query2) WHERE file_segment_range_begin - file_segment_range_end + 1 < file_size AND file_segment_range_end + 1 != file_size diff --git a/tests/queries/0_stateless/02789_reading_from_s3_with_connection_pool.sh b/tests/queries/0_stateless/02789_reading_from_s3_with_connection_pool.sh index 239485ab8dd..5a37d51233d 100755 --- a/tests/queries/0_stateless/02789_reading_from_s3_with_connection_pool.sh +++ b/tests/queries/0_stateless/02789_reading_from_s3_with_connection_pool.sh @@ -5,7 +5,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . 
"$CUR_DIR"/../shell_config.sh -${CLICKHOUSE_CLIENT} -m --query " +${CLICKHOUSE_CLIENT} -nm --query " DROP TABLE IF EXISTS test_s3; CREATE TABLE test_s3 (a UInt64, b UInt64) @@ -25,7 +25,7 @@ do query_id=$(${CLICKHOUSE_CLIENT} --query "select queryID() from ($query) limit 1" 2>&1) ${CLICKHOUSE_CLIENT} --query "SYSTEM FLUSH LOGS" - RES=$(${CLICKHOUSE_CLIENT} -m --query " + RES=$(${CLICKHOUSE_CLIENT} -nm --query " SELECT ProfileEvents['DiskConnectionsPreserved'] > 0 FROM system.query_log WHERE type = 'QueryFinish' @@ -41,7 +41,7 @@ done while true do - query_id=$(${CLICKHOUSE_CLIENT} -q " + query_id=$(${CLICKHOUSE_CLIENT} -nq " create table mut (n int, m int, k int) engine=ReplicatedMergeTree('/test/02441/{database}/mut', '1') order by n; set insert_keeper_fault_injection_probability=0; insert into mut values (1, 2, 3), (10, 20, 30); @@ -60,7 +60,7 @@ do ) limit 1 settings max_threads=1; " 2>&1) ${CLICKHOUSE_CLIENT} --query "SYSTEM FLUSH LOGS" - RES=$(${CLICKHOUSE_CLIENT} -m --query " + RES=$(${CLICKHOUSE_CLIENT} -nm --query " SELECT ProfileEvents['StorageConnectionsPreserved'] > 0 FROM system.query_log WHERE type = 'QueryFinish' diff --git a/tests/queries/0_stateless/02801_backup_native_copy.sh b/tests/queries/0_stateless/02801_backup_native_copy.sh index bc485f903bc..b8ee97a7c7d 100755 --- a/tests/queries/0_stateless/02801_backup_native_copy.sh +++ b/tests/queries/0_stateless/02801_backup_native_copy.sh @@ -8,7 +8,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) set -e -$CLICKHOUSE_CLIENT -m -q " +$CLICKHOUSE_CLIENT -nm -q " drop table if exists data; create table data (key Int) engine=MergeTree() order by tuple() settings disk='s3_disk'; insert into data select * from numbers(10); @@ -16,28 +16,28 @@ $CLICKHOUSE_CLIENT -m -q " query_id=$(random_str 10) $CLICKHOUSE_CLIENT --format Null --query_id $query_id -q "BACKUP TABLE data TO S3(s3_conn, 'backups/$CLICKHOUSE_DATABASE/data_native_copy') SETTINGS allow_s3_native_copy=true" -$CLICKHOUSE_CLIENT -m -q " +$CLICKHOUSE_CLIENT -nm -q " SYSTEM FLUSH LOGS; SELECT query, ProfileEvents['S3CopyObject']>0 FROM system.query_log WHERE type = 'QueryFinish' AND event_date >= yesterday() AND current_database = '$CLICKHOUSE_DATABASE' AND query_id = '$query_id' " query_id=$(random_str 10) $CLICKHOUSE_CLIENT --format Null --query_id $query_id -q "BACKUP TABLE data TO S3(s3_conn, 'backups/$CLICKHOUSE_DATABASE/data_no_native_copy') SETTINGS allow_s3_native_copy=false" -$CLICKHOUSE_CLIENT -m -q " +$CLICKHOUSE_CLIENT -nm -q " SYSTEM FLUSH LOGS; SELECT query, ProfileEvents['S3CopyObject']>0 FROM system.query_log WHERE type = 'QueryFinish' AND event_date >= yesterday() AND current_database = '$CLICKHOUSE_DATABASE' AND query_id = '$query_id' " query_id=$(random_str 10) $CLICKHOUSE_CLIENT --format Null --query_id $query_id -q "RESTORE TABLE data AS data_native_copy FROM S3(s3_conn, 'backups/$CLICKHOUSE_DATABASE/data_native_copy') SETTINGS allow_s3_native_copy=true" -$CLICKHOUSE_CLIENT -m -q " +$CLICKHOUSE_CLIENT -nm -q " SYSTEM FLUSH LOGS; SELECT query, ProfileEvents['S3CopyObject']>0 FROM system.query_log WHERE type = 'QueryFinish' AND event_date >= yesterday() AND current_database = '$CLICKHOUSE_DATABASE' AND query_id = '$query_id' " query_id=$(random_str 10) $CLICKHOUSE_CLIENT --format Null --query_id $query_id -q "RESTORE TABLE data AS data_no_native_copy FROM S3(s3_conn, 'backups/$CLICKHOUSE_DATABASE/data_no_native_copy') SETTINGS allow_s3_native_copy=false" -$CLICKHOUSE_CLIENT -m -q " +$CLICKHOUSE_CLIENT -nm -q " SYSTEM FLUSH LOGS; SELECT query, 
ProfileEvents['S3CopyObject']>0 FROM system.query_log WHERE type = 'QueryFinish' AND event_date >= yesterday() AND current_database = '$CLICKHOUSE_DATABASE' AND query_id = '$query_id' " diff --git a/tests/queries/0_stateless/02803_backup_tmp_files.sh b/tests/queries/0_stateless/02803_backup_tmp_files.sh index 2208d3d32ef..d86beae4923 100755 --- a/tests/queries/0_stateless/02803_backup_tmp_files.sh +++ b/tests/queries/0_stateless/02803_backup_tmp_files.sh @@ -8,7 +8,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) set -e -$CLICKHOUSE_CLIENT -m -q " +$CLICKHOUSE_CLIENT -nm -q " drop table if exists data; create table data (key Int) engine=MergeTree() order by tuple() settings disk='s3_disk'; insert into data select * from numbers(10); diff --git a/tests/queries/0_stateless/02808_custom_disk_with_user_defined_name.sh b/tests/queries/0_stateless/02808_custom_disk_with_user_defined_name.sh index 4d7ab4063be..63fa60bd548 100755 --- a/tests/queries/0_stateless/02808_custom_disk_with_user_defined_name.sh +++ b/tests/queries/0_stateless/02808_custom_disk_with_user_defined_name.sh @@ -8,7 +8,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) . "$CUR_DIR"/../shell_config.sh -$CLICKHOUSE_CLIENT -m --query """ +$CLICKHOUSE_CLIENT -nm --query """ DROP TABLE IF EXISTS test; CREATE TABLE test (a Int32, b String) ENGINE = MergeTree() ORDER BY tuple() @@ -17,17 +17,17 @@ SETTINGS disk = disk(name = 's3_disk', type = cache, max_size = '100Ki', path = disk_name="${CLICKHOUSE_TEST_UNIQUE_NAME}" -$CLICKHOUSE_CLIENT -m --query """ +$CLICKHOUSE_CLIENT -nm --query """ SELECT count() FROM system.disks WHERE name = '$disk_name' """ -$CLICKHOUSE_CLIENT -m --query """ +$CLICKHOUSE_CLIENT -nm --query """ DROP TABLE IF EXISTS test; CREATE TABLE test (a Int32, b String) ENGINE = MergeTree() ORDER BY tuple() SETTINGS disk = disk(name = '$disk_name', type = cache, max_size = '100Ki', path = ${CLICKHOUSE_TEST_UNIQUE_NAME}, disk = s3_disk); """ -$CLICKHOUSE_CLIENT -m --query """ +$CLICKHOUSE_CLIENT -nm --query """ SELECT count() FROM system.disks WHERE name = '$disk_name' """ diff --git a/tests/queries/0_stateless/02808_filesystem_cache_drop_query.sh b/tests/queries/0_stateless/02808_filesystem_cache_drop_query.sh index a7dfa035a22..8a4a2e906b0 100755 --- a/tests/queries/0_stateless/02808_filesystem_cache_drop_query.sh +++ b/tests/queries/0_stateless/02808_filesystem_cache_drop_query.sh @@ -9,7 +9,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) disk_name="${CLICKHOUSE_TEST_UNIQUE_NAME}" -$CLICKHOUSE_CLIENT -m --query """ +$CLICKHOUSE_CLIENT -nm --query """ DROP TABLE IF EXISTS test; CREATE TABLE test (a Int32, b String) ENGINE = MergeTree() ORDER BY tuple() @@ -22,29 +22,29 @@ query_id=$RANDOM $CLICKHOUSE_CLIENT --query_id "$query_id" --query "SELECT * FROM test FORMAT Null SETTINGS enable_filesystem_cache_log = 1" -$CLICKHOUSE_CLIENT -m --query """ +$CLICKHOUSE_CLIENT -nm --query """ SYSTEM DROP FILESYSTEM CACHE '$disk_name' KEY kek; """ 2>&1 | grep -q "Invalid cache key hex: kek" && echo "OK" || echo "FAIL" ${CLICKHOUSE_CLIENT} -q " system flush logs" -key=$($CLICKHOUSE_CLIENT -m --query """ +key=$($CLICKHOUSE_CLIENT -nm --query """ SELECT key FROM system.filesystem_cache_log WHERE query_id = '$query_id' ORDER BY size DESC LIMIT 1; """) -offset=$($CLICKHOUSE_CLIENT -m --query """ +offset=$($CLICKHOUSE_CLIENT -nm --query """ SELECT offset FROM system.filesystem_cache_log WHERE query_id = '$query_id' ORDER BY size DESC LIMIT 1; """) -$CLICKHOUSE_CLIENT -m --query """ +$CLICKHOUSE_CLIENT -nm 
--query """ SELECT count() FROM system.filesystem_cache WHERE key = '$key' AND file_segment_range_begin = $offset; """ -$CLICKHOUSE_CLIENT -m --query """ +$CLICKHOUSE_CLIENT -nm --query """ SYSTEM DROP FILESYSTEM CACHE '$disk_name' KEY $key OFFSET $offset; """ -$CLICKHOUSE_CLIENT -m --query """ +$CLICKHOUSE_CLIENT -nm --query """ SELECT count() FROM system.filesystem_cache WHERE key = '$key' AND file_segment_range_begin = $offset; """ @@ -54,18 +54,18 @@ $CLICKHOUSE_CLIENT --query_id "$query_id" --query "SELECT * FROM test FORMAT Nul ${CLICKHOUSE_CLIENT} -q " system flush logs" -key=$($CLICKHOUSE_CLIENT -m --query """ +key=$($CLICKHOUSE_CLIENT -nm --query """ SELECT key FROM system.filesystem_cache_log WHERE query_id = '$query_id' ORDER BY size DESC LIMIT 1; """) -$CLICKHOUSE_CLIENT -m --query """ +$CLICKHOUSE_CLIENT -nm --query """ SELECT count() FROM system.filesystem_cache WHERE key = '$key'; """ -$CLICKHOUSE_CLIENT -m --query """ +$CLICKHOUSE_CLIENT -nm --query """ SYSTEM DROP FILESYSTEM CACHE '$disk_name' KEY $key """ -$CLICKHOUSE_CLIENT -m --query """ +$CLICKHOUSE_CLIENT -nm --query """ SELECT count() FROM system.filesystem_cache WHERE key = '$key'; """ diff --git a/tests/queries/0_stateless/02843_backup_use_same_password_for_base_backup.sh b/tests/queries/0_stateless/02843_backup_use_same_password_for_base_backup.sh index ecd73bcc8a0..a2b1a953e24 100755 --- a/tests/queries/0_stateless/02843_backup_use_same_password_for_base_backup.sh +++ b/tests/queries/0_stateless/02843_backup_use_same_password_for_base_backup.sh @@ -5,7 +5,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . "$CUR_DIR"/../shell_config.sh -$CLICKHOUSE_CLIENT -m -q " +$CLICKHOUSE_CLIENT -nm -q " DROP TABLE IF EXISTS data; DROP TABLE IF EXISTS data_1; DROP TABLE IF EXISTS data_2; diff --git a/tests/queries/0_stateless/02843_backup_use_same_s3_credentials_for_base_backup.sh b/tests/queries/0_stateless/02843_backup_use_same_s3_credentials_for_base_backup.sh index e6a1d547945..16ac095312c 100755 --- a/tests/queries/0_stateless/02843_backup_use_same_s3_credentials_for_base_backup.sh +++ b/tests/queries/0_stateless/02843_backup_use_same_s3_credentials_for_base_backup.sh @@ -6,7 +6,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . "$CUR_DIR"/../shell_config.sh -$CLICKHOUSE_CLIENT -m -q " +$CLICKHOUSE_CLIENT -nm -q " drop table if exists data; create table data (key Int) engine=MergeTree() order by tuple(); insert into data select * from numbers(10); diff --git a/tests/queries/0_stateless/02844_max_backup_bandwidth_s3.sh b/tests/queries/0_stateless/02844_max_backup_bandwidth_s3.sh index 830e78e1e9f..4650415c202 100755 --- a/tests/queries/0_stateless/02844_max_backup_bandwidth_s3.sh +++ b/tests/queries/0_stateless/02844_max_backup_bandwidth_s3.sh @@ -6,7 +6,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . 
"$CUR_DIR"/../shell_config.sh -$CLICKHOUSE_CLIENT -m -q " +$CLICKHOUSE_CLIENT -nm -q " drop table if exists data; create table data (key UInt64 CODEC(NONE)) engine=MergeTree() order by tuple() settings min_bytes_for_wide_part=1e9, disk='s3_disk'; -- reading 1e6*8 bytes with 1M bandwith it should take (8-1)/1=7 seconds @@ -15,7 +15,7 @@ $CLICKHOUSE_CLIENT -m -q " query_id=$(random_str 10) $CLICKHOUSE_CLIENT --query_id "$query_id" -q "backup table data to S3(s3_conn, 'backups/$CLICKHOUSE_DATABASE/data/backup2') SETTINGS allow_s3_native_copy=1" --max_backup_bandwidth=1M > /dev/null -$CLICKHOUSE_CLIENT -m -q " +$CLICKHOUSE_CLIENT -nm -q " SYSTEM FLUSH LOGS; SELECT 'native_copy', @@ -26,7 +26,7 @@ $CLICKHOUSE_CLIENT -m -q " query_id=$(random_str 10) $CLICKHOUSE_CLIENT --query_id "$query_id" -q "backup table data to S3(s3_conn, 'backups/$CLICKHOUSE_DATABASE/data/backup3') SETTINGS allow_s3_native_copy=0" --max_backup_bandwidth=1M > /dev/null -$CLICKHOUSE_CLIENT -m -q " +$CLICKHOUSE_CLIENT -nm -q " SYSTEM FLUSH LOGS; SELECT 'no_native_copy', diff --git a/tests/queries/0_stateless/02867_storage_set_tsan.sh b/tests/queries/0_stateless/02867_storage_set_tsan.sh index f6b684c0a55..81ae5f0bda8 100755 --- a/tests/queries/0_stateless/02867_storage_set_tsan.sh +++ b/tests/queries/0_stateless/02867_storage_set_tsan.sh @@ -5,7 +5,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh -$CLICKHOUSE_CLIENT -m -q """ +$CLICKHOUSE_CLIENT -mn -q """ DROP TABLE IF EXISTS t1_02867; CREATE TABLE t1_02867 (x UInt64) ENGINE=Set(); """ @@ -39,4 +39,4 @@ repeat_truncate_insert & sleep 10 -$CLICKHOUSE_CLIENT -m -q "DROP TABLE IF EXISTS t1_02867;" +$CLICKHOUSE_CLIENT -mn -q "DROP TABLE IF EXISTS t1_02867;" diff --git a/tests/queries/0_stateless/02900_union_schema_inference_mode.sh b/tests/queries/0_stateless/02900_union_schema_inference_mode.sh index a091645d1e3..a0fdb5276e0 100755 --- a/tests/queries/0_stateless/02900_union_schema_inference_mode.sh +++ b/tests/queries/0_stateless/02900_union_schema_inference_mode.sh @@ -10,14 +10,14 @@ echo '{"a" : 1, "obj" : {"f1" : 1, "f2" : "2020-01-01"}}' > $CLICKHOUSE_TEST_UNI echo '{"b" : 2, "obj" : {"f3" : 2, "f2" : "Some string"}}' > $CLICKHOUSE_TEST_UNIQUE_NAME/data2.jsonl echo '{"c" : "hello"}' > $CLICKHOUSE_TEST_UNIQUE_NAME/data3.jsonl -$CLICKHOUSE_LOCAL -m -q " +$CLICKHOUSE_LOCAL -nm -q " set schema_inference_mode = 'union'; desc file('$CLICKHOUSE_TEST_UNIQUE_NAME/data{1,2,3}.jsonl'); select * from file('$CLICKHOUSE_TEST_UNIQUE_NAME/data{1,2,3}.jsonl') order by tuple(*) format JSONEachRow; select schema_inference_mode, splitByChar('/', source)[-1] as file, schema from system.schema_inference_cache order by file; " -$CLICKHOUSE_LOCAL -m -q " +$CLICKHOUSE_LOCAL -nm -q " set schema_inference_mode = 'union'; desc file('$CLICKHOUSE_TEST_UNIQUE_NAME/data3.jsonl'); desc file('$CLICKHOUSE_TEST_UNIQUE_NAME/data{1,2,3}.jsonl'); @@ -25,14 +25,14 @@ desc file('$CLICKHOUSE_TEST_UNIQUE_NAME/data{1,2,3}.jsonl'); cd $CLICKHOUSE_TEST_UNIQUE_NAME/ && tar -cf archive.tar data1.jsonl data2.jsonl data3.jsonl && cd .. 
-$CLICKHOUSE_LOCAL -m -q " +$CLICKHOUSE_LOCAL -nm -q " set schema_inference_mode = 'union'; desc file('$CLICKHOUSE_TEST_UNIQUE_NAME/archive.tar :: data{1,2,3}.jsonl'); select * from file('$CLICKHOUSE_TEST_UNIQUE_NAME/archive.tar :: data{1,2,3}.jsonl') order by tuple(*) format JSONEachRow; select schema_inference_mode, splitByChar('/', source)[-1] as file, schema from system.schema_inference_cache order by file; " -$CLICKHOUSE_LOCAL -m -q " +$CLICKHOUSE_LOCAL -nm -q " set schema_inference_mode = 'union'; desc file('$CLICKHOUSE_TEST_UNIQUE_NAME/archive.tar :: data3.jsonl'); desc file('$CLICKHOUSE_TEST_UNIQUE_NAME/archive.tar :: data{1,2,3}.jsonl'); @@ -41,7 +41,7 @@ desc file('$CLICKHOUSE_TEST_UNIQUE_NAME/archive.tar :: data{1,2,3}.jsonl'); echo 'Error' > $CLICKHOUSE_TEST_UNIQUE_NAME/data4.jsonl $CLICKHOUSE_LOCAL -q "desc file('$CLICKHOUSE_TEST_UNIQUE_NAME/data{1,2,3,4}.jsonl') settings schema_inference_mode='union'" 2>&1 | grep -c -F "CANNOT_EXTRACT_TABLE_STRUCTURE" -$CLICKHOUSE_LOCAL -m -q " +$CLICKHOUSE_LOCAL -nm -q " set schema_inference_mode = 'union'; desc file('$CLICKHOUSE_TEST_UNIQUE_NAME/data{2,3}.jsonl'); desc file('$CLICKHOUSE_TEST_UNIQUE_NAME/data{1,2,3,4}.jsonl'); diff --git a/tests/queries/0_stateless/02908_many_requests_to_system_replicas.sh b/tests/queries/0_stateless/02908_many_requests_to_system_replicas.sh index 91eeb22c19e..81ba59fc591 100755 --- a/tests/queries/0_stateless/02908_many_requests_to_system_replicas.sh +++ b/tests/queries/0_stateless/02908_many_requests_to_system_replicas.sh @@ -67,7 +67,7 @@ curl "$CLICKHOUSE_URL" --silent --fail --show-error --data "SELECT sum(is_leader wait; -$CLICKHOUSE_CLIENT -q " +$CLICKHOUSE_CLIENT -nq " SYSTEM FLUSH LOGS; -- Check that number of ZK request is less then a half of (total replicas * concurrency) diff --git a/tests/queries/0_stateless/02922_deduplication_with_zero_copy.sh b/tests/queries/0_stateless/02922_deduplication_with_zero_copy.sh index 2eccade5c81..d1cbc54d294 100755 --- a/tests/queries/0_stateless/02922_deduplication_with_zero_copy.sh +++ b/tests/queries/0_stateless/02922_deduplication_with_zero_copy.sh @@ -8,7 +8,7 @@ CURDIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" . "$CURDIR"/../shell_config.sh -$CLICKHOUSE_CLIENT -m -q " +$CLICKHOUSE_CLIENT -nm -q " drop table if exists r1; drop table if exists r2; @@ -64,7 +64,7 @@ function insert_duplicates() { wait - $CLICKHOUSE_CLIENT -m -q " + $CLICKHOUSE_CLIENT -nm -q " system sync replica r1; system sync replica r2; " @@ -84,7 +84,7 @@ function loop() do while ! 
insert_duplicates do - $CLICKHOUSE_CLIENT -m -q " + $CLICKHOUSE_CLIENT -nm -q " truncate table r1; truncate table r2; system sync replica r1; @@ -137,8 +137,8 @@ function list_keeper_nodes() { list_keeper_nodes "${table_shared_id}" -$CLICKHOUSE_CLIENT -m -q "drop table r1;" --allow_repeated_settings --send_logs_level="error" & -$CLICKHOUSE_CLIENT -m -q "drop table r2;" --allow_repeated_settings --send_logs_level="error" & +$CLICKHOUSE_CLIENT -nm -q "drop table r1;" --allow_repeated_settings --send_logs_level="error" & +$CLICKHOUSE_CLIENT -nm -q "drop table r2;" --allow_repeated_settings --send_logs_level="error" & wait list_keeper_nodes "${table_shared_id}" diff --git a/tests/queries/0_stateless/02932_refreshable_materialized_views_1.sh b/tests/queries/0_stateless/02932_refreshable_materialized_views_1.sh index 057f76e63d0..2b92a113e91 100755 --- a/tests/queries/0_stateless/02932_refreshable_materialized_views_1.sh +++ b/tests/queries/0_stateless/02932_refreshable_materialized_views_1.sh @@ -10,11 +10,11 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) CLICKHOUSE_CLIENT="`echo "$CLICKHOUSE_CLIENT" | sed 's/--session_timezone[= ][^ ]*//g'`" CLICKHOUSE_CLIENT="`echo "$CLICKHOUSE_CLIENT --allow_experimental_refreshable_materialized_view=1 --session_timezone Etc/UTC"`" -$CLICKHOUSE_CLIENT -q "create view refreshes as select * from system.view_refreshes where database = '$CLICKHOUSE_DATABASE' order by view" +$CLICKHOUSE_CLIENT -nq "create view refreshes as select * from system.view_refreshes where database = '$CLICKHOUSE_DATABASE' order by view" # Basic refreshing. -$CLICKHOUSE_CLIENT -q " +$CLICKHOUSE_CLIENT -nq " create materialized view a refresh after 2 second engine Memory @@ -23,41 +23,41 @@ $CLICKHOUSE_CLIENT -q " select '<1: created view>', view, remaining_dependencies, exception, last_refresh_result in ('Unknown', 'Finished') from refreshes; show create a;" # Wait for any refresh. (xargs trims the string and turns \t and \n into spaces) -while [ "`$CLICKHOUSE_CLIENT -q "select last_refresh_result from refreshes -- $LINENO" | xargs`" == 'Unknown' ] +while [ "`$CLICKHOUSE_CLIENT -nq "select last_refresh_result from refreshes -- $LINENO" | xargs`" == 'Unknown' ] do sleep 0.5 done -start_time="`$CLICKHOUSE_CLIENT -q "select reinterpret(now64(), 'Int64')"`" +start_time="`$CLICKHOUSE_CLIENT -nq "select reinterpret(now64(), 'Int64')"`" # Check table contents. -$CLICKHOUSE_CLIENT -q "select '<2: refreshed>', count(), sum(x=0), sum(x=1) from a" +$CLICKHOUSE_CLIENT -nq "select '<2: refreshed>', count(), sum(x=0), sum(x=1) from a" # Wait for table contents to change. -res1="`$CLICKHOUSE_CLIENT -q 'select * from a order by x format Values'`" +res1="`$CLICKHOUSE_CLIENT -nq 'select * from a order by x format Values'`" while : do - res2="`$CLICKHOUSE_CLIENT -q 'select * from a order by x format Values -- $LINENO'`" + res2="`$CLICKHOUSE_CLIENT -nq 'select * from a order by x format Values -- $LINENO'`" [ "$res2" == "$res1" ] || break sleep 0.5 done # Wait for another change. while : do - res3="`$CLICKHOUSE_CLIENT -q 'select * from a order by x format Values -- $LINENO'`" + res3="`$CLICKHOUSE_CLIENT -nq 'select * from a order by x format Values -- $LINENO'`" [ "$res3" == "$res2" ] || break sleep 0.5 done # Check that the two changes were at least 1 second apart, in particular that we're not refreshing # like crazy. This is potentially flaky, but we need at least one test that uses non-mocked timer # to make sure the clock+timer code works at all. 
If it turns out flaky, increase refresh period above. -$CLICKHOUSE_CLIENT -q " +$CLICKHOUSE_CLIENT -nq " select '<3: time difference at least>', min2(reinterpret(now64(), 'Int64') - $start_time, 1000); select '<4: next refresh in>', next_refresh_time-last_refresh_time from refreshes;" # Create a source table from which views will read. -$CLICKHOUSE_CLIENT -q " +$CLICKHOUSE_CLIENT -nq " create table src (x Int8) engine Memory as select 1;" # Switch to fake clock, change refresh schedule, change query. -$CLICKHOUSE_CLIENT -q " +$CLICKHOUSE_CLIENT -nq " system test view a set fake time '2050-01-01 00:00:01'; system wait view a; system refresh view a; @@ -68,19 +68,19 @@ $CLICKHOUSE_CLIENT -q " select '<4.5: altered>', status, last_refresh_result, next_refresh_time from refreshes; show create a;" # Advance time to trigger the refresh. -$CLICKHOUSE_CLIENT -q " +$CLICKHOUSE_CLIENT -nq " select '<5: no refresh>', count() from a; system test view a set fake time '2052-02-03 04:05:06';" -while [ "`$CLICKHOUSE_CLIENT -q "select last_refresh_time from refreshes -- $LINENO" | xargs`" != '2052-02-03 04:05:06' ] +while [ "`$CLICKHOUSE_CLIENT -nq "select last_refresh_time from refreshes -- $LINENO" | xargs`" != '2052-02-03 04:05:06' ] do sleep 0.5 done -$CLICKHOUSE_CLIENT -q " +$CLICKHOUSE_CLIENT -nq " select '<6: refreshed>', * from a; select '<7: refreshed>', status, last_refresh_result, next_refresh_time from refreshes;" # Create a dependent view, refresh it once. -$CLICKHOUSE_CLIENT -q " +$CLICKHOUSE_CLIENT -nq " create materialized view b refresh every 2 year depends on a (y Int32) engine MergeTree order by y empty as select x*10 as y from a; show create b; system test view b set fake time '2052-11-11 11:11:11'; @@ -88,89 +88,89 @@ $CLICKHOUSE_CLIENT -q " system wait view b; select '<7.5: created dependent>', last_refresh_time from refreshes where view = 'b';" # Next refresh shouldn't start until the dependency refreshes. -$CLICKHOUSE_CLIENT -q " +$CLICKHOUSE_CLIENT -nq " select '<8: refreshed>', * from b; select '<9: refreshed>', view, status, last_refresh_result, next_refresh_time from refreshes; system test view b set fake time '2054-01-24 23:22:21';" -while [ "`$CLICKHOUSE_CLIENT -q "select status, next_refresh_time from refreshes where view = 'b' -- $LINENO" | xargs`" != 'WaitingForDependencies 2054-01-01 00:00:00' ] +while [ "`$CLICKHOUSE_CLIENT -nq "select status, next_refresh_time from refreshes where view = 'b' -- $LINENO" | xargs`" != 'WaitingForDependencies 2054-01-01 00:00:00' ] do sleep 0.5 done # Drop the source table, check that refresh fails and doesn't leave a temp table behind. -$CLICKHOUSE_CLIENT -q " +$CLICKHOUSE_CLIENT -nq " select '<9.2: dropping>', countIf(name like '%tmp%'), countIf(name like '%.inner%') from system.tables where database = currentDatabase(); drop table src; system refresh view a;" -$CLICKHOUSE_CLIENT -q "system wait view a;" 2>/dev/null && echo "SYSTEM WAIT VIEW failed to fail at $LINENO" -$CLICKHOUSE_CLIENT -q " +$CLICKHOUSE_CLIENT -nq "system wait view a;" 2>/dev/null && echo "SYSTEM WAIT VIEW failed to fail at $LINENO" +$CLICKHOUSE_CLIENT -nq " select '<9.4: dropped>', countIf(name like '%tmp%'), countIf(name like '%.inner%') from system.tables where database = currentDatabase();" # Create the source table again, check that refresh succeeds (in particular that tables are looked # up by name rather than uuid). 
-$CLICKHOUSE_CLIENT -q " +$CLICKHOUSE_CLIENT -nq " select '<10: creating>', view, status, remaining_dependencies, next_refresh_time from refreshes; create table src (x Int16) engine Memory as select 2; system test view a set fake time '2054-01-01 00:00:01';" -while [ "`$CLICKHOUSE_CLIENT -q "select status from refreshes where view = 'b' -- $LINENO" | xargs`" != 'Scheduled' ] +while [ "`$CLICKHOUSE_CLIENT -nq "select status from refreshes where view = 'b' -- $LINENO" | xargs`" != 'Scheduled' ] do sleep 0.5 done # Both tables should've refreshed. -$CLICKHOUSE_CLIENT -q " +$CLICKHOUSE_CLIENT -nq " select '<11: chain-refreshed a>', * from a; select '<12: chain-refreshed b>', * from b; select '<13: chain-refreshed>', view, status, remaining_dependencies, last_refresh_result, last_refresh_time, next_refresh_time, exception == '' from refreshes;" # Make the dependent table run ahead by one refresh cycle, make sure it waits for the dependency to # catch up to the same cycle. -$CLICKHOUSE_CLIENT -q " +$CLICKHOUSE_CLIENT -nq " system test view b set fake time '2059-01-01 00:00:00'; system refresh view b;" -while [ "`$CLICKHOUSE_CLIENT -q "select next_refresh_time from refreshes where view = 'b' -- $LINENO" | xargs`" != '2060-01-01 00:00:00' ] +while [ "`$CLICKHOUSE_CLIENT -nq "select next_refresh_time from refreshes where view = 'b' -- $LINENO" | xargs`" != '2060-01-01 00:00:00' ] do sleep 0.5 done -$CLICKHOUSE_CLIENT -q " +$CLICKHOUSE_CLIENT -nq " system test view b set fake time '2061-01-01 00:00:00'; system test view a set fake time '2057-01-01 00:00:00';" -while [ "`$CLICKHOUSE_CLIENT -q "select status, next_refresh_time from refreshes -- $LINENO" | xargs`" != 'Scheduled 2058-01-01 00:00:00 WaitingForDependencies 2060-01-01 00:00:00' ] +while [ "`$CLICKHOUSE_CLIENT -nq "select status, next_refresh_time from refreshes -- $LINENO" | xargs`" != 'Scheduled 2058-01-01 00:00:00 WaitingForDependencies 2060-01-01 00:00:00' ] do sleep 0.5 done -$CLICKHOUSE_CLIENT -q " +$CLICKHOUSE_CLIENT -nq " select '<14: waiting for next cycle>', view, status, remaining_dependencies, next_refresh_time from refreshes; truncate src; insert into src values (3); system test view a set fake time '2060-02-02 02:02:02';" -while [ "`$CLICKHOUSE_CLIENT -q "select next_refresh_time from refreshes where view = 'b' -- $LINENO" | xargs`" != '2062-01-01 00:00:00' ] +while [ "`$CLICKHOUSE_CLIENT -nq "select next_refresh_time from refreshes where view = 'b' -- $LINENO" | xargs`" != '2062-01-01 00:00:00' ] do sleep 0.5 done -$CLICKHOUSE_CLIENT -q " +$CLICKHOUSE_CLIENT -nq " select '<15: chain-refreshed a>', * from a; select '<16: chain-refreshed b>', * from b; select '<17: chain-refreshed>', view, status, next_refresh_time from refreshes;" # Get to WaitingForDependencies state and remove the depencency. 
-$CLICKHOUSE_CLIENT -q " +$CLICKHOUSE_CLIENT -nq " system test view b set fake time '2062-03-03 03:03:03'" -while [ "`$CLICKHOUSE_CLIENT -q "select status from refreshes where view = 'b' -- $LINENO" | xargs`" != 'WaitingForDependencies' ] +while [ "`$CLICKHOUSE_CLIENT -nq "select status from refreshes where view = 'b' -- $LINENO" | xargs`" != 'WaitingForDependencies' ] do sleep 0.5 done -$CLICKHOUSE_CLIENT -q " +$CLICKHOUSE_CLIENT -nq " alter table b modify refresh every 2 year" -while [ "`$CLICKHOUSE_CLIENT -q "select status, last_refresh_time from refreshes where view = 'b' -- $LINENO" | xargs`" != 'Scheduled 2062-03-03 03:03:03' ] +while [ "`$CLICKHOUSE_CLIENT -nq "select status, last_refresh_time from refreshes where view = 'b' -- $LINENO" | xargs`" != 'Scheduled 2062-03-03 03:03:03' ] do sleep 0.5 done -$CLICKHOUSE_CLIENT -q " +$CLICKHOUSE_CLIENT -nq " select '<18: removed dependency>', view, status, remaining_dependencies, last_refresh_time,next_refresh_time, refresh_count from refreshes where view = 'b'; show create b;" -$CLICKHOUSE_CLIENT -q " +$CLICKHOUSE_CLIENT -nq " drop table src; drop table a; drop table b; diff --git a/tests/queries/0_stateless/02932_refreshable_materialized_views_2.sh b/tests/queries/0_stateless/02932_refreshable_materialized_views_2.sh index 2d00d61f253..50a905576d5 100755 --- a/tests/queries/0_stateless/02932_refreshable_materialized_views_2.sh +++ b/tests/queries/0_stateless/02932_refreshable_materialized_views_2.sh @@ -12,29 +12,29 @@ CLICKHOUSE_LOG_COMMENT= CLICKHOUSE_CLIENT="`echo "$CLICKHOUSE_CLIENT" | sed 's/--session_timezone[= ][^ ]*//g'`" CLICKHOUSE_CLIENT="`echo "$CLICKHOUSE_CLIENT --allow_experimental_refreshable_materialized_view=1 --allow_materialized_view_with_bad_select=0 --session_timezone Etc/UTC"`" -$CLICKHOUSE_CLIENT -q "create view refreshes as select * from system.view_refreshes where database = '$CLICKHOUSE_DATABASE' order by view" +$CLICKHOUSE_CLIENT -nq "create view refreshes as select * from system.view_refreshes where database = '$CLICKHOUSE_DATABASE' order by view" # Select from a table that doesn't exist, get an exception. -$CLICKHOUSE_CLIENT -q " +$CLICKHOUSE_CLIENT -nq " create table src (x Int8) engine Memory as select 1; create materialized view c refresh every 1 second (x Int64) engine Memory empty as select * from src; drop table src;" -while [ "`$CLICKHOUSE_CLIENT -q "select last_refresh_result from refreshes where view = 'c' -- $LINENO" | xargs`" != 'Error' ] +while [ "`$CLICKHOUSE_CLIENT -nq "select last_refresh_result from refreshes where view = 'c' -- $LINENO" | xargs`" != 'Error' ] do sleep 0.5 done # Check exception, create src, expect successful refresh. -$CLICKHOUSE_CLIENT -q " +$CLICKHOUSE_CLIENT -nq " select '<19: exception>', exception ilike '%UNKNOWN_TABLE%' ? '1' : exception from refreshes where view = 'c'; create table src (x Int64) engine Memory as select 1; system refresh view c;" -while [ "`$CLICKHOUSE_CLIENT -q "select last_refresh_result from refreshes -- $LINENO" | xargs`" != 'Finished' ] +while [ "`$CLICKHOUSE_CLIENT -nq "select last_refresh_result from refreshes -- $LINENO" | xargs`" != 'Finished' ] do sleep 0.5 done # Rename table. -$CLICKHOUSE_CLIENT -q " +$CLICKHOUSE_CLIENT -nq " select '<20: unexception>', * from c; rename table c to d; select '<21: rename>', * from d; @@ -42,130 +42,130 @@ $CLICKHOUSE_CLIENT -q " # Do various things during a refresh. # First make a nonempty view. 
-$CLICKHOUSE_CLIENT -q " +$CLICKHOUSE_CLIENT -nq " drop table d; truncate src; insert into src values (1); create materialized view e refresh every 1 second (x Int64) engine MergeTree order by x empty as select x + sleepEachRow(1) as x from src settings max_block_size = 1;" -while [ "`$CLICKHOUSE_CLIENT -q "select last_refresh_result from refreshes -- $LINENO" | xargs`" != 'Finished' ] +while [ "`$CLICKHOUSE_CLIENT -nq "select last_refresh_result from refreshes -- $LINENO" | xargs`" != 'Finished' ] do sleep 0.5 done # Stop refreshes. -$CLICKHOUSE_CLIENT -q " +$CLICKHOUSE_CLIENT -nq " select '<23: simple refresh>', * from e; system stop view e;" -while [ "`$CLICKHOUSE_CLIENT -q "select status from refreshes -- $LINENO" | xargs`" != 'Disabled' ] +while [ "`$CLICKHOUSE_CLIENT -nq "select status from refreshes -- $LINENO" | xargs`" != 'Disabled' ] do sleep 0.5 done # Make refreshes slow, wait for a slow refresh to start. (We stopped refreshes first to make sure # we wait for a slow refresh, not a previous fast one.) -$CLICKHOUSE_CLIENT -q " +$CLICKHOUSE_CLIENT -nq " insert into src select * from numbers(1000) settings max_block_size=1; system start view e;" -while [ "`$CLICKHOUSE_CLIENT -q "select status from refreshes -- $LINENO" | xargs`" != 'Running' ] +while [ "`$CLICKHOUSE_CLIENT -nq "select status from refreshes -- $LINENO" | xargs`" != 'Running' ] do sleep 0.5 done # Rename. -$CLICKHOUSE_CLIENT -q " +$CLICKHOUSE_CLIENT -nq " rename table e to f; select '<24: rename during refresh>', * from f; select '<25: rename during refresh>', view, status from refreshes where view = 'f'; alter table f modify refresh after 10 year;" # Cancel. -$CLICKHOUSE_CLIENT -q " +$CLICKHOUSE_CLIENT -nq " system cancel view f;" -while [ "`$CLICKHOUSE_CLIENT -q "select status from refreshes where view = 'f' -- $LINENO" | xargs`" != 'Scheduled' ] +while [ "`$CLICKHOUSE_CLIENT -nq "select status from refreshes where view = 'f' -- $LINENO" | xargs`" != 'Scheduled' ] do sleep 0.5 done # Check that another refresh doesn't immediately start after the cancelled one. -$CLICKHOUSE_CLIENT -q " +$CLICKHOUSE_CLIENT -nq " select '<27: cancelled>', view, status, last_refresh_result from refreshes where view = 'f'; system refresh view f;" -while [ "`$CLICKHOUSE_CLIENT -q "select status from refreshes where view = 'f' -- $LINENO" | xargs`" != 'Running' ] +while [ "`$CLICKHOUSE_CLIENT -nq "select status from refreshes where view = 'f' -- $LINENO" | xargs`" != 'Running' ] do sleep 0.5 done # Drop. -$CLICKHOUSE_CLIENT -q " +$CLICKHOUSE_CLIENT -nq " drop table f; select '<28: drop during refresh>', view, status from refreshes; select '<28: drop during refresh>', countIf(name like '%tmp%'), countIf(name like '%.inner%') from system.tables where database = currentDatabase()" # Try OFFSET and RANDOMIZE FOR. 
-$CLICKHOUSE_CLIENT -q " +$CLICKHOUSE_CLIENT -nq " create materialized view g refresh every 1 week offset 3 day 4 hour randomize for 4 day 1 hour (x Int64) engine Memory empty as select 42 as x; show create g; system test view g set fake time '2050-02-03 15:30:13';" -while [ "`$CLICKHOUSE_CLIENT -q "select next_refresh_time > '2049-01-01' from refreshes -- $LINENO" | xargs`" != '1' ] +while [ "`$CLICKHOUSE_CLIENT -nq "select next_refresh_time > '2049-01-01' from refreshes -- $LINENO" | xargs`" != '1' ] do sleep 0.5 done -$CLICKHOUSE_CLIENT -q " +$CLICKHOUSE_CLIENT -nq " with '2050-02-10 04:00:00'::DateTime as expected select '<29: randomize>', abs(next_refresh_time::Int64 - expected::Int64) <= 3600*(24*4+1), next_refresh_time != expected from refreshes;" # Send data 'TO' an existing table. -$CLICKHOUSE_CLIENT -q " +$CLICKHOUSE_CLIENT -nq " drop table g; create table dest (x Int64) engine MergeTree order by x; truncate src; insert into src values (1); create materialized view h refresh every 1 second to dest empty as select x*10 as x from src; show create h;" -while [ "`$CLICKHOUSE_CLIENT -q "select last_refresh_result from refreshes -- $LINENO" | xargs`" != 'Finished' ] +while [ "`$CLICKHOUSE_CLIENT -nq "select last_refresh_result from refreshes -- $LINENO" | xargs`" != 'Finished' ] do sleep 0.5 done -$CLICKHOUSE_CLIENT -q " +$CLICKHOUSE_CLIENT -nq " select '<30: to existing table>', * from dest; insert into src values (2);" -while [ "`$CLICKHOUSE_CLIENT -q "select count() from dest -- $LINENO" | xargs`" != '2' ] +while [ "`$CLICKHOUSE_CLIENT -nq "select count() from dest -- $LINENO" | xargs`" != '2' ] do sleep 0.5 done -$CLICKHOUSE_CLIENT -q " +$CLICKHOUSE_CLIENT -nq " select '<31: to existing table>', * from dest; drop table dest; drop table h;" # Retries. 
-$CLICKHOUSE_CLIENT -q " +$CLICKHOUSE_CLIENT -nq " create materialized view h2 refresh after 1 year settings refresh_retries = 10 (x Int64) engine Memory as select x*10 + throwIf(x % 2 == 0) as x from src;" -$CLICKHOUSE_CLIENT -q "system wait view h2;" 2>/dev/null && echo "SYSTEM WAIT VIEW failed to fail at $LINENO" -$CLICKHOUSE_CLIENT -q " +$CLICKHOUSE_CLIENT -nq "system wait view h2;" 2>/dev/null && echo "SYSTEM WAIT VIEW failed to fail at $LINENO" +$CLICKHOUSE_CLIENT -nq " select '<31.5: will retry>', last_refresh_result, retry > 0 from refreshes; create table src2 (x Int8) engine Memory; insert into src2 values (1); exchange tables src and src2; drop table src2;" -while [ "`$CLICKHOUSE_CLIENT -q "select last_refresh_result, retry from refreshes -- $LINENO" | xargs`" != 'Finished 0' ] +while [ "`$CLICKHOUSE_CLIENT -nq "select last_refresh_result, retry from refreshes -- $LINENO" | xargs`" != 'Finished 0' ] do sleep 0.5 done -$CLICKHOUSE_CLIENT -q " +$CLICKHOUSE_CLIENT -nq " select '<31.6: did retry>', x from h2; drop table h2" # EMPTY -$CLICKHOUSE_CLIENT -q " +$CLICKHOUSE_CLIENT -nq " create materialized view i refresh after 1 year engine Memory empty as select number as x from numbers(2); create materialized view j refresh after 1 year engine Memory as select number as x from numbers(2);" -while [ "`$CLICKHOUSE_CLIENT -q "select sum(last_success_time is null) from refreshes -- $LINENO" | xargs`" == '2' ] +while [ "`$CLICKHOUSE_CLIENT -nq "select sum(last_success_time is null) from refreshes -- $LINENO" | xargs`" == '2' ] do sleep 0.5 done -$CLICKHOUSE_CLIENT -q " +$CLICKHOUSE_CLIENT -nq " select '<32: empty>', view, status, last_refresh_result, retry from refreshes order by view; drop table i; drop table j;" # APPEND -$CLICKHOUSE_CLIENT -q " +$CLICKHOUSE_CLIENT -nq " create materialized view k refresh every 10 year append (x Int64) engine Memory empty as select x*10 as x from src; select '<33: append>', * from k; system refresh view k; @@ -177,7 +177,7 @@ $CLICKHOUSE_CLIENT -q " system wait view k; select '<35: append>', * from k order by x;" # ALTER to non-APPEND -$CLICKHOUSE_CLIENT -q " +$CLICKHOUSE_CLIENT -nq " alter table k modify refresh every 10 year; system wait view k; system refresh view k; @@ -187,7 +187,7 @@ $CLICKHOUSE_CLIENT -q " truncate table src;" # APPEND + TO + regular materialized view reading from it. -$CLICKHOUSE_CLIENT -q " +$CLICKHOUSE_CLIENT -nq " create table mid (x Int64) engine MergeTree order by x; create materialized view l refresh every 10 year append to mid empty as select x*10 as x from src; create materialized view m (x Int64) engine Memory as select x*10 as x from mid; @@ -204,19 +204,19 @@ $CLICKHOUSE_CLIENT -q " drop table mid;" # Failing to create inner table. -$CLICKHOUSE_CLIENT -q " +$CLICKHOUSE_CLIENT -nq " create materialized view n refresh every 1 second (x Int64) engine MergeTree as select 1 as x from numbers(2);" 2>/dev/null || echo "creating MergeTree without ORDER BY failed, as expected" -$CLICKHOUSE_CLIENT -q " +$CLICKHOUSE_CLIENT -nq " create materialized view n refresh every 1 second (x Int64) engine MergeTree order by x as select 1 as x from numbers(2); drop table n;" # Reading from table that doesn't exist yet. 
-$CLICKHOUSE_CLIENT -q " +$CLICKHOUSE_CLIENT -nq " create materialized view o refresh every 1 second (x Int64) engine Memory as select x from nonexist; -- { serverError UNKNOWN_TABLE } create materialized view o (x Int64) engine Memory as select x from nonexist; -- { serverError UNKNOWN_TABLE } create materialized view o (x Int64) engine Memory as select x from nope.nonexist; -- { serverError UNKNOWN_DATABASE } create materialized view o refresh every 1 second (x Int64) engine Memory as select x from nope.nonexist settings allow_materialized_view_with_bad_select = 1; drop table o;" -$CLICKHOUSE_CLIENT -q " +$CLICKHOUSE_CLIENT -nq " drop table refreshes;" diff --git a/tests/queries/0_stateless/02941_variant_type_1.sh b/tests/queries/0_stateless/02941_variant_type_1.sh index ef1d1deff4e..c12bece6d54 100755 --- a/tests/queries/0_stateless/02941_variant_type_1.sh +++ b/tests/queries/0_stateless/02941_variant_type_1.sh @@ -10,7 +10,7 @@ CH_CLIENT="$CLICKHOUSE_CLIENT --allow_experimental_variant_type=1 --allow_suspic function test1_insert() { echo "test1 insert" - $CH_CLIENT -mq "insert into test select number, NULL from numbers(3); + $CH_CLIENT -nmq "insert into test select number, NULL from numbers(3); insert into test select number + 3, number from numbers(3); insert into test select number + 6, ('str_' || toString(number))::Variant(String) from numbers(3); insert into test select number + 9, ('lc_str_' || toString(number))::LowCardinality(String) from numbers(3); @@ -21,7 +21,7 @@ insert into test select number + 15, range(number + 1)::Array(UInt64) from numbe function test1_select() { echo "test1 select" - $CH_CLIENT -mq "select v from test order by id; + $CH_CLIENT -nmq "select v from test order by id; select v.String from test order by id; select v.UInt64 from test order by id; select v.\`LowCardinality(String)\` from test order by id; @@ -36,7 +36,7 @@ select v.\`Array(UInt64)\`.size0 from test order by id;" function test2_insert() { echo "test2 insert" - $CH_CLIENT -mq "insert into test select number, NULL from numbers(3); + $CH_CLIENT -nmq "insert into test select number, NULL from numbers(3); insert into test select number + 3, number % 2 ? NULL : number from numbers(3); insert into test select number + 6, number % 2 ? NULL : ('str_' || toString(number))::Variant(String) from numbers(3); insert into test select number + 9, number % 2 ? CAST(NULL, 'Variant(String, UInt64, LowCardinality(String), Tuple(a UInt32, b UInt32), Array(UInt64))') : CAST(('lc_str_' || toString(number))::LowCardinality(String), 'Variant(String, UInt64, LowCardinality(String), Tuple(a UInt32, b UInt32), Array(UInt64))') from numbers(3); @@ -47,7 +47,7 @@ insert into test select number + 15, number % 2 ? 
CAST(NULL, 'Variant(String, UI function test2_select() { echo "test2 select" - $CH_CLIENT -mq "select v from test order by id; + $CH_CLIENT -nmq "select v from test order by id; select v.String from test order by id; select v.UInt64 from test order by id; select v.\`LowCardinality(String)\` from test order by id; @@ -68,7 +68,7 @@ function test3_insert() function test3_select() { echo "test3 select" - $CH_CLIENT -mq "select v from test order by id; + $CH_CLIENT -nmq "select v from test order by id; select v.String from test order by id; select v.UInt64 from test order by id; select v.\`LowCardinality(String)\` from test order by id; diff --git a/tests/queries/0_stateless/02941_variant_type_2.sh b/tests/queries/0_stateless/02941_variant_type_2.sh index 7017edac525..e93dfac8510 100755 --- a/tests/queries/0_stateless/02941_variant_type_2.sh +++ b/tests/queries/0_stateless/02941_variant_type_2.sh @@ -10,7 +10,7 @@ CH_CLIENT="$CLICKHOUSE_CLIENT --allow_experimental_variant_type=1 --allow_suspic function test4_insert() { echo "test4 insert" - $CH_CLIENT -mq "insert into test select number, NULL from numbers(100000); + $CH_CLIENT -nmq "insert into test select number, NULL from numbers(100000); insert into test select number + 100000, number from numbers(100000); insert into test select number + 200000, ('str_' || toString(number))::Variant(String) from numbers(100000); insert into test select number + 300000, ('lc_str_' || toString(number))::LowCardinality(String) from numbers(100000); @@ -21,7 +21,7 @@ insert into test select number + 500000, range(number % 20 + 1)::Array(UInt64) f function test4_select { echo "test4 select" - $CH_CLIENT -mq "select v from test format Null; + $CH_CLIENT -nmq "select v from test format Null; select count() from test where isNotNull(v); select v.String from test format Null; select count() from test where isNotNull(v.String); diff --git a/tests/queries/0_stateless/02941_variant_type_3.sh b/tests/queries/0_stateless/02941_variant_type_3.sh index 5d923a47e9b..cc0fde5b689 100755 --- a/tests/queries/0_stateless/02941_variant_type_3.sh +++ b/tests/queries/0_stateless/02941_variant_type_3.sh @@ -10,7 +10,7 @@ CH_CLIENT="$CLICKHOUSE_CLIENT --allow_experimental_variant_type=1 --allow_suspic function test5_insert() { echo "test5 insert" - $CH_CLIENT -mq " + $CH_CLIENT -nmq " insert into test select number, NULL from numbers(200000); insert into test select number + 200000, number % 2 ? NULL : number from numbers(200000); insert into test select number + 400000, number % 2 ? NULL : ('str_' || toString(number))::Variant(String) from numbers(200000); @@ -22,7 +22,7 @@ insert into test select number + 1000000, number % 2 ? 
CAST(NULL, 'Variant(Strin function test5_select() { echo "test5 select" - $CH_CLIENT -mq " + $CH_CLIENT -nmq " select v from test format Null; select count() from test where isNotNull(v); select v.String from test format Null; diff --git a/tests/queries/0_stateless/02941_variant_type_4.sh b/tests/queries/0_stateless/02941_variant_type_4.sh index 88f6a475d46..93a1770d05e 100755 --- a/tests/queries/0_stateless/02941_variant_type_4.sh +++ b/tests/queries/0_stateless/02941_variant_type_4.sh @@ -17,7 +17,7 @@ function test6_insert() function test6_select() { echo "test6 select" - $CH_CLIENT -mq "select v from test format Null; + $CH_CLIENT -nmq "select v from test format Null; select count() from test where isNotNull(v); select v.String from test format Null; select count() from test where isNotNull(v.String); diff --git a/tests/queries/0_stateless/02944_dynamically_change_filesystem_cache_size.sh b/tests/queries/0_stateless/02944_dynamically_change_filesystem_cache_size.sh index f9e84dc2b50..cb099bb59ae 100755 --- a/tests/queries/0_stateless/02944_dynamically_change_filesystem_cache_size.sh +++ b/tests/queries/0_stateless/02944_dynamically_change_filesystem_cache_size.sh @@ -10,7 +10,7 @@ disk_name="s3_cache_02944" $CLICKHOUSE_CLIENT --query "SYSTEM DROP FILESYSTEM CACHE" $CLICKHOUSE_CLIENT --query "DESCRIBE FILESYSTEM CACHE '${disk_name}'" -$CLICKHOUSE_CLIENT -m --query " +$CLICKHOUSE_CLIENT -nm --query " DROP TABLE IF EXISTS test; CREATE TABLE test (a String) engine=MergeTree() ORDER BY tuple() SETTINGS disk = '$disk_name'; INSERT INTO test SELECT randomString(100); @@ -33,7 +33,7 @@ cat $config_path \ > $config_path_tmp mv $config_path_tmp $config_path -$CLICKHOUSE_CLIENT -m --query " +$CLICKHOUSE_CLIENT -nm --query " set send_logs_level='fatal'; SYSTEM RELOAD CONFIG" $CLICKHOUSE_CLIENT --query "DESCRIBE FILESYSTEM CACHE '${disk_name}'" @@ -47,7 +47,7 @@ cat $config_path \ > $config_path_tmp mv $config_path_tmp $config_path -$CLICKHOUSE_CLIENT -m --query " +$CLICKHOUSE_CLIENT -nm --query " set send_logs_level='fatal'; SYSTEM RELOAD CONFIG" $CLICKHOUSE_CLIENT --query "DESCRIBE FILESYSTEM CACHE '${disk_name}'" @@ -63,7 +63,7 @@ cat $config_path \ > $config_path_tmp mv $config_path_tmp $config_path -$CLICKHOUSE_CLIENT -m --query " +$CLICKHOUSE_CLIENT -nm --query " set send_logs_level='fatal'; SYSTEM RELOAD CONFIG" $CLICKHOUSE_CLIENT --query "DESCRIBE FILESYSTEM CACHE '${disk_name}'" @@ -77,7 +77,7 @@ cat $config_path \ > $config_path_tmp mv $config_path_tmp $config_path -$CLICKHOUSE_CLIENT -m --query " +$CLICKHOUSE_CLIENT -nm --query " set send_logs_level='fatal'; SYSTEM RELOAD CONFIG" $CLICKHOUSE_CLIENT --query "DESCRIBE FILESYSTEM CACHE '${disk_name}'" diff --git a/tests/queries/0_stateless/02960_polygon_bound_bug.sh b/tests/queries/0_stateless/02960_polygon_bound_bug.sh index 1e9be36da55..0c3db01a77c 100755 --- a/tests/queries/0_stateless/02960_polygon_bound_bug.sh +++ b/tests/queries/0_stateless/02960_polygon_bound_bug.sh @@ -5,7 +5,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . 
"$CURDIR"/../shell_config.sh -$CLICKHOUSE_LOCAL -m -q "CREATE TABLE test_table (geom MultiPolygon) engine=MergeTree ORDER BY geom; +$CLICKHOUSE_LOCAL -nm -q "CREATE TABLE test_table (geom MultiPolygon) engine=MergeTree ORDER BY geom; INSERT INTO test_table SELECT * FROM file('$CURDIR/data_parquet/02960_polygon_bound_bug.parquet', Parquet); CREATE DICTIONARY test_dict (geom MultiPolygon) PRIMARY KEY geom SOURCE (CLICKHOUSE(TABLE 'test_table')) LIFETIME(MIN 0 MAX 0) LAYOUT(POLYGON(STORE_POLYGON_KEY_COLUMN 1)); SELECT dictHas(test_dict,(174.84729269276494,-36.99524960275426));" diff --git a/tests/queries/0_stateless/02961_storage_config_volume_priority.sh b/tests/queries/0_stateless/02961_storage_config_volume_priority.sh index 1bd86ee0c75..145b921a750 100755 --- a/tests/queries/0_stateless/02961_storage_config_volume_priority.sh +++ b/tests/queries/0_stateless/02961_storage_config_volume_priority.sh @@ -24,7 +24,7 @@ cat $config_path \ > $config_path_tmp mv $config_path_tmp $config_path -$CLICKHOUSE_CLIENT -m --query " +$CLICKHOUSE_CLIENT -nm --query " set send_logs_level='error'; SYSTEM RELOAD CONFIG" 2>&1 | grep -c 'volume_priority values must be unique across the policy' @@ -40,7 +40,7 @@ cat $config_path \ > $config_path_tmp mv $config_path_tmp $config_path -$CLICKHOUSE_CLIENT -m --query " +$CLICKHOUSE_CLIENT -nm --query " set send_logs_level='error'; SYSTEM RELOAD CONFIG" 2>&1 | grep -c 'volume_priority values must cover the range from 1 to N (lowest priority specified) without gaps' diff --git a/tests/queries/0_stateless/02963_remote_read_small_buffer_size_bug.sh b/tests/queries/0_stateless/02963_remote_read_small_buffer_size_bug.sh index 3b57941452d..24fe964b824 100755 --- a/tests/queries/0_stateless/02963_remote_read_small_buffer_size_bug.sh +++ b/tests/queries/0_stateless/02963_remote_read_small_buffer_size_bug.sh @@ -7,7 +7,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) disk_name="02963_remote_read_bug" -$CLICKHOUSE_CLIENT -m --query " +$CLICKHOUSE_CLIENT -nm --query " DROP TABLE IF EXISTS test; CREATE TABLE test (a Int32, s String) @@ -22,7 +22,7 @@ OPTIMIZE TABLE test FINAL; query_id=$(random_str 10) -$CLICKHOUSE_CLIENT -m --query_id "$query_id" --query " +$CLICKHOUSE_CLIENT -nm --query_id "$query_id" --query " WITH RANDOM_SET AS ( SELECT rand32() % 10000 FROM numbers(100) ) @@ -37,7 +37,7 @@ SETTINGS merge_tree_min_bytes_for_concurrent_read_for_remote_filesystem = 1, merge_tree_min_rows_for_concurrent_read_for_remote_filesystem = 1; " -$CLICKHOUSE_CLIENT -m --query " +$CLICKHOUSE_CLIENT -nm --query " SYSTEM FLUSH LOGS; -- This threshold was determined experimentally - before the fix this ratio had values around 50K diff --git a/tests/queries/0_stateless/02967_parallel_replicas_join_algo_and_analyzer_1.sh b/tests/queries/0_stateless/02967_parallel_replicas_join_algo_and_analyzer_1.sh index b4271c3d29b..1089eb4051f 100755 --- a/tests/queries/0_stateless/02967_parallel_replicas_join_algo_and_analyzer_1.sh +++ b/tests/queries/0_stateless/02967_parallel_replicas_join_algo_and_analyzer_1.sh @@ -6,7 +6,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) . 
"$CURDIR"/../shell_config.sh -$CLICKHOUSE_CLIENT -m -q " +$CLICKHOUSE_CLIENT -nm -q " drop table if exists num_1; drop table if exists num_2; diff --git a/tests/queries/0_stateless/02967_parallel_replicas_join_algo_and_analyzer_2.sh b/tests/queries/0_stateless/02967_parallel_replicas_join_algo_and_analyzer_2.sh index ed13bf3321b..7a0e2d9bfdb 100755 --- a/tests/queries/0_stateless/02967_parallel_replicas_join_algo_and_analyzer_2.sh +++ b/tests/queries/0_stateless/02967_parallel_replicas_join_algo_and_analyzer_2.sh @@ -6,7 +6,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) . "$CURDIR"/../shell_config.sh -$CLICKHOUSE_CLIENT -m -q " +$CLICKHOUSE_CLIENT -nm -q " drop table if exists num_1; drop table if exists num_2; diff --git a/tests/queries/0_stateless/02967_parallel_replicas_join_algo_and_analyzer_3.sh b/tests/queries/0_stateless/02967_parallel_replicas_join_algo_and_analyzer_3.sh index 9abe870f4e4..c4b44ce11c5 100755 --- a/tests/queries/0_stateless/02967_parallel_replicas_join_algo_and_analyzer_3.sh +++ b/tests/queries/0_stateless/02967_parallel_replicas_join_algo_and_analyzer_3.sh @@ -6,7 +6,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) . "$CURDIR"/../shell_config.sh -$CLICKHOUSE_CLIENT -m -q " +$CLICKHOUSE_CLIENT -nm -q " drop table if exists num_1; drop table if exists num_2; diff --git a/tests/queries/0_stateless/02969_auto_format_detection.sh b/tests/queries/0_stateless/02969_auto_format_detection.sh index ab21dbb3df8..88d6575e499 100755 --- a/tests/queries/0_stateless/02969_auto_format_detection.sh +++ b/tests/queries/0_stateless/02969_auto_format_detection.sh @@ -24,12 +24,12 @@ $CLICKHOUSE_LOCAL -q "select * from generateRandom('a UInt64, b String, c Array( $CLICKHOUSE_LOCAL -q "desc file('$DATA_FILE', auto, 'a UInt64, b String, c Array(UInt64), d Tuple(a UInt64, b String)')" -$CLICKHOUSE_LOCAL -mq " +$CLICKHOUSE_LOCAL -nmq " desc file('$DATA_FILE'); desc file('$DATA_FILE'); " -$CLICKHOUSE_LOCAL -mq " +$CLICKHOUSE_LOCAL -nmq " desc file('$DATA_FILE', JSONEachRow); desc file('$DATA_FILE'); " @@ -39,7 +39,7 @@ $CLICKHOUSE_LOCAL -q "select * from generateRandom('a UInt64, b String, c Array( $CLICKHOUSE_LOCAL -q "desc file('$DATA_FILE.{1,2}')" $CLICKHOUSE_LOCAL -q "desc file('$DATA_FILE.{1,2}') settings schema_inference_mode='union'" 2>&1 | grep -c "CANNOT_DETECT_FORMAT" -$CLICKHOUSE_LOCAL -mq " +$CLICKHOUSE_LOCAL -nmq " desc file('$DATA_FILE.2'); desc file('$DATA_FILE.{1,2}'); " diff --git a/tests/queries/0_stateless/02980_s3_plain_DROP_TABLE_MergeTree.sh b/tests/queries/0_stateless/02980_s3_plain_DROP_TABLE_MergeTree.sh index a46f0018ad3..d543f7195a9 100755 --- a/tests/queries/0_stateless/02980_s3_plain_DROP_TABLE_MergeTree.sh +++ b/tests/queries/0_stateless/02980_s3_plain_DROP_TABLE_MergeTree.sh @@ -19,7 +19,7 @@ $CLICKHOUSE_CLIENT --allow_deprecated_database_ordinary=1 -q "create database $n CLICKHOUSE_CLIENT=${CLICKHOUSE_CLIENT/--database=$CLICKHOUSE_DATABASE/--database=$new_database} CLICKHOUSE_DATABASE="$new_database" -$CLICKHOUSE_CLIENT -m -q " +$CLICKHOUSE_CLIENT -nm -q " drop table if exists data; create table data (key Int) engine=MergeTree() order by key; insert into data values (1); @@ -29,7 +29,7 @@ $CLICKHOUSE_CLIENT -m -q " # suppress output $CLICKHOUSE_CLIENT -q "backup table data to S3('http://localhost:11111/test/s3_plain/backups/$CLICKHOUSE_DATABASE', 'test', 'testtest')" > /dev/null -$CLICKHOUSE_CLIENT -m -q " +$CLICKHOUSE_CLIENT -nm -q " drop table data; attach table data (key Int) engine=MergeTree() order by key settings diff --git 
a/tests/queries/0_stateless/02980_s3_plain_DROP_TABLE_ReplicatedMergeTree.sh b/tests/queries/0_stateless/02980_s3_plain_DROP_TABLE_ReplicatedMergeTree.sh index 522fdd6096a..eec05c81344 100755 --- a/tests/queries/0_stateless/02980_s3_plain_DROP_TABLE_ReplicatedMergeTree.sh +++ b/tests/queries/0_stateless/02980_s3_plain_DROP_TABLE_ReplicatedMergeTree.sh @@ -18,7 +18,7 @@ $CLICKHOUSE_CLIENT --allow_deprecated_database_ordinary=1 -q "create database $n CLICKHOUSE_CLIENT=${CLICKHOUSE_CLIENT/--database=$CLICKHOUSE_DATABASE/--database=$new_database} CLICKHOUSE_DATABASE="$new_database" -$CLICKHOUSE_CLIENT -m -q " +$CLICKHOUSE_CLIENT -nm -q " drop table if exists data_read; drop table if exists data_write; @@ -33,7 +33,7 @@ $CLICKHOUSE_CLIENT -m -q " # suppress output $CLICKHOUSE_CLIENT -q "backup table data_read to S3('http://localhost:11111/test/s3_plain/backups/$CLICKHOUSE_DATABASE', 'test', 'testtest')" > /dev/null -$CLICKHOUSE_CLIENT -m -q " +$CLICKHOUSE_CLIENT -nm -q " drop table data_read; attach table data_read (key Int) engine=ReplicatedMergeTree('/tables/{database}/data', 'read') order by key settings @@ -57,7 +57,7 @@ echo "Files before DETACH TABLE" # sed to match any part, since in case of fault injection part name may not be all_0_0_0 but all_1_1_0 clickhouse-disks -C "$config" --disk s3_plain_disk --query "list --recursive $path" | tail -n+2 | sed 's/all_[^_]*_[^_]*_0/all_X_X_X/g' -$CLICKHOUSE_CLIENT -m -q " +$CLICKHOUSE_CLIENT -nm -q " detach table data_read; detach table data_write; " diff --git a/tests/queries/0_stateless/03008_deduplication_insert_several_blocks_nonreplicated.sh b/tests/queries/0_stateless/03008_deduplication_insert_several_blocks_nonreplicated.sh index fca8b7a5e97..ef4866cb69e 100755 --- a/tests/queries/0_stateless/03008_deduplication_insert_several_blocks_nonreplicated.sh +++ b/tests/queries/0_stateless/03008_deduplication_insert_several_blocks_nonreplicated.sh @@ -36,7 +36,7 @@ for insert_method in "InsertSelect" "InsertValues"; do fi echo "$THIS_RUN" - $CLICKHOUSE_CLIENT --max_insert_block_size 1 -mq " + $CLICKHOUSE_CLIENT --max_insert_block_size 1 -nmq " $(python3 $CURDIR/03008_deduplication.python insert_several_blocks_into_table \ --insert-method $insert_method \ --table-engine $ENGINE \ diff --git a/tests/queries/0_stateless/03008_deduplication_insert_several_blocks_replicated.sh b/tests/queries/0_stateless/03008_deduplication_insert_several_blocks_replicated.sh index 08d5ae3a6b2..dd4b1f7cec0 100755 --- a/tests/queries/0_stateless/03008_deduplication_insert_several_blocks_replicated.sh +++ b/tests/queries/0_stateless/03008_deduplication_insert_several_blocks_replicated.sh @@ -36,7 +36,7 @@ for insert_method in "InsertSelect" "InsertValues"; do fi echo "$THIS_RUN" - $CLICKHOUSE_CLIENT --max_insert_block_size 1 -mq " + $CLICKHOUSE_CLIENT --max_insert_block_size 1 -nmq " $(python3 $CURDIR/03008_deduplication.python insert_several_blocks_into_table \ --insert-method $insert_method \ --table-engine $ENGINE \ diff --git a/tests/queries/0_stateless/03008_deduplication_mv_generates_several_blocks_nonreplicated.sh b/tests/queries/0_stateless/03008_deduplication_mv_generates_several_blocks_nonreplicated.sh index 678b1db4d68..33386c76edb 100755 --- a/tests/queries/0_stateless/03008_deduplication_mv_generates_several_blocks_nonreplicated.sh +++ b/tests/queries/0_stateless/03008_deduplication_mv_generates_several_blocks_nonreplicated.sh @@ -36,7 +36,7 @@ for insert_method in "InsertSelect" "InsertValues"; do fi echo "$THIS_RUN" - $CLICKHOUSE_CLIENT 
--max_insert_block_size 1 -mq " + $CLICKHOUSE_CLIENT --max_insert_block_size 1 -nmq " $(python3 $CURDIR/03008_deduplication.python mv_generates_several_blocks \ --insert-method $insert_method \ --table-engine $ENGINE \ diff --git a/tests/queries/0_stateless/03008_deduplication_mv_generates_several_blocks_replicated.sh b/tests/queries/0_stateless/03008_deduplication_mv_generates_several_blocks_replicated.sh index 606fda6d4fd..b66ef83abf2 100755 --- a/tests/queries/0_stateless/03008_deduplication_mv_generates_several_blocks_replicated.sh +++ b/tests/queries/0_stateless/03008_deduplication_mv_generates_several_blocks_replicated.sh @@ -36,7 +36,7 @@ for insert_method in "InsertSelect" "InsertValues"; do fi echo "$THIS_RUN" - $CLICKHOUSE_CLIENT --max_insert_block_size 1 -mq " + $CLICKHOUSE_CLIENT --max_insert_block_size 1 -nmq " $(python3 $CURDIR/03008_deduplication.python mv_generates_several_blocks \ --insert-method $insert_method \ --table-engine $ENGINE \ diff --git a/tests/queries/0_stateless/03008_deduplication_several_mv_into_one_table_nonreplicated.sh b/tests/queries/0_stateless/03008_deduplication_several_mv_into_one_table_nonreplicated.sh index b571631dc8f..f9e1838f491 100755 --- a/tests/queries/0_stateless/03008_deduplication_several_mv_into_one_table_nonreplicated.sh +++ b/tests/queries/0_stateless/03008_deduplication_several_mv_into_one_table_nonreplicated.sh @@ -36,7 +36,7 @@ for insert_method in "InsertSelect" "InsertValues"; do fi echo "$THIS_RUN" - $CLICKHOUSE_CLIENT --max_insert_block_size 1 -mq " + $CLICKHOUSE_CLIENT --max_insert_block_size 1 -nmq " $(python3 $CURDIR/03008_deduplication.python several_mv_into_one_table \ --insert-method $insert_method \ --table-engine $ENGINE \ diff --git a/tests/queries/0_stateless/03008_deduplication_several_mv_into_one_table_replicated.sh b/tests/queries/0_stateless/03008_deduplication_several_mv_into_one_table_replicated.sh index e4358011e50..698e70d4064 100755 --- a/tests/queries/0_stateless/03008_deduplication_several_mv_into_one_table_replicated.sh +++ b/tests/queries/0_stateless/03008_deduplication_several_mv_into_one_table_replicated.sh @@ -36,7 +36,7 @@ for insert_method in "InsertSelect" "InsertValues"; do fi echo "$THIS_RUN" - $CLICKHOUSE_CLIENT --max_insert_block_size 1 -mq " + $CLICKHOUSE_CLIENT --max_insert_block_size 1 -nmq " $(python3 $CURDIR/03008_deduplication.python several_mv_into_one_table \ --insert-method $insert_method \ --table-engine $ENGINE \ diff --git a/tests/queries/0_stateless/03008_s3_plain_rewritable.sh b/tests/queries/0_stateless/03008_s3_plain_rewritable.sh index c13674b9bd1..8eea7940774 100755 --- a/tests/queries/0_stateless/03008_s3_plain_rewritable.sh +++ b/tests/queries/0_stateless/03008_s3_plain_rewritable.sh @@ -9,7 +9,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) ${CLICKHOUSE_CLIENT} --query "drop table if exists test_s3_mt" -${CLICKHOUSE_CLIENT} -m --query " +${CLICKHOUSE_CLIENT} -nm --query " create table test_s3_mt (a Int32, b Int64, c Int64) engine = MergeTree() partition by intDiv(a, 1000) order by tuple(a, b) settings disk = disk( name = 03008_s3_plain_rewritable, @@ -19,7 +19,7 @@ settings disk = disk( secret_access_key = clickhouse); " -${CLICKHOUSE_CLIENT} -m --query " +${CLICKHOUSE_CLIENT} -nm --query " insert into test_s3_mt (*) values (1, 2, 0), (2, 2, 2), (3, 1, 9), (4, 7, 7), (5, 10, 2), (6, 12, 5); insert into test_s3_mt (*) select number, number, number from numbers_mt(10000); select count(*) from test_s3_mt; @@ -31,13 +31,13 @@ ${CLICKHOUSE_CLIENT} --query "optimize 
table test_s3_mt final" ${CLICKHOUSE_CLIENT} -m --query " alter table test_s3_mt add projection test_s3_mt_projection (select * order by b)" 2>&1 | grep -Fq "SUPPORT_IS_DISABLED" -${CLICKHOUSE_CLIENT} -m --query " +${CLICKHOUSE_CLIENT} -nm --query " alter table test_s3_mt update c = 0 where a % 2 = 1; alter table test_s3_mt add column d Int64 after c; alter table test_s3_mt drop column c; " 2>&1 | grep -Fq "SUPPORT_IS_DISABLED" -${CLICKHOUSE_CLIENT} -m --query " +${CLICKHOUSE_CLIENT} -nm --query " detach table test_s3_mt; attach table test_s3_mt; " diff --git a/tests/queries/0_stateless/03032_dynamically_resize_filesystem_cache_2.sh b/tests/queries/0_stateless/03032_dynamically_resize_filesystem_cache_2.sh index 278ad6f0654..cba5317fcfa 100755 --- a/tests/queries/0_stateless/03032_dynamically_resize_filesystem_cache_2.sh +++ b/tests/queries/0_stateless/03032_dynamically_resize_filesystem_cache_2.sh @@ -7,7 +7,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) disk_name="s3_cache" -$CLICKHOUSE_CLIENT -m --query " +$CLICKHOUSE_CLIENT -nm --query " DROP TABLE IF EXISTS test; CREATE TABLE test (a String) engine=MergeTree() ORDER BY tuple() SETTINGS disk = '$disk_name'; INSERT INTO test SELECT randomString(1000); @@ -26,7 +26,7 @@ sed -i "s|$prev_max_size<\/max_size>|$new_max_size<\/max_siz # echo $prev_max_size # echo $new_max_size -$CLICKHOUSE_CLIENT -m --query " +$CLICKHOUSE_CLIENT -nm --query " set send_logs_level='fatal'; SYSTEM RELOAD CONFIG" @@ -36,7 +36,7 @@ $CLICKHOUSE_CLIENT --query "SELECT current_size <= max_size FROM system.filesyst sed -i "s|$new_max_size<\/max_size>|$prev_max_size<\/max_size>|" $config_path -$CLICKHOUSE_CLIENT -m --query " +$CLICKHOUSE_CLIENT -nm --query " set send_logs_level='fatal'; SYSTEM RELOAD CONFIG" diff --git a/tests/queries/0_stateless/03039_dynamic_aggregating_merge_tree.sh b/tests/queries/0_stateless/03039_dynamic_aggregating_merge_tree.sh index 8183b636549..9ea86105a3a 100755 --- a/tests/queries/0_stateless/03039_dynamic_aggregating_merge_tree.sh +++ b/tests/queries/0_stateless/03039_dynamic_aggregating_merge_tree.sh @@ -17,7 +17,7 @@ function test() $CH_CLIENT -q "select count(), dynamicType(d) from test group by dynamicType(d) order by count(), dynamicType(d)" $CH_CLIENT -q "select count(), sum from (select sumMerge(sum) as sum from test group by id, _part) group by sum order by sum, count()" - $CH_CLIENT -m -q "system start merges test; optimize table test final" + $CH_CLIENT -nm -q "system start merges test; optimize table test final" $CH_CLIENT -q "select count(), dynamicType(d) from test group by dynamicType(d) order by count(), dynamicType(d)" $CH_CLIENT -q "select count(), sum from (select sumMerge(sum) as sum from test group by id, _part) group by sum order by sum, count()" $CH_CLIENT -q "drop table test" diff --git a/tests/queries/0_stateless/03039_dynamic_collapsing_merge_tree.sh b/tests/queries/0_stateless/03039_dynamic_collapsing_merge_tree.sh index ddafa9db210..9a2a6dd957c 100755 --- a/tests/queries/0_stateless/03039_dynamic_collapsing_merge_tree.sh +++ b/tests/queries/0_stateless/03039_dynamic_collapsing_merge_tree.sh @@ -16,7 +16,7 @@ function test() $CH_CLIENT -q "insert into test select number, -1, 'str_' || toString(number) from numbers(50000, 100000)" $CH_CLIENT -q "select count(), dynamicType(d) from test group by dynamicType(d) order by count(), dynamicType(d)" - $CH_CLIENT -m -q "system start merges test; optimize table test final" + $CH_CLIENT -nm -q "system start merges test; optimize table test final" $CH_CLIENT -q 
"select count(), dynamicType(d) from test group by dynamicType(d) order by count(), dynamicType(d)" $CH_CLIENT -q "drop table test" } diff --git a/tests/queries/0_stateless/03039_dynamic_replacing_merge_tree.sh b/tests/queries/0_stateless/03039_dynamic_replacing_merge_tree.sh index b1046ca08ba..0199035a3df 100755 --- a/tests/queries/0_stateless/03039_dynamic_replacing_merge_tree.sh +++ b/tests/queries/0_stateless/03039_dynamic_replacing_merge_tree.sh @@ -17,7 +17,7 @@ function test() $CH_CLIENT -q "insert into test select number, 'str_' || toString(number) from numbers(50000, 100000)" $CH_CLIENT -q "select count(), dynamicType(d) from test group by dynamicType(d) order by count(), dynamicType(d)" - $CH_CLIENT -m -q "system start merges test; optimize table test final" + $CH_CLIENT -nm -q "system start merges test; optimize table test final" $CH_CLIENT -q "select count(), dynamicType(d) from test group by dynamicType(d) order by count(), dynamicType(d)" $CH_CLIENT -q "drop table test" } diff --git a/tests/queries/0_stateless/03039_dynamic_summing_merge_tree.sh b/tests/queries/0_stateless/03039_dynamic_summing_merge_tree.sh index f3693a2e7b1..e2ea5bc3466 100755 --- a/tests/queries/0_stateless/03039_dynamic_summing_merge_tree.sh +++ b/tests/queries/0_stateless/03039_dynamic_summing_merge_tree.sh @@ -17,7 +17,7 @@ function test() $CH_CLIENT -q "select count(), dynamicType(d) from test group by dynamicType(d) order by count(), dynamicType(d)" $CH_CLIENT -q "select count(), sum from test group by sum order by sum, count()" - $CH_CLIENT -m -q "system start merges test; optimize table test final" + $CH_CLIENT -nm -q "system start merges test; optimize table test final" $CH_CLIENT -q "select count(), dynamicType(d) from test group by dynamicType(d) order by count(), dynamicType(d)" $CH_CLIENT -q "select count(), sum from test group by sum order by sum, count()" $CH_CLIENT -q "drop table test" diff --git a/tests/queries/0_stateless/03039_dynamic_versioned_collapsing_merge_tree.sh b/tests/queries/0_stateless/03039_dynamic_versioned_collapsing_merge_tree.sh index bba92a2ee06..43607cf95a8 100755 --- a/tests/queries/0_stateless/03039_dynamic_versioned_collapsing_merge_tree.sh +++ b/tests/queries/0_stateless/03039_dynamic_versioned_collapsing_merge_tree.sh @@ -17,7 +17,7 @@ function test() $CH_CLIENT -q "insert into test select number, -1, number >= 75000 ? 
2 : 1, 'str_' || toString(number) from numbers(50000, 100000)" $CH_CLIENT -q "select count(), dynamicType(d) from test group by dynamicType(d) order by count(), dynamicType(d)" - $CH_CLIENT -m -q "system start merges test; optimize table test final" + $CH_CLIENT -nm -q "system start merges test; optimize table test final" $CH_CLIENT -q "select count(), dynamicType(d) from test group by dynamicType(d) order by count(), dynamicType(d)" $CH_CLIENT -q "drop table test" } diff --git a/tests/queries/0_stateless/03151_unload_index_race.sh b/tests/queries/0_stateless/03151_unload_index_race.sh index 4cdf06abae2..7e9dfa7cddc 100755 --- a/tests/queries/0_stateless/03151_unload_index_race.sh +++ b/tests/queries/0_stateless/03151_unload_index_race.sh @@ -42,8 +42,8 @@ function thread_alter_settings() { local TIMELIMIT=$((SECONDS+$1)) while [ $SECONDS -lt "$TIMELIMIT" ]; do - $CLICKHOUSE_CLIENT --query "ALTER TABLE t MODIFY SETTING primary_key_ratio_of_unique_prefix_values_to_skip_suffix_columns=0.$RANDOM" - $CLICKHOUSE_CLIENT --query "SYSTEM UNLOAD PRIMARY KEY t" + $CLICKHOUSE_CLIENT -n --query "ALTER TABLE t MODIFY SETTING primary_key_ratio_of_unique_prefix_values_to_skip_suffix_columns=0.$RANDOM" + $CLICKHOUSE_CLIENT -n --query "SYSTEM UNLOAD PRIMARY KEY t" sleep 0.0$RANDOM done } @@ -52,7 +52,7 @@ function thread_query_table() { local TIMELIMIT=$((SECONDS+$1)) while [ $SECONDS -lt "$TIMELIMIT" ]; do - COUNT=$($CLICKHOUSE_CLIENT --query "SELECT count() FROM t where not ignore(*);") + COUNT=$($CLICKHOUSE_CLIENT -n --query "SELECT count() FROM t where not ignore(*);") if [ "$COUNT" -ne "2000" ]; then echo "$COUNT" fi diff --git a/tests/queries/0_stateless/03172_error_log_table_not_empty.sh b/tests/queries/0_stateless/03172_error_log_table_not_empty.sh index d1afafa77a5..22a2fd82c64 100755 --- a/tests/queries/0_stateless/03172_error_log_table_not_empty.sh +++ b/tests/queries/0_stateless/03172_error_log_table_not_empty.sh @@ -15,7 +15,7 @@ errors_222=$($CLICKHOUSE_CLIENT -q "SELECT sum(value) FROM system.error_log WHER errors_333=$($CLICKHOUSE_CLIENT -q "SELECT sum(value) FROM system.error_log WHERE code = 333") # Throw three random errors: 111, 222 and 333 and wait for more than collect_interval_milliseconds to ensure system.error_log is flushed -$CLICKHOUSE_CLIENT -m -q " +$CLICKHOUSE_CLIENT -mn -q " SELECT throwIf(true, 'error_log', toInt16(111)) SETTINGS allow_custom_error_code_in_throwif=1; -- { serverError 111 } SELECT throwIf(true, 'error_log', toInt16(222)) SETTINGS allow_custom_error_code_in_throwif=1; -- { serverError 222 } SELECT throwIf(true, 'error_log', toInt16(333)) SETTINGS allow_custom_error_code_in_throwif=1; -- { serverError 333 } @@ -24,14 +24,14 @@ SYSTEM FLUSH LOGS; " # Check that the three random errors are propagated -$CLICKHOUSE_CLIENT -m -q " +$CLICKHOUSE_CLIENT -mn -q " SELECT sum(value) > $errors_111 FROM system.error_log WHERE code = 111; SELECT sum(value) > $errors_222 FROM system.error_log WHERE code = 222; SELECT sum(value) > $errors_333 FROM system.error_log WHERE code = 333; " # Ensure that if we throw them again, they're still propagated -$CLICKHOUSE_CLIENT -m -q " +$CLICKHOUSE_CLIENT -mn -q " SELECT throwIf(true, 'error_log', toInt16(111)) SETTINGS allow_custom_error_code_in_throwif=1; -- { serverError 111 } SELECT throwIf(true, 'error_log', toInt16(222)) SETTINGS allow_custom_error_code_in_throwif=1; -- { serverError 222 } SELECT throwIf(true, 'error_log', toInt16(333)) SETTINGS allow_custom_error_code_in_throwif=1; -- { serverError 333 } @@ -39,7 +39,7 @@ SELECT 
sleep(2) format NULL; SYSTEM FLUSH LOGS; " -$CLICKHOUSE_CLIENT -m -q " +$CLICKHOUSE_CLIENT -mn -q " SELECT sum(value) > $(($errors_111+1)) FROM system.error_log WHERE code = 111; SELECT sum(value) > $(($errors_222+1)) FROM system.error_log WHERE code = 222; SELECT sum(value) > $(($errors_333+1)) FROM system.error_log WHERE code = 333; diff --git a/tests/queries/0_stateless/03203_hive_style_partitioning.sh b/tests/queries/0_stateless/03203_hive_style_partitioning.sh index fdd0fcd8ec5..60e8a6e9faa 100755 --- a/tests/queries/0_stateless/03203_hive_style_partitioning.sh +++ b/tests/queries/0_stateless/03203_hive_style_partitioning.sh @@ -8,7 +8,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) $CLICKHOUSE_LOCAL -q "SELECT 'TESTING THE FILE HIVE PARTITIONING'" -$CLICKHOUSE_LOCAL -q """ +$CLICKHOUSE_LOCAL -n -q """ set use_hive_partitioning = 1; SELECT *, column0 FROM file('$CURDIR/data_hive/partitioning/column0=Elizabeth/sample.parquet') LIMIT 10; @@ -22,20 +22,20 @@ SELECT toTypeName(array), toTypeName(float) FROM file('$CURDIR/data_hive/partiti SELECT count(*) FROM file('$CURDIR/data_hive/partitioning/number=42/date=2020-01-01/sample.parquet') WHERE number = 42; """ -$CLICKHOUSE_LOCAL -q """ +$CLICKHOUSE_LOCAL -n -q """ set use_hive_partitioning = 1; SELECT identifier FROM file('$CURDIR/data_hive/partitioning/identifier=*/email.csv') LIMIT 2; SELECT a FROM file('$CURDIR/data_hive/partitioning/a=b/a=b/sample.parquet') LIMIT 1; """ -$CLICKHOUSE_LOCAL -q """ +$CLICKHOUSE_LOCAL -n -q """ set use_hive_partitioning = 1; SELECT *, column0 FROM file('$CURDIR/data_hive/partitioning/column0=Elizabeth/column0=Elizabeth1/sample.parquet') LIMIT 10; """ 2>&1 | grep -c "INCORRECT_DATA" -$CLICKHOUSE_LOCAL -q """ +$CLICKHOUSE_LOCAL -n -q """ set use_hive_partitioning = 0; SELECT *, non_existing_column FROM file('$CURDIR/data_hive/partitioning/non_existing_column=Elizabeth/sample.parquet') LIMIT 10; @@ -45,14 +45,14 @@ SELECT *, non_existing_column FROM file('$CURDIR/data_hive/partitioning/non_exis $CLICKHOUSE_LOCAL -q "SELECT 'TESTING THE URL PARTITIONING'" -$CLICKHOUSE_LOCAL -q """ +$CLICKHOUSE_LOCAL -n -q """ set use_hive_partitioning = 1; SELECT *, column0 FROM url('http://localhost:11111/test/hive_partitioning/column0=Elizabeth/sample.parquet') LIMIT 10; SELECT *, non_existing_column FROM url('http://localhost:11111/test/hive_partitioning/non_existing_column=Elizabeth/sample.parquet') LIMIT 10;""" -$CLICKHOUSE_LOCAL -q """ +$CLICKHOUSE_LOCAL -n -q """ set use_hive_partitioning = 0; SELECT *, _column0 FROM url('http://localhost:11111/test/hive_partitioning/column0=Elizabeth/sample.parquet') LIMIT 10; @@ -62,7 +62,7 @@ SELECT *, _column0 FROM url('http://localhost:11111/test/hive_partitioning/colum $CLICKHOUSE_LOCAL -q "SELECT 'TESTING THE S3 PARTITIONING'" -$CLICKHOUSE_CLIENT -q """ +$CLICKHOUSE_CLIENT -n -q """ set use_hive_partitioning = 1; SELECT *, column0 FROM s3('http://localhost:11111/test/hive_partitioning/column0=Elizabeth/sample.parquet') LIMIT 10; @@ -71,7 +71,7 @@ SELECT *, non_existing_column FROM s3('http://localhost:11111/test/hive_partitio SELECT *, column0 FROM s3('http://localhost:11111/test/hive_partitioning/column0=*/sample.parquet') WHERE column0 = 'Elizabeth' LIMIT 10; """ -$CLICKHOUSE_CLIENT -q """ +$CLICKHOUSE_CLIENT -n -q """ set use_hive_partitioning = 0; SELECT *, _column0 FROM s3('http://localhost:11111/test/hive_partitioning/column0=Elizabeth/sample.parquet') LIMIT 10; @@ -79,7 +79,7 @@ SELECT *, _column0 FROM s3('http://localhost:11111/test/hive_partitioning/column 
$CLICKHOUSE_LOCAL -q "SELECT 'TESTING THE S3CLUSTER PARTITIONING'" -$CLICKHOUSE_CLIENT -q """ +$CLICKHOUSE_CLIENT -n -q """ set use_hive_partitioning = 1; SELECT *, column0 FROM s3Cluster(test_cluster_one_shard_three_replicas_localhost, 'http://localhost:11111/test/hive_partitioning/column0=Elizabeth/sample.parquet') LIMIT 10; diff --git a/tests/queries/0_stateless/03214_backup_and_clear_old_temporary_directories.sh b/tests/queries/0_stateless/03214_backup_and_clear_old_temporary_directories.sh index 6bfe82a91a6..e0c8f08e695 100755 --- a/tests/queries/0_stateless/03214_backup_and_clear_old_temporary_directories.sh +++ b/tests/queries/0_stateless/03214_backup_and_clear_old_temporary_directories.sh @@ -8,7 +8,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # In this test we restore from "/tests/queries/0_stateless/backups/mt_250_parts.zip" backup_name="$($CURDIR/helpers/install_predefined_backup.sh mt_250_parts.zip)" -${CLICKHOUSE_CLIENT} -m --query " +${CLICKHOUSE_CLIENT} -nm --query " DROP TABLE IF EXISTS manyparts; CREATE TABLE manyparts (x Int64) ENGINE=MergeTree ORDER BY tuple() SETTINGS merge_tree_clear_old_temporary_directories_interval_seconds=1, temporary_directories_lifetime=1; " @@ -16,7 +16,7 @@ CREATE TABLE manyparts (x Int64) ENGINE=MergeTree ORDER BY tuple() SETTINGS merg # RESTORE must protect its temporary directories from removing. ${CLICKHOUSE_CLIENT} --query "RESTORE TABLE default.mt_250_parts AS manyparts FROM Disk('backups', '${backup_name}') SETTINGS allow_different_table_def=true" | grep -o "RESTORED" -${CLICKHOUSE_CLIENT} -m --query " +${CLICKHOUSE_CLIENT} -nm --query " SELECT count(), sum(x) FROM manyparts; DROP TABLE manyparts; "