From cef8bcc01169ae320f0c519f197df45efa26b217 Mon Sep 17 00:00:00 2001
From: sundy-li <543950155@qq.com>
Date: Tue, 5 Jan 2021 10:10:19 +0000
Subject: [PATCH 001/931] (feature) Use Map data type for system logs tables

---
 src/Core/Settings.cpp                       |  42 +++----
 src/Core/Settings.h                         |   2 +-
 src/Interpreters/InterpreterCreateQuery.cpp |   3 +-
 src/Interpreters/ProfileEventsExt.cpp       |  47 +++----
 src/Interpreters/ProfileEventsExt.h         |   4 +-
 src/Interpreters/QueryLog.cpp               | 117 ++++++++----------
 src/Interpreters/QueryThreadLog.cpp         |  10 +-
 .../System/StorageSystemProcesses.cpp       |  16 +--
 8 files changed, 100 insertions(+), 141 deletions(-)

diff --git a/src/Core/Settings.cpp b/src/Core/Settings.cpp
index 2a8dd2acdc0..a72992d4af7 100644
--- a/src/Core/Settings.cpp
+++ b/src/Core/Settings.cpp
@@ -2,6 +2,7 @@
 #include
 #include
+#include
 #include
 #include
 #include
@@ -56,40 +57,27 @@ void Settings::loadSettingsFromConfig(const String & path, const Poco::Util::Abs
     }
 }

-void Settings::dumpToArrayColumns(IColumn * column_names_, IColumn * column_values_, bool changed_only)
+void Settings::dumpToMapColumn(IColumn * column, bool changed_only)
 {
     /// Convert ptr and make simple check
-    auto * column_names = (column_names_) ? &typeid_cast<ColumnArray &>(*column_names_) : nullptr;
-    auto * column_values = (column_values_) ? &typeid_cast<ColumnArray &>(*column_values_) : nullptr;
+    auto * column_map = column ? &typeid_cast<ColumnMap &>(*column) : nullptr;
+    if (!column_map)
+        return;

-    size_t count = 0;
+    auto & offsets = column_map->getNestedColumn().getOffsets();
+    auto & tuple_column = column_map->getNestedData();
+    auto & key_column = tuple_column.getColumn(0);
+    auto & value_column = tuple_column.getColumn(1);

+    size_t size = 0;
     for (const auto & setting : all(changed_only ? SKIP_UNCHANGED : SKIP_NONE))
     {
-        if (column_names)
-        {
-            auto name = setting.getName();
-            column_names->getData().insertData(name.data(), name.size());
-        }
-        if (column_values)
-            column_values->getData().insert(setting.getValueString());
-        ++count;
-    }
-
-    if (column_names)
-    {
-        auto & offsets = column_names->getOffsets();
-        offsets.push_back(offsets.back() + count);
-    }
-
-    /// Nested columns case
-    bool the_same_offsets = column_names && column_values && column_names->getOffsetsPtr() == column_values->getOffsetsPtr();
-
-    if (column_values && !the_same_offsets)
-    {
-        auto & offsets = column_values->getOffsets();
-        offsets.push_back(offsets.back() + count);
+        auto name = setting.getName();
+        key_column.insertData(name.data(), name.size());
+        value_column.insert(setting.getValueString());
+        size++;
     }
+    offsets.push_back(offsets.back() + size);
 }

 void Settings::addProgramOptions(boost::program_options::options_description & options)
diff --git a/src/Core/Settings.h b/src/Core/Settings.h
index 00d4682332d..4453a74c713 100644
--- a/src/Core/Settings.h
+++ b/src/Core/Settings.h
@@ -523,7 +523,7 @@ struct Settings : public BaseSettings
     void loadSettingsFromConfig(const String & path, const Poco::Util::AbstractConfiguration & config);

     /// Dumps the settings to a column of type Map(String, String)
-    void dumpToArrayColumns(IColumn * column_names, IColumn * column_values, bool changed_only = true);
+    void dumpToMapColumn(IColumn * column, bool changed_only = true);

     /// Adds program options to set the settings from a command line.
     /// (Don't forget to call notify() on the `variables_map` after parsing it!)
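The test changes later in this series show the query-side effect of this refactoring. A minimal sketch, assuming a server built with these patches (so the log tables expose Map-typed columns); `max_threads` is just an illustrative key:

```sql
-- Before: two parallel arrays had to be indexed together.
SELECT Settings.Values[indexOf(Settings.Names, 'max_threads')]
FROM system.query_log
LIMIT 1;

-- After: one Map(String, String) column, indexed directly by key.
SELECT Settings['max_threads']
FROM system.query_log
LIMIT 1;
```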
diff --git a/src/Interpreters/InterpreterCreateQuery.cpp b/src/Interpreters/InterpreterCreateQuery.cpp
index 9b087b3d2e5..5a553f89931 100644
--- a/src/Interpreters/InterpreterCreateQuery.cpp
+++ b/src/Interpreters/InterpreterCreateQuery.cpp
@@ -618,7 +618,8 @@ void InterpreterCreateQuery::validateTableStructure(const ASTCreateQuery & creat
         }
     }

-    if (!create.attach && !settings.allow_experimental_map_type)
+    // enable allow_experimental_map_type for system tables
+    if (create.database != "system" && !create.attach && !settings.allow_experimental_map_type)
     {
         for (const auto & name_and_type_pair : properties.columns.getAllPhysical())
         {
diff --git a/src/Interpreters/ProfileEventsExt.cpp b/src/Interpreters/ProfileEventsExt.cpp
index bca845c4248..ec3131d39a3 100644
--- a/src/Interpreters/ProfileEventsExt.cpp
+++ b/src/Interpreters/ProfileEventsExt.cpp
@@ -3,6 +3,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
 #include
@@ -11,14 +12,18 @@ namespace ProfileEvents
{

 /// Put implementation here to avoid extra linking dependencies for clickhouse_common_io
-void dumpToArrayColumns(const Counters & counters, DB::IColumn * column_names_, DB::IColumn * column_values_, bool nonzero_only)
+void dumpToMapColumn(const Counters & counters, DB::IColumn * column, bool nonzero_only)
 {
-    /// Convert ptr and make simple check
-    auto * column_names = (column_names_) ? &typeid_cast<DB::ColumnArray &>(*column_names_) : nullptr;
-    auto * column_values = (column_values_) ? &typeid_cast<DB::ColumnArray &>(*column_values_) : nullptr;
+    auto * column_map = column ? &typeid_cast<DB::ColumnMap &>(*column) : nullptr;
+    if (!column_map)
+        return;
+
+    auto & offsets = column_map->getNestedColumn().getOffsets();
+    auto & tuple_column = column_map->getNestedData();
+    auto & key_column = tuple_column.getColumn(0);
+    auto & value_column = tuple_column.getColumn(1);

     size_t size = 0;
-
     for (Event event = 0; event < Counters::num_counters; ++event)
     {
         UInt64 value = counters[event].load(std::memory_order_relaxed);
@@ -26,34 +31,12 @@ void dumpToArrayColumns(const Counters & counters, DB::IColumn * column_names_,
         if (nonzero_only && 0 == value)
             continue;

-        ++size;
-
-        if (column_names)
-        {
-            const char * desc = ProfileEvents::getName(event);
-            column_names->getData().insertData(desc, strlen(desc));
-        }
-
-        if (column_values)
-            column_values->getData().insert(value);
-    }
-
-    if (column_names)
-    {
-        auto & offsets = column_names->getOffsets();
-        offsets.push_back(offsets.back() + size);
-    }
-
-    if (column_values)
-    {
-        /// Nested columns case
-        bool the_same_offsets = column_names && column_names->getOffsetsPtr().get() == column_values->getOffsetsPtr().get();
-        if (!the_same_offsets)
-        {
-            auto & offsets = column_values->getOffsets();
-            offsets.push_back(offsets.back() + size);
-        }
+        const char * desc = ProfileEvents::getName(event);
+        key_column.insertData(desc, strlen(desc));
+        value_column.insert(value);
+        size++;
     }
+    offsets.push_back(offsets.back() + size);
 }

 }
diff --git a/src/Interpreters/ProfileEventsExt.h b/src/Interpreters/ProfileEventsExt.h
index 2ae9941b67f..7d513f0cd02 100644
--- a/src/Interpreters/ProfileEventsExt.h
+++ b/src/Interpreters/ProfileEventsExt.h
@@ -6,7 +6,7 @@
 namespace ProfileEvents
 {

-/// Dumps profile events to two columns Array(String) and Array(UInt64)
-void dumpToArrayColumns(const Counters & counters, DB::IColumn * column_names, DB::IColumn * column_value, bool nonzero_only = true);
+/// Dumps profile events to a column of type Map(String, UInt64)
+void dumpToMapColumn(const Counters & counters, DB::IColumn * column, bool nonzero_only = true);

 }
diff --git a/src/Interpreters/QueryLog.cpp b/src/Interpreters/QueryLog.cpp
index 78c4072cd4c..5b62b73e841 100644
--- a/src/Interpreters/QueryLog.cpp
+++ b/src/Interpreters/QueryLog.cpp
@@ -3,6 +3,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
 #include
@@ -10,6 +11,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
 #include
@@ -19,6 +21,7 @@
 #include
 #include
 #include
+#include

 namespace DB
@@ -35,67 +38,61 @@ Block QueryLogElement::createBlock()
         {"ExceptionWhileProcessing", static_cast<Int8>(EXCEPTION_WHILE_PROCESSING)}
     });

-    return
-    {
-        {std::move(query_status_datatype), "type"},
-        {std::make_shared<DataTypeDate>(), "event_date"},
-        {std::make_shared<DataTypeDateTime>(), "event_time"},
-        {std::make_shared<DataTypeDateTime64>(6), "event_time_microseconds"},
-        {std::make_shared<DataTypeDateTime>(), "query_start_time"},
-        {std::make_shared<DataTypeDateTime64>(6), "query_start_time_microseconds"},
-        {std::make_shared<DataTypeUInt64>(), "query_duration_ms"},
+    return {
+        {std::move(query_status_datatype), "type"},
+        {std::make_shared<DataTypeDate>(), "event_date"},
+        {std::make_shared<DataTypeDateTime>(), "event_time"},
+        {std::make_shared<DataTypeDateTime64>(6), "event_time_microseconds"},
+        {std::make_shared<DataTypeDateTime>(), "query_start_time"},
+        {std::make_shared<DataTypeDateTime64>(6), "query_start_time_microseconds"},
+        {std::make_shared<DataTypeUInt64>(), "query_duration_ms"},

-        {std::make_shared<DataTypeUInt64>(), "read_rows"},
-        {std::make_shared<DataTypeUInt64>(), "read_bytes"},
-        {std::make_shared<DataTypeUInt64>(), "written_rows"},
-        {std::make_shared<DataTypeUInt64>(), "written_bytes"},
-        {std::make_shared<DataTypeUInt64>(), "result_rows"},
-        {std::make_shared<DataTypeUInt64>(), "result_bytes"},
-        {std::make_shared<DataTypeUInt64>(), "memory_usage"},
+        {std::make_shared<DataTypeUInt64>(), "read_rows"},
+        {std::make_shared<DataTypeUInt64>(), "read_bytes"},
+        {std::make_shared<DataTypeUInt64>(), "written_rows"},
+        {std::make_shared<DataTypeUInt64>(), "written_bytes"},
+        {std::make_shared<DataTypeUInt64>(), "result_rows"},
+        {std::make_shared<DataTypeUInt64>(), "result_bytes"},
+        {std::make_shared<DataTypeUInt64>(), "memory_usage"},

-        {std::make_shared<DataTypeString>(), "current_database"},
-        {std::make_shared<DataTypeString>(), "query"},
-        {std::make_shared<DataTypeUInt64>(), "normalized_query_hash"},
+        {std::make_shared<DataTypeString>(), "current_database"},
+        {std::make_shared<DataTypeString>(), "query"},
+        {std::make_shared<DataTypeUInt64>(), "normalized_query_hash"},
         {std::make_shared<DataTypeLowCardinality>(std::make_shared<DataTypeString>()), "query_kind"},
-        {std::make_shared<DataTypeArray>(
-            std::make_shared<DataTypeLowCardinality>(std::make_shared<DataTypeString>())), "databases"},
-        {std::make_shared<DataTypeArray>(
-            std::make_shared<DataTypeLowCardinality>(std::make_shared<DataTypeString>())), "tables"},
-        {std::make_shared<DataTypeArray>(
-            std::make_shared<DataTypeLowCardinality>(std::make_shared<DataTypeString>())), "columns"},
-        {std::make_shared<DataTypeInt32>(), "exception_code"},
-        {std::make_shared<DataTypeString>(), "exception"},
-        {std::make_shared<DataTypeString>(), "stack_trace"},
+        {std::make_shared<DataTypeArray>(std::make_shared<DataTypeLowCardinality>(std::make_shared<DataTypeString>())), "databases"},
+        {std::make_shared<DataTypeArray>(std::make_shared<DataTypeLowCardinality>(std::make_shared<DataTypeString>())), "tables"},
+        {std::make_shared<DataTypeArray>(std::make_shared<DataTypeLowCardinality>(std::make_shared<DataTypeString>())), "columns"},
+        {std::make_shared<DataTypeInt32>(), "exception_code"},
+        {std::make_shared<DataTypeString>(), "exception"},
+        {std::make_shared<DataTypeString>(), "stack_trace"},

-        {std::make_shared<DataTypeUInt8>(), "is_initial_query"},
-        {std::make_shared<DataTypeString>(), "user"},
-        {std::make_shared<DataTypeString>(), "query_id"},
-        {DataTypeFactory::instance().get("IPv6"), "address"},
-        {std::make_shared<DataTypeUInt16>(), "port"},
-        {std::make_shared<DataTypeString>(), "initial_user"},
-        {std::make_shared<DataTypeString>(), "initial_query_id"},
-        {DataTypeFactory::instance().get("IPv6"), "initial_address"},
-        {std::make_shared<DataTypeUInt16>(), "initial_port"},
-        {std::make_shared<DataTypeUInt8>(), "interface"},
-        {std::make_shared<DataTypeString>(), "os_user"},
-        {std::make_shared<DataTypeString>(), "client_hostname"},
-        {std::make_shared<DataTypeString>(), "client_name"},
-        {std::make_shared<DataTypeUInt32>(), "client_revision"},
-        {std::make_shared<DataTypeUInt32>(), "client_version_major"},
-        {std::make_shared<DataTypeUInt32>(), "client_version_minor"},
-        {std::make_shared<DataTypeUInt32>(), "client_version_patch"},
-        {std::make_shared<DataTypeUInt8>(), "http_method"},
-        {std::make_shared<DataTypeString>(), "http_user_agent"},
-        {std::make_shared<DataTypeString>(), "forwarded_for"},
-        {std::make_shared<DataTypeString>(), "quota_key"},
+        {std::make_shared<DataTypeUInt8>(), "is_initial_query"},
+        {std::make_shared<DataTypeString>(), "user"},
+        {std::make_shared<DataTypeString>(), "query_id"},
+        {DataTypeFactory::instance().get("IPv6"), "address"},
+        {std::make_shared<DataTypeUInt16>(), "port"},
+        {std::make_shared<DataTypeString>(), "initial_user"},
+        {std::make_shared<DataTypeString>(), "initial_query_id"},
+        {DataTypeFactory::instance().get("IPv6"), "initial_address"},
+        {std::make_shared<DataTypeUInt16>(), "initial_port"},
+        {std::make_shared<DataTypeUInt8>(), "interface"},
+        {std::make_shared<DataTypeString>(), "os_user"},
+        {std::make_shared<DataTypeString>(), "client_hostname"},
+        {std::make_shared<DataTypeString>(), "client_name"},
+        {std::make_shared<DataTypeUInt32>(), "client_revision"},
+        {std::make_shared<DataTypeUInt32>(), "client_version_major"},
+        {std::make_shared<DataTypeUInt32>(), "client_version_minor"},
+        {std::make_shared<DataTypeUInt32>(), "client_version_patch"},
+        {std::make_shared<DataTypeUInt8>(), "http_method"},
+        {std::make_shared<DataTypeString>(), "http_user_agent"},
+        {std::make_shared<DataTypeString>(), "forwarded_for"},
+        {std::make_shared<DataTypeString>(), "quota_key"},

-        {std::make_shared<DataTypeUInt32>(), "revision"},
+        {std::make_shared<DataTypeUInt32>(), "revision"},

         {std::make_shared<DataTypeArray>(std::make_shared<DataTypeUInt64>()), "thread_ids"},
-        {std::make_shared<DataTypeArray>(std::make_shared<DataTypeString>()), "ProfileEvents.Names"},
-        {std::make_shared<DataTypeArray>(std::make_shared<DataTypeUInt64>()), "ProfileEvents.Values"},
-        {std::make_shared<DataTypeArray>(std::make_shared<DataTypeString>()), "Settings.Names"},
-        {std::make_shared<DataTypeArray>(std::make_shared<DataTypeString>()), "Settings.Values"}
+        {std::make_shared<DataTypeMap>(std::make_shared<DataTypeString>(), std::make_shared<DataTypeUInt64>()), "ProfileEvents"},
+        {std::make_shared<DataTypeMap>(std::make_shared<DataTypeString>(), std::make_shared<DataTypeString>()), "Settings"},
     };
 }
@@ -165,26 +162,22 @@ void QueryLogElement::appendToBlock(MutableColumns & columns) const

     if (profile_counters)
     {
-        auto * column_names = columns[i++].get();
-        auto * column_values = columns[i++].get();
-        ProfileEvents::dumpToArrayColumns(*profile_counters, column_names, column_values, true);
+        auto * column = columns[i++].get();
+        ProfileEvents::dumpToMapColumn(*profile_counters, column, true);
     }
     else
     {
         columns[i++]->insertDefault();
-        columns[i++]->insertDefault();
     }

     if (query_settings)
     {
-        auto * column_names = columns[i++].get();
-        auto * column_values = columns[i++].get();
-        query_settings->dumpToArrayColumns(column_names, column_values, true);
+        auto * column = columns[i++].get();
+        query_settings->dumpToMapColumn(column, true);
     }
     else
     {
         columns[i++]->insertDefault();
-        columns[i++]->insertDefault();
     }
 }
diff --git a/src/Interpreters/QueryThreadLog.cpp b/src/Interpreters/QueryThreadLog.cpp
index 5d325c05549..1f4441f6fd5 100644
--- a/src/Interpreters/QueryThreadLog.cpp
+++ b/src/Interpreters/QueryThreadLog.cpp
@@ -5,6 +5,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
 #include
@@ -66,8 +67,7 @@ Block QueryThreadLogElement::createBlock()

         {std::make_shared<DataTypeUInt32>(), "revision"},

-        {std::make_shared<DataTypeArray>(std::make_shared<DataTypeString>()), "ProfileEvents.Names"},
-        {std::make_shared<DataTypeArray>(std::make_shared<DataTypeUInt64>()), "ProfileEvents.Values"}
+        {std::make_shared<DataTypeMap>(std::make_shared<DataTypeString>(), std::make_shared<DataTypeUInt64>()), "ProfileEvents"},
     };
 }
@@ -104,14 +104,12 @@ void QueryThreadLogElement::appendToBlock(MutableColumns & columns) const

     if (profile_counters)
     {
-        auto * column_names = columns[i++].get();
-        auto * column_values = columns[i++].get();
-        dumpToArrayColumns(*profile_counters, column_names, column_values, true);
+        auto * column = columns[i++].get();
+        ProfileEvents::dumpToMapColumn(*profile_counters, column, true);
     }
     else
     {
         columns[i++]->insertDefault();
-        columns[i++]->insertDefault();
     }
 }
diff --git a/src/Storages/System/StorageSystemProcesses.cpp b/src/Storages/System/StorageSystemProcesses.cpp
index 178eedd9ad7..9b3136431c4 100644
--- a/src/Storages/System/StorageSystemProcesses.cpp
+++ b/src/Storages/System/StorageSystemProcesses.cpp
@@ -123,28 +123,24 @@ void StorageSystemProcesses::fillData(MutableColumns & res_columns, const Contex
         }

         {
-            IColumn * column_profile_events_names = res_columns[i++].get();
-            IColumn * column_profile_events_values = res_columns[i++].get();
+            IColumn * column = res_columns[i++].get();

             if (process.profile_counters)
-                ProfileEvents::dumpToArrayColumns(*process.profile_counters, column_profile_events_names, column_profile_events_values, true);
+                ProfileEvents::dumpToMapColumn(*process.profile_counters, column, true);
             else
             {
-                column_profile_events_names->insertDefault();
-                column_profile_events_values->insertDefault();
+                column->insertDefault();
             }
         }

         {
-            IColumn * column_settings_names = res_columns[i++].get();
-            IColumn * column_settings_values = res_columns[i++].get();
+            IColumn * column = res_columns[i++].get();

             if (process.query_settings)
-                process.query_settings->dumpToArrayColumns(column_settings_names, column_settings_values, true);
+                process.query_settings->dumpToMapColumn(column, true);
             else
             {
-                column_settings_names->insertDefault();
-                column_settings_values->insertDefault();
+                column->insertDefault();
             }
         }
     }

From dc0f1f7a373bd4bd1e254fc3e2528b3eaccc760f Mon Sep 17 00:00:00 2001
From: sundy-li <543950155@qq.com>
Date: Tue, 5 Jan 2021 10:16:24 +0000
Subject: [PATCH 002/931] Remove unused include

---
 src/Interpreters/QueryLog.cpp | 1 -
 1 file changed, 1 deletion(-)

diff --git a/src/Interpreters/QueryLog.cpp b/src/Interpreters/QueryLog.cpp
index 5b62b73e841..2ef9c12f79f 100644
--- a/src/Interpreters/QueryLog.cpp
+++ b/src/Interpreters/QueryLog.cpp
@@ -3,7 +3,6 @@
 #include
 #include
 #include
-#include
 #include
 #include
 #include

From afb2cbe8130aac15f29074963294f536fe5c0b82 Mon Sep 17 00:00:00 2001
From: sundy-li <543950155@qq.com>
Date: Tue, 5 Jan 2021 10:17:26 +0000
Subject: [PATCH 003/931] Better code format

---
 src/Interpreters/QueryLog.cpp | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/src/Interpreters/QueryLog.cpp b/src/Interpreters/QueryLog.cpp
index 2ef9c12f79f..e8de1987f02 100644
--- a/src/Interpreters/QueryLog.cpp
+++ b/src/Interpreters/QueryLog.cpp
@@ -37,7 +37,8 @@ Block QueryLogElement::createBlock()
         {"ExceptionWhileProcessing", static_cast<Int8>(EXCEPTION_WHILE_PROCESSING)}
     });

-    return {
+    return
+    {
         {std::move(query_status_datatype), "type"},
         {std::make_shared<DataTypeDate>(), "event_date"},
         {std::make_shared<DataTypeDateTime>(), "event_time"},

From 3dda607059617690586f222ef03f99b1dcc09840 Mon Sep 17 00:00:00 2001
From: sundy-li <543950155@qq.com>
Date: Tue, 5 Jan 2021 11:22:53 +0000
Subject: [PATCH 004/931] Update tests

---
 .../integration/test_distributed_inter_server_secret/test.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/tests/integration/test_distributed_inter_server_secret/test.py b/tests/integration/test_distributed_inter_server_secret/test.py
index b1daf2271d0..00cf3bce25d 100644
--- a/tests/integration/test_distributed_inter_server_secret/test.py
+++ b/tests/integration/test_distributed_inter_server_secret/test.py
@@ -83,7 +83,7 @@ def get_query_user_info(node, query_pattern):
 def get_query_setting_on_shard(node, query_pattern, setting):
     node.query("SYSTEM FLUSH LOGS")
     return node.query("""
-    SELECT (arrayFilter(x -> ((x.1) = '{}'), arrayZip(Settings.Names, Settings.Values))[1]).2
+    SELECT Settings.Values['%s']
     FROM system.query_log
     WHERE
         query LIKE '%{}%' AND
        query NOT LIKE
'%system.query_log%' AND
        type = 'QueryFinish'
     LIMIT 1
-    """.format(setting, query_pattern)).strip()
+    """.format(setting, setting, query_pattern)).strip()

 def test_insecure():
     n1.query('SELECT * FROM dist_insecure')

From 3b62c5b50be27339ef0917533be2d3c5422fddde Mon Sep 17 00:00:00 2001
From: sundy-li <543950155@qq.com>
Date: Wed, 6 Jan 2021 07:26:16 +0000
Subject: [PATCH 005/931] Fix test

---
 tests/integration/test_distributed_inter_server_secret/test.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/tests/integration/test_distributed_inter_server_secret/test.py b/tests/integration/test_distributed_inter_server_secret/test.py
index 00cf3bce25d..52aa3decde3 100644
--- a/tests/integration/test_distributed_inter_server_secret/test.py
+++ b/tests/integration/test_distributed_inter_server_secret/test.py
@@ -83,7 +83,7 @@ def get_query_setting_on_shard(node, query_pattern, setting):
     node.query("SYSTEM FLUSH LOGS")
     return node.query("""
-    SELECT Settings.Values['%s']
+    SELECT Settings.Values['{}']
     FROM system.query_log
     WHERE
         query LIKE '%{}%' AND

From c972977c17cb5e6c02e08bc80ae351f62f086383 Mon Sep 17 00:00:00 2001
From: sundy-li <543950155@qq.com>
Date: Thu, 21 Jan 2021 12:15:44 +0800
Subject: [PATCH 006/931] Update tests

---
 src/Storages/System/StorageSystemProcesses.cpp            |  7 +++----
 .../00634_performance_introspection_and_logging.sh        |  7 +++----
 .../00731_long_merge_tree_select_opened_files.sh          |  2 +-
 .../00933_test_fix_extra_seek_on_compressed_cache.sh      |  2 +-
 .../01076_array_join_prewhere_const_folding.sql           |  7 ++-----
 tests/queries/0_stateless/01231_log_queries_min_type.sql  |  3 +--
 tests/queries/0_stateless/01268_procfs_metrics.sh         |  2 +-
 .../0_stateless/01343_min_bytes_to_use_mmap_io.sql        |  2 +-
 .../0_stateless/01344_min_bytes_to_use_mmap_io_index.sql  |  2 +-
 .../01360_materialized_view_with_join_on_query_log.sql    |  1 +
 tests/queries/0_stateless/01413_rows_events.sql           |  9 +++------
 tests/queries/0_stateless/01475_read_subcolumns.sql       |  8 ++++----
 tests/queries/0_stateless/01533_multiple_nested.sql       |  8 ++++----
 13 files changed, 26 insertions(+), 34 deletions(-)

diff --git a/src/Storages/System/StorageSystemProcesses.cpp b/src/Storages/System/StorageSystemProcesses.cpp
index 9b3136431c4..8f35017c86a 100644
--- a/src/Storages/System/StorageSystemProcesses.cpp
+++ b/src/Storages/System/StorageSystemProcesses.cpp
@@ -1,6 +1,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
 #include
@@ -59,10 +60,8 @@ NamesAndTypesList StorageSystemProcesses::getNamesAndTypes()
         {"query", std::make_shared<DataTypeString>()},

         {"thread_ids", std::make_shared<DataTypeArray>(std::make_shared<DataTypeUInt64>())},
-        {"ProfileEvents.Names", std::make_shared<DataTypeArray>(std::make_shared<DataTypeString>())},
-        {"ProfileEvents.Values", std::make_shared<DataTypeArray>(std::make_shared<DataTypeUInt64>())},
-        {"Settings.Names", std::make_shared<DataTypeArray>(std::make_shared<DataTypeString>())},
-        {"Settings.Values", std::make_shared<DataTypeArray>(std::make_shared<DataTypeString>())},
+        {"ProfileEvents", std::make_shared<DataTypeMap>(std::make_shared<DataTypeString>(), std::make_shared<DataTypeUInt64>())},
+        {"Settings", std::make_shared<DataTypeMap>(std::make_shared<DataTypeString>(), std::make_shared<DataTypeString>())},
     };
 }

diff --git a/tests/queries/0_stateless/00634_performance_introspection_and_logging.sh b/tests/queries/0_stateless/00634_performance_introspection_and_logging.sh
index c645bea23b3..366a578bf05 100755
--- a/tests/queries/0_stateless/00634_performance_introspection_and_logging.sh
+++ b/tests/queries/0_stateless/00634_performance_introspection_and_logging.sh
@@ -40,16 +40,15 @@
 $CLICKHOUSE_CLIENT $settings -q "SYSTEM FLUSH LOGS"

 $CLICKHOUSE_CLIENT $settings -q "
 WITH
any(query_duration_ms*1000) AS duration, - sumIf(PV, PN = 'RealTimeMicroseconds') AS threads_realtime, - sumIf(PV, PN IN ('UserTimeMicroseconds', 'SystemTimeMicroseconds', 'OSIOWaitMicroseconds', 'OSCPUWaitMicroseconds')) AS threads_time_user_system_io + sum(ProfileEvents['RealTimeMicroseconds']) AS threads_realtime, + sum(ProfileEvents['UserTimeMicroseconds'] + ProfileEvents['SystemTimeMicroseconds'] + ProfileEvents['OSIOWaitMicroseconds'] + ProfileEvents['OSCPUWaitMicroseconds']) AS threads_time_user_system_io SELECT -- duration, threads_realtime, threads_time_user_system_io, threads_realtime >= 0.99 * duration, threads_realtime >= threads_time_user_system_io, any(length(thread_ids)) >= 1 FROM - (SELECT * FROM system.query_log PREWHERE query='$heavy_cpu_query' WHERE event_date >= today()-1 AND type=2 ORDER BY event_time DESC LIMIT 1) - ARRAY JOIN ProfileEvents.Names AS PN, ProfileEvents.Values AS PV" + (SELECT * FROM system.query_log PREWHERE query='$heavy_cpu_query' WHERE event_date >= today()-1 AND type=2 ORDER BY event_time DESC LIMIT 1)" # Clean rm "$server_logs_file" diff --git a/tests/queries/0_stateless/00731_long_merge_tree_select_opened_files.sh b/tests/queries/0_stateless/00731_long_merge_tree_select_opened_files.sh index 25a742a481a..19fbe0ba0ae 100755 --- a/tests/queries/0_stateless/00731_long_merge_tree_select_opened_files.sh +++ b/tests/queries/0_stateless/00731_long_merge_tree_select_opened_files.sh @@ -28,6 +28,6 @@ $CLICKHOUSE_CLIENT $settings -q "$touching_many_parts_query" &> /dev/null $CLICKHOUSE_CLIENT $settings -q "SYSTEM FLUSH LOGS" -$CLICKHOUSE_CLIENT $settings -q "SELECT pi.Values FROM system.query_log ARRAY JOIN ProfileEvents as pi WHERE query='$touching_many_parts_query' and pi.Names = 'FileOpen' ORDER BY event_time DESC LIMIT 1;" +$CLICKHOUSE_CLIENT $settings -q "SELECT ProfileEvents['FileOpen'] FROM system.query_log WHERE query='$touching_many_parts_query' ORDER BY event_time DESC LIMIT 1;" $CLICKHOUSE_CLIENT $settings -q "DROP TABLE IF EXISTS merge_tree_table;" diff --git a/tests/queries/0_stateless/00933_test_fix_extra_seek_on_compressed_cache.sh b/tests/queries/0_stateless/00933_test_fix_extra_seek_on_compressed_cache.sh index 2b9a69d19d4..0679d1f9359 100755 --- a/tests/queries/0_stateless/00933_test_fix_extra_seek_on_compressed_cache.sh +++ b/tests/queries/0_stateless/00933_test_fix_extra_seek_on_compressed_cache.sh @@ -20,7 +20,7 @@ $CLICKHOUSE_CLIENT --use_uncompressed_cache=1 --query_id="test-query-uncompresse $CLICKHOUSE_CLIENT --query="SYSTEM FLUSH LOGS" -$CLICKHOUSE_CLIENT --query="SELECT ProfileEvents.Values[indexOf(ProfileEvents.Names, 'Seek')], ProfileEvents.Values[indexOf(ProfileEvents.Names, 'ReadCompressedBytes')], ProfileEvents.Values[indexOf(ProfileEvents.Names, 'UncompressedCacheHits')] AS hit FROM system.query_log WHERE (query_id = 'test-query-uncompressed-cache') AND (type = 2) AND event_date >= yesterday() ORDER BY event_time DESC LIMIT 1" +$CLICKHOUSE_CLIENT --query="SELECT ProfileEvents['Seek'], ProfileEvents['ReadCompressedBytes'], ProfileEvents['UncompressedCacheHits'] AS hit FROM system.query_log WHERE (query_id = 'test-query-uncompressed-cache') AND (type = 2) AND event_date >= yesterday() ORDER BY event_time DESC LIMIT 1" $CLICKHOUSE_CLIENT --query="DROP TABLE IF EXISTS small_table" diff --git a/tests/queries/0_stateless/01076_array_join_prewhere_const_folding.sql b/tests/queries/0_stateless/01076_array_join_prewhere_const_folding.sql index 7b80004864d..dabaece460b 100644 --- 
a/tests/queries/0_stateless/01076_array_join_prewhere_const_folding.sql +++ b/tests/queries/0_stateless/01076_array_join_prewhere_const_folding.sql @@ -2,9 +2,6 @@ SET log_queries = 1; SELECT 1 LIMIT 0; SYSTEM FLUSH LOGS; -SELECT arrayJoin AS kv_key -FROM system.query_log -ARRAY JOIN ProfileEvents.Names AS arrayJoin -PREWHERE has(arrayMap(key -> key, ProfileEvents.Names), 'Query') -WHERE arrayJoin = 'Query' +SELECT * FROM system.query_log +PREWHERE ProfileEvents['Query'] > 0 LIMIT 0; diff --git a/tests/queries/0_stateless/01231_log_queries_min_type.sql b/tests/queries/0_stateless/01231_log_queries_min_type.sql index bfeeca96d4b..b07d17acc51 100644 --- a/tests/queries/0_stateless/01231_log_queries_min_type.sql +++ b/tests/queries/0_stateless/01231_log_queries_min_type.sql @@ -22,5 +22,4 @@ select count() from system.query_log where query not like '%system.query_log%' and event_date = today() and event_time >= now() - interval 1 minute and - type = 'ExceptionWhileProcessing' and - has(Settings.Names, 'max_rows_to_read'); + type = 'ExceptionWhileProcessing' and Settings['max_rows_to_read'] != ''; diff --git a/tests/queries/0_stateless/01268_procfs_metrics.sh b/tests/queries/0_stateless/01268_procfs_metrics.sh index cad9b786667..58d1b327427 100755 --- a/tests/queries/0_stateless/01268_procfs_metrics.sh +++ b/tests/queries/0_stateless/01268_procfs_metrics.sh @@ -24,7 +24,7 @@ function show_processes_func() # These two system metrics for the generating query above are guaranteed to be nonzero when ProcFS is mounted at /proc $CLICKHOUSE_CLIENT -q " SELECT count() > 0 FROM system.processes\ - WHERE has(ProfileEvents.Names, 'OSCPUVirtualTimeMicroseconds') AND has(ProfileEvents.Names, 'OSReadChars')\ + WHERE ProfileEvents['OSCPUVirtualTimeMicroseconds'] > 0 AND ProfileEvents['OSReadChars'] = 0 \ SETTINGS max_threads = 1 " | grep '1' && break; done diff --git a/tests/queries/0_stateless/01343_min_bytes_to_use_mmap_io.sql b/tests/queries/0_stateless/01343_min_bytes_to_use_mmap_io.sql index 62c5d20d714..82e2119908f 100644 --- a/tests/queries/0_stateless/01343_min_bytes_to_use_mmap_io.sql +++ b/tests/queries/0_stateless/01343_min_bytes_to_use_mmap_io.sql @@ -6,6 +6,6 @@ SET min_bytes_to_use_mmap_io = 1; SELECT * FROM test_01343; SYSTEM FLUSH LOGS; -SELECT PE.Values FROM system.query_log ARRAY JOIN ProfileEvents AS PE WHERE event_date >= yesterday() AND event_time >= now() - 300 AND query LIKE 'SELECT * FROM test_01343%' AND PE.Names = 'CreatedReadBufferMMap' AND type = 2 ORDER BY event_time DESC LIMIT 1; +SELECT ProfileEvents['CreatedReadBufferMMap'] as value FROM system.query_log WHERE event_date >= yesterday() AND event_time >= now() - 300 AND query LIKE 'SELECT * FROM test_01343%' AND type = 2 ORDER BY event_time DESC LIMIT 1; DROP TABLE test_01343; diff --git a/tests/queries/0_stateless/01344_min_bytes_to_use_mmap_io_index.sql b/tests/queries/0_stateless/01344_min_bytes_to_use_mmap_io_index.sql index 544c0af7925..905a064ded4 100644 --- a/tests/queries/0_stateless/01344_min_bytes_to_use_mmap_io_index.sql +++ b/tests/queries/0_stateless/01344_min_bytes_to_use_mmap_io_index.sql @@ -6,6 +6,6 @@ SET min_bytes_to_use_mmap_io = 1; SELECT * FROM test_01344 WHERE x = 'Hello, world'; SYSTEM FLUSH LOGS; -SELECT PE.Values FROM system.query_log ARRAY JOIN ProfileEvents AS PE WHERE event_date >= yesterday() AND event_time >= now() - 300 AND query LIKE 'SELECT * FROM test_01344 WHERE x = ''Hello, world''%' AND PE.Names = 'CreatedReadBufferMMap' AND type = 2 ORDER BY event_time DESC LIMIT 1; +SELECT 
ProfileEvents['CreatedReadBufferMMap'] as value FROM system.query_log WHERE event_date >= yesterday() AND event_time >= now() - 300 AND query LIKE 'SELECT * FROM test_01344 WHERE x = ''Hello, world''%' AND type = 2 ORDER BY event_time DESC LIMIT 1; DROP TABLE test_01344; diff --git a/tests/queries/0_stateless/01360_materialized_view_with_join_on_query_log.sql b/tests/queries/0_stateless/01360_materialized_view_with_join_on_query_log.sql index 57e8e6f4c74..71add3c73df 100644 --- a/tests/queries/0_stateless/01360_materialized_view_with_join_on_query_log.sql +++ b/tests/queries/0_stateless/01360_materialized_view_with_join_on_query_log.sql @@ -8,6 +8,7 @@ SET log_queries=1; SELECT 1; SYSTEM FLUSH LOGS; +SET allow_experimental_map_type = 1; CREATE MATERIALIZED VIEW slow_log Engine=Memory AS ( SELECT * FROM diff --git a/tests/queries/0_stateless/01413_rows_events.sql b/tests/queries/0_stateless/01413_rows_events.sql index 9063f5de8d4..7e761196a15 100644 --- a/tests/queries/0_stateless/01413_rows_events.sql +++ b/tests/queries/0_stateless/01413_rows_events.sql @@ -6,23 +6,20 @@ SYSTEM FLUSH LOGS; SELECT written_rows FROM system.query_log WHERE query LIKE 'INSERT INTO /* test 01413, query 1 */ rows_events_test%' AND type = 2 AND event_date >= yesterday() ORDER BY event_time DESC LIMIT 1; -SELECT ProfileEvents.Values as value FROM system.query_log ARRAY JOIN ProfileEvents - WHERE ProfileEvents.Names = 'InsertedRows' AND query LIKE 'INSERT INTO /* test 01413, query 1 */ rows_events_test%' AND type = 2 AND event_date >= yesterday() ORDER BY event_time DESC LIMIT 1; +SELECT ProfileEvents['InsertedRows'] as value FROM system.query_log WHERE query LIKE 'INSERT INTO /* test 01413, query 1 */ rows_events_test%' AND type = 2 AND event_date >= yesterday() ORDER BY event_time DESC LIMIT 1; INSERT INTO /* test 01413, query 2 */ rows_events_test VALUES (2,2), (3,3); SYSTEM FLUSH LOGS; SELECT written_rows FROM system.query_log WHERE query LIKE 'INSERT INTO /* test 01413, query 2 */ rows_events_test%' AND type = 2 AND event_date >= yesterday() ORDER BY event_time DESC LIMIT 1; -SELECT ProfileEvents.Values as value FROM system.query_log ARRAY JOIN ProfileEvents - WHERE ProfileEvents.Names = 'InsertedRows' AND query LIKE 'INSERT INTO /* test 01413, query 2 */ rows_events_test%' AND type = 2 AND event_date >= yesterday() ORDER BY event_time DESC LIMIT 1; +SELECT ProfileEvents['InsertedRows'] as value FROM system.query_log WHERE query LIKE 'INSERT INTO /* test 01413, query 2 */ rows_events_test%' AND type = 2 AND event_date >= yesterday() ORDER BY event_time DESC LIMIT 1; SELECT * FROM /* test 01413, query 3 */ rows_events_test WHERE v = 2; SYSTEM FLUSH LOGS; SELECT read_rows FROM system.query_log WHERE query LIKE 'SELECT * FROM /* test 01413, query 3 */ rows_events_test%' AND type = 2 AND event_date >= yesterday() ORDER BY event_time DESC LIMIT 1; -SELECT ProfileEvents.Values as value FROM system.query_log ARRAY JOIN ProfileEvents - WHERE ProfileEvents.Names = 'SelectedRows' AND query LIKE 'SELECT * FROM /* test 01413, query 3 */ rows_events_test%' AND type = 2 AND event_date >= yesterday() ORDER BY event_time DESC LIMIT 1; +SELECT ProfileEvents['SelectedRows'] as value FROM system.query_log WHERE query LIKE 'SELECT * FROM /* test 01413, query 3 */ rows_events_test%' AND type = 2 AND event_date >= yesterday() ORDER BY event_time DESC LIMIT 1; DROP TABLE rows_events_test; diff --git a/tests/queries/0_stateless/01475_read_subcolumns.sql b/tests/queries/0_stateless/01475_read_subcolumns.sql index 
ce85dd72abf..864b89c8c1c 100644 --- a/tests/queries/0_stateless/01475_read_subcolumns.sql +++ b/tests/queries/0_stateless/01475_read_subcolumns.sql @@ -7,7 +7,7 @@ SYSTEM DROP MARK CACHE; SELECT a.size0 FROM t_arr; SYSTEM FLUSH LOGS; -SELECT ProfileEvents.Values[indexOf(ProfileEvents.Names, 'FileOpen')] +SELECT ProfileEvents['FileOpen'] FROM system.query_log WHERE (type = 'QueryFinish') AND (lower(query) LIKE lower('SELECT a.size0 FROM %t_arr%')) AND event_time > now() - INTERVAL 10 SECOND AND current_database = currentDatabase(); @@ -24,7 +24,7 @@ SYSTEM DROP MARK CACHE; SELECT t.u FROM t_tup; SYSTEM FLUSH LOGS; -SELECT ProfileEvents.Values[indexOf(ProfileEvents.Names, 'FileOpen')] +SELECT ProfileEvents['FileOpen'] FROM system.query_log WHERE (type = 'QueryFinish') AND (lower(query) LIKE lower('SELECT t._ FROM %t_tup%')) AND event_time > now() - INTERVAL 10 SECOND AND current_database = currentDatabase(); @@ -38,7 +38,7 @@ SYSTEM DROP MARK CACHE; SELECT n.null FROM t_nul; SYSTEM FLUSH LOGS; -SELECT ProfileEvents.Values[indexOf(ProfileEvents.Names, 'FileOpen')] +SELECT ProfileEvents['FileOpen'] FROM system.query_log WHERE (type = 'QueryFinish') AND (lower(query) LIKE lower('SELECT n.null FROM %t_nul%')) AND event_time > now() - INTERVAL 10 SECOND AND current_database = currentDatabase(); @@ -57,7 +57,7 @@ SYSTEM DROP MARK CACHE; SELECT m.values FROM t_map; SYSTEM FLUSH LOGS; -SELECT ProfileEvents.Values[indexOf(ProfileEvents.Names, 'FileOpen')] +SELECT ProfileEvents['FileOpen'] FROM system.query_log WHERE (type = 'QueryFinish') AND (lower(query) LIKE lower('SELECT m.% FROM %t_map%')) AND event_time > now() - INTERVAL 10 SECOND AND current_database = currentDatabase(); diff --git a/tests/queries/0_stateless/01533_multiple_nested.sql b/tests/queries/0_stateless/01533_multiple_nested.sql index 6374d6fca21..cf1cc627273 100644 --- a/tests/queries/0_stateless/01533_multiple_nested.sql +++ b/tests/queries/0_stateless/01533_multiple_nested.sql @@ -8,7 +8,7 @@ CREATE TABLE nested col2 Nested(a UInt32, n Nested(s String, b UInt32)), col3 Nested(n1 Nested(a UInt32, b UInt32), n2 Nested(s String, t String)) ) -ENGINE = MergeTree +ENGINE = MergeTree ORDER BY tuple() SETTINGS min_bytes_for_wide_part = 0; @@ -33,7 +33,7 @@ SELECT col1.a FROM nested FORMAT Null; -- 4 files: (col1.size0, col1.a) x2 SYSTEM FLUSH LOGS; -SELECT ProfileEvents.Values[indexOf(ProfileEvents.Names, 'FileOpen')] +SELECT ProfileEvents['FileOpen'] FROM system.query_log WHERE (type = 'QueryFinish') AND (lower(query) LIKE lower('SELECT col1.a FROM %nested%')) AND event_time > now() - INTERVAL 10 SECOND AND current_database = currentDatabase(); @@ -43,7 +43,7 @@ SELECT col3.n2.s FROM nested FORMAT Null; -- 6 files: (col3.size0, col3.n2.size1, col3.n2.s) x2 SYSTEM FLUSH LOGS; -SELECT ProfileEvents.Values[indexOf(ProfileEvents.Names, 'FileOpen')] +SELECT ProfileEvents['FileOpen'] FROM system.query_log WHERE (type = 'QueryFinish') AND (lower(query) LIKE lower('SELECT col3.n2.s FROM %nested%')) AND event_time > now() - INTERVAL 10 SECOND AND current_database = currentDatabase(); @@ -55,7 +55,7 @@ CREATE TABLE nested id UInt32, col1 Nested(a UInt32, n Nested(s String, b UInt32)) ) -ENGINE = MergeTree +ENGINE = MergeTree ORDER BY id SETTINGS min_bytes_for_wide_part = 0; From c972977c17cb5e6c02e08bc80ae351f62f086383 Mon Sep 17 00:00:00 2001 From: sundy-li <543950155@qq.com> Date: Thu, 21 Jan 2021 13:08:40 +0800 Subject: [PATCH 007/931] Update tests --- tests/queries/0_stateless/01268_procfs_metrics.sh | 2 +- 1 file changed, 1 insertion(+), 1 
deletion(-) diff --git a/tests/queries/0_stateless/01268_procfs_metrics.sh b/tests/queries/0_stateless/01268_procfs_metrics.sh index 58d1b327427..d5bd99724ca 100755 --- a/tests/queries/0_stateless/01268_procfs_metrics.sh +++ b/tests/queries/0_stateless/01268_procfs_metrics.sh @@ -24,7 +24,7 @@ function show_processes_func() # These two system metrics for the generating query above are guaranteed to be nonzero when ProcFS is mounted at /proc $CLICKHOUSE_CLIENT -q " SELECT count() > 0 FROM system.processes\ - WHERE ProfileEvents['OSCPUVirtualTimeMicroseconds'] > 0 AND ProfileEvents['OSReadChars'] = 0 \ + WHERE ProfileEvents['OSCPUVirtualTimeMicroseconds'] > 0 AND ProfileEvents['OSReadChars'] > 0 \ SETTINGS max_threads = 1 " | grep '1' && break; done From ca039f52195aa04e0dcfa71babb70c91ab58c25a Mon Sep 17 00:00:00 2001 From: sundy-li <543950155@qq.com> Date: Thu, 21 Jan 2021 14:55:13 +0800 Subject: [PATCH 008/931] Update more integration tests --- tests/integration/test_distributed_inter_server_secret/test.py | 2 +- tests/integration/test_merge_tree_s3_with_cache/test.py | 2 +- tests/integration/test_profile_events_s3/test.py | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/tests/integration/test_distributed_inter_server_secret/test.py b/tests/integration/test_distributed_inter_server_secret/test.py index 52aa3decde3..e3954988db0 100644 --- a/tests/integration/test_distributed_inter_server_secret/test.py +++ b/tests/integration/test_distributed_inter_server_secret/test.py @@ -83,7 +83,7 @@ def get_query_user_info(node, query_pattern): def get_query_setting_on_shard(node, query_pattern, setting): node.query("SYSTEM FLUSH LOGS") return node.query(""" - SELECT Settings.Values['{}'] + SELECT Settings['{}'] FROM system.query_log WHERE query LIKE '%{}%' AND diff --git a/tests/integration/test_merge_tree_s3_with_cache/test.py b/tests/integration/test_merge_tree_s3_with_cache/test.py index d5d6db2fb77..c5d784c4dde 100644 --- a/tests/integration/test_merge_tree_s3_with_cache/test.py +++ b/tests/integration/test_merge_tree_s3_with_cache/test.py @@ -27,7 +27,7 @@ def get_query_stat(instance, hint): result = {} instance.query("SYSTEM FLUSH LOGS") events = instance.query(''' - SELECT ProfileEvents.Names, ProfileEvents.Values + SELECT ProfileEvents.keys, ProfileEvents.values FROM system.query_log ARRAY JOIN ProfileEvents WHERE type != 1 AND query LIKE '%{}%' diff --git a/tests/integration/test_profile_events_s3/test.py b/tests/integration/test_profile_events_s3/test.py index 3d65a489610..4fdd96a742c 100644 --- a/tests/integration/test_profile_events_s3/test.py +++ b/tests/integration/test_profile_events_s3/test.py @@ -86,7 +86,7 @@ def get_query_stat(instance, hint): result = init_list.copy() instance.query("SYSTEM FLUSH LOGS") events = instance.query(''' - SELECT ProfileEvents.Names, ProfileEvents.Values + SELECT ProfileEvents.keys, ProfileEvents.values FROM system.query_log ARRAY JOIN ProfileEvents WHERE type != 1 AND query LIKE '%{}%' From 76da6014d29794a6db63bf9b559fd432c5d09b5b Mon Sep 17 00:00:00 2001 From: sundy-li <543950155@qq.com> Date: Thu, 21 Jan 2021 16:36:13 +0800 Subject: [PATCH 009/931] Update more integration tests2 --- tests/integration/test_distributed_inter_server_secret/test.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/integration/test_distributed_inter_server_secret/test.py b/tests/integration/test_distributed_inter_server_secret/test.py index e3954988db0..e7639bc6edd 100644 --- 
a/tests/integration/test_distributed_inter_server_secret/test.py
+++ b/tests/integration/test_distributed_inter_server_secret/test.py
@@ -91,7 +91,7 @@ def get_query_setting_on_shard(node, query_pattern, setting):
        query NOT LIKE '%system.query_log%' AND
        type = 'QueryFinish'
     LIMIT 1
-    """.format(setting, setting, query_pattern)).strip()
+    """.format(setting, query_pattern)).strip()

 def test_insecure():
     n1.query('SELECT * FROM dist_insecure')

From 339133f32aba4c33e0ab90c3cb36c8f0ebdc72ed Mon Sep 17 00:00:00 2001
From: sundy-li <543950155@qq.com>
Date: Fri, 22 Jan 2021 13:16:13 +0800
Subject: [PATCH 010/931] Support Map for OpenTelemetrySpanLog and update docs

---
 docker/test/performance-comparison/compare.sh | 32 +++++++++----------
 docs/en/operations/system-tables/processes.md | 14 ++++----
 docs/en/operations/system-tables/query_log.md | 22 ++++++-------
 .../system-tables/query_thread_log.md         | 12 +++----
 docs/es/operations/system-tables.md           |  9 ++----
 docs/fr/operations/system-tables.md           |  9 ++----
 docs/ja/operations/system-tables.md           |  9 ++----
 docs/ru/operations/system-tables/query_log.md | 24 ++++++--------
 .../system-tables/query_thread_log.md         | 12 +++----
 docs/zh/operations/system-tables/query_log.md | 12 +++----
 .../system-tables/query_thread_log.md         |  6 ++--
 src/Interpreters/OpenTelemetrySpanLog.cpp     | 19 ++++++-----
 .../01455_opentelemetry_distributed.sh        |  6 ++--
 13 files changed, 78 insertions(+), 108 deletions(-)

diff --git a/docker/test/performance-comparison/compare.sh b/docker/test/performance-comparison/compare.sh
index 9a0d8093a55..b03126b2f88 100755
--- a/docker/test/performance-comparison/compare.sh
+++ b/docker/test/performance-comparison/compare.sh
@@ -371,7 +371,7 @@ do
 done

 # for each query run, prepare array of metrics from query log
-clickhouse-local --query "
+clickhouse-local --allow_experimental_map_type 1 --query "
 create view query_runs as select * from file('analyze/query-runs.tsv', TSV,
     'test text, query_index int, query_id text, version UInt8, time float');

@@ -400,10 +400,10 @@ create view right_query_log as select *
     '$(cat "right-query-log.tsv.columns")');

 create view query_logs as
-    select 0 version, query_id, ProfileEvents.Names, ProfileEvents.Values,
+    select 0 version, query_id, ProfileEvents.keys, ProfileEvents.values,
         query_duration_ms, memory_usage from left_query_log
     union all
-    select 1 version, query_id, ProfileEvents.Names, ProfileEvents.Values,
+    select 1 version, query_id, ProfileEvents.keys, ProfileEvents.values,
         query_duration_ms, memory_usage from right_query_log
     ;

@@ -415,7 +415,7 @@ create table query_run_metric_arrays engine File(TSV, 'analyze/query-run-metric-
     with (
         -- sumMapState with the list of all keys with '-0.' values. Negative zero is because
         -- sumMap removes keys with positive zeros.
- with (select groupUniqArrayArray(ProfileEvents.Names) from query_logs) as all_names + with (select groupUniqArrayArray(ProfileEvents.keys) from query_logs) as all_names select arrayReduce('sumMapState', [(all_names, arrayMap(x->-0., all_names))]) ) as all_metrics select test, query_index, version, query_id, @@ -424,8 +424,8 @@ create table query_run_metric_arrays engine File(TSV, 'analyze/query-run-metric- [ all_metrics, arrayReduce('sumMapState', - [(ProfileEvents.Names, - arrayMap(x->toFloat64(x), ProfileEvents.Values))] + [(ProfileEvents.keys, + arrayMap(x->toFloat64(x), ProfileEvents.values))] ), arrayReduce('sumMapState', [( ['client_time', 'server_time', 'memory_usage'], @@ -492,7 +492,7 @@ do file="analyze/tmp/${prefix// /_}.tsv" grep "^$prefix " "analyze/query-run-metrics-for-stats.tsv" > "$file" & printf "%s\0\n" \ - "clickhouse-local \ + "clickhouse-local --allow_experimental_map_type 1 \ --file \"$file\" \ --structure 'test text, query text, run int, version UInt8, metrics Array(float)' \ --query \"$(cat "$script_dir/eqmed.sql")\" \ @@ -511,7 +511,7 @@ numactl --show numactl --cpunodebind=all --membind=all numactl --show numactl --cpunodebind=all --membind=all parallel --joblog analyze/parallel-log.txt --null < analyze/commands.txt 2>> analyze/errors.log -clickhouse-local --query " +clickhouse-local --allow_experimental_map_type 1 --query " -- Join the metric names back to the metric statistics we've calculated, and make -- a denormalized table of them -- statistics for all metrics for all queries. -- The WITH, ARRAY JOIN and CROSS JOIN do not like each other: @@ -552,7 +552,7 @@ build_log_column_definitions cat analyze/errors.log >> report/errors.log ||: cat profile-errors.log >> report/errors.log ||: -clickhouse-local --query " +clickhouse-local --allow_experimental_map_type 1 --query " create view query_display_names as select * from file('analyze/query-display-names.tsv', TSV, 'test text, query_index int, query_display_name text') @@ -869,7 +869,7 @@ create table all_query_metrics_tsv engine File(TSV, 'report/all-query-metrics.ts for version in {right,left} do rm -rf data - clickhouse-local --query " + clickhouse-local --allow_experimental_map_type 1 --query " create view query_profiles as with 0 as left, 1 as right select * from file('analyze/query-profiles.tsv', TSV, @@ -900,7 +900,7 @@ create table unstable_run_metrics engine File(TSVWithNamesAndTypes, 'unstable-run-metrics.$version.rep') as select test, query_index, query_id, - ProfileEvents.Values value, ProfileEvents.Names metric + ProfileEvents.values value, ProfileEvents.keys metric from query_log array join ProfileEvents join unstable_query_runs using (query_id) ; @@ -1062,7 +1062,7 @@ build_log_column_definitions rm -rf metrics ||: mkdir metrics -clickhouse-local --query " +clickhouse-local --allow_experimental_map_type 1 --query " create view right_async_metric_log as select * from file('right-async-metric-log.tsv', TSVWithNamesAndTypes, '$(cat right-async-metric-log.tsv.columns)') @@ -1125,15 +1125,15 @@ function upload_results then echo Database for test results is not specified, will not upload them. return 0 - fi + fi # Surprisingly, clickhouse-client doesn't understand --host 127.0.0.1:9000 - # so I have to extract host and port with clickhouse-local. I tried to use + # so I have to extract host and port with clickhouse-local --allow_experimental_map_type 1. I tried to use # Poco URI parser to support this in the client, but it's broken and can't # parse host:port. 
    set +x # Don't show password in the log
     clickhouse-client \
         $(clickhouse-local --query "with '${CHPC_DATABASE_URL}' as url select '--host ' || domain(url) || ' --port ' || toString(port(url)) format TSV") \
         --secure \
         --user "${CHPC_DATABASE_USER}" \
         --password "${CHPC_DATABASE_PASSWORD}" \
@@ -1167,7 +1167,7 @@ function upload_results
 }

 # Check that local and client are in PATH
 clickhouse-local --version > /dev/null
 clickhouse-client --version > /dev/null

 case "$stage" in
diff --git a/docs/en/operations/system-tables/processes.md b/docs/en/operations/system-tables/processes.md
index a379fc4a07a..76e3110c667 100644
--- a/docs/en/operations/system-tables/processes.md
+++ b/docs/en/operations/system-tables/processes.md
@@ -34,14 +34,14 @@
 initial_port:         47588
 interface:            1
 os_user:              bharatnc
 client_hostname:      tower
-client_name:          ClickHouse 
+client_name:          ClickHouse
 client_revision:      54437
 client_version_major: 20
 client_version_minor: 7
 client_version_patch: 2
 http_method:          0
-http_user_agent: 
-quota_key: 
+http_user_agent:
+quota_key:
 elapsed:              0.000582537
 is_cancelled:         0
 read_rows:            0
@@ -53,12 +53,10 @@
 memory_usage:         0
 peak_memory_usage:    0
 query:                SELECT * from system.processes LIMIT 10 FORMAT Vertical;
 thread_ids:           [67]
-ProfileEvents.Names:  ['Query','SelectQuery','ReadCompressedBytes','CompressedReadBufferBlocks','CompressedReadBufferBytes','IOBufferAllocs','IOBufferAllocBytes','ContextLock','RWLockAcquiredReadLocks']
-ProfileEvents.Values: [1,1,36,1,10,1,89,16,1]
-Settings.Names:       ['use_uncompressed_cache','load_balancing','log_queries','max_memory_usage']
-Settings.Values:      ['0','in_order','1','10000000000']
+ProfileEvents:        {'Query':1,'SelectQuery':1,'ReadCompressedBytes':36,'CompressedReadBufferBlocks':1,'CompressedReadBufferBytes':10,'IOBufferAllocs':1,'IOBufferAllocBytes':89,'ContextLock':15,'RWLockAcquiredReadLocks':1}
+Settings:             {'background_pool_size':'32','load_balancing':'random','allow_suspicious_low_cardinality_types':'1','distributed_aggregation_memory_efficient':'1','skip_unavailable_shards':'1','log_queries':'1','max_bytes_before_external_group_by':'20000000000','max_bytes_before_external_sort':'20000000000','allow_introspection_functions':'1'}

-1 rows in set. Elapsed: 0.002 sec. 
+1 rows in set. Elapsed: 0.002 sec.
 ```

 [Original article](https://clickhouse.tech/docs/en/operations/system_tables/processes)
diff --git a/docs/en/operations/system-tables/query_log.md b/docs/en/operations/system-tables/query_log.md
index 32b2bdf2133..776b9488af0 100644
--- a/docs/en/operations/system-tables/query_log.md
+++ b/docs/en/operations/system-tables/query_log.md
@@ -77,10 +77,8 @@ Columns:
 - `quota_key` ([String](../../sql-reference/data-types/string.md)) — The “quota key” specified in the [quotas](../../operations/quotas.md) setting (see `keyed`).
 - `revision` ([UInt32](../../sql-reference/data-types/int-uint.md)) — ClickHouse revision.
 - `thread_numbers` ([Array(UInt32)](../../sql-reference/data-types/array.md)) — Number of threads that are participating in query execution.
-- `ProfileEvents.Names` ([Array(String)](../../sql-reference/data-types/array.md)) — Counters that measure different metrics. The description of them could be found in the table [system.events](../../operations/system-tables/events.md#system_tables-events)
-- `ProfileEvents.Values` ([Array(UInt64)](../../sql-reference/data-types/array.md)) — Values of metrics that are listed in the `ProfileEvents.Names` column.
-- `Settings.Names` ([Array(String)](../../sql-reference/data-types/array.md)) — Names of settings that were changed when the client ran the query. To enable logging changes to settings, set the `log_query_settings` parameter to 1.
-- `Settings.Values` ([Array(String)](../../sql-reference/data-types/array.md)) — Values of settings that are listed in the `Settings.Names` column.
+- `ProfileEvents` ([Map(String, UInt64)](../../sql-reference/data-types/map.md)) — ProfileEvents that measure different metrics. The description of them could be found in the table [system.events](../../operations/system-tables/events.md#system_tables-events)
+- `Settings` ([Map(String, String)](../../sql-reference/data-types/map.md)) — Settings that were changed when the client ran the query. To enable logging changes to settings, set the `log_query_settings` parameter to 1.

 **Example**

 ``` text
 Row 1:
 ──────
 type:                          QueryStart
 event_date:                    2020-09-11
 event_time:                    2020-09-11 10:08:17
 event_time_microseconds:       2020-09-11 10:08:17.063321
 query_start_time:              2020-09-11 10:08:17
 query_start_time_microseconds: 2020-09-11 10:08:17.063321
 query_duration_ms:             0
 read_rows:                     0
 read_bytes:                    0
 written_rows:                  0
 written_bytes:                 0
 result_rows:                   0
 result_bytes:                  0
 memory_usage:                  0
 current_database:              default
 query:                         INSERT INTO test1 VALUES
 exception_code:                0
-exception: 
-stack_trace: 
+exception:
+stack_trace:
 is_initial_query:              1
 user:                          default
 query_id:                      50a320fd-85a8-49b8-8761-98a86bcbacef
 address:                       ::ffff:127.0.0.1
 port:                          33452
 initial_user:                  default
 initial_query_id:              50a320fd-85a8-49b8-8761-98a86bcbacef
 initial_address:               ::ffff:127.0.0.1
 initial_port:                  33452
 interface:                     1
 os_user:                       bharatnc
 client_hostname:               tower
-client_name:                   ClickHouse 
+client_name:                   ClickHouse
 client_revision:               54437
 client_version_major:          20
 client_version_minor:          7
 client_version_patch:          2
 http_method:                   0
-http_user_agent: 
-quota_key: 
+http_user_agent:
+quota_key:
 revision:                      54440
 thread_ids:                    []
-ProfileEvents.Names:           []
-ProfileEvents.Values:          []
-Settings.Names:                ['use_uncompressed_cache','load_balancing','log_queries','max_memory_usage','allow_introspection_functions']
-Settings.Values:               ['0','random','1','10000000000','1']
+ProfileEvents:                 {'Query':1,'SelectQuery':1,'ReadCompressedBytes':36,'CompressedReadBufferBlocks':1,'CompressedReadBufferBytes':10,'IOBufferAllocs':1,'IOBufferAllocBytes':89,'ContextLock':15,'RWLockAcquiredReadLocks':1}
+Settings:                      {'background_pool_size':'32','load_balancing':'random','allow_suspicious_low_cardinality_types':'1','distributed_aggregation_memory_efficient':'1','skip_unavailable_shards':'1','log_queries':'1','max_bytes_before_external_group_by':'20000000000','max_bytes_before_external_sort':'20000000000','allow_introspection_functions':'1'}
 ```

 **See Also**
diff --git a/docs/en/operations/system-tables/query_thread_log.md b/docs/en/operations/system-tables/query_thread_log.md
index 0ae2e7d5d3b..33bf60cd69a 100644
--- a/docs/en/operations/system-tables/query_thread_log.md
+++ b/docs/en/operations/system-tables/query_thread_log.md
@@ -58,8 +58,7 @@ Columns:
 - `http_user_agent` ([String](../../sql-reference/data-types/string.md)) — The `UserAgent` header passed in the HTTP request.
 - `quota_key` ([String](../../sql-reference/data-types/string.md)) — The “quota key” specified in the [quotas](../../operations/quotas.md) setting (see `keyed`).
 - `revision` ([UInt32](../../sql-reference/data-types/int-uint.md)) — ClickHouse revision.
-- `ProfileEvents.Names` ([Array(String)](../../sql-reference/data-types/array.md)) — Counters that measure different metrics for this thread. The description of them could be found in the table [system.events](#system_tables-events).
-- `ProfileEvents.Values` ([Array(UInt64)](../../sql-reference/data-types/array.md)) — Values of metrics for this thread that are listed in the `ProfileEvents.Names` column.
+- `ProfileEvents` ([Map(String, UInt64)](../../sql-reference/data-types/map.md)) — ProfileEvents that measure different metrics for this thread. The description of them could be found in the table [system.events](#system_tables-events).

 **Example**

 ``` text
 Row 1:
 ──────
 event_date:                    2020-09-11
 event_time:                    2020-09-11 10:08:17
 event_time_microseconds:       2020-09-11 10:08:17.134042
 query_start_time:              2020-09-11 10:08:17
 query_start_time_microseconds: 2020-09-11 10:08:17.063150
 query_duration_ms:             70
 read_rows:                     0
 read_bytes:                    0
 written_rows:                  1
 written_bytes:                 12
 memory_usage:                  4300844
 peak_memory_usage:             4300844
 thread_name:                   TCPHandler
 thread_id:                     638133
 master_thread_id:              638133
 query:                         INSERT INTO test1 VALUES
 is_initial_query:              1
 user:                          default
 query_id:                      50a320fd-85a8-49b8-8761-98a86bcbacef
 address:                       ::ffff:127.0.0.1
 port:                          33452
 initial_user:                  default
 initial_query_id:              50a320fd-85a8-49b8-8761-98a86bcbacef
 initial_address:               ::ffff:127.0.0.1
 initial_port:                  33452
 interface:                     1
 os_user:                       bharatnc
 client_hostname:               tower
-client_name:                   ClickHouse 
+client_name:                   ClickHouse
 client_revision:               54437
 client_version_major:          20
 client_version_minor:          7
 client_version_patch:          2
 http_method:                   0
-http_user_agent: 
-quota_key: 
+http_user_agent:
+quota_key:
 revision:                      54440
-ProfileEvents.Names:           ['Query','InsertQuery','FileOpen','WriteBufferFromFileDescriptorWrite','WriteBufferFromFileDescriptorWriteBytes','ReadCompressedBytes','CompressedReadBufferBlocks','CompressedReadBufferBytes','IOBufferAllocs','IOBufferAllocBytes','FunctionExecute','CreatedWriteBufferOrdinary','DiskWriteElapsedMicroseconds','NetworkReceiveElapsedMicroseconds','NetworkSendElapsedMicroseconds','InsertedRows','InsertedBytes','SelectedRows','SelectedBytes','MergeTreeDataWriterRows','MergeTreeDataWriterUncompressedBytes','MergeTreeDataWriterCompressedBytes','MergeTreeDataWriterBlocks','MergeTreeDataWriterBlocksAlreadySorted','ContextLock','RWLockAcquiredReadLocks','RealTimeMicroseconds','UserTimeMicroseconds','SoftPageFaults','OSCPUVirtualTimeMicroseconds','OSWriteBytes','OSReadChars','OSWriteChars']
-ProfileEvents.Values:          [1,1,11,11,591,148,3,71,29,6533808,1,11,72,18,47,1,12,1,12,1,12,189,1,1,10,2,70853,2748,49,2747,45056,422,1520]
+ProfileEvents:                 {'Query':1,'SelectQuery':1,'ReadCompressedBytes':36,'CompressedReadBufferBlocks':1,'CompressedReadBufferBytes':10,'IOBufferAllocs':1,'IOBufferAllocBytes':89,'ContextLock':15,'RWLockAcquiredReadLocks':1}
 ```

 **See Also**
diff --git a/docs/es/operations/system-tables.md b/docs/es/operations/system-tables.md
index 18e7f7227da..247e851bdef 100644
--- a/docs/es/operations/system-tables.md
+++ b/docs/es/operations/system-tables.md
@@ -625,10 +625,8 @@ Columna:
 - `quota_key` (String) — The “quota key” especificado en el [cuota](quotas.md) ajuste (ver `keyed`).
 - `revision` (UInt32) — ClickHouse revision.
 - `thread_numbers` (Array(UInt32)) — Number of threads that are participating in query execution.
-- `ProfileEvents.Names` (Array(String)) — Counters that measure different metrics. The description of them could be found in the table [sistema.evento](#system_tables-events)
-- `ProfileEvents.Values` (Array(UInt64)) — Values of metrics that are listed in the `ProfileEvents.Names` columna.
-- `Settings.Names` (Array(String)) — Names of settings that were changed when the client ran the query. To enable logging changes to settings, set the `log_query_settings` parámetro a 1.
-- `Settings.Values` (Array(String)) — Values of settings that are listed in the `Settings.Names` columna.
+- `ProfileEvents` (Map(String, UInt64)) — ProfileEvents that measure different metrics. The description of them could be found in the table [sistema.evento](#system_tables-events)
+- `Settings` (Map(String, String)) — Settings that were changed when the client ran the query. To enable logging changes to settings, set the `log_query_settings` parámetro a 1.
Cada consulta crea una o dos filas en el `query_log` tabla, dependiendo del estado de la consulta:
@@ -698,8 +696,7 @@ Columna:
 - `http_user_agent` (String) — The `UserAgent` encabezado pasado en la solicitud HTTP.
 - `quota_key` (String) — The “quota key” especificado en el [cuota](quotas.md) ajuste (ver `keyed`).
 - `revision` (UInt32) — ClickHouse revision.
-- `ProfileEvents.Names` (Array(String)) — Counters that measure different metrics for this thread. The description of them could be found in the table [sistema.evento](#system_tables-events)
-- `ProfileEvents.Values` (Array(UInt64)) — Values of metrics for this thread that are listed in the `ProfileEvents.Names` columna.
+- `ProfileEvents` (Map(String, UInt64)) — ProfileEvents that measure different metrics for this thread. The description of them could be found in the table [sistema.evento](#system_tables-events)

 De forma predeterminada, los registros se agregan a la tabla a intervalos de 7,5 segundos. Puede establecer este intervalo en el [Sistema abierto.](server-configuration-parameters/settings.md#server_configuration_parameters-query-thread-log) configuración del servidor (consulte el `flush_interval_milliseconds` parámetro). Para vaciar los registros a la fuerza desde el búfer de memoria a la tabla, utilice `SYSTEM FLUSH LOGS` consulta.
diff --git a/docs/fr/operations/system-tables.md b/docs/fr/operations/system-tables.md
index bf875892478..de5a061a27a 100644
--- a/docs/fr/operations/system-tables.md
+++ b/docs/fr/operations/system-tables.md
@@ -625,10 +625,8 @@ Colonne:
 - `quota_key` (String) — The “quota key” spécifié dans le [quota](quotas.md) (voir `keyed`).
 - `revision` (UInt32) — ClickHouse revision.
 - `thread_numbers` (Array(UInt32)) — Number of threads that are participating in query execution.
-- `ProfileEvents.Names` (Array(String)) — Counters that measure different metrics. The description of them could be found in the table [système.événement](#system_tables-events)
-- `ProfileEvents.Values` (Array(UInt64)) — Values of metrics that are listed in the `ProfileEvents.Names` colonne.
-- `Settings.Names` (Array(String)) — Names of settings that were changed when the client ran the query. To enable logging changes to settings, set the `log_query_settings` paramètre 1.
-- `Settings.Values` (Array(String)) — Values of settings that are listed in the `Settings.Names` colonne.
+- `ProfileEvents` (Map(String, UInt64)) — ProfileEvents that measure different metrics. The description of them could be found in the table [système.événement](#system_tables-events)
+- `Settings` (Map(String, String)) — Settings that were changed when the client ran the query. To enable logging changes to settings, set the `log_query_settings` paramètre 1.

 Chaque requête crée une ou deux lignes dans le `query_log` le tableau, en fonction de l'état de la requête:
@@ -698,8 +696,7 @@ Colonne:
 - `http_user_agent` (String) — The `UserAgent` en-tête passé dans la requête HTTP.
 - `quota_key` (String) — The “quota key” spécifié dans le [quota](quotas.md) (voir `keyed`).
 - `revision` (UInt32) — ClickHouse revision.
-- `ProfileEvents.Names` (Array(String)) — Counters that measure different metrics for this thread. The description of them could be found in the table [système.événement](#system_tables-events)
-- `ProfileEvents.Values` (Array(UInt64)) — Values of metrics for this thread that are listed in the `ProfileEvents.Names` colonne.
+- `ProfileEvents` (Map(String, UInt64)) — ProfileEvents that measure different metrics for this thread.
The description of them could be found in the table [système.événement](#system_tables-events) Par défaut, les journaux sont ajoutés à la table à des intervalles de 7,5 secondes. Vous pouvez définir cet intervalle dans la [query_thread_log](server-configuration-parameters/settings.md#server_configuration_parameters-query-thread-log) configuration du serveur (voir `flush_interval_milliseconds` paramètre). Pour vider les journaux de force du tampon mémoire dans la table, utilisez le `SYSTEM FLUSH LOGS` requête. diff --git a/docs/ja/operations/system-tables.md b/docs/ja/operations/system-tables.md index 095038b2b72..be0c3356247 100644 --- a/docs/ja/operations/system-tables.md +++ b/docs/ja/operations/system-tables.md @@ -625,10 +625,8 @@ ClickHouseはこのテーブルを作成します。 [query_log](server-configur - `quota_key` (String) — The “quota key” で指定される。 [クォータ](quotas.md) 設定(参照 `keyed`). - `revision` (UInt32) — ClickHouse revision. - `thread_numbers` (Array(UInt32)) — Number of threads that are participating in query execution. -- `ProfileEvents.Names` (Array(String)) — Counters that measure different metrics. The description of them could be found in the table [システムイベント](#system_tables-events) -- `ProfileEvents.Values` (Array(UInt64)) — Values of metrics that are listed in the `ProfileEvents.Names` 列。 -- `Settings.Names` (Array(String)) — Names of settings that were changed when the client ran the query. To enable logging changes to settings, set the `log_query_settings` パラメータは1。 -- `Settings.Values` (Array(String)) — Values of settings that are listed in the `Settings.Names` 列。 +- `ProfileEvents` (Map(String, UInt64)) — ProfileEvents that measure different metrics. The description of them could be found in the table [システムイベント](#system_tables-events) +- `Settings` (Map(String, String)) — Settings that were changed when the client ran the query. To enable logging changes to settings, set the `log_query_settings` パラメータは1。 それぞれのクエリでは、一つまたは二つの行が `query_log` クエリのステータスに応じて、テーブル: @@ -698,8 +696,7 @@ ClickHouseはこのテーブルを作成します。 [query_thread_log](server-c - `http_user_agent` (String) — The `UserAgent` HTTP要求で渡されるヘッダー。 - `quota_key` (String) — The “quota key” で指定される。 [クォータ](quotas.md) 設定(参照 `keyed`). - `revision` (UInt32) — ClickHouse revision. -- `ProfileEvents.Names` (Array(String)) — Counters that measure different metrics for this thread. The description of them could be found in the table [システムイベント](#system_tables-events) -- `ProfileEvents.Values` (Array(UInt64)) — Values of metrics for this thread that are listed in the `ProfileEvents.Names` 列。 +- `ProfileEvents` (Map(String, UInt64)) — ProfileEvents that measure different metrics for this thread. The description of them could be found in the table [システムイベント](#system_tables-events) 既定では、ログは7.5秒間隔でテーブルに追加されます。 この間隔は [query_thread_log](server-configuration-parameters/settings.md#server_configuration_parameters-query-thread-log) サーバ設定(参照 `flush_interval_milliseconds` 変数)。 ログをメモリバッファからテーブルに強制的にフラッシュするには、 `SYSTEM FLUSH LOGS` クエリ。 diff --git a/docs/ru/operations/system-tables/query_log.md b/docs/ru/operations/system-tables/query_log.md index 39f685288d8..d4b4a0808f2 100644 --- a/docs/ru/operations/system-tables/query_log.md +++ b/docs/ru/operations/system-tables/query_log.md @@ -46,7 +46,7 @@ ClickHouse не удаляет данные из таблица автомати - `memory_usage` ([UInt64](../../sql-reference/data-types/int-uint.md#uint-ranges)) — потребление RAM запросом. - `query` ([String](../../sql-reference/data-types/string.md)) — текст запроса. - `exception` ([String](../../sql-reference/data-types/string.md)) — сообщение исключения, если запрос завершился по исключению.
-- `exception_code` ([Int32](../../sql-reference/data-types/int-uint.md)) — код исключения. +- `exception_code` ([Int32](../../sql-reference/data-types/int-uint.md)) — код исключения. - `stack_trace` ([String](../../sql-reference/data-types/string.md)) — [stack trace](https://en.wikipedia.org/wiki/Stack_trace). Пустая строка, если запрос успешно завершен. - `is_initial_query` ([UInt8](../../sql-reference/data-types/int-uint.md)) — вид запроса. Возможные значения: - 1 — запрос был инициирован клиентом. @@ -77,10 +77,8 @@ ClickHouse не удаляет данные из таблица автомати - `quota_key` ([String](../../sql-reference/data-types/string.md)) — «ключ квоты» из настроек [квот](quotas.md) (см. `keyed`). - `revision` ([UInt32](../../sql-reference/data-types/int-uint.md)) — ревизия ClickHouse. - `thread_numbers` ([Array(UInt32)](../../sql-reference/data-types/array.md)) — количество потоков, участвующих в обработке запросов. -- `ProfileEvents.Names` ([Array(String)](../../sql-reference/data-types/array.md)) — счетчики для изменения различных метрик. Описание метрик можно получить из таблицы [system.events](#system_tables-events)(#system_tables-events -- `ProfileEvents.Values` ([Array(UInt64)](../../sql-reference/data-types/array.md)) — метрики, перечисленные в столбце `ProfileEvents.Names`. -- `Settings.Names` ([Array(String)](../../sql-reference/data-types/array.md)) — имена настроек, которые меняются, когда клиент выполняет запрос. Чтобы разрешить логирование изменений настроек, установите параметр `log_query_settings` равным 1. -- `Settings.Values` ([Array(String)](../../sql-reference/data-types/array.md)) — значения настроек, которые перечислены в столбце `Settings.Names`. +- `ProfileEvents` ([Map(String, UInt64)](../../sql-reference/data-types/array.md)) — счетчики для изменения различных метрик. Описание метрик можно получить из таблицы [system.events](#system_tables-events). +- `Settings` ([Map(String, String)](../../sql-reference/data-types/array.md)) — имена настроек, которые меняются, когда клиент выполняет запрос. Чтобы разрешить логирование изменений настроек, установите параметр `log_query_settings` равным 1.
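With the paired `Settings.Names`/`Settings.Values` arrays folded into one Map column, the old `ARRAY JOIN` pattern can be replaced by `mapKeys`/`mapValues` (both functions are used by the tests later in this patch). A minimal illustrative sketch, with the filter chosen arbitrarily:

```sql
-- Unfold the changed settings of recent queries from the single Map column.
SELECT
    query_id,
    mapKeys(Settings) AS changed_setting_names,
    mapValues(Settings) AS changed_setting_values
FROM system.query_log
WHERE event_date >= yesterday()
ORDER BY event_time DESC
LIMIT 5;
```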
**Пример** @@ -108,8 +106,8 @@ memory_usage: 0 current_database: default query: INSERT INTO test1 VALUES exception_code: 0 -exception: -stack_trace: +exception: +stack_trace: is_initial_query: 1 user: default query_id: 50a320fd-85a8-49b8-8761-98a86bcbacef @@ -122,20 +120,18 @@ initial_port: 33452 interface: 1 os_user: bharatnc client_hostname: tower -client_name: ClickHouse +client_name: ClickHouse client_revision: 54437 client_version_major: 20 client_version_minor: 7 client_version_patch: 2 http_method: 0 -http_user_agent: -quota_key: +http_user_agent: +quota_key: revision: 54440 thread_ids: [] -ProfileEvents.Names: [] -ProfileEvents.Values: [] -Settings.Names: ['use_uncompressed_cache','load_balancing','log_queries','max_memory_usage','allow_introspection_functions'] -Settings.Values: ['0','random','1','10000000000','1'] +ProfileEvents: {'Query':1,'SelectQuery':1,'ReadCompressedBytes':36,'CompressedReadBufferBlocks':1,'CompressedReadBufferBytes':10,'IOBufferAllocs':1,'IOBufferAllocBytes':89,'ContextLock':15,'RWLockAcquiredReadLocks':1} +Settings: {'background_pool_size':'32','load_balancing':'random','allow_suspicious_low_cardinality_types':'1','distributed_aggregation_memory_efficient':'1','skip_unavailable_shards':'1','log_queries':'1','max_bytes_before_external_group_by':'20000000000','max_bytes_before_external_sort':'20000000000','allow_introspection_functions':'1'} ``` **Смотрите также** diff --git a/docs/ru/operations/system-tables/query_thread_log.md b/docs/ru/operations/system-tables/query_thread_log.md index 052baf98035..ad769450eb1 100644 --- a/docs/ru/operations/system-tables/query_thread_log.md +++ b/docs/ru/operations/system-tables/query_thread_log.md @@ -57,8 +57,7 @@ ClickHouse не удаляет данные из таблицы автомати - `http_user_agent` ([String](../../sql-reference/data-types/string.md)) — HTTP заголовок `UserAgent`. - `quota_key` ([String](../../sql-reference/data-types/string.md)) — «ключ квоты» из настроек [квот](quotas.md) (см. `keyed`). - `revision` ([UInt32](../../sql-reference/data-types/int-uint.md)) — ревизия ClickHouse. -- `ProfileEvents.Names` ([Array(String)](../../sql-reference/data-types/array.md)) — счетчики для изменения различных метрик для данного потока. Описание метрик можно получить из таблицы [system.events](#system_tables-events). -- `ProfileEvents.Values` ([Array(UInt64)](../../sql-reference/data-types/array.md)) — метрики для данного потока, перечисленные в столбце `ProfileEvents.Names`. +- `ProfileEvents` ([Map(String, UInt64)](../../sql-reference/data-types/array.md)) — счетчики для изменения различных метрик для данного потока. Описание метрик можно получить из таблицы [system.events](#system_tables-events). 
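The thread log gains the same access pattern; a hypothetical sketch that aggregates one per-thread counter over the threads of each query (the counter name and filter are illustrative only):

```sql
-- Sum a per-thread profile counter across all threads of each query.
SELECT
    query_id,
    sum(ProfileEvents['RealTimeMicroseconds']) AS total_real_time_us
FROM system.query_thread_log
WHERE event_date >= yesterday()
GROUP BY query_id
ORDER BY total_real_time_us DESC
LIMIT 5;
```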
**Пример** @@ -97,16 +96,16 @@ initial_port: 33452 interface: 1 os_user: bharatnc client_hostname: tower -client_name: ClickHouse +client_name: ClickHouse client_revision: 54437 client_version_major: 20 client_version_minor: 7 client_version_patch: 2 http_method: 0 -http_user_agent: -quota_key: +http_user_agent: +quota_key: revision: 54440 -ProfileEvents.Names: ['Query','InsertQuery','FileOpen','WriteBufferFromFileDescriptorWrite','WriteBufferFromFileDescriptorWriteBytes','ReadCompressedBytes','CompressedReadBufferBlocks','CompressedReadBufferBytes','IOBufferAllocs','IOBufferAllocBytes','FunctionExecute','CreatedWriteBufferOrdinary','DiskWriteElapsedMicroseconds','NetworkReceiveElapsedMicroseconds','NetworkSendElapsedMicroseconds','InsertedRows','InsertedBytes','SelectedRows','SelectedBytes','MergeTreeDataWriterRows','MergeTreeDataWriterUncompressedBytes','MergeTreeDataWriterCompressedBytes','MergeTreeDataWriterBlocks','MergeTreeDataWriterBlocksAlreadySorted','ContextLock','RWLockAcquiredReadLocks','RealTimeMicroseconds','UserTimeMicroseconds','SoftPageFaults','OSCPUVirtualTimeMicroseconds','OSWriteBytes','OSReadChars','OSWriteChars'] -ProfileEvents.Values: [1,1,11,11,591,148,3,71,29,6533808,1,11,72,18,47,1,12,1,12,1,12,189,1,1,10,2,70853,2748,49,2747,45056,422,1520] +ProfileEvents: {'Query':1,'SelectQuery':1,'ReadCompressedBytes':36,'CompressedReadBufferBlocks':1,'CompressedReadBufferBytes':10,'IOBufferAllocs':1,'IOBufferAllocBytes':89,'ContextLock':15,'RWLockAcquiredReadLocks':1} ``` **Смотрите также** diff --git a/docs/zh/operations/system-tables/query_log.md b/docs/zh/operations/system-tables/query_log.md index 6d8d7a39699..5d634c4581b 100644 --- a/docs/zh/operations/system-tables/query_log.md +++ b/docs/zh/operations/system-tables/query_log.md @@ -80,10 +80,8 @@ ClickHouse不会自动从表中删除数据。 看 [导言](../../operations/sys - `quota_key` ([字符串](../../sql-reference/data-types/string.md)) — The “quota key” 在指定 [配额](../../operations/quotas.md) 设置(见 `keyed`). - `revision` ([UInt32](../../sql-reference/data-types/int-uint.md)) — ClickHouse revision. - `thread_numbers` ([数组(UInt32)](../../sql-reference/data-types/array.md)) — Number of threads that are participating in query execution. -- `ProfileEvents.Names` ([数组(字符串)](../../sql-reference/data-types/array.md)) — Counters that measure different metrics. The description of them could be found in the table [系统。活动](../../operations/system-tables/events.md#system_tables-events) -- `ProfileEvents.Values` ([数组(UInt64)](../../sql-reference/data-types/array.md)) — Values of metrics that are listed in the `ProfileEvents.Names` 列。 -- `Settings.Names` ([数组(字符串)](../../sql-reference/data-types/array.md)) — Names of settings that were changed when the client ran the query. To enable logging changes to settings, set the `log_query_settings` 参数为1。 -- `Settings.Values` ([数组(字符串)](../../sql-reference/data-types/array.md)) — Values of settings that are listed in the `Settings.Names` 列。 +- `ProfileEvents` ([Map(String, UInt64)](../../sql-reference/data-types/array.md)) — Counters that measure different metrics. The description of them could be found in the table [系统。活动](../../operations/system-tables/events.md#system_tables-events) +- `Settings` ([Map(String, String)](../../sql-reference/data-types/array.md)) — Names of settings that were changed when the client ran the query.
To enable logging changes to settings, set the `log_query_settings` 参数为1。 **示例** @@ -132,10 +130,8 @@ http_user_agent: quota_key: revision: 54434 thread_ids: [] -ProfileEvents.Names: [] -ProfileEvents.Values: [] -Settings.Names: ['use_uncompressed_cache','load_balancing','log_queries','max_memory_usage'] -Settings.Values: ['0','random','1','10000000000'] +ProfileEvents: {'Query':1,'SelectQuery':1,'ReadCompressedBytes':36,'CompressedReadBufferBlocks':1,'CompressedReadBufferBytes':10,'IOBufferAllocs':1,'IOBufferAllocBytes':89,'ContextLock':15,'RWLockAcquiredReadLocks':1} +Settings: {'background_pool_size':'32','load_balancing':'random','allow_suspicious_low_cardinality_types':'1','distributed_aggregation_memory_efficient':'1','skip_unavailable_shards':'1','log_queries':'1','max_bytes_before_external_group_by':'20000000000','max_bytes_before_external_sort':'20000000000','allow_introspection_functions':'1'} ``` **另请参阅** diff --git a/docs/zh/operations/system-tables/query_thread_log.md b/docs/zh/operations/system-tables/query_thread_log.md index 8043be8bd75..33583f3b730 100644 --- a/docs/zh/operations/system-tables/query_thread_log.md +++ b/docs/zh/operations/system-tables/query_thread_log.md @@ -61,8 +61,7 @@ ClickHouse不会自动从表中删除数据。 看 [导言](../../operations/sys - `http_user_agent` ([字符串](../../sql-reference/data-types/string.md)) — The `UserAgent` http请求中传递的标头。 - `quota_key` ([字符串](../../sql-reference/data-types/string.md)) — The “quota key” 在指定 [配额](../../operations/quotas.md) 设置(见 `keyed`). - `revision` ([UInt32](../../sql-reference/data-types/int-uint.md)) — ClickHouse revision. -- `ProfileEvents.Names` ([数组(字符串)](../../sql-reference/data-types/array.md)) — Counters that measure different metrics for this thread. The description of them could be found in the table [系统。活动](#system_tables-events). -- `ProfileEvents.Values` ([数组(UInt64)](../../sql-reference/data-types/array.md)) — Values of metrics for this thread that are listed in the `ProfileEvents.Names` 列。 +- `ProfileEvents` ([Map(String, UInt64)](../../sql-reference/data-types/array.md)) — Counters that measure different metrics for this thread. The description of them could be found in the table [系统。活动](#system_tables-events). **示例** @@ -108,8 +107,8 @@ http_method: 0 http_user_agent: quota_key: revision: 54434 -ProfileEvents.Names: ['ContextLock','RealTimeMicroseconds','UserTimeMicroseconds','OSCPUWaitMicroseconds','OSCPUVirtualTimeMicroseconds'] -ProfileEvents.Values: [1,97,81,5,81] +ProfileEvents: {'Query':1,'SelectQuery':1,'ReadCompressedBytes':36,'CompressedReadBufferBlocks':1,'CompressedReadBufferBytes':10,'IOBufferAllocs':1,'IOBufferAllocBytes':89,'ContextLock':15,'RWLockAcquiredReadLocks':1} ...
``` diff --git a/src/Interpreters/OpenTelemetrySpanLog.cpp b/src/Interpreters/OpenTelemetrySpanLog.cpp index e1df145cf51..0967d3062c4 100644 --- a/src/Interpreters/OpenTelemetrySpanLog.cpp +++ b/src/Interpreters/OpenTelemetrySpanLog.cpp @@ -6,6 +6,7 @@ #include #include #include +#include #include namespace DB @@ -31,10 +32,8 @@ Block OpenTelemetrySpanLogElement::createBlock() {std::make_shared(), "start_time_us"}, {std::make_shared(), "finish_time_us"}, {std::make_shared(), "finish_date"}, - {std::make_shared(std::make_shared()), - "attribute.names"}, - {std::make_shared(std::make_shared()), - "attribute.values"} + {std::make_shared(std::make_shared(), std::make_shared()), - "attribute"}, }; } @@ -50,17 +49,17 @@ void OpenTelemetrySpanLogElement::appendToBlock(MutableColumns & columns) const columns[i++]->insert(start_time_us); columns[i++]->insert(finish_time_us); columns[i++]->insert(DateLUT::instance().toDayNum(finish_time_us / 1000000)); - columns[i++]->insert(attribute_names); + // The user might add some int values, and we will have an Int Field, and the // insert will fail because the column requires Strings. Convert the fields // here, because it's hard to remember to convert them in all other places. - Array string_values; - string_values.reserve(attribute_values.size()); - for (const auto & value : attribute_values) + + Map map(attribute_names.size()); + for (size_t attr_idx = 0; attr_idx < map.size(); ++attr_idx) { - string_values.push_back(toString(value)); + map[attr_idx] = Tuple{attribute_names[attr_idx], toString(attribute_values[attr_idx])}; } - columns[i++]->insert(string_values); + columns[i++]->insert(map); } diff --git a/tests/queries/0_stateless/01455_opentelemetry_distributed.sh b/tests/queries/0_stateless/01455_opentelemetry_distributed.sh index d8a8dde966e..f4eaf5f0f2b 100755 --- a/tests/queries/0_stateless/01455_opentelemetry_distributed.sh +++ b/tests/queries/0_stateless/01455_opentelemetry_distributed.sh @@ -26,7 +26,7 @@ select count(*) "'"'"initial query spans with proper parent"'"'" from (select *, attribute_name, attribute_value from system.opentelemetry_span_log - array join attribute.names as attribute_name, + array join attribute.keys as attribute_name, attribute.values as attribute_value) o join system.query_log on query_id = o.attribute_value where trace_id = reinterpretAsUUID(reverse(unhex('$trace_id'))) @@ -41,7 +41,7 @@ select count(*) "'"'"initial query spans with proper parent"'"'" -- same non-empty value for all 'query' spans in this trace.
select uniqExact(value) "'"'"unique non-empty tracestate values"'"'" from system.opentelemetry_span_log - array join attribute.names as name, attribute.values as value + array join attribute.keys as name, attribute.values as value where trace_id = reinterpretAsUUID(reverse(unhex('$trace_id'))) and operation_name = 'query' @@ -108,7 +108,7 @@ ${CLICKHOUSE_CLIENT} -q " -- expect 200 * 0.1 = 20 sampled events on average select if(c > 1 and c < 50, 'OK', 'fail: ' || toString(c)) from system.opentelemetry_span_log - array join attribute.names as name, attribute.values as value + array join attribute.keys as name, attribute.values as value where name = 'clickhouse.query_id' and operation_name = 'query' and parent_span_id = 0 -- only account for the initial queries From c4ef31ba64792b0d5c662b732e114828568fac4b Mon Sep 17 00:00:00 2001 From: sundy-li <543950155@qq.com> Date: Sat, 23 Jan 2021 12:41:53 +0800 Subject: [PATCH 011/931] Update tests in opentelemetry_distributed --- tests/queries/0_stateless/01455_opentelemetry_distributed.sh | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/tests/queries/0_stateless/01455_opentelemetry_distributed.sh b/tests/queries/0_stateless/01455_opentelemetry_distributed.sh index f4eaf5f0f2b..8dfb10f7e1f 100755 --- a/tests/queries/0_stateless/01455_opentelemetry_distributed.sh +++ b/tests/queries/0_stateless/01455_opentelemetry_distributed.sh @@ -104,9 +104,8 @@ wait ${CLICKHOUSE_CLIENT} -q "system flush logs" ${CLICKHOUSE_CLIENT} -q " - with count(*) as c -- expect 200 * 0.1 = 20 sampled events on average - select if(c > 1 and c < 50, 'OK', 'fail: ' || toString(c)) + select if(count() > 1 and count() < 50, 'OK', 'Fail') from system.opentelemetry_span_log array join attribute.keys as name, attribute.values as value where name = 'clickhouse.query_id' From bb40852556c7404aa911a53151a9a0aaaf7fe8fc Mon Sep 17 00:00:00 2001 From: sundy-li <543950155@qq.com> Date: Mon, 25 Jan 2021 10:54:11 +0000 Subject: [PATCH 012/931] Use mapKeys mapValues in tests --- .../0_stateless/01455_opentelemetry_distributed.sh | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) diff --git a/tests/queries/0_stateless/01455_opentelemetry_distributed.sh b/tests/queries/0_stateless/01455_opentelemetry_distributed.sh index 8dfb10f7e1f..34209f052aa 100755 --- a/tests/queries/0_stateless/01455_opentelemetry_distributed.sh +++ b/tests/queries/0_stateless/01455_opentelemetry_distributed.sh @@ -26,8 +26,8 @@ select count(*) "'"'"initial query spans with proper parent"'"'" from (select *, attribute_name, attribute_value from system.opentelemetry_span_log - array join attribute.keys as attribute_name, - attribute.values as attribute_value) o + array join mapKeys(attribute) as attribute_name, + mapValues(attribute) as attribute_value) o join system.query_log on query_id = o.attribute_value where trace_id = reinterpretAsUUID(reverse(unhex('$trace_id'))) and operation_name = 'query' @@ -41,7 +41,7 @@ select count(*) "'"'"initial query spans with proper parent"'"'" -- same non-empty value for all 'query' spans in this trace. 
select uniqExact(value) "'"'"unique non-empty tracestate values"'"'" from system.opentelemetry_span_log - array join attribute.keys as name, attribute.values as value + array join mapKeys(attribute) as name, mapValues(attribute) as value where trace_id = reinterpretAsUUID(reverse(unhex('$trace_id'))) and operation_name = 'query' @@ -107,10 +107,8 @@ ${CLICKHOUSE_CLIENT} -q " -- expect 200 * 0.1 = 20 sampled events on average select if(count() > 1 and count() < 50, 'OK', 'Fail') from system.opentelemetry_span_log - array join attribute.keys as name, attribute.values as value - where name = 'clickhouse.query_id' - and operation_name = 'query' + where operation_name = 'query' and parent_span_id = 0 -- only account for the initial queries - and value like '$query_id-%' + and attribute['clickhouse.query_id'] like '$query_id-%' ; " From c1742e3dca1370b54274104e5bf2ea18202ad33f Mon Sep 17 00:00:00 2001 From: sundy-li <543950155@qq.com> Date: Fri, 29 Jan 2021 09:44:03 +0800 Subject: [PATCH 013/931] Merge Master --- tests/queries/0_stateless/01413_rows_events.sql | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/queries/0_stateless/01413_rows_events.sql b/tests/queries/0_stateless/01413_rows_events.sql index 1cc45e03416..0a0da9b4b12 100644 --- a/tests/queries/0_stateless/01413_rows_events.sql +++ b/tests/queries/0_stateless/01413_rows_events.sql @@ -6,7 +6,7 @@ SYSTEM FLUSH LOGS; SELECT written_rows FROM system.query_log WHERE current_database = currentDatabase() AND query LIKE 'INSERT INTO /* test 01413, query 1 */ rows_events_test%' AND type = 2 AND event_date >= yesterday() ORDER BY event_time DESC LIMIT 1; -SELECT ProfileEvents['InsertedRows'] as value FROM system.query_log WHERE current_database = currentDatabase() query LIKE 'INSERT INTO /* test 01413, query 1 */ rows_events_test%' AND type = 2 AND event_date >= yesterday() ORDER BY event_time DESC LIMIT 1; +SELECT ProfileEvents['InsertedRows'] as value FROM system.query_log WHERE current_database = currentDatabase() AND query LIKE 'INSERT INTO /* test 01413, query 1 */ rows_events_test%' AND type = 2 AND event_date >= yesterday() ORDER BY event_time DESC LIMIT 1; INSERT INTO /* test 01413, query 2 */ rows_events_test VALUES (2,2), (3,3); From 84ffd76853a7a3b4531b22002adaa8d386507b29 Mon Sep 17 00:00:00 2001 From: kssenii Date: Fri, 1 Jan 2021 14:43:11 +0000 Subject: [PATCH 014/931] Initial table sync and replication pre-startup --- .../PostgreSQL/PostgreSQLReplicaConsumer.cpp | 77 ++++++ .../PostgreSQL/PostgreSQLReplicaConsumer.h | 37 +++ .../PostgreSQLReplicationHandler.cpp | 247 ++++++++++++++++++ .../PostgreSQL/PostgreSQLReplicationHandler.h | 74 ++++++ .../PostgreSQLReplicationSettings.cpp | 40 +++ .../PostgreSQLReplicationSettings.h | 21 ++ .../StorageMaterializePostgreSQL.cpp | 113 ++++++++ .../PostgreSQL/StorageMaterializePostgreSQL.h | 40 +++ src/Storages/registerStorages.cpp | 2 + 9 files changed, 651 insertions(+) create mode 100644 src/Storages/PostgreSQL/PostgreSQLReplicaConsumer.cpp create mode 100644 src/Storages/PostgreSQL/PostgreSQLReplicaConsumer.h create mode 100644 src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp create mode 100644 src/Storages/PostgreSQL/PostgreSQLReplicationHandler.h create mode 100644 src/Storages/PostgreSQL/PostgreSQLReplicationSettings.cpp create mode 100644 src/Storages/PostgreSQL/PostgreSQLReplicationSettings.h create mode 100644 src/Storages/PostgreSQL/StorageMaterializePostgreSQL.cpp create mode 100644 src/Storages/PostgreSQL/StorageMaterializePostgreSQL.h diff --git 
a/src/Storages/PostgreSQL/PostgreSQLReplicaConsumer.cpp b/src/Storages/PostgreSQL/PostgreSQLReplicaConsumer.cpp new file mode 100644 index 00000000000..017613500ad --- /dev/null +++ b/src/Storages/PostgreSQL/PostgreSQLReplicaConsumer.cpp @@ -0,0 +1,77 @@ +#include "PostgreSQLReplicaConsumer.h" + +#include +#include + +namespace DB +{ + +PostgreSQLReplicaConsumer::PostgreSQLReplicaConsumer( + const std::string & table_name_, + const std::string & conn_str, + const std::string & replication_slot_name_, + const std::string & publication_name_, + const LSNPosition & start_lsn) + : log(&Poco::Logger::get("PostgreSQLReplicaConsumer")) + , replication_slot_name(replication_slot_name_) + , publication_name(publication_name_) + , table_name(table_name_) + , connection(std::make_shared(conn_str)) + , current_lsn(start_lsn) +{ + replication_connection = std::make_shared(fmt::format("{} replication=database", conn_str)); +} + + +void PostgreSQLReplicaConsumer::run() +{ + auto options = fmt::format(" (\"proto_version\" '1', \"publication_names\" '{}')", publication_name); + startReplication(replication_slot_name, current_lsn.lsn, -1, options); +} + + +void PostgreSQLReplicaConsumer::startReplication( + const std::string & slot_name, const std::string start_lsn, const int64_t /* timeline */, const std::string & plugin_args) +{ + std::string query_str = fmt::format("START_REPLICATION SLOT {} LOGICAL {}", slot_name, start_lsn); + + if (!plugin_args.empty()) + query_str += plugin_args; + + auto tx = std::make_unique(*replication_connection->conn()); + tx->exec(query_str); + + //pqxx::stream_from stream(*tx, pqxx::from_query, std::string_view(query_str)); + //pqxx::result result{tx->exec(query_str)}; + //pqxx::row row{result[0]}; + //for (auto res : row) + //{ + // if (std::size(res)) + // LOG_TRACE(log, "GOT {}", res.as()); + // else + // LOG_TRACE(log, "GOT NULL"); + //} + + // while (true) + // { + // const std::vector * row{stream.read_row()}; + + // if (!row) + // { + // LOG_TRACE(log, "STREAM REPLICATION END"); + // stream.complete(); + // tx->commit(); + // break; + // } + // LOG_TRACE(log, "STARTED REPLICATION. GOT ROW SIZE {}", row->size()); + + // for (const auto idx : ext::range(0, row->size())) + // { + // auto current = (*row)[idx]; + // LOG_TRACE(log, "Started replication. 
GOT: {}", current); + // } + + //} +} + +} diff --git a/src/Storages/PostgreSQL/PostgreSQLReplicaConsumer.h b/src/Storages/PostgreSQL/PostgreSQLReplicaConsumer.h new file mode 100644 index 00000000000..800f444765a --- /dev/null +++ b/src/Storages/PostgreSQL/PostgreSQLReplicaConsumer.h @@ -0,0 +1,37 @@ +#pragma once + +#include +#include "PostgreSQLReplicationHandler.h" +#include "pqxx/pqxx" + +namespace DB +{ + +class PostgreSQLReplicaConsumer +{ +public: + PostgreSQLReplicaConsumer( + const std::string & table_name_, + const std::string & conn_str_, + const std::string & replication_slot_name_, + const std::string & publication_name_, + const LSNPosition & start_lsn); + + void run(); + +private: + void startReplication( + const std::string & slot_name, const std::string start_lsn, const int64_t timeline, const std::string & plugin_args); + + Poco::Logger * log; + const std::string replication_slot_name; + const std::string publication_name; + + const std::string table_name; + PostgreSQLConnectionPtr connection, replication_connection; + + LSNPosition current_lsn; +}; + +} + diff --git a/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp b/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp new file mode 100644 index 00000000000..7fbcf1d9ff2 --- /dev/null +++ b/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp @@ -0,0 +1,247 @@ +#include "PostgreSQLReplicationHandler.h" +#include "PostgreSQLReplicaConsumer.h" + +#include +#include + +namespace DB +{ + +namespace ErrorCodes +{ + extern const int BAD_ARGUMENTS; + extern const int UNKNOWN_TABLE; + extern const int LOGICAL_ERROR; +} + +PostgreSQLReplicationHandler::PostgreSQLReplicationHandler( + const std::string & database_name_, + const std::string & table_name_, + const std::string & conn_str, + const std::string & replication_slot_, + const std::string & publication_name_) + : log(&Poco::Logger::get("PostgreSQLReplicaHandler")) + , database_name(database_name_) + , table_name(table_name_) + , replication_slot(replication_slot_) + , publication_name(publication_name_) + , connection(std::make_shared(conn_str)) +{ + /// Create a replication connection, through which it is possible to execute only commands from streaming replication protocol + /// interface. Passing 'database' as the value instructs walsender to connect to the database specified in the dbname parameter, + /// which will allow the connection to be used for logical replication from that database. + replication_connection = std::make_shared(fmt::format("{} replication=database", conn_str)); + + /// Used commands require a specific transaction isolation mode. + replication_connection->conn()->set_variable("default_transaction_isolation", "'repeatable read'"); + + /// Non temporary replication slot should be the same at restart. + if (replication_slot.empty()) + replication_slot = fmt::format("{}_{}_ch_replication_slot", database_name, table_name); + + /// Temporary replication slot is used to determine a start lsn position and to acquire a snapshot for initial table synchronization. 
+ temp_replication_slot = replication_slot + "_temp"; +} + + +void PostgreSQLReplicationHandler::startup() +{ + tx = std::make_shared(*connection->conn()); + if (publication_name.empty()) + { + publication_name = fmt::format("{}_{}_ch_publication", database_name, table_name); + + if (!isPublicationExist()) + createPublication(); + } + else if (!isPublicationExist()) + { + throw Exception( + ErrorCodes::LOGICAL_ERROR, + "Publication name '{}' is specified in table arguments, but it does not exist", publication_name); + } + tx->commit(); + + startReplication(); +} + + +bool PostgreSQLReplicationHandler::isPublicationExist() +{ + std::string query_str = fmt::format("SELECT exists (SELECT 1 FROM pg_publication WHERE pubname = '{}')", publication_name); + pqxx::result result{tx->exec(query_str)}; + bool publication_exists = (result[0][0].as() == "t"); + + if (publication_exists) + LOG_TRACE(log, "Publication {} already exists. Using existing version", publication_name); + + return publication_exists; +} + + +void PostgreSQLReplicationHandler::createPublication() +{ + /* * It is also important to change the replica identity of this table to be able to receive old values of updated rows: + * ALTER TABLE pgbench_accounts REPLICA IDENTITY FULL; + * * TRUNCATE and DDL are not included in PUBLICATION. + * * 'ONLY' means just a table, without descendants. + */ + std::string query_str = fmt::format("CREATE PUBLICATION {} FOR TABLE ONLY {}", publication_name, table_name); + try + { + tx->exec(query_str); + LOG_TRACE(log, "Created publication {}", publication_name); + } + catch (pqxx::undefined_table const &) + { + throw Exception(fmt::format("PostgreSQL table {}.{} does not exist", database_name, table_name), ErrorCodes::UNKNOWN_TABLE); + } +} + + +void PostgreSQLReplicationHandler::startReplication() +{ + auto ntx = std::make_shared(*replication_connection->conn()); + + /// But it should not actually exist. May exist if failed to drop it before. + if (isReplicationSlotExist(ntx, temp_replication_slot)) + dropReplicationSlot(ntx, temp_replication_slot, true); + + std::string snapshot_name; + LSNPosition start_lsn; + + createTempReplicationSlot(ntx, start_lsn, snapshot_name); + + loadFromSnapshot(snapshot_name); + + /// Do not need this replication slot anymore (snapshot loaded and start lsn determined, will continue replication protocol + /// with another slot, which should be the same at restart (and reused) to minimize memory usage) + /// Non temporary replication slot should be deleted with drop table only. + LOG_DEBUG(log, "Dropping temporary replication slot"); + dropReplicationSlot(ntx, temp_replication_slot, true); + + if (!isReplicationSlotExist(ntx, replication_slot)) + createReplicationSlot(ntx); + + PostgreSQLReplicaConsumer consumer( + table_name, + connection->conn_str(), + replication_slot, + publication_name, + start_lsn); + + LOG_DEBUG(log, "Committing replication transaction"); + ntx->commit(); + + consumer.run(); +} + + +bool PostgreSQLReplicationHandler::isReplicationSlotExist(NontransactionPtr ntx, std::string & slot_name) +{ + std::string query_str = fmt::format("SELECT active, restart_lsn FROM pg_replication_slots WHERE slot_name = '{}'", slot_name); + pqxx::result result{ntx->exec(query_str)}; + + /// Replication slot does not exist + if (result.empty()) + return false; + + bool is_active = result[0][0].as(); + LOG_TRACE(log, "Replication slot {} already exists (active: {}). 
Restart lsn position is {}", + slot_name, is_active, result[0][1].as()); + + return true; +} + + +void PostgreSQLReplicationHandler::createTempReplicationSlot(NontransactionPtr ntx, LSNPosition & start_lsn, std::string & snapshot_name) +{ + std::string query_str = fmt::format("CREATE_REPLICATION_SLOT {} TEMPORARY LOGICAL pgoutput EXPORT_SNAPSHOT", temp_replication_slot); + try + { + pqxx::result result{ntx->exec(query_str)}; + start_lsn.lsn = result[0][1].as(); + snapshot_name = result[0][2].as(); + LOG_TRACE(log, "Created temporary replication slot: {}, start lsn: {}, snapshot: {}", + temp_replication_slot, start_lsn.lsn, snapshot_name); + } + catch (Exception & e) + { + e.addMessage("while creating PostgreSQL replication slot {}", temp_replication_slot); + throw; + } +} + + +void PostgreSQLReplicationHandler::createReplicationSlot(NontransactionPtr ntx) +{ + std::string query_str = fmt::format("CREATE_REPLICATION_SLOT {} LOGICAL pgoutput", replication_slot); + try + { + pqxx::result result{ntx->exec(query_str)}; + LOG_TRACE(log, "Created replication slot: {}, start lsn: {}", replication_slot, result[0][1].as()); + } + catch (Exception & e) + { + e.addMessage("while creating PostgreSQL replication slot {}", replication_slot); + throw; + } +} + + +void PostgreSQLReplicationHandler::dropReplicationSlot(NontransactionPtr ntx, std::string & slot_name, bool use_replication_api) +{ + if (use_replication_api) + { + std::string query_str = fmt::format("DROP_REPLICATION_SLOT {}", slot_name); + ntx->exec(query_str); + } + else + { + pqxx::work work(*connection->conn()); + std::string query_str = fmt::format("SELECT pg_drop_replication_slot('{}')", slot_name); + work.exec(query_str); + work.commit(); + } +} + + +/// Only used when MaterializePostgreSQL table is dropped. +void PostgreSQLReplicationHandler::checkAndDropReplicationSlot() +{ + auto ntx = std::make_shared(*replication_connection->conn()); + if (isReplicationSlotExist(ntx, replication_slot)) + dropReplicationSlot(ntx, replication_slot, false); + ntx->commit(); +} + + +void PostgreSQLReplicationHandler::loadFromSnapshot(std::string & snapshot_name) +{ + auto stx = std::make_unique(*connection->conn()); + /// Required to execute the following command. + stx->set_variable("transaction_isolation", "'repeatable read'"); + + std::string query_str = fmt::format("SET TRANSACTION SNAPSHOT '{}'", snapshot_name); + stx->exec(query_str); + + LOG_DEBUG(log, "Set transaction snapshot"); + query_str = fmt::format("SELECT * FROM {}", table_name); + pqxx::result result{stx->exec(query_str)}; + if (!result.empty()) + { + pqxx::row row{result[0]}; + for (auto res : row) + { + if (std::size(res)) + LOG_TRACE(log, "GOT {}", res.as()); + else + LOG_TRACE(log, "GOT NULL"); + } + } + LOG_DEBUG(log, "Done loading from snapshot"); + stx->commit(); +} + + +} diff --git a/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.h b/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.h new file mode 100644 index 00000000000..06379f2ad4a --- /dev/null +++ b/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.h @@ -0,0 +1,74 @@ +#pragma once + +#include +#include +#include "pqxx/pqxx" + + +/* Implementation of logical streaming replication protocol: https://www.postgresql.org/docs/10/protocol-logical-replication.html. 
+ */ + +namespace DB +{ + +struct LSNPosition +{ + std::string lsn; + + uint64_t getValue() + { + uint64_t upper_half, lower_half, result; + std::sscanf(lsn.data(), "%lX/%lX", &upper_half, &lower_half); + result = (upper_half << 32) + lower_half; + LOG_DEBUG(&Poco::Logger::get("LSNParsing"), + "Parsed LSN position. upper half: {}, lower half: {}, result: {}", + upper_half, lower_half, result); + return result; + } +}; + + +class PostgreSQLReplicationHandler +{ +public: + friend class PGReplicaLSN; + PostgreSQLReplicationHandler( + const std::string & database_name_, + const std::string & table_name_, + const std::string & conn_str_, + const std::string & replication_slot_name_, + const std::string & publication_name_); + + void startup(); + void checkAndDropReplicationSlot(); + +private: + using NontransactionPtr = std::shared_ptr; + + bool isPublicationExist(); + void createPublication(); + + bool isReplicationSlotExist(NontransactionPtr ntx, std::string & slot_name); + void createTempReplicationSlot(NontransactionPtr ntx, LSNPosition & start_lsn, std::string & snapshot_name); + void createReplicationSlot(NontransactionPtr ntx); + void dropReplicationSlot(NontransactionPtr tx, std::string & slot_name, bool use_replication_api); + + void startReplication(); + void loadFromSnapshot(std::string & snapshot_name); + + Poco::Logger * log; + const std::string database_name, table_name; + + std::string replication_slot, publication_name; + std::string temp_replication_slot; + + PostgreSQLConnectionPtr connection; + PostgreSQLConnectionPtr replication_connection; + std::shared_ptr tx; + + //LSNPosition start_lsn, final_lsn; +}; + + +} + diff --git a/src/Storages/PostgreSQL/PostgreSQLReplicationSettings.cpp b/src/Storages/PostgreSQL/PostgreSQLReplicationSettings.cpp new file mode 100644 index 00000000000..98173f7ca07 --- /dev/null +++ b/src/Storages/PostgreSQL/PostgreSQLReplicationSettings.cpp @@ -0,0 +1,40 @@ +#include "PostgreSQLReplicationSettings.h" +#include +#include +#include +#include + + +namespace DB +{ + +namespace ErrorCodes +{ + extern const int UNKNOWN_SETTING; +} + +IMPLEMENT_SETTINGS_TRAITS(MaterializePostgreSQLSettingsTraits, LIST_OF_MATERIALIZE_POSTGRESQL_SETTINGS) + +void MaterializePostgreSQLSettings::loadFromQuery(ASTStorage & storage_def) +{ + if (storage_def.settings) + { + try + { + applyChanges(storage_def.settings->changes); + } + catch (Exception & e) + { + if (e.code() == ErrorCodes::UNKNOWN_SETTING) + e.addMessage("for storage " + storage_def.engine->name); + throw; + } + } + else + { + auto settings_ast = std::make_shared(); + settings_ast->is_standalone = false; + storage_def.set(storage_def.settings, settings_ast); + } +} +} diff --git a/src/Storages/PostgreSQL/PostgreSQLReplicationSettings.h b/src/Storages/PostgreSQL/PostgreSQLReplicationSettings.h new file mode 100644 index 00000000000..c85b3572356 --- /dev/null +++ b/src/Storages/PostgreSQL/PostgreSQLReplicationSettings.h @@ -0,0 +1,21 @@ +#pragma once + +#include + +namespace DB +{ + class ASTStorage; + + +#define LIST_OF_MATERIALIZE_POSTGRESQL_SETTINGS(M) \ + M(String, postgresql_replication_slot_name, "", "PostgreSQL replication slot name.", 0) \ + M(String, postgresql_publication_name, "", "PostgreSQL publication name.", 0) \ + +DECLARE_SETTINGS_TRAITS(MaterializePostgreSQLSettingsTraits, LIST_OF_MATERIALIZE_POSTGRESQL_SETTINGS) + +struct MaterializePostgreSQLSettings : public BaseSettings +{ + void loadFromQuery(ASTStorage & storage_def); +}; + +} diff --git
a/src/Storages/PostgreSQL/StorageMaterializePostgreSQL.cpp b/src/Storages/PostgreSQL/StorageMaterializePostgreSQL.cpp new file mode 100644 index 00000000000..385eb8a8706 --- /dev/null +++ b/src/Storages/PostgreSQL/StorageMaterializePostgreSQL.cpp @@ -0,0 +1,113 @@ +#include "StorageMaterializePostgreSQL.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "PostgreSQLReplicationSettings.h" +#include + + +namespace DB +{ + +namespace ErrorCodes +{ + extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH; +} + +StorageMaterializePostgreSQL::StorageMaterializePostgreSQL( + const StorageID & table_id_, + const String & remote_table_name_, + const ColumnsDescription & columns_, + const ConstraintsDescription & constraints_, + const Context & context_, + std::shared_ptr replication_handler_) + : IStorage(table_id_) + , remote_table_name(remote_table_name_) + , global_context(context_) + , replication_handler(std::move(replication_handler_)) +{ + StorageInMemoryMetadata storage_metadata; + storage_metadata.setColumns(columns_); + storage_metadata.setConstraints(constraints_); + setInMemoryMetadata(storage_metadata); + +} + + +void StorageMaterializePostgreSQL::startup() +{ + replication_handler->startup(); +} + + +void StorageMaterializePostgreSQL::shutdown() +{ + //replication_handler->dropReplicationSlot(); +} + + +void registerStorageMaterializePostgreSQL(StorageFactory & factory) +{ + auto creator_fn = [](const StorageFactory::Arguments & args) + { + ASTs & engine_args = args.engine_args; + bool has_settings = args.storage_def->settings; + auto postgresql_replication_settings = std::make_unique(); + + if (has_settings) + postgresql_replication_settings->loadFromQuery(*args.storage_def); + + if (engine_args.size() != 5) + throw Exception("Storage MaterializePostgreSQL requires 5 parameters: " + "MaterializePostgreSQL('host:port', 'database', 'table', 'username', 'password')", + ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH); + + for (auto & engine_arg : engine_args) + engine_arg = evaluateConstantExpressionOrIdentifierAsLiteral(engine_arg, args.local_context); + + auto parsed_host_port = parseAddress(engine_args[0]->as().value.safeGet(), 5432); + const String & remote_table = engine_args[2]->as().value.safeGet(); + const String & remote_database = engine_args[1]->as().value.safeGet(); + + String connection_str; + connection_str = fmt::format("dbname={} host={} port={} user={} password={}", + remote_database, + parsed_host_port.first, std::to_string(parsed_host_port.second), + engine_args[3]->as().value.safeGet(), + engine_args[4]->as().value.safeGet()); + + auto global_context(args.context.getGlobalContext()); + auto replication_slot_name = global_context.getMacros()->expand(postgresql_replication_settings->postgresql_replication_slot_name.value); + auto publication_name = global_context.getMacros()->expand(postgresql_replication_settings->postgresql_publication_name.value); + + PostgreSQLReplicationHandler replication_handler(remote_database, remote_table, connection_str, replication_slot_name, publication_name); + + return StorageMaterializePostgreSQL::create( + args.table_id, remote_table, args.columns, args.constraints, global_context, + std::make_shared(replication_handler)); + }; + + factory.registerStorage( + "MaterializePostgreSQL", + creator_fn, + StorageFactory::StorageFeatures{ .supports_settings = true, .source_access_type = AccessType::POSTGRES, + }); +} + +} + diff
--git a/src/Storages/PostgreSQL/StorageMaterializePostgreSQL.h b/src/Storages/PostgreSQL/StorageMaterializePostgreSQL.h new file mode 100644 index 00000000000..ef0eb4e75cd --- /dev/null +++ b/src/Storages/PostgreSQL/StorageMaterializePostgreSQL.h @@ -0,0 +1,40 @@ +#pragma once + +#include "config_core.h" + +#include +#include +#include +#include "PostgreSQLReplicationHandler.h" +#include "pqxx/pqxx" + +namespace DB +{ + +class StorageMaterializePostgreSQL final : public ext::shared_ptr_helper, public IStorage +{ + friend struct ext::shared_ptr_helper; + +public: + StorageMaterializePostgreSQL( + const StorageID & table_id_, + const String & remote_table_name_, + const ColumnsDescription & columns_, + const ConstraintsDescription & constraints_, + const Context & context_, + std::shared_ptr replication_handler_); + + String getName() const override { return "MaterializePostgreSQL"; } + + void startup() override; + void shutdown() override; + +private: + String remote_table_name; + Context global_context; + + std::shared_ptr replication_handler; +}; + +} + diff --git a/src/Storages/registerStorages.cpp b/src/Storages/registerStorages.cpp index 0022ee6bd4f..bd32de1c315 100644 --- a/src/Storages/registerStorages.cpp +++ b/src/Storages/registerStorages.cpp @@ -60,6 +60,7 @@ void registerStorageEmbeddedRocksDB(StorageFactory & factory); #if USE_LIBPQXX void registerStoragePostgreSQL(StorageFactory & factory); +void registerStorageMaterializePostgreSQL(StorageFactory & factory); #endif void registerStorages() @@ -117,6 +118,7 @@ void registerStorages() #if USE_LIBPQXX registerStoragePostgreSQL(factory); + registerStorageMaterializePostgreSQL(factory); #endif } From a1bcc5fb3987e8473a6d115aa33138fa34ea2873 Mon Sep 17 00:00:00 2001 From: kssenii Date: Tue, 19 Jan 2021 15:29:22 +0000 Subject: [PATCH 015/931] Decode replication messages --- .../PostgreSQL/PostgreSQLReplicaConsumer.cpp | 235 ++++++++++++++---- .../PostgreSQL/PostgreSQLReplicaConsumer.h | 11 +- .../PostgreSQLReplicationHandler.cpp | 24 +- .../PostgreSQL/PostgreSQLReplicationHandler.h | 2 +- 4 files changed, 217 insertions(+), 55 deletions(-) diff --git a/src/Storages/PostgreSQL/PostgreSQLReplicaConsumer.cpp b/src/Storages/PostgreSQL/PostgreSQLReplicaConsumer.cpp index 017613500ad..5010f574555 100644 --- a/src/Storages/PostgreSQL/PostgreSQLReplicaConsumer.cpp +++ b/src/Storages/PostgreSQL/PostgreSQLReplicaConsumer.cpp @@ -2,9 +2,19 @@ #include #include +#include +#include +#include +#include +#include +#include namespace DB { +namespace ErrorCodes +{ + extern const int LOGICAL_ERROR; +} PostgreSQLReplicaConsumer::PostgreSQLReplicaConsumer( const std::string & table_name_, @@ -23,55 +33,194 @@ PostgreSQLReplicaConsumer::PostgreSQLReplicaConsumer( } +void PostgreSQLReplicaConsumer::readString(const char * message, size_t & pos, size_t size, String & result) +{ + assert(size > pos + 2); + char current = unhex2(message + pos); + pos += 2; + while (pos < size && current != '\0') + { + result += current; + current = unhex2(message + pos); + pos += 2; + } +} + + +Int32 PostgreSQLReplicaConsumer::readInt32(const char * message, size_t & pos) +{ + /// Note: unlike readString, the readInt* helpers are not passed the message size, so bounds checking is left to the caller. + Int32 result = (UInt32(unhex2(message + pos)) << 24) + | (UInt32(unhex2(message + pos + 2)) << 16) + | (UInt32(unhex2(message + pos + 4)) << 8) + | (UInt32(unhex2(message + pos + 6))); + pos += 8; + return result; +} + + +Int16 PostgreSQLReplicaConsumer::readInt16(const char * message, size_t & pos) +{ + Int16 result = (UInt32(unhex2(message + pos)) 
<< 8) + | (UInt32(unhex2(message + pos + 2))); + pos += 4; + return result; +} + + +Int8 PostgreSQLReplicaConsumer::readInt8(const char * message, size_t & pos) +{ + Int8 result = unhex2(message + pos); + pos += 2; + return result; +} + + +Int64 PostgreSQLReplicaConsumer::readInt64(const char * message, size_t & pos) +{ + Int64 result = (UInt64(unhex4(message + pos)) << 48) + | (UInt64(unhex4(message + pos + 4)) << 32) + | (UInt64(unhex4(message + pos + 8)) << 16) + | (UInt64(unhex4(message + pos + 12))); + pos += 16; + return result; +} + + +void PostgreSQLReplicaConsumer::readTupleData(const char * message, size_t & pos, size_t /* size */) +{ + Int16 num_columns = readInt16(message, pos); + /// 'n' means nullable, 'u' means TOASTed value, 't' means text formatted data + LOG_DEBUG(log, "num_columns {}", num_columns); + for (int k = 0; k < num_columns; ++k) + { + char identifier = readInt8(message, pos); + Int32 col_len = readInt32(message, pos); + String result; + for (int i = 0; i < col_len; ++i) + { + result += readInt8(message, pos); + } + LOG_DEBUG(log, "identifier {}, col_len {}, result {}", identifier, col_len, result); + } + //readString(message, pos, size, result); +} + + +void PostgreSQLReplicaConsumer::decodeReplicationMessage(const char * replication_message, size_t size) +{ + /// Skip '\x' + size_t pos = 2; + char type = readInt8(replication_message, pos); + + LOG_TRACE(log, "TYPE: {}", type); + switch (type) + { + case 'B': // Begin + { + Int64 transaction_end_lsn = readInt64(replication_message, pos); + Int64 transaction_commit_timestamp = readInt64(replication_message, pos); + LOG_DEBUG(log, "transaction lsn {}, transaction commit timestamp {}", + transaction_end_lsn, transaction_commit_timestamp); + break; + } + case 'C': // Commit + { + readInt8(replication_message, pos); + Int64 commit_lsn = readInt64(replication_message, pos); + Int64 transaction_end_lsn = readInt64(replication_message, pos); + /// Since postgres epoch + Int64 transaction_commit_timestamp = readInt64(replication_message, pos); + LOG_DEBUG(log, "commit lsn {}, transaction lsn {}, transaction commit timestamp {}", + commit_lsn, transaction_end_lsn, transaction_commit_timestamp); + break; + } + case 'O': // Origin + break; + case 'R': // Relation + { + Int32 relation_id = readInt32(replication_message, pos); + String relation_namespace, relation_name; + readString(replication_message, pos, size, relation_namespace); + readString(replication_message, pos, size, relation_name); + Int8 replica_identity = readInt8(replication_message, pos); + Int16 num_columns = readInt16(replication_message, pos); + + LOG_DEBUG(log, + "Replication message type 'R', relation_id: {}, namespace: {}, relation name {}, replica identity {}, columns number {}", + relation_id, relation_namespace, relation_name, replica_identity, num_columns); + + Int8 key; + Int32 data_type_id, type_modifier; + for (uint16_t i = 0; i < num_columns; ++i) + { + String column_name; + key = readInt8(replication_message, pos); + readString(replication_message, pos, size, column_name); + data_type_id = readInt32(replication_message, pos); + type_modifier = readInt32(replication_message, pos); + LOG_DEBUG(log, "Key {}, column name {}, data type id {}, type modifier {}", key, column_name, data_type_id, type_modifier); + } + + break; + } + case 'Y': // Type + break; + case 'I': // Insert + { + Int32 relation_id = readInt32(replication_message, pos); + Int8 new_tuple = readInt8(replication_message, pos); + 
LOG_DEBUG(log, "relationID {}, newTuple {}", relation_id, new_tuple); + readTupleData(replication_message, pos, size); + break; + } + case 'U': // Update + break; + case 'D': // Delete + break; + case 'T': // Truncate + break; + default: + throw Exception(ErrorCodes::LOGICAL_ERROR, + "Unexpected byte1 value {} while parsing replication message", type); + } +} + + void PostgreSQLReplicaConsumer::run() { - auto options = fmt::format(" (\"proto_version\" '1', \"publication_names\" '{}')", publication_name); - startReplication(replication_slot_name, current_lsn.lsn, -1, options); -} - - -void PostgreSQLReplicaConsumer::startReplication( - const std::string & slot_name, const std::string start_lsn, const int64_t /* timeline */, const std::string & plugin_args) -{ - std::string query_str = fmt::format("START_REPLICATION SLOT {} LOGICAL {}", slot_name, start_lsn); - - if (!plugin_args.empty()) - query_str += plugin_args; - auto tx = std::make_unique(*replication_connection->conn()); - tx->exec(query_str); + /// Both up_to_lsn and up_to_n_changes are passed as NULL here, so changes are peeked without an upper bound. + std::string query_str = fmt::format( + "select data FROM pg_logical_slot_peek_binary_changes(" + "'{}', NULL, NULL, 'publication_names', '{}', 'proto_version', '1')", + replication_slot_name, publication_name); + pqxx::stream_from stream(*tx, pqxx::from_query, std::string_view(query_str)); - //pqxx::stream_from stream(*tx, pqxx::from_query, std::string_view(query_str)); - //pqxx::result result{tx->exec(query_str)}; - //pqxx::row row{result[0]}; - //for (auto res : row) - //{ - // if (std::size(res)) - // LOG_TRACE(log, "GOT {}", res.as()); - // else - // LOG_TRACE(log, "GOT NULL"); - //} + while (true) + { + const std::vector * row{stream.read_row()}; - // while (true) - // { - // const std::vector * row{stream.read_row()}; + if (!row) + { + LOG_TRACE(log, "STREAM REPLICATION END"); + stream.complete(); + tx->commit(); + break; + } - // if (!row) - // { - // LOG_TRACE(log, "STREAM REPLICATION END"); - // stream.complete(); - // tx->commit(); - // break; - // } - // LOG_TRACE(log, "STARTED REPLICATION. GOT ROW SIZE", row->size()); - - // for (const auto idx : ext::range(0, row->size())) - // { - // auto current = (*row)[idx]; - // LOG_TRACE(log, "Started replication. 
GOT: {}", current); - // } - - //} + for (const auto idx : ext::range(0, row->size())) + { + LOG_TRACE(log, "Replication message: {}", (*row)[idx]); + decodeReplicationMessage((*row)[idx].c_str(), (*row)[idx].size()); + } + } } + } + + diff --git a/src/Storages/PostgreSQL/PostgreSQLReplicaConsumer.h b/src/Storages/PostgreSQL/PostgreSQLReplicaConsumer.h index 800f444765a..e2833676412 100644 --- a/src/Storages/PostgreSQL/PostgreSQLReplicaConsumer.h +++ b/src/Storages/PostgreSQL/PostgreSQLReplicaConsumer.h @@ -1,6 +1,6 @@ #pragma once -#include +#include "PostgreSQLConnection.h" #include "PostgreSQLReplicationHandler.h" #include "pqxx/pqxx" @@ -18,10 +18,19 @@ public: const LSNPosition & start_lsn); void run(); + void createSubscription(); private: + void readString(const char * message, size_t & pos, size_t size, String & result); + Int64 readInt64(const char * message, size_t & pos); + Int32 readInt32(const char * message, size_t & pos); + Int16 readInt16(const char * message, size_t & pos); + Int8 readInt8(const char * message, size_t & pos); + void readTupleData(const char * message, size_t & pos, size_t size); + void startReplication( const std::string & slot_name, const std::string start_lsn, const int64_t timeline, const std::string & plugin_args); + void decodeReplicationMessage(const char * replication_message, size_t size); Poco::Logger * log; const std::string replication_slot_name; diff --git a/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp b/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp index 7fbcf1d9ff2..befaf17db11 100644 --- a/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp +++ b/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp @@ -35,11 +35,11 @@ PostgreSQLReplicationHandler::PostgreSQLReplicationHandler( /// Used commands require a specific transaction isolation mode. replication_connection->conn()->set_variable("default_transaction_isolation", "'repeatable read'"); - /// Non temporary replication slot should be the same at restart. + /// Non temporary replication slot. Should be the same at restart. if (replication_slot.empty()) replication_slot = fmt::format("{}_{}_ch_replication_slot", database_name, table_name); - /// Temporary replication slot is used to determine a start lsn position and to acquire a snapshot for initial table synchronization. + /// Temporary replication slot is used to acquire a snapshot for initial table synchronization and to determine starting lsn position. temp_replication_slot = replication_slot + "_temp"; } @@ -51,6 +51,8 @@ void PostgreSQLReplicationHandler::startup() { publication_name = fmt::format("{}_{}_ch_publication", database_name, table_name); + /// Publication defines what tables are included into replication stream. Should be deleted only if MaterializePostgreSQL + /// table is dropped. 
if (!isPublicationExist()) createPublication(); } @@ -70,6 +72,7 @@ bool PostgreSQLReplicationHandler::isPublicationExist() { std::string query_str = fmt::format("SELECT exists (SELECT 1 FROM pg_publication WHERE pubname = '{}')", publication_name); pqxx::result result{tx->exec(query_str)}; + assert(!result.empty()); bool publication_exists = (result[0][0].as() == "t"); if (publication_exists) @@ -81,11 +84,7 @@ bool PostgreSQLReplicationHandler::isPublicationExist() void PostgreSQLReplicationHandler::createPublication() { - /* * It is also important that change replica identity for this table to be able to receive old values of updated rows: - * ALTER TABLE pgbench_accounts REPLICA IDENTITY FULL; - * * TRUNCATE and DDL are not included in PUBLICATION. - * * 'ONLY' means just a table, without descendants. - */ + /// 'ONLY' means just a table, without descendants. std::string query_str = fmt::format("CREATE PUBLICATION {} FOR TABLE ONLY {}", publication_name, table_name); try { @@ -96,6 +95,10 @@ void PostgreSQLReplicationHandler::createPublication() { throw Exception(fmt::format("PostgreSQL table {}.{} does not exist", database_name, table_name), ErrorCodes::UNKNOWN_TABLE); } + + /// TODO: check replica identity + /// Requires changed replica identity for included table to be able to receive old values of updated rows. + /// (ALTER TABLE table_name REPLICA IDENTITY FULL) } @@ -103,7 +106,7 @@ void PostgreSQLReplicationHandler::startReplication() { auto ntx = std::make_shared(*replication_connection->conn()); - /// But it should not actually exist. May exist if failed to drop it before. + /// Normally temporary replication slot should not exist. if (isReplicationSlotExist(ntx, temp_replication_slot)) dropReplicationSlot(ntx, temp_replication_slot, true); @@ -116,10 +119,9 @@ void PostgreSQLReplicationHandler::startReplication() /// Do not need this replication slot anymore (snapshot loaded and start lsn determined, will continue replication protocol /// with another slot, which should be the same at restart (and reused) to minimize memory usage) - /// Non temporary replication slot should be deleted with drop table only. - LOG_DEBUG(log, "Dropping temporaty replication slot"); dropReplicationSlot(ntx, temp_replication_slot, true); + /// Non temporary replication slot should be deleted with drop table only. 
if (!isReplicationSlotExist(ntx, replication_slot)) createReplicationSlot(ntx); @@ -203,6 +205,8 @@ void PostgreSQLReplicationHandler::dropReplicationSlot(NontransactionPtr ntx, st work.exec(query_str); work.commit(); } + + LOG_TRACE(log, "Replication slot {} is dropped", slot_name); } diff --git a/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.h b/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.h index 06379f2ad4a..3dc94b0c776 100644 --- a/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.h +++ b/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.h @@ -1,7 +1,7 @@ #pragma once #include -#include +#include "PostgreSQLConnection.h" #include "pqxx/pqxx" From 60ebb86d9745b7c2b9026b764efdfa1e4208369e Mon Sep 17 00:00:00 2001 From: kssenii Date: Wed, 27 Jan 2021 15:29:28 +0000 Subject: [PATCH 016/931] Add stream and buffer classes --- .../PostgreSQLReplicaBlockInputStream.cpp | 149 ++++++++++++++++++ .../PostgreSQLReplicaBlockInputStream.h | 47 ++++++ .../PostgreSQL/PostgreSQLReplicaConsumer.cpp | 9 ++ .../PostgreSQL/PostgreSQLReplicaConsumer.h | 6 + .../PostgreSQLReplicaConsumerBuffer.cpp | 38 +++++ .../PostgreSQLReplicaConsumerBuffer.h | 39 +++++ .../PostgreSQLReplicationHandler.cpp | 3 + .../PostgreSQL/PostgreSQLReplicationHandler.h | 3 + .../PostgreSQLReplicationSettings.cpp | 4 +- .../PostgreSQLReplicationSettings.h | 6 +- .../PostgreSQL/StorageMaterializePostgreSQL.h | 40 ----- ...reSQL.cpp => StoragePostgreSQLReplica.cpp} | 54 +++++-- .../PostgreSQL/StoragePostgreSQLReplica.h | 57 +++++++ src/Storages/PostgreSQL/buffer_fwd.h | 9 ++ src/Storages/registerStorages.cpp | 4 +- 15 files changed, 406 insertions(+), 62 deletions(-) create mode 100644 src/Storages/PostgreSQL/PostgreSQLReplicaBlockInputStream.cpp create mode 100644 src/Storages/PostgreSQL/PostgreSQLReplicaBlockInputStream.h create mode 100644 src/Storages/PostgreSQL/PostgreSQLReplicaConsumerBuffer.cpp create mode 100644 src/Storages/PostgreSQL/PostgreSQLReplicaConsumerBuffer.h delete mode 100644 src/Storages/PostgreSQL/StorageMaterializePostgreSQL.h rename src/Storages/PostgreSQL/{StorageMaterializePostgreSQL.cpp => StoragePostgreSQLReplica.cpp} (66%) create mode 100644 src/Storages/PostgreSQL/StoragePostgreSQLReplica.h create mode 100644 src/Storages/PostgreSQL/buffer_fwd.h diff --git a/src/Storages/PostgreSQL/PostgreSQLReplicaBlockInputStream.cpp b/src/Storages/PostgreSQL/PostgreSQLReplicaBlockInputStream.cpp new file mode 100644 index 00000000000..04ee68eb3aa --- /dev/null +++ b/src/Storages/PostgreSQL/PostgreSQLReplicaBlockInputStream.cpp @@ -0,0 +1,149 @@ +#include "PostgreSQLReplicaBlockInputStream.h" + +#include +#include +#include + + +namespace ErrorCodes +{ + extern const int LOGICAL_ERROR; +} + +namespace DB +{ + +PostgreSQLReplicaBlockInputStream::PostgreSQLReplicaBlockInputStream( + StoragePostgreSQLReplica & storage_, + ConsumerBufferPtr buffer_, + const StorageMetadataPtr & metadata_snapshot_, + std::shared_ptr context_, + const Names & columns, + size_t max_block_size_) + : storage(storage_) + , buffer(buffer_) + , metadata_snapshot(metadata_snapshot_) + , context(context_) + , column_names(columns) + , max_block_size(max_block_size_) + , non_virtual_header(metadata_snapshot->getSampleBlockNonMaterialized()) + , sample_block(non_virtual_header) + , virtual_header(metadata_snapshot->getSampleBlockForColumns({}, storage.getVirtuals(), storage.getStorageID())) +{ + for (const auto & column : virtual_header) + sample_block.insert(column); +} + + 
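/// (Illustrative usage sketch, not from the patch; the stream is driven like any other
/// IBlockInputStream, with the argument names taken from the constructor above:
///
///     auto stream = std::make_shared<PostgreSQLReplicaBlockInputStream>(
///         storage, buffer, metadata_snapshot, context, column_names, max_block_size);
///     stream->readPrefix();
///     while (Block block = stream->read())
///         ;  /// each non-empty block carries up to max_block_size replicated rows
///     stream->readSuffix();
/// )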
+PostgreSQLReplicaBlockInputStream::~PostgreSQLReplicaBlockInputStream() +{ +} + + +void PostgreSQLReplicaBlockInputStream::readPrefixImpl() +{ +} + + +Block PostgreSQLReplicaBlockInputStream::readImpl() +{ + if (!buffer || finished) + return Block(); + + finished = true; + + MutableColumns result_columns = non_virtual_header.cloneEmptyColumns(); + MutableColumns virtual_columns = virtual_header.cloneEmptyColumns(); + + auto input_format = FormatFactory::instance().getInputFormat( + "Values", *buffer, non_virtual_header, *context, max_block_size); + + InputPort port(input_format->getPort().getHeader(), input_format.get()); + connect(input_format->getPort(), port); + port.setNeeded(); + + auto read_rabbitmq_message = [&] + { + size_t new_rows = 0; + + while (true) + { + auto status = input_format->prepare(); + + switch (status) + { + case IProcessor::Status::Ready: + input_format->work(); + break; + + case IProcessor::Status::Finished: + input_format->resetParser(); + return new_rows; + + case IProcessor::Status::PortFull: + { + auto chunk = port.pull(); + + auto chunk_rows = chunk.getNumRows(); + new_rows += chunk_rows; + + auto columns = chunk.detachColumns(); + + for (size_t i = 0, s = columns.size(); i < s; ++i) + { + result_columns[i]->insertRangeFrom(*columns[i], 0, columns[i]->size()); + } + break; + } + case IProcessor::Status::NeedData: + case IProcessor::Status::Async: + case IProcessor::Status::ExpandPipeline: + throw Exception("Source processor returned status " + IProcessor::statusToName(status), ErrorCodes::LOGICAL_ERROR); + } + } + }; + + size_t total_rows = 0; + + while (true) + { + if (buffer->eof()) + break; + + auto new_rows = read_rabbitmq_message(); + + if (new_rows) + { + //auto timestamp = buffer->getTimestamp(); + //for (size_t i = 0; i < new_rows; ++i) + //{ + // virtual_columns[0]->insert(timestamp); + //} + + total_rows = total_rows + new_rows; + } + + buffer->allowNext(); + + if (total_rows >= max_block_size || !checkTimeLimit()) + break; + } + + if (total_rows == 0) + return Block(); + + auto result_block = non_virtual_header.cloneWithColumns(std::move(result_columns)); + auto virtual_block = virtual_header.cloneWithColumns(std::move(virtual_columns)); + + for (const auto & column : virtual_block.getColumnsWithTypeAndName()) + result_block.insert(column); + + return result_block; +} + + +void PostgreSQLReplicaBlockInputStream::readSuffixImpl() +{ +} + +} diff --git a/src/Storages/PostgreSQL/PostgreSQLReplicaBlockInputStream.h b/src/Storages/PostgreSQL/PostgreSQLReplicaBlockInputStream.h new file mode 100644 index 00000000000..995c640682a --- /dev/null +++ b/src/Storages/PostgreSQL/PostgreSQLReplicaBlockInputStream.h @@ -0,0 +1,47 @@ +#pragma once + +#include +#include "StoragePostgreSQLReplica.h" +#include "PostgreSQLReplicaConsumerBuffer.h" +#include "buffer_fwd.h" + + +namespace DB +{ + +class PostgreSQLReplicaBlockInputStream : public IBlockInputStream +{ + +public: + PostgreSQLReplicaBlockInputStream( + StoragePostgreSQLReplica & storage_, + ConsumerBufferPtr buffer_, + const StorageMetadataPtr & metadata_snapshot_, + std::shared_ptr context_, + const Names & columns, + size_t max_block_size_); + + ~PostgreSQLReplicaBlockInputStream() override; + + String getName() const override { return storage.getName(); } + Block getHeader() const override { return sample_block; } + + void readPrefixImpl() override; + Block readImpl() override; + void readSuffixImpl() override; + +private: + StoragePostgreSQLReplica & storage; + ConsumerBufferPtr buffer; + 
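    /// Feeds readImpl(): the consumer queues raw row payloads into this buffer, and the
    /// stream re-arms it with allowNext() after each parsed row.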
StorageMetadataPtr metadata_snapshot; + std::shared_ptr context; + Names column_names; + const size_t max_block_size; + + bool finished = false; + const Block non_virtual_header; + Block sample_block; + const Block virtual_header; +}; + +} diff --git a/src/Storages/PostgreSQL/PostgreSQLReplicaConsumer.cpp b/src/Storages/PostgreSQL/PostgreSQLReplicaConsumer.cpp index 5010f574555..1d432bfa5cb 100644 --- a/src/Storages/PostgreSQL/PostgreSQLReplicaConsumer.cpp +++ b/src/Storages/PostgreSQL/PostgreSQLReplicaConsumer.cpp @@ -17,12 +17,14 @@ namespace ErrorCodes } PostgreSQLReplicaConsumer::PostgreSQLReplicaConsumer( + Context & context_, const std::string & table_name_, const std::string & conn_str, const std::string & replication_slot_name_, const std::string & publication_name_, const LSNPosition & start_lsn) : log(&Poco::Logger::get("PostgreSQLReaplicaConsumer")) + , context(context_) , replication_slot_name(replication_slot_name_) , publication_name(publication_name_) , table_name(table_name_) @@ -30,6 +32,9 @@ PostgreSQLReplicaConsumer::PostgreSQLReplicaConsumer( , current_lsn(start_lsn) { replication_connection = std::make_shared(fmt::format("{} replication=database", conn_str)); + + wal_reader_task = context.getSchedulePool().createTask("PostgreSQLReplicaWALReader", [this]{ WALReaderFunc(); }); + wal_reader_task->deactivate(); } @@ -221,6 +226,10 @@ void PostgreSQLReplicaConsumer::run() } +void PostgreSQLReplicaConsumer::WALReaderFunc() +{ +} + } diff --git a/src/Storages/PostgreSQL/PostgreSQLReplicaConsumer.h b/src/Storages/PostgreSQL/PostgreSQLReplicaConsumer.h index e2833676412..d64c11d72c4 100644 --- a/src/Storages/PostgreSQL/PostgreSQLReplicaConsumer.h +++ b/src/Storages/PostgreSQL/PostgreSQLReplicaConsumer.h @@ -2,6 +2,7 @@ #include "PostgreSQLConnection.h" #include "PostgreSQLReplicationHandler.h" +#include #include "pqxx/pqxx" namespace DB @@ -11,6 +12,7 @@ class PostgreSQLReplicaConsumer { public: PostgreSQLReplicaConsumer( + Context & context_, const std::string & table_name_, const std::string & conn_str_, const std::string & replication_slot_name_, @@ -32,7 +34,10 @@ private: const std::string & slot_name, const std::string start_lsn, const int64_t timeline, const std::string & plugin_args); void decodeReplicationMessage(const char * replication_message, size_t size); + void WALReaderFunc(); + Poco::Logger * log; + Context & context; const std::string replication_slot_name; const std::string publication_name; @@ -40,6 +45,7 @@ private: PostgreSQLConnectionPtr connection, replication_connection; LSNPosition current_lsn; + BackgroundSchedulePool::TaskHolder wal_reader_task; }; } diff --git a/src/Storages/PostgreSQL/PostgreSQLReplicaConsumerBuffer.cpp b/src/Storages/PostgreSQL/PostgreSQLReplicaConsumerBuffer.cpp new file mode 100644 index 00000000000..e8c4ba3d55a --- /dev/null +++ b/src/Storages/PostgreSQL/PostgreSQLReplicaConsumerBuffer.cpp @@ -0,0 +1,38 @@ +#include "PostgreSQLReplicaConsumerBuffer.h" + + +namespace DB +{ + +PostgreSQLReplicaConsumerBuffer::PostgreSQLReplicaConsumerBuffer( + uint64_t max_block_size_) + : ReadBuffer(nullptr, 0) + , rows_data(max_block_size_) +{ +} + + +PostgreSQLReplicaConsumerBuffer::~PostgreSQLReplicaConsumerBuffer() +{ + BufferBase::set(nullptr, 0, 0); +} + + +bool PostgreSQLReplicaConsumerBuffer::nextImpl() +{ + if (!allowed) + return false; + + if (rows_data.tryPop(current_row_data)) + { + auto * new_position = const_cast(current_row_data.data.data()); + BufferBase::set(new_position, current_row_data.data.size(), 0); + allowed = false; + + 
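        /// At this point exactly one queued row is exposed through the buffer's working
        /// region; the owning stream must call allowNext() before the next read, which
        /// keeps row boundaries aligned with the Values-format parser.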
return true; + } + + return false; +} + +} diff --git a/src/Storages/PostgreSQL/PostgreSQLReplicaConsumerBuffer.h b/src/Storages/PostgreSQL/PostgreSQLReplicaConsumerBuffer.h new file mode 100644 index 00000000000..8c8de3a8b68 --- /dev/null +++ b/src/Storages/PostgreSQL/PostgreSQLReplicaConsumerBuffer.h @@ -0,0 +1,39 @@ +#pragma once + +#include +#include +#include +#include +#include +#include "buffer_fwd.h" + + +namespace DB +{ + +class PostgreSQLReplicaConsumerBuffer : public ReadBuffer +{ + +public: + PostgreSQLReplicaConsumerBuffer( + uint64_t max_block_size_); + + ~PostgreSQLReplicaConsumerBuffer() override; + + void allowNext() { allowed = true; } + +private: + bool nextImpl() override; + + struct RowData + { + String data; + RowData() : data("") {} + }; + + RowData current_row_data; + ConcurrentBoundedQueue rows_data; + bool allowed = true; +}; + +} diff --git a/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp b/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp index befaf17db11..18b6aca5de1 100644 --- a/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp +++ b/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp @@ -15,12 +15,14 @@ namespace ErrorCodes } PostgreSQLReplicationHandler::PostgreSQLReplicationHandler( + Context & context_, const std::string & database_name_, const std::string & table_name_, const std::string & conn_str, const std::string & replication_slot_, const std::string & publication_name_) : log(&Poco::Logger::get("PostgreSQLReplicaHandler")) + , context(context_) , database_name(database_name_) , table_name(table_name_) , replication_slot(replication_slot_) @@ -126,6 +128,7 @@ void PostgreSQLReplicationHandler::startReplication() createReplicationSlot(ntx); PostgreSQLReplicaConsumer consumer( + context, table_name, connection->conn_str(), replication_slot, diff --git a/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.h b/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.h index 3dc94b0c776..55ea8dbae41 100644 --- a/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.h +++ b/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.h @@ -2,6 +2,7 @@ #include #include "PostgreSQLConnection.h" +#include #include "pqxx/pqxx" @@ -33,6 +34,7 @@ class PostgreSQLReplicationHandler public: friend class PGReplicaLSN; PostgreSQLReplicationHandler( + Context & context_, const std::string & database_name_, const std::string & table_name_, const std::string & conn_str_, @@ -57,6 +59,7 @@ private: void loadFromSnapshot(std::string & snapshot_name); Poco::Logger * log; + Context & context; const std::string database_name, table_name; std::string replication_slot, publication_name; diff --git a/src/Storages/PostgreSQL/PostgreSQLReplicationSettings.cpp b/src/Storages/PostgreSQL/PostgreSQLReplicationSettings.cpp index 98173f7ca07..fa5ebb0edf3 100644 --- a/src/Storages/PostgreSQL/PostgreSQLReplicationSettings.cpp +++ b/src/Storages/PostgreSQL/PostgreSQLReplicationSettings.cpp @@ -13,9 +13,9 @@ namespace ErrorCodes extern const int UNKNOWN_SETTING; } -IMPLEMENT_SETTINGS_TRAITS(MaterializePostgreSQLSettingsTraits, LIST_OF_MATERIALIZE_POSTGRESQL_SETTINGS) +IMPLEMENT_SETTINGS_TRAITS(PostgreSQLReplicationSettingsTraits, LIST_OF_POSTGRESQL_REPLICATION_SETTINGS) -void MaterializePostgreSQLSettings::loadFromQuery(ASTStorage & storage_def) +void PostgreSQLReplicationSettings::loadFromQuery(ASTStorage & storage_def) { if (storage_def.settings) { diff --git a/src/Storages/PostgreSQL/PostgreSQLReplicationSettings.h 
b/src/Storages/PostgreSQL/PostgreSQLReplicationSettings.h index c85b3572356..1c3ca6ff73d 100644 --- a/src/Storages/PostgreSQL/PostgreSQLReplicationSettings.h +++ b/src/Storages/PostgreSQL/PostgreSQLReplicationSettings.h @@ -7,13 +7,13 @@ namespace DB class ASTStorage; -#define LIST_OF_MATERIALIZE_POSTGRESQL_SETTINGS(M) \ +#define LIST_OF_POSTGRESQL_REPLICATION_SETTINGS(M) \ M(String, postgresql_replication_slot_name, "", "PostgreSQL replication slot name.", 0) \ M(String, postgresql_publication_name, "", "PostgreSQL publication name.", 0) \ -DECLARE_SETTINGS_TRAITS(MaterializePostgreSQLSettingsTraits, LIST_OF_MATERIALIZE_POSTGRESQL_SETTINGS) +DECLARE_SETTINGS_TRAITS(PostgreSQLReplicationSettingsTraits, LIST_OF_POSTGRESQL_REPLICATION_SETTINGS) -struct MaterializePostgreSQLSettings : public BaseSettings +struct PostgreSQLReplicationSettings : public BaseSettings { void loadFromQuery(ASTStorage & storage_def); }; diff --git a/src/Storages/PostgreSQL/StorageMaterializePostgreSQL.h b/src/Storages/PostgreSQL/StorageMaterializePostgreSQL.h deleted file mode 100644 index ef0eb4e75cd..00000000000 --- a/src/Storages/PostgreSQL/StorageMaterializePostgreSQL.h +++ /dev/null @@ -1,40 +0,0 @@ -#pragma once - -#include "config_core.h" - -#include -#include -#include -#include "PostgreSQLReplicationHandler.h" -#include "pqxx/pqxx" - -namespace DB -{ - -class StorageMaterializePostgreSQL final : public ext::shared_ptr_helper, public IStorage -{ - friend struct ext::shared_ptr_helper; - -public: - StorageMaterializePostgreSQL( - const StorageID & table_id_, - const String & remote_table_name_, - const ColumnsDescription & columns_, - const ConstraintsDescription & constraints_, - const Context & context_, - std::shared_ptr replication_handler_); - - String getName() const override { return "MaterializePostgreSQL"; } - - void startup() override; - void shutdown() override; - -private: - String remote_table_name; - Context global_context; - - std::shared_ptr replication_handler; -}; - -} - diff --git a/src/Storages/PostgreSQL/StorageMaterializePostgreSQL.cpp b/src/Storages/PostgreSQL/StoragePostgreSQLReplica.cpp similarity index 66% rename from src/Storages/PostgreSQL/StorageMaterializePostgreSQL.cpp rename to src/Storages/PostgreSQL/StoragePostgreSQLReplica.cpp index 385eb8a8706..657f8e5aa47 100644 --- a/src/Storages/PostgreSQL/StorageMaterializePostgreSQL.cpp +++ b/src/Storages/PostgreSQL/StoragePostgreSQLReplica.cpp @@ -1,9 +1,10 @@ -#include "StorageMaterializePostgreSQL.h" +#include "StoragePostgreSQLReplica.h" #include #include + #include -#include + #include #include #include @@ -12,6 +13,7 @@ #include #include #include +#include #include #include #include @@ -19,6 +21,7 @@ #include #include "PostgreSQLReplicationSettings.h" #include +#include "PostgreSQLReplicaBlockInputStream.h" namespace DB @@ -29,51 +32,66 @@ namespace ErrorCodes extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH; } -StorageMaterializePostgreSQL::StorageMaterializePostgreSQL( +StoragePostgreSQLReplica::StoragePostgreSQLReplica( const StorageID & table_id_, const String & remote_table_name_, const ColumnsDescription & columns_, const ConstraintsDescription & constraints_, const Context & context_, - std::shared_ptr replication_handler_) + const PostgreSQLReplicationHandler & replication_handler_, + std::unique_ptr replication_settings_) : IStorage(table_id_) , remote_table_name(remote_table_name_) , global_context(context_) - , replication_handler(std::move(replication_handler_)) + , 
replication_settings(std::move(replication_settings_)) + , replication_handler(std::make_unique(replication_handler_)) { StorageInMemoryMetadata storage_metadata; storage_metadata.setColumns(columns_); storage_metadata.setConstraints(constraints_); setInMemoryMetadata(storage_metadata); - } -void StorageMaterializePostgreSQL::startup() +void StoragePostgreSQLReplica::startup() { replication_handler->startup(); } -void StorageMaterializePostgreSQL::shutdown() +void StoragePostgreSQLReplica::shutdown() { //replication_handler->dropReplicationSlot(); } -void registerStorageMaterializePostgreSQL(StorageFactory & factory) +Pipe StoragePostgreSQLReplica::read( + const Names & column_names, + const StorageMetadataPtr & metadata_snapshot, + SelectQueryInfo & /* query_info */, + const Context & /* context */, + QueryProcessingStage::Enum /* processed_stage */, + size_t /* max_block_size */, + unsigned /* num_streams */) +{ + auto sample_block = metadata_snapshot->getSampleBlockForColumns(column_names, getVirtuals(), getStorageID()); + return Pipe(); +} + + +void registerStoragePostgreSQLReplica(StorageFactory & factory) { auto creator_fn = [](const StorageFactory::Arguments & args) { ASTs & engine_args = args.engine_args; bool has_settings = args.storage_def->settings; - auto postgresql_replication_settings = std::make_unique(); + auto postgresql_replication_settings = std::make_unique(); if (has_settings) postgresql_replication_settings->loadFromQuery(*args.storage_def); if (engine_args.size() != 5) - throw Exception("Storage MaterializePostgreSQL requires 5 parameters: " + throw Exception("Storage PostgreSQLReplica requires 5 parameters: " "PostgreSQL('host:port', 'database', 'table', 'username', 'password'", ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH); @@ -95,19 +113,25 @@ void registerStorageMaterializePostgreSQL(StorageFactory & factory) auto replication_slot_name = global_context.getMacros()->expand(postgresql_replication_settings->postgresql_replication_slot_name.value); auto publication_name = global_context.getMacros()->expand(postgresql_replication_settings->postgresql_publication_name.value); - PostgreSQLReplicationHandler replication_handler(remote_database, remote_table, connection_str, replication_slot_name, publication_name); + PostgreSQLReplicationHandler replication_handler(global_context, remote_database, remote_table, connection_str, replication_slot_name, publication_name); - return StorageMaterializePostgreSQL::create( + return StoragePostgreSQLReplica::create( args.table_id, remote_table, args.columns, args.constraints, global_context, - std::make_shared(replication_handler)); + replication_handler, std::move(postgresql_replication_settings)); }; factory.registerStorage( - "MaterializePostgreSQL", + "PostgreSQLReplica", creator_fn, StorageFactory::StorageFeatures{ .supports_settings = true, .source_access_type = AccessType::POSTGRES, }); } +NamesAndTypesList StoragePostgreSQLReplica::getVirtuals() const +{ + return NamesAndTypesList{ + }; +} + } diff --git a/src/Storages/PostgreSQL/StoragePostgreSQLReplica.h b/src/Storages/PostgreSQL/StoragePostgreSQLReplica.h new file mode 100644 index 00000000000..860ab8f6d6c --- /dev/null +++ b/src/Storages/PostgreSQL/StoragePostgreSQLReplica.h @@ -0,0 +1,57 @@ +#pragma once + +#include "config_core.h" + +#include +#include +#include +#include "PostgreSQLReplicationHandler.h" +#include "PostgreSQLReplicationSettings.h" +#include "buffer_fwd.h" +#include "pqxx/pqxx" + +namespace DB +{ + +class StoragePostgreSQLReplica final : public 
ext::shared_ptr_helper, public IStorage +{ + friend struct ext::shared_ptr_helper; + +public: + String getName() const override { return "PostgreSQLReplica"; } + + void startup() override; + void shutdown() override; + + NamesAndTypesList getVirtuals() const override; + + Pipe read( + const Names & column_names, + const StorageMetadataPtr & metadata_snapshot, + SelectQueryInfo & query_info, + const Context & context, + QueryProcessingStage::Enum processed_stage, + size_t max_block_size, + unsigned num_streams) override; + + +protected: + StoragePostgreSQLReplica( + const StorageID & table_id_, + const String & remote_table_name_, + const ColumnsDescription & columns_, + const ConstraintsDescription & constraints_, + const Context & context_, + const PostgreSQLReplicationHandler & replication_handler_, + std::unique_ptr replication_settings_); + +private: + String remote_table_name; + Context global_context; + + std::unique_ptr replication_settings; + std::unique_ptr replication_handler; +}; + +} + diff --git a/src/Storages/PostgreSQL/buffer_fwd.h b/src/Storages/PostgreSQL/buffer_fwd.h new file mode 100644 index 00000000000..40ffd64aad3 --- /dev/null +++ b/src/Storages/PostgreSQL/buffer_fwd.h @@ -0,0 +1,9 @@ +#pragma once + +namespace DB +{ + +class PostgreSQLReplicaConsumerBuffer; +using ConsumerBufferPtr = std::shared_ptr; + +} diff --git a/src/Storages/registerStorages.cpp b/src/Storages/registerStorages.cpp index bd32de1c315..0f5a3acaa86 100644 --- a/src/Storages/registerStorages.cpp +++ b/src/Storages/registerStorages.cpp @@ -60,7 +60,7 @@ void registerStorageEmbeddedRocksDB(StorageFactory & factory); #if USE_LIBPQXX void registerStoragePostgreSQL(StorageFactory & factory); -void registerStorageMaterializePostgreSQL(StorageFactory & factory); +void registerStoragePostgreSQLReplica(StorageFactory & factory); #endif void registerStorages() @@ -118,7 +118,7 @@ void registerStorages() #if USE_LIBPQXX registerStoragePostgreSQL(factory); - registerStorageMaterializePostgreSQL(factory); + registerStoragePostgreSQLReplica(factory); #endif } From b842ce114bc1c163cd1a7a67d1de8687ce136015 Mon Sep 17 00:00:00 2001 From: kssenii Date: Wed, 27 Jan 2021 21:46:19 +0000 Subject: [PATCH 017/931] Initial sync into ReplacingMergeTree table, select via nested table --- .../compose/docker_compose_postgres.yml | 1 + src/Core/Settings.h | 1 + .../PostgreSQLBlockInputStream.cpp | 5 +- src/DataStreams/PostgreSQLBlockInputStream.h | 4 +- .../PostgreSQLDictionarySource.cpp | 15 +- .../PostgreSQL/PostgreSQLReplicaConsumer.cpp | 113 ++++++--- .../PostgreSQL/PostgreSQLReplicaConsumer.h | 45 +++- .../PostgreSQLReplicationHandler.cpp | 113 +++++---- .../PostgreSQL/PostgreSQLReplicationHandler.h | 28 +-- .../PostgreSQL/StoragePostgreSQLReplica.cpp | 226 ++++++++++++++---- .../PostgreSQL/StoragePostgreSQLReplica.h | 29 ++- src/Storages/StoragePostgreSQL.cpp | 3 +- .../__init__.py | 0 .../configs/log_conf.xml | 11 + .../test_storage_postgresql_replica/test.py | 87 +++++++ .../test_postgresql_replica.reference | 50 ++++ 16 files changed, 575 insertions(+), 156 deletions(-) create mode 100644 tests/integration/test_storage_postgresql_replica/__init__.py create mode 100644 tests/integration/test_storage_postgresql_replica/configs/log_conf.xml create mode 100644 tests/integration/test_storage_postgresql_replica/test.py create mode 100644 tests/integration/test_storage_postgresql_replica/test_postgresql_replica.reference diff --git a/docker/test/integration/runner/compose/docker_compose_postgres.yml 
b/docker/test/integration/runner/compose/docker_compose_postgres.yml index fff4fb1fa42..7b3bee8de08 100644 --- a/docker/test/integration/runner/compose/docker_compose_postgres.yml +++ b/docker/test/integration/runner/compose/docker_compose_postgres.yml @@ -7,6 +7,7 @@ services: POSTGRES_PASSWORD: mysecretpassword ports: - 5432:5432 + command: [ "postgres", "-c", "wal_level=logical", "-c", "max_replication_slots=5"] networks: default: aliases: diff --git a/src/Core/Settings.h b/src/Core/Settings.h index 9bb9ad30f15..8c830f2dac1 100644 --- a/src/Core/Settings.h +++ b/src/Core/Settings.h @@ -60,6 +60,7 @@ class IColumn; M(Milliseconds, replace_running_query_max_wait_ms, 5000, "The wait time for running query with the same query_id to finish when setting 'replace_running_query' is active.", 0) \ M(Milliseconds, kafka_max_wait_ms, 5000, "The wait time for reading from Kafka before retry.", 0) \ M(Milliseconds, rabbitmq_max_wait_ms, 5000, "The wait time for reading from RabbitMQ before retry.", 0) \ + M(UInt64, postgresql_replica_max_rows_to_insert, 65536, "Maximum number of rows in PostgreSQL batch insertion in PostgreSQLReplica storage engine", 0) \ M(UInt64, poll_interval, DBMS_DEFAULT_POLL_INTERVAL, "Block at the query wait loop on the server for the specified number of seconds.", 0) \ M(UInt64, idle_connection_timeout, 3600, "Close idle TCP connections after specified number of seconds.", 0) \ M(UInt64, distributed_connections_pool_size, DBMS_DEFAULT_DISTRIBUTED_CONNECTIONS_POOL_SIZE, "Maximum number of connections with one remote server in the pool.", 0) \ diff --git a/src/DataStreams/PostgreSQLBlockInputStream.cpp b/src/DataStreams/PostgreSQLBlockInputStream.cpp index da6a83fb930..4646a8a9d32 100644 --- a/src/DataStreams/PostgreSQLBlockInputStream.cpp +++ b/src/DataStreams/PostgreSQLBlockInputStream.cpp @@ -28,13 +28,13 @@ namespace ErrorCodes } PostgreSQLBlockInputStream::PostgreSQLBlockInputStream( - ConnectionPtr connection_, + std::unique_ptr tx_, const std::string & query_str_, const Block & sample_block, const UInt64 max_block_size_) : query_str(query_str_) , max_block_size(max_block_size_) - , connection(connection_) + , tx(std::move(tx_)) { description.init(sample_block); for (const auto idx : ext::range(0, description.sample_block.columns())) @@ -48,7 +48,6 @@ PostgreSQLBlockInputStream::PostgreSQLBlockInputStream( void PostgreSQLBlockInputStream::readPrefix() { - tx = std::make_unique(*connection); stream = std::make_unique(*tx, pqxx::from_query, std::string_view(query_str)); } diff --git a/src/DataStreams/PostgreSQLBlockInputStream.h b/src/DataStreams/PostgreSQLBlockInputStream.h index b88c81cca0a..1e52b48c7cf 100644 --- a/src/DataStreams/PostgreSQLBlockInputStream.h +++ b/src/DataStreams/PostgreSQLBlockInputStream.h @@ -20,7 +20,7 @@ class PostgreSQLBlockInputStream : public IBlockInputStream { public: PostgreSQLBlockInputStream( - ConnectionPtr connection_, + std::unique_ptr tx_, const std::string & query_str, const Block & sample_block, const UInt64 max_block_size_); @@ -48,7 +48,7 @@ private: ExternalResultDescription description; ConnectionPtr connection; - std::unique_ptr tx; + std::unique_ptr tx; std::unique_ptr stream; struct ArrayInfo diff --git a/src/Dictionaries/PostgreSQLDictionarySource.cpp b/src/Dictionaries/PostgreSQLDictionarySource.cpp index aa852404750..8ede0bc8813 100644 --- a/src/Dictionaries/PostgreSQLDictionarySource.cpp +++ b/src/Dictionaries/PostgreSQLDictionarySource.cpp @@ -68,8 +68,9 @@ PostgreSQLDictionarySource::PostgreSQLDictionarySource(const 
PostgreSQLDictionar BlockInputStreamPtr PostgreSQLDictionarySource::loadAll() { LOG_TRACE(log, load_all_query); + auto tx = std::make_unique(*connection->conn()); return std::make_shared( - connection->conn(), load_all_query, sample_block, max_block_size); + std::move(tx), load_all_query, sample_block, max_block_size); } @@ -77,20 +78,23 @@ BlockInputStreamPtr PostgreSQLDictionarySource::loadUpdatedAll() { auto load_update_query = getUpdateFieldAndDate(); LOG_TRACE(log, load_update_query); - return std::make_shared(connection->conn(), load_update_query, sample_block, max_block_size); + auto tx = std::make_unique(*connection->conn()); + return std::make_shared(std::move(tx), load_update_query, sample_block, max_block_size); } BlockInputStreamPtr PostgreSQLDictionarySource::loadIds(const std::vector & ids) { const auto query = query_builder.composeLoadIdsQuery(ids); - return std::make_shared(connection->conn(), query, sample_block, max_block_size); + auto tx = std::make_unique(*connection->conn()); + return std::make_shared(std::move(tx), query, sample_block, max_block_size); } BlockInputStreamPtr PostgreSQLDictionarySource::loadKeys(const Columns & key_columns, const std::vector & requested_rows) { const auto query = query_builder.composeLoadKeysQuery(key_columns, requested_rows, ExternalQueryBuilder::AND_OR_CHAIN); - return std::make_shared(connection->conn(), query, sample_block, max_block_size); + auto tx = std::make_unique(*connection->conn()); + return std::make_shared(std::move(tx), query, sample_block, max_block_size); } @@ -112,7 +116,8 @@ std::string PostgreSQLDictionarySource::doInvalidateQuery(const std::string & re Block invalidate_sample_block; ColumnPtr column(ColumnString::create()); invalidate_sample_block.insert(ColumnWithTypeAndName(column, std::make_shared(), "Sample Block")); - PostgreSQLBlockInputStream block_input_stream(connection->conn(), request, invalidate_sample_block, 1); + auto tx = std::make_unique(*connection->conn()); + PostgreSQLBlockInputStream block_input_stream(std::move(tx), request, invalidate_sample_block, 1); return readInvalidateQuery(block_input_stream); } diff --git a/src/Storages/PostgreSQL/PostgreSQLReplicaConsumer.cpp b/src/Storages/PostgreSQL/PostgreSQLReplicaConsumer.cpp index 1d432bfa5cb..076863eb8dd 100644 --- a/src/Storages/PostgreSQL/PostgreSQLReplicaConsumer.cpp +++ b/src/Storages/PostgreSQL/PostgreSQLReplicaConsumer.cpp @@ -8,6 +8,7 @@ #include #include #include +#include namespace DB { @@ -16,8 +17,12 @@ namespace ErrorCodes extern const int LOGICAL_ERROR; } +static const auto wal_reader_reschedule_ms = 500; +static const auto max_thread_work_duration_ms = 60000; +static const auto max_empty_slot_reads = 20; + PostgreSQLReplicaConsumer::PostgreSQLReplicaConsumer( - Context & context_, + std::shared_ptr context_, const std::string & table_name_, const std::string & conn_str, const std::string & replication_slot_name_, @@ -33,11 +38,56 @@ PostgreSQLReplicaConsumer::PostgreSQLReplicaConsumer( { replication_connection = std::make_shared(fmt::format("{} replication=database", conn_str)); - wal_reader_task = context.getSchedulePool().createTask("PostgreSQLReplicaWALReader", [this]{ WALReaderFunc(); }); + wal_reader_task = context->getSchedulePool().createTask("PostgreSQLReplicaWALReader", [this]{ WALReaderFunc(); }); wal_reader_task->deactivate(); } +void PostgreSQLReplicaConsumer::startSynchronization() +{ + //wal_reader_task->activateAndSchedule(); +} + + +void PostgreSQLReplicaConsumer::stopSynchronization() +{ + 
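    /// Set the flag first so that a WALReaderFunc() iteration already in flight exits its
    /// loop; deactivating the task afterwards prevents it from being scheduled again.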
stop_synchronization.store(true); + if (wal_reader_task) + wal_reader_task->deactivate(); +} + + +void PostgreSQLReplicaConsumer::WALReaderFunc() +{ + size_t count_empty_slot_reads = 0; + auto start_time = std::chrono::steady_clock::now(); + + LOG_TRACE(log, "Starting synchronization thread"); + + while (!stop_synchronization) + { + if (!readFromReplicationSlot() && ++count_empty_slot_reads == max_empty_slot_reads) + { + LOG_TRACE(log, "Reschedule synchronization. Replication slot is empty."); + break; + } + else + count_empty_slot_reads = 0; + + auto end_time = std::chrono::steady_clock::now(); + auto duration = std::chrono::duration_cast(end_time - start_time); + if (duration.count() > max_thread_work_duration_ms) + { + LOG_TRACE(log, "Reschedule synchronization. Thread work duration limit exceeded."); + break; + } + } + + if (!stop_synchronization) + wal_reader_task->scheduleAfter(wal_reader_reschedule_ms); +} + + void PostgreSQLReplicaConsumer::readString(const char * message, size_t & pos, size_t size, String & result) { assert(size > pos + 2); @@ -195,39 +245,48 @@ void PostgreSQLReplicaConsumer::decodeReplicationMessage(const char * replicatio } -void PostgreSQLReplicaConsumer::run() +/// Read binary changes from replication slot via copy command. +bool PostgreSQLReplicaConsumer::readFromReplicationSlot() { - auto tx = std::make_unique(*replication_connection->conn()); - /// up_to_lsn is set to NULL, up_to_n_changes is set to max_block_size. - std::string query_str = fmt::format( - "select data FROM pg_logical_slot_peek_binary_changes(" - "'{}', NULL, NULL, 'publication_names', '{}', 'proto_version', '1')", - replication_slot_name, publication_name); - pqxx::stream_from stream(*tx, pqxx::from_query, std::string_view(query_str)); - - while (true) + bool slot_empty = true; + try { - const std::vector * row{stream.read_row()}; + auto tx = std::make_unique(*replication_connection->conn()); + /// up_to_lsn is set to NULL, up_to_n_changes is set to max_block_size. + std::string query_str = fmt::format( + "select data FROM pg_logical_slot_peek_binary_changes(" + "'{}', NULL, NULL, 'publication_names', '{}', 'proto_version', '1')", + replication_slot_name, publication_name); + pqxx::stream_from stream(*tx, pqxx::from_query, std::string_view(query_str)); - if (!row) + while (true) { - LOG_TRACE(log, "STREAM REPLICATION END"); - stream.complete(); - tx->commit(); - break; - } + const std::vector * row{stream.read_row()}; - for (const auto idx : ext::range(0, row->size())) - { - LOG_TRACE(log, "Replication message: {}", (*row)[idx]); - decodeReplicationMessage((*row)[idx].c_str(), (*row)[idx].size()); + if (!row) + { + LOG_TRACE(log, "STREAM REPLICATION END"); + stream.complete(); + tx->commit(); + break; + } + + slot_empty = false; + + for (const auto idx : ext::range(0, row->size())) + { + LOG_TRACE(log, "Replication message: {}", (*row)[idx]); + decodeReplicationMessage((*row)[idx].c_str(), (*row)[idx].size()); + } } } -} + catch (...) 
+ { + tryLogCurrentException(__PRETTY_FUNCTION__); + return false; + } - -void PostgreSQLReplicaConsumer::WALReaderFunc() -{ + return !slot_empty; } } diff --git a/src/Storages/PostgreSQL/PostgreSQLReplicaConsumer.h b/src/Storages/PostgreSQL/PostgreSQLReplicaConsumer.h index d64c11d72c4..bb4b4c5033b 100644 --- a/src/Storages/PostgreSQL/PostgreSQLReplicaConsumer.h +++ b/src/Storages/PostgreSQL/PostgreSQLReplicaConsumer.h @@ -1,43 +1,63 @@ #pragma once #include "PostgreSQLConnection.h" -#include "PostgreSQLReplicationHandler.h" #include +#include #include "pqxx/pqxx" namespace DB { +struct LSNPosition +{ + std::string lsn; + + uint64_t getValue() + { + uint64_t upper_half, lower_half, result; + std::sscanf(lsn.data(), "%lX/%lX", &upper_half, &lower_half); + result = (upper_half << 32) + lower_half; + //LOG_DEBUG(&Poco::Logger::get("LSNParsing"), + // "Created replication slot. upper half: {}, lower_half: {}, start lsn: {}", + // upper_half, lower_half, result); + return result; + } +}; + + class PostgreSQLReplicaConsumer { public: PostgreSQLReplicaConsumer( - Context & context_, + std::shared_ptr context_, const std::string & table_name_, const std::string & conn_str_, const std::string & replication_slot_name_, const std::string & publication_name_, const LSNPosition & start_lsn); - void run(); - void createSubscription(); + /// Start reading WAL from current_lsn position. Initial data sync from created snapshot already done. + void startSynchronization(); + void stopSynchronization(); private: + /// Executed by wal_reader_task. A separate thread reads wal and advances lsn when rows were written via copyData. + void WALReaderFunc(); + + /// Start changes stream from WAL via copy command (up to max_block_size changes). + bool readFromReplicationSlot(); + void decodeReplicationMessage(const char * replication_message, size_t size); + + /// Methods to parse replication message data. 
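    /// (Illustrative note; the hex-text encoding is an assumption inferred from the pos/size
    /// bookkeeping: the payload arrives as hex-encoded text, two characters per byte, laid out
    /// per PostgreSQL's documented pgoutput message format. E.g. a Begin message is
    ///     'B', Int64 final LSN, Int64 commit timestamp, Int32 xid
    /// and an Insert message is
    ///     'I', Int32 relation id, 'N', tuple data handled by readTupleData().)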
+ void readTupleData(const char * message, size_t & pos, size_t size); void readString(const char * message, size_t & pos, size_t size, String & result); Int64 readInt64(const char * message, size_t & pos); Int32 readInt32(const char * message, size_t & pos); Int16 readInt16(const char * message, size_t & pos); Int8 readInt8(const char * message, size_t & pos); - void readTupleData(const char * message, size_t & pos, size_t size); - - void startReplication( - const std::string & slot_name, const std::string start_lsn, const int64_t timeline, const std::string & plugin_args); - void decodeReplicationMessage(const char * replication_message, size_t size); - - void WALReaderFunc(); Poco::Logger * log; - Context & context; + std::shared_ptr context; const std::string replication_slot_name; const std::string publication_name; @@ -46,6 +66,7 @@ private: LSNPosition current_lsn; BackgroundSchedulePool::TaskHolder wal_reader_task; + std::atomic stop_synchronization = false; }; } diff --git a/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp b/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp index 18b6aca5de1..f6571ce5a2e 100644 --- a/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp +++ b/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp @@ -1,8 +1,14 @@ #include "PostgreSQLReplicationHandler.h" #include "PostgreSQLReplicaConsumer.h" +#include #include #include +#include +#include +#include +#include +#include namespace DB { @@ -15,14 +21,12 @@ namespace ErrorCodes } PostgreSQLReplicationHandler::PostgreSQLReplicationHandler( - Context & context_, const std::string & database_name_, const std::string & table_name_, const std::string & conn_str, const std::string & replication_slot_, const std::string & publication_name_) : log(&Poco::Logger::get("PostgreSQLReplicaHandler")) - , context(context_) , database_name(database_name_) , table_name(table_name_) , replication_slot(replication_slot_) @@ -46,8 +50,10 @@ PostgreSQLReplicationHandler::PostgreSQLReplicationHandler( } -void PostgreSQLReplicationHandler::startup() +void PostgreSQLReplicationHandler::startup(StoragePtr storage, std::shared_ptr context_) { + helper_table = storage; + context = context_; tx = std::make_shared(*connection->conn()); if (publication_name.empty()) { @@ -70,6 +76,13 @@ void PostgreSQLReplicationHandler::startup() } +void PostgreSQLReplicationHandler::shutdown() +{ + if (consumer) + consumer->stopSynchronization(); +} + + bool PostgreSQLReplicationHandler::isPublicationExist() { std::string query_str = fmt::format("SELECT exists (SELECT 1 FROM pg_publication WHERE pubname = '{}')", publication_name); @@ -115,19 +128,20 @@ void PostgreSQLReplicationHandler::startReplication() std::string snapshot_name; LSNPosition start_lsn; - createTempReplicationSlot(ntx, start_lsn, snapshot_name); - - loadFromSnapshot(snapshot_name); - - /// Do not need this replication slot anymore (snapshot loaded and start lsn determined, will continue replication protocol - /// with another slot, which should be the same at restart (and reused) to minimize memory usage) - dropReplicationSlot(ntx, temp_replication_slot, true); - - /// Non temporary replication slot should be deleted with drop table only. + /// Non temporary replication slot should be deleted with drop table only and created only once, reused after detach. 
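    /// (Illustrative, not from the patch: the permanent slot's progress survives restarts and
    /// can be checked server-side with
    ///     SELECT slot_name, confirmed_flush_lsn FROM pg_replication_slots;
    /// and this persisted position is what allows reusing the slot after detach.)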
 if (!isReplicationSlotExist(ntx, replication_slot))
+    {
+        /// Temporary replication slot
+        createTempReplicationSlot(ntx, start_lsn, snapshot_name);
+        /// Initial table synchronization from created snapshot
+        loadFromSnapshot(snapshot_name);
+        /// Do not need this replication slot anymore (snapshot loaded and start lsn determined)
+        dropReplicationSlot(ntx, temp_replication_slot, true);
+        /// Non-temporary replication slot
         createReplicationSlot(ntx);
+    }

-    PostgreSQLReplicaConsumer consumer(
+    consumer = std::make_shared(
             context,
             table_name,
             connection->conn_str(),
@@ -138,7 +152,50 @@ void PostgreSQLReplicationHandler::startReplication()
     LOG_DEBUG(log, "Committing replication transaction");
     ntx->commit();

-    consumer.run();
+    consumer->startSynchronization();
+}
+
+
+void PostgreSQLReplicationHandler::loadFromSnapshot(std::string & snapshot_name)
+{
+    LOG_DEBUG(log, "Creating transaction snapshot");
+
+    try
+    {
+        auto stx = std::make_unique(*connection->conn());
+
+        /// Specific isolation level is required to read from snapshot.
+        stx->set_variable("transaction_isolation", "'repeatable read'");
+
+        std::string query_str = fmt::format("SET TRANSACTION SNAPSHOT '{}'", snapshot_name);
+        stx->exec(query_str);
+
+        /// Load from snapshot, which will show table state before creation of replication slot.
+        query_str = fmt::format("SELECT * FROM {}", table_name);
+
+        Context insert_context(*context);
+        insert_context.makeQueryContext();
+
+        auto insert = std::make_shared();
+        insert->table_id = helper_table->getStorageID();
+
+        InterpreterInsertQuery interpreter(insert, insert_context);
+        auto block_io = interpreter.execute();
+
+        const StorageInMemoryMetadata & storage_metadata = helper_table->getInMemoryMetadata();
+        auto sample_block = storage_metadata.getSampleBlockNonMaterialized();
+
+        PostgreSQLBlockInputStream input(std::move(stx), query_str, sample_block, DEFAULT_BLOCK_SIZE);
+
+        copyData(input, *block_io.out);
+    }
+    catch (Exception & e)
+    {
+        e.addMessage("while initial data sync for table {}.{}", database_name, table_name);
+        throw;
+    }
+
+    LOG_DEBUG(log, "Done loading from snapshot");
+}

@@ -223,32 +280,4 @@ void PostgreSQLReplicationHandler::checkAndDropReplicationSlot()
 }


-void PostgreSQLReplicationHandler::loadFromSnapshot(std::string & snapshot_name)
-{
-    auto stx = std::make_unique(*connection->conn());
-    /// Required to execute the following command.
- stx->set_variable("transaction_isolation", "'repeatable read'"); - - std::string query_str = fmt::format("SET TRANSACTION SNAPSHOT '{}'", snapshot_name); - stx->exec(query_str); - - LOG_DEBUG(log, "Created transaction snapshot"); - query_str = fmt::format("SELECT * FROM {}", table_name); - pqxx::result result{stx->exec(query_str)}; - if (!result.empty()) - { - pqxx::row row{result[0]}; - for (auto res : row) - { - if (std::size(res)) - LOG_TRACE(log, "GOT {}", res.as()); - else - LOG_TRACE(log, "GOT NULL"); - } - } - LOG_DEBUG(log, "Done loading from snapshot"); - stx->commit(); -} - - } diff --git a/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.h b/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.h index 55ea8dbae41..af4465b863a 100644 --- a/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.h +++ b/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.h @@ -2,6 +2,7 @@ #include #include "PostgreSQLConnection.h" +#include "PostgreSQLReplicaConsumer.h" #include #include "pqxx/pqxx" @@ -12,36 +13,19 @@ namespace DB { -struct LSNPosition -{ - std::string lsn; - - uint64_t getValue() - { - uint64_t upper_half, lower_half, result; - std::sscanf(lsn.data(), "%lX/%lX", &upper_half, &lower_half); - result = (upper_half << 32) + lower_half; - LOG_DEBUG(&Poco::Logger::get("LSNParsing"), - "Created replication slot. upper half: {}, lower_half: {}, start lsn: {}", - upper_half, lower_half, result); - return result; - } -}; - - class PostgreSQLReplicationHandler { public: friend class PGReplicaLSN; PostgreSQLReplicationHandler( - Context & context_, const std::string & database_name_, const std::string & table_name_, const std::string & conn_str_, const std::string & replication_slot_name_, const std::string & publication_name_); - void startup(); + void startup(StoragePtr storage_, std::shared_ptr context_); + void shutdown(); void checkAndDropReplicationSlot(); private: @@ -57,9 +41,10 @@ private: void startReplication(); void loadFromSnapshot(std::string & snapshot_name); + Context createQueryContext(); + void getTableOutput(const Context & query_context); Poco::Logger * log; - Context & context; const std::string database_name, table_name; std::string replication_slot, publication_name; @@ -69,6 +54,9 @@ private: PostgreSQLConnectionPtr replication_connection; std::shared_ptr tx; + std::shared_ptr consumer; + std::shared_ptr context; + StoragePtr helper_table; //LSNPosition start_lsn, final_lsn; }; diff --git a/src/Storages/PostgreSQL/StoragePostgreSQLReplica.cpp b/src/Storages/PostgreSQL/StoragePostgreSQLReplica.cpp index 657f8e5aa47..9ae90e24429 100644 --- a/src/Storages/PostgreSQL/StoragePostgreSQLReplica.cpp +++ b/src/Storages/PostgreSQL/StoragePostgreSQLReplica.cpp @@ -1,28 +1,36 @@ #include "StoragePostgreSQLReplica.h" -#include -#include - #include +#include +#include -#include -#include -#include -#include -#include +#include #include #include -#include -#include -#include -#include + +#include +#include + #include #include -#include -#include "PostgreSQLReplicationSettings.h" -#include -#include "PostgreSQLReplicaBlockInputStream.h" +#include +#include + +#include +#include +#include +#include +#include + +#include + +#include "PostgreSQLReplicationSettings.h" +#include "PostgreSQLReplicaBlockInputStream.h" +#include + +#include +#include namespace DB { @@ -32,50 +40,177 @@ namespace ErrorCodes extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH; } +static auto nested_storage_suffix = "_ReplacingMergeTree"; + StoragePostgreSQLReplica::StoragePostgreSQLReplica( const StorageID 
& table_id_, const String & remote_table_name_, - const ColumnsDescription & columns_, - const ConstraintsDescription & constraints_, + const String & relative_data_path_, + const StorageInMemoryMetadata & storage_metadata, const Context & context_, const PostgreSQLReplicationHandler & replication_handler_, std::unique_ptr replication_settings_) : IStorage(table_id_) , remote_table_name(remote_table_name_) - , global_context(context_) + , relative_data_path(relative_data_path_) + , global_context(std::make_shared(context_)) , replication_settings(std::move(replication_settings_)) , replication_handler(std::make_unique(replication_handler_)) { - StorageInMemoryMetadata storage_metadata; - storage_metadata.setColumns(columns_); - storage_metadata.setConstraints(constraints_); setInMemoryMetadata(storage_metadata); + relative_data_path.resize(relative_data_path.size() - 1); + relative_data_path += nested_storage_suffix; +} + + +std::shared_ptr StoragePostgreSQLReplica::getMaterializedColumnsDeclaration( + const String name, const String type, UInt64 default_value) +{ + auto column_declaration = std::make_shared(); + + column_declaration->name = name; + column_declaration->type = makeASTFunction(type); + + column_declaration->default_specifier = "MATERIALIZED"; + column_declaration->default_expression = std::make_shared(default_value); + + column_declaration->children.emplace_back(column_declaration->type); + column_declaration->children.emplace_back(column_declaration->default_expression); + + return column_declaration; +} + + +ASTPtr StoragePostgreSQLReplica::getColumnDeclaration(const DataTypePtr & data_type) +{ + WhichDataType which(data_type); + + if (which.isNullable()) + return makeASTFunction("Nullable", getColumnDeclaration(typeid_cast(data_type.get())->getNestedType())); + + if (which.isArray()) + return makeASTFunction("Array", getColumnDeclaration(typeid_cast(data_type.get())->getNestedType())); + + return std::make_shared(data_type->getName()); +} + + +std::shared_ptr StoragePostgreSQLReplica::getColumnsListFromStorage() +{ + auto columns_declare_list = std::make_shared(); + + auto columns_expression_list = std::make_shared(); + auto metadata_snapshot = getInMemoryMetadataPtr(); + for (const auto & column_type_and_name : metadata_snapshot->getColumns().getOrdinary()) + { + const auto & column_declaration = std::make_shared(); + column_declaration->name = column_type_and_name.name; + column_declaration->type = getColumnDeclaration(column_type_and_name.type); + columns_expression_list->children.emplace_back(column_declaration); + } + columns_declare_list->set(columns_declare_list->columns, columns_expression_list); + + columns_declare_list->columns->children.emplace_back(getMaterializedColumnsDeclaration("_sign", "Int8", UInt64(1))); + columns_declare_list->columns->children.emplace_back(getMaterializedColumnsDeclaration("_version", "UInt64", UInt64(1))); + + return columns_declare_list; +} + + +ASTPtr StoragePostgreSQLReplica::getCreateHelperTableQuery() +{ + auto create_table_query = std::make_shared(); + + auto table_id = getStorageID(); + create_table_query->table = table_id.table_name + nested_storage_suffix; + create_table_query->database = table_id.database_name; + create_table_query->if_not_exists = true; + + create_table_query->set(create_table_query->columns_list, getColumnsListFromStorage()); + + auto storage = std::make_shared(); + storage->set(storage->engine, makeASTFunction("ReplacingMergeTree", std::make_shared("_version"))); + + auto primary_key_ast = 
getInMemoryMetadataPtr()->getPrimaryKeyAST(); + if (primary_key_ast) + storage->set(storage->order_by, primary_key_ast); + /// else + + //storage->set(storage->partition_by, ?); + + create_table_query->set(create_table_query->storage, storage); + + return create_table_query; } void StoragePostgreSQLReplica::startup() { - replication_handler->startup(); + Context context_copy(*global_context); + const auto ast_create = getCreateHelperTableQuery(); + + Poco::File path(relative_data_path); + if (!path.exists()) + { + LOG_TRACE(&Poco::Logger::get("StoragePostgreSQLReplica"), + "Creating helper table {}", getStorageID().table_name + nested_storage_suffix); + InterpreterCreateQuery interpreter(ast_create, context_copy); + interpreter.execute(); + } + + nested_storage = createTableFromAST(ast_create->as(), getStorageID().database_name, relative_data_path, context_copy, false).second; + nested_storage->startup(); + + replication_handler->startup(nested_storage, global_context); +} + + +void StoragePostgreSQLReplica::drop() +{ + /// TODO: Under lock? Make sure synchronization stopped. + replication_handler->checkAndDropReplicationSlot(); + + nested_storage->drop(); + + relative_data_path.resize(relative_data_path.size() - 1); + Poco::File path(relative_data_path); + if (path.exists()) + path.remove(true); } void StoragePostgreSQLReplica::shutdown() { - //replication_handler->dropReplicationSlot(); + replication_handler->shutdown(); } Pipe StoragePostgreSQLReplica::read( const Names & column_names, - const StorageMetadataPtr & metadata_snapshot, - SelectQueryInfo & /* query_info */, - const Context & /* context */, - QueryProcessingStage::Enum /* processed_stage */, - size_t /* max_block_size */, - unsigned /* num_streams */) + const StorageMetadataPtr & /* metadata_snapshot */, + SelectQueryInfo & query_info, + const Context & context, + QueryProcessingStage::Enum processed_stage, + size_t max_block_size, + unsigned num_streams) { - auto sample_block = metadata_snapshot->getSampleBlockForColumns(column_names, getVirtuals(), getStorageID()); - return Pipe(); + StoragePtr storage = DatabaseCatalog::instance().getTable(nested_storage->getStorageID(), *global_context); + auto lock = nested_storage->lockForShare(context.getCurrentQueryId(), context.getSettingsRef().lock_acquire_timeout); + + const StorageMetadataPtr & nested_metadata = storage->getInMemoryMetadataPtr(); + Pipe pipe = storage->read( + column_names, + nested_metadata, query_info, context, + processed_stage, max_block_size, num_streams); + + pipe.addTableLock(lock); + return pipe; +} + + +NamesAndTypesList StoragePostgreSQLReplica::getVirtuals() const +{ + return NamesAndTypesList{}; } @@ -98,6 +233,21 @@ void registerStoragePostgreSQLReplica(StorageFactory & factory) for (auto & engine_arg : engine_args) engine_arg = evaluateConstantExpressionOrIdentifierAsLiteral(engine_arg, args.local_context); + StorageInMemoryMetadata metadata; + metadata.setColumns(args.columns); + metadata.setConstraints(args.constraints); + + if (!args.storage_def->order_by && args.storage_def->primary_key) + args.storage_def->set(args.storage_def->order_by, args.storage_def->primary_key->clone()); + + if (!args.storage_def->order_by) + throw Exception("Storage PostgreSQLReplica needs order by key or primary key", ErrorCodes::BAD_ARGUMENTS); + + if (args.storage_def->primary_key) + metadata.primary_key = KeyDescription::getKeyFromAST(args.storage_def->primary_key->ptr(), metadata.columns, args.context); + else + metadata.primary_key = 
KeyDescription::getKeyFromAST(args.storage_def->order_by->ptr(), metadata.columns, args.context); + auto parsed_host_port = parseAddress(engine_args[0]->as().value.safeGet(), 5432); const String & remote_table = engine_args[2]->as().value.safeGet(); const String & remote_database = engine_args[1]->as().value.safeGet(); @@ -113,25 +263,19 @@ void registerStoragePostgreSQLReplica(StorageFactory & factory) auto replication_slot_name = global_context.getMacros()->expand(postgresql_replication_settings->postgresql_replication_slot_name.value); auto publication_name = global_context.getMacros()->expand(postgresql_replication_settings->postgresql_publication_name.value); - PostgreSQLReplicationHandler replication_handler(global_context, remote_database, remote_table, connection_str, replication_slot_name, publication_name); + PostgreSQLReplicationHandler replication_handler(remote_database, remote_table, connection_str, replication_slot_name, publication_name); return StoragePostgreSQLReplica::create( - args.table_id, remote_table, args.columns, args.constraints, global_context, + args.table_id, remote_table, args.relative_data_path, metadata, global_context, replication_handler, std::move(postgresql_replication_settings)); }; factory.registerStorage( "PostgreSQLReplica", creator_fn, - StorageFactory::StorageFeatures{ .supports_settings = true, .source_access_type = AccessType::POSTGRES, + StorageFactory::StorageFeatures{ .supports_settings = true, .supports_sort_order = true, .source_access_type = AccessType::POSTGRES, }); } -NamesAndTypesList StoragePostgreSQLReplica::getVirtuals() const -{ - return NamesAndTypesList{ - }; -} - } diff --git a/src/Storages/PostgreSQL/StoragePostgreSQLReplica.h b/src/Storages/PostgreSQL/StoragePostgreSQLReplica.h index 860ab8f6d6c..18b0504d32c 100644 --- a/src/Storages/PostgreSQL/StoragePostgreSQLReplica.h +++ b/src/Storages/PostgreSQL/StoragePostgreSQLReplica.h @@ -2,6 +2,18 @@ #include "config_core.h" +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + #include #include #include @@ -21,6 +33,7 @@ public: String getName() const override { return "PostgreSQLReplica"; } void startup() override; + void drop() override; void shutdown() override; NamesAndTypesList getVirtuals() const override; @@ -39,18 +52,28 @@ protected: StoragePostgreSQLReplica( const StorageID & table_id_, const String & remote_table_name_, - const ColumnsDescription & columns_, - const ConstraintsDescription & constraints_, + const String & relative_data_path_, + const StorageInMemoryMetadata & storage_metadata, const Context & context_, const PostgreSQLReplicationHandler & replication_handler_, std::unique_ptr replication_settings_); private: + std::shared_ptr getMaterializedColumnsDeclaration( + const String name, const String type, UInt64 default_value); + std::shared_ptr getColumnsListFromStorage(); + ASTPtr getColumnDeclaration(const DataTypePtr & data_type); + ASTPtr getCreateHelperTableQuery(); + String remote_table_name; - Context global_context; + String relative_data_path; + std::shared_ptr global_context; std::unique_ptr replication_settings; std::unique_ptr replication_handler; + + /// ReplacingMergeTree table + StoragePtr nested_storage; }; } diff --git a/src/Storages/StoragePostgreSQL.cpp b/src/Storages/StoragePostgreSQL.cpp index 78ec8c34e41..08affa518e7 100644 --- a/src/Storages/StoragePostgreSQL.cpp +++ b/src/Storages/StoragePostgreSQL.cpp @@ -83,8 +83,9 @@ Pipe StoragePostgreSQL::read( sample_block.insert({ column_data.type, 
column_data.name }); } + auto tx = std::make_unique(*connection->conn()); return Pipe(std::make_shared( - std::make_shared(connection->conn(), query, sample_block, max_block_size_))); + std::make_shared(std::move(tx), query, sample_block, max_block_size_))); } diff --git a/tests/integration/test_storage_postgresql_replica/__init__.py b/tests/integration/test_storage_postgresql_replica/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/integration/test_storage_postgresql_replica/configs/log_conf.xml b/tests/integration/test_storage_postgresql_replica/configs/log_conf.xml new file mode 100644 index 00000000000..f9d15e572aa --- /dev/null +++ b/tests/integration/test_storage_postgresql_replica/configs/log_conf.xml @@ -0,0 +1,11 @@ + + + trace + /var/log/clickhouse-server/log.log + /var/log/clickhouse-server/log.err.log + 1000M + 10 + /var/log/clickhouse-server/stderr.log + /var/log/clickhouse-server/stdout.log + + diff --git a/tests/integration/test_storage_postgresql_replica/test.py b/tests/integration/test_storage_postgresql_replica/test.py new file mode 100644 index 00000000000..3d98486fb4b --- /dev/null +++ b/tests/integration/test_storage_postgresql_replica/test.py @@ -0,0 +1,87 @@ +import pytest +import time +import psycopg2 +import os.path as p + +from helpers.cluster import ClickHouseCluster +from helpers.test_tools import assert_eq_with_retry +from psycopg2.extensions import ISOLATION_LEVEL_AUTOCOMMIT +from helpers.test_tools import TSV + +cluster = ClickHouseCluster(__file__) +instance = cluster.add_instance('instance', main_configs=['configs/log_conf.xml'], with_postgres=True) + +postgres_table_template = """ + CREATE TABLE IF NOT EXISTS {} ( + key Integer NOT NULL, value Integer, PRIMARY KEY (key)) + """ + +def get_postgres_conn(database=False): + if database == True: + conn_string = "host='localhost' dbname='postgres_database' user='postgres' password='mysecretpassword'" + else: + conn_string = "host='localhost' user='postgres' password='mysecretpassword'" + conn = psycopg2.connect(conn_string) + conn.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT) + conn.autocommit = True + return conn + +def create_postgres_db(cursor, name): + cursor.execute("CREATE DATABASE {}".format(name)) + +def create_postgres_table(cursor, table_name): + cursor.execute(postgres_table_template.format(table_name)) + +def postgresql_replica_check_result(result, check=False, ref_file='test_postgresql_replica.reference'): + fpath = p.join(p.dirname(__file__), ref_file) + with open(fpath) as reference: + if check: + assert TSV(result) == TSV(reference) + else: + return TSV(result) == TSV(reference) + + +@pytest.fixture(scope="module") +def started_cluster(): + try: + cluster.start() + conn = get_postgres_conn() + cursor = conn.cursor() + create_postgres_db(cursor, 'postgres_database') + instance.query(''' + CREATE DATABASE postgres_database + ENGINE = PostgreSQL('postgres1:5432', 'postgres_database', 'postgres', 'mysecretpassword')''') + + instance.query('CREATE DATABASE test') + yield cluster + + finally: + cluster.shutdown() + +@pytest.fixture(autouse=True) +def rabbitmq_setup_teardown(): + yield # run test + instance.query('DROP TABLE IF EXISTS test.postgresql_replica') + + +def test_initial_load_from_snapshot(started_cluster): + conn = get_postgres_conn(True) + cursor = conn.cursor() + create_postgres_table(cursor, 'postgresql_replica'); + instance.query("INSERT INTO postgres_database.postgresql_replica SELECT number, number from numbers(50)") + + instance.query(''' + CREATE 
TABLE test.postgresql_replica (key UInt64, value UInt64) + ENGINE = PostgreSQLReplica( + 'postgres1:5432', 'postgres_database', 'postgresql_replica', 'postgres', 'mysecretpassword') + PRIMARY KEY key; + ''') + + result = instance.query('SELECT * FROM test.postgresql_replica;') + postgresql_replica_check_result(result, True) + + +if __name__ == '__main__': + cluster.start() + input("Cluster created, press any key to destroy...") + cluster.shutdown() diff --git a/tests/integration/test_storage_postgresql_replica/test_postgresql_replica.reference b/tests/integration/test_storage_postgresql_replica/test_postgresql_replica.reference new file mode 100644 index 00000000000..959bb2aad74 --- /dev/null +++ b/tests/integration/test_storage_postgresql_replica/test_postgresql_replica.reference @@ -0,0 +1,50 @@ +0 0 +1 1 +2 2 +3 3 +4 4 +5 5 +6 6 +7 7 +8 8 +9 9 +10 10 +11 11 +12 12 +13 13 +14 14 +15 15 +16 16 +17 17 +18 18 +19 19 +20 20 +21 21 +22 22 +23 23 +24 24 +25 25 +26 26 +27 27 +28 28 +29 29 +30 30 +31 31 +32 32 +33 33 +34 34 +35 35 +36 36 +37 37 +38 38 +39 39 +40 40 +41 41 +42 42 +43 43 +44 44 +45 45 +46 46 +47 47 +48 48 +49 49 From 179a558a048162d9ca40b091da3e4633f4269a65 Mon Sep 17 00:00:00 2001 From: kssenii Date: Sat, 30 Jan 2021 12:05:14 +0000 Subject: [PATCH 018/931] Setup connection in the background, better drop table --- src/Interpreters/InterpreterDropQuery.cpp | 8 + .../PostgreSQLReplicationHandler.cpp | 70 ++++++--- .../PostgreSQL/PostgreSQLReplicationHandler.h | 14 +- .../PostgreSQL/StoragePostgreSQLReplica.cpp | 140 ++++++++++-------- .../PostgreSQL/StoragePostgreSQLReplica.h | 9 +- .../test_storage_postgresql_replica/test.py | 27 ++++ 6 files changed, 177 insertions(+), 91 deletions(-) diff --git a/src/Interpreters/InterpreterDropQuery.cpp b/src/Interpreters/InterpreterDropQuery.cpp index be4908582a5..68c9c2b41cf 100644 --- a/src/Interpreters/InterpreterDropQuery.cpp +++ b/src/Interpreters/InterpreterDropQuery.cpp @@ -21,6 +21,9 @@ # include #endif +#if USE_LIBPQXX +# include +#endif namespace DB { @@ -162,6 +165,11 @@ BlockIO InterpreterDropQuery::executeToTableImpl(const ASTDropQuery & query, Dat table->shutdown(); +#if USE_LIBPQXX + if (table->getName() == "PostgreSQLReplica") + table->as()->shutdownFinal(); +#endif + TableExclusiveLockHolder table_lock; if (database->getUUID() == UUIDHelpers::Nil) table_lock = table->lockExclusively(context.getCurrentQueryId(), context.getSettingsRef().lock_acquire_timeout); diff --git a/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp b/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp index f6571ce5a2e..5a68ef65925 100644 --- a/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp +++ b/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp @@ -20,17 +20,21 @@ namespace ErrorCodes extern const int LOGICAL_ERROR; } +static const auto reschedule_ms = 500; + PostgreSQLReplicationHandler::PostgreSQLReplicationHandler( const std::string & database_name_, const std::string & table_name_, const std::string & conn_str, - const std::string & replication_slot_, - const std::string & publication_name_) + std::shared_ptr context_, + const std::string & publication_name_, + const std::string & replication_slot_name_) : log(&Poco::Logger::get("PostgreSQLReplicaHandler")) + , context(context_) , database_name(database_name_) , table_name(table_name_) - , replication_slot(replication_slot_) , publication_name(publication_name_) + , replication_slot(replication_slot_name_) , connection(std::make_shared(conn_str)) { /// Create a replication 
connection, through which it is possible to execute only commands from streaming replication protocol @@ -38,39 +42,43 @@ PostgreSQLReplicationHandler::PostgreSQLReplicationHandler( /// which will allow the connection to be used for logical replication from that database. replication_connection = std::make_shared(fmt::format("{} replication=database", conn_str)); - /// Used commands require a specific transaction isolation mode. - replication_connection->conn()->set_variable("default_transaction_isolation", "'repeatable read'"); - /// Non temporary replication slot. Should be the same at restart. if (replication_slot.empty()) replication_slot = fmt::format("{}_{}_ch_replication_slot", database_name, table_name); /// Temporary replication slot is used to acquire a snapshot for initial table synchronization and to determine starting lsn position. temp_replication_slot = replication_slot + "_temp"; + + startup_task = context->getSchedulePool().createTask("PostgreSQLReplicaStartup", [this]{ waitConnectionAndStart(); }); + startup_task->deactivate(); } -void PostgreSQLReplicationHandler::startup(StoragePtr storage, std::shared_ptr context_) +void PostgreSQLReplicationHandler::startup(StoragePtr storage) { helper_table = storage; - context = context_; - tx = std::make_shared(*connection->conn()); - if (publication_name.empty()) - { - publication_name = fmt::format("{}_{}_ch_publication", database_name, table_name); + startup_task->activateAndSchedule(); +} - /// Publication defines what tables are included into replication stream. Should be deleted only if MaterializePostgreSQL - /// table is dropped. - if (!isPublicationExist()) - createPublication(); - } - else if (!isPublicationExist()) + +void PostgreSQLReplicationHandler::waitConnectionAndStart() +{ + try { - throw Exception( - ErrorCodes::LOGICAL_ERROR, - "Publication name '{}' is spesified in table arguments, but it does not exist", publication_name); + /// Used commands require a specific transaction isolation mode. + replication_connection->conn()->set_variable("default_transaction_isolation", "'repeatable read'"); + } + catch (pqxx::broken_connection const & pqxx_error) + { + LOG_ERROR(log, "Unable to set up connection for table {}.{}. Reconnection attempt continues. Error message: {}", + database_name, table_name, pqxx_error.what()); + startup_task->scheduleAfter(reschedule_ms); + } + catch (Exception & e) + { + e.addMessage("while setting up connection for {}.{}", database_name, table_name); + throw; } - tx->commit(); startReplication(); } @@ -119,6 +127,24 @@ void PostgreSQLReplicationHandler::createPublication() void PostgreSQLReplicationHandler::startReplication() { + tx = std::make_shared(*connection->conn()); + if (publication_name.empty()) + { + publication_name = fmt::format("{}_{}_ch_publication", database_name, table_name); + + /// Publication defines what tables are included into replication stream. Should be deleted only if MaterializePostgreSQL + /// table is dropped. + if (!isPublicationExist()) + createPublication(); + } + else if (!isPublicationExist()) + { + throw Exception( + ErrorCodes::LOGICAL_ERROR, + "Publication name '{}' is specified in table arguments, but it does not exist", publication_name); + } + tx->commit(); + auto ntx = std::make_shared(*replication_connection->conn()); /// Normally temporary replication slot should not exist.
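The publication bootstrap above reduces to two statements against the source server: probe the pg_publication catalog for the name, and issue CREATE PUBLICATION ... FOR TABLE when it is absent. A minimal standalone sketch of that round trip with libpqxx follows; the DSN, publication and table names are placeholders (borrowed from the integration tests), not values taken from the patch:

    #include <pqxx/pqxx>
    #include <iostream>

    int main()
    {
        pqxx::connection conn{"host=localhost dbname=postgres_database user=postgres password=mysecretpassword"};
        pqxx::work tx{conn};

        std::string publication = "postgresql_replica_ch_publication";
        pqxx::result result{tx.exec(
            "SELECT exists (SELECT 1 FROM pg_publication WHERE pubname = '" + publication + "')")};

        /// libpqxx hands booleans back as 't'/'f' text, the same convention isPublicationExist() relies on.
        if (result[0][0].as<std::string>() != "t")
            tx.exec("CREATE PUBLICATION " + publication + " FOR TABLE postgresql_replica");

        tx.commit();
        std::cout << "publication ready" << std::endl;
    }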
diff --git a/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.h b/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.h index af4465b863a..2e85bae5cb9 100644 --- a/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.h +++ b/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.h @@ -3,6 +3,7 @@ #include #include "PostgreSQLConnection.h" #include "PostgreSQLReplicaConsumer.h" +#include #include #include "pqxx/pqxx" @@ -21,16 +22,18 @@ public: const std::string & database_name_, const std::string & table_name_, const std::string & conn_str_, - const std::string & replication_slot_name_, - const std::string & publication_name_); + std::shared_ptr context_, + const std::string & publication_slot_name_, + const std::string & replication_slot_name_); - void startup(StoragePtr storage_, std::shared_ptr context_); + void startup(StoragePtr storage_); void shutdown(); void checkAndDropReplicationSlot(); private: using NontransactionPtr = std::shared_ptr; + void waitConnectionAndStart(); bool isPublicationExist(); void createPublication(); @@ -45,17 +48,18 @@ private: void getTableOutput(const Context & query_context); Poco::Logger * log; + std::shared_ptr context; const std::string database_name, table_name; - std::string replication_slot, publication_name; + std::string publication_name, replication_slot; std::string temp_replication_slot; PostgreSQLConnectionPtr connection; PostgreSQLConnectionPtr replication_connection; std::shared_ptr tx; + BackgroundSchedulePool::TaskHolder startup_task; std::shared_ptr consumer; - std::shared_ptr context; StoragePtr helper_table; //LSNPosition start_lsn, final_lsn; }; diff --git a/src/Storages/PostgreSQL/StoragePostgreSQLReplica.cpp b/src/Storages/PostgreSQL/StoragePostgreSQLReplica.cpp index 9ae90e24429..bca77f314cd 100644 --- a/src/Storages/PostgreSQL/StoragePostgreSQLReplica.cpp +++ b/src/Storages/PostgreSQL/StoragePostgreSQLReplica.cpp @@ -22,6 +22,7 @@ #include #include #include +#include #include @@ -44,22 +45,30 @@ static auto nested_storage_suffix = "_ReplacingMergeTree"; StoragePostgreSQLReplica::StoragePostgreSQLReplica( const StorageID & table_id_, - const String & remote_table_name_, + const String & remote_database_name, + const String & remote_table_name, + const String & connection_str, const String & relative_data_path_, const StorageInMemoryMetadata & storage_metadata, const Context & context_, - const PostgreSQLReplicationHandler & replication_handler_, std::unique_ptr replication_settings_) : IStorage(table_id_) - , remote_table_name(remote_table_name_) , relative_data_path(relative_data_path_) - , global_context(std::make_shared(context_)) + , global_context(std::make_shared(context_.getGlobalContext())) , replication_settings(std::move(replication_settings_)) - , replication_handler(std::make_unique(replication_handler_)) { setInMemoryMetadata(storage_metadata); relative_data_path.resize(relative_data_path.size() - 1); relative_data_path += nested_storage_suffix; + + replication_handler = std::make_unique( + remote_database_name, + remote_table_name, + connection_str, + global_context, + global_context->getMacros()->expand(replication_settings->postgresql_replication_slot_name.value), + global_context->getMacros()->expand(replication_settings->postgresql_publication_name.value) + ); } @@ -144,47 +153,6 @@ ASTPtr StoragePostgreSQLReplica::getCreateHelperTableQuery() } -void StoragePostgreSQLReplica::startup() -{ - Context context_copy(*global_context); - const auto ast_create = getCreateHelperTableQuery(); - - Poco::File 
path(relative_data_path); - if (!path.exists()) - { - LOG_TRACE(&Poco::Logger::get("StoragePostgreSQLReplica"), - "Creating helper table {}", getStorageID().table_name + nested_storage_suffix); - InterpreterCreateQuery interpreter(ast_create, context_copy); - interpreter.execute(); - } - - nested_storage = createTableFromAST(ast_create->as(), getStorageID().database_name, relative_data_path, context_copy, false).second; - nested_storage->startup(); - - replication_handler->startup(nested_storage, global_context); -} - - -void StoragePostgreSQLReplica::drop() -{ - /// TODO: Under lock? Make sure synchronization stopped. - replication_handler->checkAndDropReplicationSlot(); - - nested_storage->drop(); - - relative_data_path.resize(relative_data_path.size() - 1); - Poco::File path(relative_data_path); - if (path.exists()) - path.remove(true); -} - - -void StoragePostgreSQLReplica::shutdown() -{ - replication_handler->shutdown(); -} - - Pipe StoragePostgreSQLReplica::read( const Names & column_names, const StorageMetadataPtr & /* metadata_snapshot */, @@ -208,6 +176,62 @@ Pipe StoragePostgreSQLReplica::read( } +void StoragePostgreSQLReplica::startup() +{ + Context context_copy(*global_context); + const auto ast_create = getCreateHelperTableQuery(); + + Poco::File path(relative_data_path); + if (!path.exists()) + { + LOG_TRACE(&Poco::Logger::get("StoragePostgreSQLReplica"), + "Creating helper table {}", getStorageID().table_name + nested_storage_suffix); + InterpreterCreateQuery interpreter(ast_create, context_copy); + interpreter.execute(); + } + else + LOG_TRACE(&Poco::Logger::get("StoragePostgreSQLReplica"), + "Directory already exists {}", relative_data_path); + + nested_storage = createTableFromAST(ast_create->as(), getStorageID().database_name, relative_data_path, context_copy, false).second; + nested_storage->startup(); + + replication_handler->startup(nested_storage); +} + + +void StoragePostgreSQLReplica::shutdown() +{ + replication_handler->shutdown(); +} + + +void StoragePostgreSQLReplica::shutdownFinal() +{ + /// TODO: Under lock? Make sure synchronization stopped. 
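+ /// Drop order below: checkAndDropReplicationSlot() first removes the slot on the PostgreSQL
+ /// side (an abandoned slot would make the server retain WAL indefinitely), and only then
+ /// does dropNested() drop the helper ReplacingMergeTree table.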
+ replication_handler->checkAndDropReplicationSlot(); + dropNested(); +} + + +void StoragePostgreSQLReplica::dropNested() +{ + auto table_id = nested_storage->getStorageID(); + auto ast_drop = std::make_shared(); + + ast_drop->kind = ASTDropQuery::Drop; + ast_drop->table = table_id.table_name; + ast_drop->database = table_id.database_name; + ast_drop->if_exists = true; + + auto drop_context(*global_context); + drop_context.makeQueryContext(); + + auto interpreter = InterpreterDropQuery(ast_drop, drop_context); + interpreter.execute(); +} + + NamesAndTypesList StoragePostgreSQLReplica::getVirtuals() const { return NamesAndTypesList{}; @@ -252,22 +276,18 @@ void registerStoragePostgreSQLReplica(StorageFactory & factory) const String & remote_table = engine_args[2]->as().value.safeGet(); const String & remote_database = engine_args[1]->as().value.safeGet(); - String connection_str; - connection_str = fmt::format("dbname={} host={} port={} user={} password={}", - remote_database, - parsed_host_port.first, std::to_string(parsed_host_port.second), - engine_args[3]->as().value.safeGet(), - engine_args[4]->as().value.safeGet()); - - auto global_context(args.context.getGlobalContext()); - auto replication_slot_name = global_context.getMacros()->expand(postgresql_replication_settings->postgresql_replication_slot_name.value); - auto publication_name = global_context.getMacros()->expand(postgresql_replication_settings->postgresql_publication_name.value); - - PostgreSQLReplicationHandler replication_handler(remote_database, remote_table, connection_str, replication_slot_name, publication_name); + /// No connection is made here, see Storages/PostgreSQL/PostgreSQLConnection.cpp + PostgreSQLConnection connection( + remote_database, + parsed_host_port.first, + parsed_host_port.second, + engine_args[3]->as().value.safeGet(), + engine_args[4]->as().value.safeGet()); return StoragePostgreSQLReplica::create( - args.table_id, remote_table, args.relative_data_path, metadata, global_context, - replication_handler, std::move(postgresql_replication_settings)); + args.table_id, remote_database, remote_table, connection.conn_str(), + args.relative_data_path, metadata, args.context, + std::move(postgresql_replication_settings)); }; factory.registerStorage( diff --git a/src/Storages/PostgreSQL/StoragePostgreSQLReplica.h b/src/Storages/PostgreSQL/StoragePostgreSQLReplica.h index 18b0504d32c..c02c9696d87 100644 --- a/src/Storages/PostgreSQL/StoragePostgreSQLReplica.h +++ b/src/Storages/PostgreSQL/StoragePostgreSQLReplica.h @@ -33,7 +33,6 @@ public: String getName() const override { return "PostgreSQLReplica"; } void startup() override; - void drop() override; void shutdown() override; NamesAndTypesList getVirtuals() const override; @@ -47,15 +46,17 @@ public: size_t max_block_size, unsigned num_streams) override; + void shutdownFinal(); protected: StoragePostgreSQLReplica( const StorageID & table_id_, - const String & remote_table_name_, + const String & remote_database_name, + const String & remote_table_name, + const String & connection_str, const String & relative_data_path_, const StorageInMemoryMetadata & storage_metadata, const Context & context_, - const PostgreSQLReplicationHandler & replication_handler_, std::unique_ptr replication_settings_); private: @@ -64,8 +65,8 @@ private: std::shared_ptr getColumnsListFromStorage(); ASTPtr getColumnDeclaration(const DataTypePtr & data_type); ASTPtr getCreateHelperTableQuery(); + void dropNested(); - String remote_table_name; String relative_data_path; std::shared_ptr 
global_context; diff --git a/tests/integration/test_storage_postgresql_replica/test.py b/tests/integration/test_storage_postgresql_replica/test.py index 3d98486fb4b..ccf0b2eee13 100644 --- a/tests/integration/test_storage_postgresql_replica/test.py +++ b/tests/integration/test_storage_postgresql_replica/test.py @@ -78,9 +78,36 @@ def test_initial_load_from_snapshot(started_cluster): ''') result = instance.query('SELECT * FROM test.postgresql_replica;') + cursor.execute('DROP TABLE postgresql_replica;') postgresql_replica_check_result(result, True) +def test_no_connection_at_startup(started_cluster): + conn = get_postgres_conn(True) + cursor = conn.cursor() + create_postgres_table(cursor, 'postgresql_replica'); + instance.query("INSERT INTO postgres_database.postgresql_replica SELECT number, number from numbers(50)") + + started_cluster.pause_container('postgres1') + instance.query(''' + CREATE TABLE test.postgresql_replica (key UInt64, value UInt64) + ENGINE = PostgreSQLReplica( + 'postgres1:5432', 'postgres_database', 'postgresql_replica', 'postgres', 'mysecretpassword') + PRIMARY KEY key; + ''') + time.sleep(3) + started_cluster.unpause_container('postgres1') + + result = instance.query('SELECT count() FROM test.postgresql_replica;') + while int(result) == 0: + result = instance.query('SELECT count() FROM test.postgresql_replica;') + time.sleep(1); + print(result) + + result = instance.query('SELECT * FROM test.postgresql_replica;') + cursor.execute('DROP TABLE postgresql_replica;') + postgresql_replica_check_result(result, True) + if __name__ == '__main__': cluster.start() input("Cluster created, press any key to destroy...") From 4aadd0c3f218465589c9a36a285bccc88a9b8889 Mon Sep 17 00:00:00 2001 From: kssenii Date: Sun, 31 Jan 2021 19:03:03 +0000 Subject: [PATCH 019/931] Replicate insert queries --- .../PostgreSQLBlockInputStream.cpp | 195 +--------------- src/DataStreams/PostgreSQLBlockInputStream.h | 14 +- .../PostgreSQLReplicaBlockInputStream.cpp | 149 ------------- .../PostgreSQLReplicaBlockInputStream.h | 47 ---- .../PostgreSQL/PostgreSQLReplicaConsumer.cpp | 148 +++++++++++-- .../PostgreSQL/PostgreSQLReplicaConsumer.h | 40 +++- .../PostgreSQLReplicaConsumerBuffer.cpp | 38 ---- .../PostgreSQLReplicaConsumerBuffer.h | 39 ---- .../PostgreSQLReplicationHandler.cpp | 27 ++- .../PostgreSQL/PostgreSQLReplicationHandler.h | 9 +- .../PostgreSQL/StoragePostgreSQLReplica.cpp | 20 +- .../PostgreSQL/StoragePostgreSQLReplica.h | 2 +- src/Storages/PostgreSQL/buffer_fwd.h | 9 - .../PostgreSQL/insertPostgreSQLValue.cpp | 208 ++++++++++++++++++ .../PostgreSQL/insertPostgreSQLValue.h | 29 +++ .../test_storage_postgresql_replica/test.py | 66 +++++- 16 files changed, 513 insertions(+), 527 deletions(-) delete mode 100644 src/Storages/PostgreSQL/PostgreSQLReplicaBlockInputStream.cpp delete mode 100644 src/Storages/PostgreSQL/PostgreSQLReplicaBlockInputStream.h delete mode 100644 src/Storages/PostgreSQL/PostgreSQLReplicaConsumerBuffer.cpp delete mode 100644 src/Storages/PostgreSQL/PostgreSQLReplicaConsumerBuffer.h delete mode 100644 src/Storages/PostgreSQL/buffer_fwd.h create mode 100644 src/Storages/PostgreSQL/insertPostgreSQLValue.cpp create mode 100644 src/Storages/PostgreSQL/insertPostgreSQLValue.h diff --git a/src/DataStreams/PostgreSQLBlockInputStream.cpp b/src/DataStreams/PostgreSQLBlockInputStream.cpp index 4646a8a9d32..a52ca1e58a4 100644 --- a/src/DataStreams/PostgreSQLBlockInputStream.cpp +++ b/src/DataStreams/PostgreSQLBlockInputStream.cpp @@ -22,11 +22,6 @@ namespace DB { -namespace 
ErrorCodes -{ - extern const int BAD_ARGUMENTS; -} - PostgreSQLBlockInputStream::PostgreSQLBlockInputStream( std::unique_ptr tx_, const std::string & query_str_, @@ -38,8 +33,8 @@ PostgreSQLBlockInputStream::PostgreSQLBlockInputStream( { description.init(sample_block); for (const auto idx : ext::range(0, description.sample_block.columns())) - if (description.types[idx].first == ValueType::vtArray) - prepareArrayInfo(idx, description.sample_block.getByPosition(idx).type); + if (description.types[idx].first == ExternalResultDescription::ValueType::vtArray) + preparePostgreSQLArrayInfo(array_info, idx, description.sample_block.getByPosition(idx).type); /// pqxx::stream_from uses COPY command, will get error if ';' is present if (query_str.ends_with(';')) query_str.resize(query_str.size() - 1); @@ -80,12 +75,17 @@ Block PostgreSQLBlockInputStream::readImpl() { ColumnNullable & column_nullable = assert_cast(*columns[idx]); const auto & data_type = assert_cast(*sample.type); - insertValue(column_nullable.getNestedColumn(), (*row)[idx], description.types[idx].first, data_type.getNestedType(), idx); + + insertPostgreSQLValue( + column_nullable.getNestedColumn(), (*row)[idx], + description.types[idx].first, data_type.getNestedType(), array_info, idx); + column_nullable.getNullMapData().emplace_back(0); } else { - insertValue(*columns[idx], (*row)[idx], description.types[idx].first, sample.type, idx); + insertPostgreSQLValue( + *columns[idx], (*row)[idx], description.types[idx].first, sample.type, array_info, idx); } } else @@ -113,183 +113,6 @@ void PostgreSQLBlockInputStream::readSuffix() } -void PostgreSQLBlockInputStream::insertValue(IColumn & column, std::string_view value, - const ExternalResultDescription::ValueType type, const DataTypePtr data_type, size_t idx) -{ - switch (type) - { - case ValueType::vtUInt8: - assert_cast(column).insertValue(pqxx::from_string(value)); - break; - case ValueType::vtUInt16: - assert_cast(column).insertValue(pqxx::from_string(value)); - break; - case ValueType::vtUInt32: - assert_cast(column).insertValue(pqxx::from_string(value)); - break; - case ValueType::vtUInt64: - assert_cast(column).insertValue(pqxx::from_string(value)); - break; - case ValueType::vtInt8: - assert_cast(column).insertValue(pqxx::from_string(value)); - break; - case ValueType::vtInt16: - assert_cast(column).insertValue(pqxx::from_string(value)); - break; - case ValueType::vtInt32: - assert_cast(column).insertValue(pqxx::from_string(value)); - break; - case ValueType::vtInt64: - assert_cast(column).insertValue(pqxx::from_string(value)); - break; - case ValueType::vtFloat32: - assert_cast(column).insertValue(pqxx::from_string(value)); - break; - case ValueType::vtFloat64: - assert_cast(column).insertValue(pqxx::from_string(value)); - break; - case ValueType::vtFixedString:[[fallthrough]]; - case ValueType::vtString: - assert_cast(column).insertData(value.data(), value.size()); - break; - case ValueType::vtUUID: - assert_cast(column).insert(parse(value.data(), value.size())); - break; - case ValueType::vtDate: - assert_cast(column).insertValue(UInt16{LocalDate{std::string(value)}.getDayNum()}); - break; - case ValueType::vtDateTime: - assert_cast(column).insertValue(time_t{LocalDateTime{std::string(value)}}); - break; - case ValueType::vtDateTime64:[[fallthrough]]; - case ValueType::vtDecimal32: [[fallthrough]]; - case ValueType::vtDecimal64: [[fallthrough]]; - case ValueType::vtDecimal128: [[fallthrough]]; - case ValueType::vtDecimal256: - { - ReadBufferFromString istr(value); - 
data_type->deserializeAsWholeText(column, istr, FormatSettings{}); - break; - } - case ValueType::vtArray: - { - pqxx::array_parser parser{value}; - std::pair parsed = parser.get_next(); - - size_t dimension = 0, max_dimension = 0, expected_dimensions = array_info[idx].num_dimensions; - const auto parse_value = array_info[idx].pqxx_parser; - std::vector> dimensions(expected_dimensions + 1); - - while (parsed.first != pqxx::array_parser::juncture::done) - { - if ((parsed.first == pqxx::array_parser::juncture::row_start) && (++dimension > expected_dimensions)) - throw Exception("Got more dimensions than expected", ErrorCodes::BAD_ARGUMENTS); - - else if (parsed.first == pqxx::array_parser::juncture::string_value) - dimensions[dimension].emplace_back(parse_value(parsed.second)); - - else if (parsed.first == pqxx::array_parser::juncture::null_value) - dimensions[dimension].emplace_back(array_info[idx].default_value); - - else if (parsed.first == pqxx::array_parser::juncture::row_end) - { - max_dimension = std::max(max_dimension, dimension); - - if (--dimension == 0) - break; - - dimensions[dimension].emplace_back(Array(dimensions[dimension + 1].begin(), dimensions[dimension + 1].end())); - dimensions[dimension + 1].clear(); - } - - parsed = parser.get_next(); - } - - if (max_dimension < expected_dimensions) - throw Exception(ErrorCodes::BAD_ARGUMENTS, - "Got less dimensions than expected. ({} instead of {})", max_dimension, expected_dimensions); - - assert_cast(column).insert(Array(dimensions[1].begin(), dimensions[1].end())); - break; - } - } -} - - -void PostgreSQLBlockInputStream::prepareArrayInfo(size_t column_idx, const DataTypePtr data_type) -{ - const auto * array_type = typeid_cast(data_type.get()); - auto nested = array_type->getNestedType(); - - size_t count_dimensions = 1; - while (isArray(nested)) - { - ++count_dimensions; - nested = typeid_cast(nested.get())->getNestedType(); - } - - Field default_value = nested->getDefault(); - if (nested->isNullable()) - nested = static_cast(nested.get())->getNestedType(); - - WhichDataType which(nested); - std::function parser; - - if (which.isUInt8() || which.isUInt16()) - parser = [](std::string & field) -> Field { return pqxx::from_string(field); }; - else if (which.isInt8() || which.isInt16()) - parser = [](std::string & field) -> Field { return pqxx::from_string(field); }; - else if (which.isUInt32()) - parser = [](std::string & field) -> Field { return pqxx::from_string(field); }; - else if (which.isInt32()) - parser = [](std::string & field) -> Field { return pqxx::from_string(field); }; - else if (which.isUInt64()) - parser = [](std::string & field) -> Field { return pqxx::from_string(field); }; - else if (which.isInt64()) - parser = [](std::string & field) -> Field { return pqxx::from_string(field); }; - else if (which.isFloat32()) - parser = [](std::string & field) -> Field { return pqxx::from_string(field); }; - else if (which.isFloat64()) - parser = [](std::string & field) -> Field { return pqxx::from_string(field); }; - else if (which.isString() || which.isFixedString()) - parser = [](std::string & field) -> Field { return field; }; - else if (which.isDate()) - parser = [](std::string & field) -> Field { return UInt16{LocalDate{field}.getDayNum()}; }; - else if (which.isDateTime()) - parser = [](std::string & field) -> Field { return time_t{LocalDateTime{field}}; }; - else if (which.isDecimal32()) - parser = [nested](std::string & field) -> Field - { - const auto & type = typeid_cast *>(nested.get()); - DataTypeDecimal 
res(getDecimalPrecision(*type), getDecimalScale(*type)); - return convertFieldToType(field, res); - }; - else if (which.isDecimal64()) - parser = [nested](std::string & field) -> Field - { - const auto & type = typeid_cast *>(nested.get()); - DataTypeDecimal res(getDecimalPrecision(*type), getDecimalScale(*type)); - return convertFieldToType(field, res); - }; - else if (which.isDecimal128()) - parser = [nested](std::string & field) -> Field - { - const auto & type = typeid_cast *>(nested.get()); - DataTypeDecimal res(getDecimalPrecision(*type), getDecimalScale(*type)); - return convertFieldToType(field, res); - }; - else if (which.isDecimal256()) - parser = [nested](std::string & field) -> Field - { - const auto & type = typeid_cast *>(nested.get()); - DataTypeDecimal res(getDecimalPrecision(*type), getDecimalScale(*type)); - return convertFieldToType(field, res); - }; - else - throw Exception(ErrorCodes::BAD_ARGUMENTS, "Type conversion to {} is not supported", nested->getName()); - - array_info[column_idx] = {count_dimensions, default_value, parser}; -} } diff --git a/src/DataStreams/PostgreSQLBlockInputStream.h b/src/DataStreams/PostgreSQLBlockInputStream.h index 1e52b48c7cf..c18ccd0f55e 100644 --- a/src/DataStreams/PostgreSQLBlockInputStream.h +++ b/src/DataStreams/PostgreSQLBlockInputStream.h @@ -10,6 +10,7 @@ #include #include #include +#include namespace DB @@ -29,19 +30,14 @@ public: Block getHeader() const override { return description.sample_block.cloneEmpty(); } private: - using ValueType = ExternalResultDescription::ValueType; - void readPrefix() override; Block readImpl() override; void readSuffix() override; - void insertValue(IColumn & column, std::string_view value, - const ExternalResultDescription::ValueType type, const DataTypePtr data_type, size_t idx); void insertDefaultValue(IColumn & column, const IColumn & sample_column) { column.insertFrom(sample_column, 0); } - void prepareArrayInfo(size_t column_idx, const DataTypePtr data_type); String query_str; const UInt64 max_block_size; @@ -51,13 +47,7 @@ private: std::unique_ptr tx; std::unique_ptr stream; - struct ArrayInfo - { - size_t num_dimensions; - Field default_value; - std::function pqxx_parser; - }; - std::unordered_map array_info; + std::unordered_map array_info; }; } diff --git a/src/Storages/PostgreSQL/PostgreSQLReplicaBlockInputStream.cpp b/src/Storages/PostgreSQL/PostgreSQLReplicaBlockInputStream.cpp deleted file mode 100644 index 04ee68eb3aa..00000000000 --- a/src/Storages/PostgreSQL/PostgreSQLReplicaBlockInputStream.cpp +++ /dev/null @@ -1,149 +0,0 @@ -#include "PostgreSQLReplicaBlockInputStream.h" - -#include -#include -#include - - -namespace ErrorCodes -{ - extern const int LOGICAL_ERROR; -} - -namespace DB -{ - -PostgreSQLReplicaBlockInputStream::PostgreSQLReplicaBlockInputStream( - StoragePostgreSQLReplica & storage_, - ConsumerBufferPtr buffer_, - const StorageMetadataPtr & metadata_snapshot_, - std::shared_ptr context_, - const Names & columns, - size_t max_block_size_) - : storage(storage_) - , buffer(buffer_) - , metadata_snapshot(metadata_snapshot_) - , context(context_) - , column_names(columns) - , max_block_size(max_block_size_) - , non_virtual_header(metadata_snapshot->getSampleBlockNonMaterialized()) - , sample_block(non_virtual_header) - , virtual_header(metadata_snapshot->getSampleBlockForColumns({}, storage.getVirtuals(), storage.getStorageID())) -{ - for (const auto & column : virtual_header) - sample_block.insert(column); -} - - 
-PostgreSQLReplicaBlockInputStream::~PostgreSQLReplicaBlockInputStream() -{ -} - - -void PostgreSQLReplicaBlockInputStream::readPrefixImpl() -{ -} - - -Block PostgreSQLReplicaBlockInputStream::readImpl() -{ - if (!buffer || finished) - return Block(); - - finished = true; - - MutableColumns result_columns = non_virtual_header.cloneEmptyColumns(); - MutableColumns virtual_columns = virtual_header.cloneEmptyColumns(); - - auto input_format = FormatFactory::instance().getInputFormat( - "Values", *buffer, non_virtual_header, *context, max_block_size); - - InputPort port(input_format->getPort().getHeader(), input_format.get()); - connect(input_format->getPort(), port); - port.setNeeded(); - - auto read_rabbitmq_message = [&] - { - size_t new_rows = 0; - - while (true) - { - auto status = input_format->prepare(); - - switch (status) - { - case IProcessor::Status::Ready: - input_format->work(); - break; - - case IProcessor::Status::Finished: - input_format->resetParser(); - return new_rows; - - case IProcessor::Status::PortFull: - { - auto chunk = port.pull(); - - auto chunk_rows = chunk.getNumRows(); - new_rows += chunk_rows; - - auto columns = chunk.detachColumns(); - - for (size_t i = 0, s = columns.size(); i < s; ++i) - { - result_columns[i]->insertRangeFrom(*columns[i], 0, columns[i]->size()); - } - break; - } - case IProcessor::Status::NeedData: - case IProcessor::Status::Async: - case IProcessor::Status::ExpandPipeline: - throw Exception("Source processor returned status " + IProcessor::statusToName(status), ErrorCodes::LOGICAL_ERROR); - } - } - }; - - size_t total_rows = 0; - - while (true) - { - if (buffer->eof()) - break; - - auto new_rows = read_rabbitmq_message(); - - if (new_rows) - { - //auto timestamp = buffer->getTimestamp(); - //for (size_t i = 0; i < new_rows; ++i) - //{ - // virtual_columns[0]->insert(timestamp); - //} - - total_rows = total_rows + new_rows; - } - - buffer->allowNext(); - - if (total_rows >= max_block_size || !checkTimeLimit()) - break; - } - - if (total_rows == 0) - return Block(); - - auto result_block = non_virtual_header.cloneWithColumns(std::move(result_columns)); - auto virtual_block = virtual_header.cloneWithColumns(std::move(virtual_columns)); - - for (const auto & column : virtual_block.getColumnsWithTypeAndName()) - result_block.insert(column); - - return result_block; -} - - -void PostgreSQLReplicaBlockInputStream::readSuffixImpl() -{ -} - -} diff --git a/src/Storages/PostgreSQL/PostgreSQLReplicaBlockInputStream.h b/src/Storages/PostgreSQL/PostgreSQLReplicaBlockInputStream.h deleted file mode 100644 index 995c640682a..00000000000 --- a/src/Storages/PostgreSQL/PostgreSQLReplicaBlockInputStream.h +++ /dev/null @@ -1,47 +0,0 @@ -#pragma once - -#include -#include "StoragePostgreSQLReplica.h" -#include "PostgreSQLReplicaConsumerBuffer.h" -#include "buffer_fwd.h" - - -namespace DB -{ - -class PostgreSQLReplicaBlockInputStream : public IBlockInputStream -{ - -public: - PostgreSQLReplicaBlockInputStream( - StoragePostgreSQLReplica & storage_, - ConsumerBufferPtr buffer_, - const StorageMetadataPtr & metadata_snapshot_, - std::shared_ptr context_, - const Names & columns, - size_t max_block_size_); - - ~PostgreSQLReplicaBlockInputStream() override; - - String getName() const override { return storage.getName(); } - Block getHeader() const override { return sample_block; } - - void readPrefixImpl() override; - Block readImpl() override; - void readSuffixImpl() override; - -private: - StoragePostgreSQLReplica & storage; - ConsumerBufferPtr buffer; - 
StorageMetadataPtr metadata_snapshot; - std::shared_ptr context; - Names column_names; - const size_t max_block_size; - - bool finished = false; - const Block non_virtual_header; - Block sample_block; - const Block virtual_header; -}; - -} diff --git a/src/Storages/PostgreSQL/PostgreSQLReplicaConsumer.cpp b/src/Storages/PostgreSQL/PostgreSQLReplicaConsumer.cpp index 076863eb8dd..c38b898fdc1 100644 --- a/src/Storages/PostgreSQL/PostgreSQLReplicaConsumer.cpp +++ b/src/Storages/PostgreSQL/PostgreSQLReplicaConsumer.cpp @@ -2,13 +2,30 @@ #include #include + #include + #include #include #include + #include #include + #include +#include + +#include +#include + +#include +#include +#include +#include +#include + +#include +#include namespace DB { @@ -17,9 +34,9 @@ namespace ErrorCodes extern const int LOGICAL_ERROR; } -static const auto wal_reader_reschedule_ms = 500; +static const auto reschedule_ms = 500; static const auto max_thread_work_duration_ms = 60000; -static const auto max_empty_slot_reads = 20; +static const auto max_empty_slot_reads = 2; PostgreSQLReplicaConsumer::PostgreSQLReplicaConsumer( std::shared_ptr context_, @@ -27,7 +44,9 @@ PostgreSQLReplicaConsumer::PostgreSQLReplicaConsumer( const std::string & conn_str, const std::string & replication_slot_name_, const std::string & publication_name_, - const LSNPosition & start_lsn) + const LSNPosition & start_lsn, + const size_t max_block_size_, + StoragePtr nested_storage_) : log(&Poco::Logger::get("PostgreSQLReaplicaConsumer")) , context(context_) , replication_slot_name(replication_slot_name_) @@ -35,40 +54,49 @@ PostgreSQLReplicaConsumer::PostgreSQLReplicaConsumer( , table_name(table_name_) , connection(std::make_shared(conn_str)) , current_lsn(start_lsn) + , max_block_size(max_block_size_) + , nested_storage(nested_storage_) + , sample_block(nested_storage->getInMemoryMetadata().getSampleBlock()) { replication_connection = std::make_shared(fmt::format("{} replication=database", conn_str)); - wal_reader_task = context->getSchedulePool().createTask("PostgreSQLReplicaWALReader", [this]{ WALReaderFunc(); }); + description.init(sample_block); + for (const auto idx : ext::range(0, description.sample_block.columns())) + if (description.types[idx].first == ExternalResultDescription::ValueType::vtArray) + preparePostgreSQLArrayInfo(array_info, idx, description.sample_block.getByPosition(idx).type); + + columns = description.sample_block.cloneEmptyColumns(); + + wal_reader_task = context->getSchedulePool().createTask("PostgreSQLReplicaWALReader", [this]{ replicationStream(); }); wal_reader_task->deactivate(); } void PostgreSQLReplicaConsumer::startSynchronization() { - //wal_reader_task->activateAndSchedule(); + wal_reader_task->activateAndSchedule(); } void PostgreSQLReplicaConsumer::stopSynchronization() { stop_synchronization.store(true); - if (wal_reader_task) - wal_reader_task->deactivate(); + wal_reader_task->deactivate(); } -void PostgreSQLReplicaConsumer::WALReaderFunc() +void PostgreSQLReplicaConsumer::replicationStream() { size_t count_empty_slot_reads = 0; auto start_time = std::chrono::steady_clock::now(); - LOG_TRACE(log, "Starting synchronization thread"); + LOG_TRACE(log, "Starting replication stream"); while (!stop_synchronization) { if (!readFromReplicationSlot() && ++count_empty_slot_reads == max_empty_slot_reads) { - LOG_TRACE(log, "Reschedule synchronization. Replication slot is empty."); + LOG_TRACE(log, "Reschedule replication stream. 
Replication slot is empty."); break; } else @@ -78,13 +106,38 @@ void PostgreSQLReplicaConsumer::WALReaderFunc() auto duration = std::chrono::duration_cast(end_time - start_time); if (duration.count() > max_thread_work_duration_ms) { - LOG_TRACE(log, "Reschedule synchronization. Thread work duration limit exceeded."); + LOG_TRACE(log, "Reschedule replication_stream. Thread work duration limit exceeded."); break; } } if (!stop_synchronization) - wal_reader_task->scheduleAfter(wal_reader_reschedule_ms); + wal_reader_task->scheduleAfter(reschedule_ms); +} + + +void PostgreSQLReplicaConsumer::insertValue(std::string & value, size_t column_idx) +{ + const auto & sample = description.sample_block.getByPosition(column_idx); + bool is_nullable = description.types[column_idx].second; + + LOG_TRACE(log, "INSERTING VALUE {}", value); + if (is_nullable) + { + ColumnNullable & column_nullable = assert_cast(*columns[column_idx]); + const auto & data_type = assert_cast(*sample.type); + + insertPostgreSQLValue( + column_nullable.getNestedColumn(), value, + description.types[column_idx].first, data_type.getNestedType(), array_info, column_idx); + + column_nullable.getNullMapData().emplace_back(0); + } + else + { + insertPostgreSQLValue( + *columns[column_idx], value, description.types[column_idx].first, sample.type, array_info, column_idx); + } } @@ -150,17 +203,24 @@ void PostgreSQLReplicaConsumer::readTupleData(const char * message, size_t & pos Int16 num_columns = readInt16(message, pos); /// 'n' means nullable, 'u' means TOASTed value, 't' means text formatted data LOG_DEBUG(log, "num_columns {}", num_columns); - for (int k = 0; k < num_columns; ++k) + for (int column_idx = 0; column_idx < num_columns; ++column_idx) { char identifier = readInt8(message, pos); Int32 col_len = readInt32(message, pos); - String result; + String value; for (int i = 0; i < col_len; ++i) { - result += readInt8(message, pos); + value += readInt8(message, pos); } - LOG_DEBUG(log, "identifier {}, col_len {}, result {}", identifier, col_len, result); + + insertValue(value, column_idx); + + LOG_DEBUG(log, "identifier {}, col_len {}, value {}", identifier, col_len, value); } + + String val = "1"; + insertValue(val, num_columns); + insertValue(val, num_columns + 1); //readString(message, pos, size, result); } @@ -171,6 +231,7 @@ void PostgreSQLReplicaConsumer::decodeReplicationMessage(const char * replicatio size_t pos = 2; char type = readInt8(replication_message, pos); + LOG_TRACE(log, "TYPE: {}", type); switch (type) { @@ -180,6 +241,7 @@ void PostgreSQLReplicaConsumer::decodeReplicationMessage(const char * replicatio Int64 transaction_commit_timestamp = readInt64(replication_message, pos); LOG_DEBUG(log, "transaction lsn {}, transaction commit timespamp {}", transaction_end_lsn, transaction_commit_timestamp); + //current_lsn.lsn_value = transaction_end_lsn; break; } case 'C': // Commit @@ -191,6 +253,7 @@ void PostgreSQLReplicaConsumer::decodeReplicationMessage(const char * replicatio Int64 transaction_commit_timestamp = readInt64(replication_message, pos); LOG_DEBUG(log, "commit lsn {}, transaction lsn {}, transaction commit timestamp {}", commit_lsn, transaction_end_lsn, transaction_commit_timestamp); + final_lsn.lsn = current_lsn.lsn; break; } case 'O': // Origin @@ -245,16 +308,49 @@ void PostgreSQLReplicaConsumer::decodeReplicationMessage(const char * replicatio } +void PostgreSQLReplicaConsumer::syncIntoTable(Block & block) +{ + Context insert_context(*context); + insert_context.makeQueryContext(); + + auto insert = 
std::make_shared(); + insert->table_id = nested_storage->getStorageID(); + + InterpreterInsertQuery interpreter(insert, insert_context); + auto block_io = interpreter.execute(); + OneBlockInputStream input(block); + + copyData(input, *block_io.out); + LOG_TRACE(log, "TABLE SYNC END"); +} + + +void PostgreSQLReplicaConsumer::advanceLSN(std::shared_ptr ntx) +{ + LOG_TRACE(log, "CURRENT LSN FROM TO {}", final_lsn.lsn); + std::string query_str = fmt::format("SELECT pg_replication_slot_advance('{}', '{}')", replication_slot_name, final_lsn.lsn); + pqxx::result result{ntx->exec(query_str)}; + if (!result.empty()) + { + std::string s1 = result[0].size() > 0 && !result[0][0].is_null() ? result[0][0].as() : "NULL"; + std::string s2 = result[0].size() > 1 && !result[0][1].is_null() ? result[0][1].as() : "NULL"; + LOG_TRACE(log, "ADVANCE LSN: {} and {}", s1, s2); + + } +} + + /// Read binary changes from replication slot via copy command. bool PostgreSQLReplicaConsumer::readFromReplicationSlot() { + columns = description.sample_block.cloneEmptyColumns(); bool slot_empty = true; try { - auto tx = std::make_unique(*replication_connection->conn()); + auto tx = std::make_shared(*replication_connection->conn()); /// up_to_lsn is set to NULL, up_to_n_changes is set to max_block_size. std::string query_str = fmt::format( - "select data FROM pg_logical_slot_peek_binary_changes(" + "select lsn, data FROM pg_logical_slot_peek_binary_changes(" "'{}', NULL, NULL, 'publication_names', '{}', 'proto_version', '1')", replication_slot_name, publication_name); pqxx::stream_from stream(*tx, pqxx::from_query, std::string_view(query_str)); @@ -267,17 +363,23 @@ bool PostgreSQLReplicaConsumer::readFromReplicationSlot() { LOG_TRACE(log, "STREAM REPLICATION END"); stream.complete(); + + Block result_rows = description.sample_block.cloneWithColumns(std::move(columns)); + if (result_rows.rows()) + { + syncIntoTable(result_rows); + advanceLSN(tx); + } + tx->commit(); break; } slot_empty = false; - for (const auto idx : ext::range(0, row->size())) - { - LOG_TRACE(log, "Replication message: {}", (*row)[idx]); - decodeReplicationMessage((*row)[idx].c_str(), (*row)[idx].size()); - } + current_lsn.lsn = (*row)[0]; + LOG_TRACE(log, "Replication message: {}", (*row)[1]); + decodeReplicationMessage((*row)[1].c_str(), (*row)[1].size()); } } catch (...) 
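The loop above implements peek-then-advance: pg_logical_slot_peek_binary_changes returns pending changes without consuming them, the decoded block is written into the nested table via syncIntoTable(), and only then does advanceLSN() move the slot forward with pg_replication_slot_advance, so a crash between the two steps replays the batch instead of losing it. A condensed sketch of the same cycle, assuming a slot and publication already exist under the placeholder names below:

    #include <pqxx/pqxx>
    #include <iostream>

    int main()
    {
        pqxx::connection conn{"host=localhost dbname=postgres_database user=postgres password=mysecretpassword"};
        pqxx::nontransaction ntx{conn};

        std::string slot = "postgresql_replica_ch_replication_slot";
        std::string publication = "postgresql_replica_ch_publication";

        /// Peek: read pending changes without moving the slot's confirmed position.
        pqxx::result changes{ntx.exec(
            "SELECT lsn, data FROM pg_logical_slot_peek_binary_changes("
            "'" + slot + "', NULL, NULL, 'publication_names', '" + publication + "', 'proto_version', '1')")};

        std::string last_lsn;
        for (const auto & row : changes)
            last_lsn = row[0].as<std::string>();    /// row[1] carries the binary pgoutput payload

        /// Advance only after the batch has been durably applied downstream.
        if (!last_lsn.empty())
            ntx.exec("SELECT pg_replication_slot_advance('" + slot + "', '" + last_lsn + "')");

        std::cout << changes.size() << " changes peeked" << std::endl;
    }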
diff --git a/src/Storages/PostgreSQL/PostgreSQLReplicaConsumer.h b/src/Storages/PostgreSQL/PostgreSQLReplicaConsumer.h index bb4b4c5033b..ca357236180 100644 --- a/src/Storages/PostgreSQL/PostgreSQLReplicaConsumer.h +++ b/src/Storages/PostgreSQL/PostgreSQLReplicaConsumer.h @@ -3,7 +3,10 @@ #include "PostgreSQLConnection.h" #include #include +#include +#include #include "pqxx/pqxx" +#include namespace DB { @@ -11,8 +14,9 @@ namespace DB struct LSNPosition { std::string lsn; + int64_t lsn_value; - uint64_t getValue() + int64_t getValue() { uint64_t upper_half, lower_half, result; std::sscanf(lsn.data(), "%lX/%lX", &upper_half, &lower_half); @@ -22,6 +26,15 @@ struct LSNPosition // upper_half, lower_half, result); return result; } + + std::string getString() + { + char result[32]; + std::snprintf(result, sizeof(result), "%lX/%lX", (lsn_value >> 32), lsn_value & 0xFFFFFFFF); + //assert(lsn_value == result.getValue()); + std::string ans = result; + return ans; + } }; @@ -34,20 +47,28 @@ public: const std::string & conn_str_, const std::string & replication_slot_name_, const std::string & publication_name_, - const LSNPosition & start_lsn); + const LSNPosition & start_lsn, + const size_t max_block_size_, + StoragePtr nested_storage_); /// Start reading WAL from current_lsn position. Initial data sync from created snapshot already done. void startSynchronization(); void stopSynchronization(); private: - /// Executed by wal_reader_task. A separate thread reads wal and advances lsn when rows were written via copyData. - void WALReaderFunc(); + /// Executed by wal_reader_task. A separate thread reads wal and advances lsn to last committed position + /// after rows were written via copyData. + void replicationStream(); + void stopReplicationStream(); /// Start changes stream from WAL via copy command (up to max_block_size changes). bool readFromReplicationSlot(); void decodeReplicationMessage(const char * replication_message, size_t size); + void insertValue(std::string & value, size_t column_idx); + void syncIntoTable(Block & block); + void advanceLSN(std::shared_ptr ntx); + /// Methods to parse replication message data.
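+ /// A pgoutput frame starts with a one-byte message type ('B' Begin, 'C' Commit, 'O' Origin,
+ /// 'R' Relation, 'I' Insert), followed by big-endian integer fields. Tuple data is a column
+ /// count and, per column, a kind byte ('n' null, 'u' unchanged TOAST, 't' text) plus a
+ /// length-prefixed text value.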
void readTupleData(const char * message, size_t & pos, size_t size); void readString(const char * message, size_t & pos, size_t size, String & result); @@ -64,9 +85,18 @@ private: const std::string table_name; PostgreSQLConnectionPtr connection, replication_connection; - LSNPosition current_lsn; + LSNPosition current_lsn, final_lsn; BackgroundSchedulePool::TaskHolder wal_reader_task; + //BackgroundSchedulePool::TaskHolder table_sync_task; std::atomic stop_synchronization = false; + + const size_t max_block_size; + StoragePtr nested_storage; + Block sample_block; + ExternalResultDescription description; + MutableColumns columns; + /// Needed for insertPostgreSQLValue() method to parse array + std::unordered_map array_info; }; } diff --git a/src/Storages/PostgreSQL/PostgreSQLReplicaConsumerBuffer.cpp b/src/Storages/PostgreSQL/PostgreSQLReplicaConsumerBuffer.cpp deleted file mode 100644 index e8c4ba3d55a..00000000000 --- a/src/Storages/PostgreSQL/PostgreSQLReplicaConsumerBuffer.cpp +++ /dev/null @@ -1,38 +0,0 @@ -#include "PostgreSQLReplicaConsumerBuffer.h" - - -namespace DB -{ - -PostgreSQLReplicaConsumerBuffer::PostgreSQLReplicaConsumerBuffer( - uint64_t max_block_size_) - : ReadBuffer(nullptr, 0) - , rows_data(max_block_size_) -{ -} - - -PostgreSQLReplicaConsumerBuffer::~PostgreSQLReplicaConsumerBuffer() -{ - BufferBase::set(nullptr, 0, 0); -} - - -bool PostgreSQLReplicaConsumerBuffer::nextImpl() -{ - if (!allowed) - return false; - - if (rows_data.tryPop(current_row_data)) - { - auto * new_position = const_cast(current_row_data.data.data()); - BufferBase::set(new_position, current_row_data.data.size(), 0); - allowed = false; - - return true; - } - - return false; -} - -} diff --git a/src/Storages/PostgreSQL/PostgreSQLReplicaConsumerBuffer.h b/src/Storages/PostgreSQL/PostgreSQLReplicaConsumerBuffer.h deleted file mode 100644 index 8c8de3a8b68..00000000000 --- a/src/Storages/PostgreSQL/PostgreSQLReplicaConsumerBuffer.h +++ /dev/null @@ -1,39 +0,0 @@ -#pragma once - -#include -#include -#include -#include -#include -#include "buffer_fwd.h" - - -namespace DB -{ - -class PostgreSQLReplicaConsumerBuffer : public ReadBuffer -{ - -public: - PostgreSQLReplicaConsumerBuffer( - uint64_t max_block_size_); - - ~PostgreSQLReplicaConsumerBuffer() override; - - void allowNext() { allowed = true; } - -private: - bool nextImpl() override; - - struct RowData - { - String data; - RowData() : data("") {} - }; - - RowData current_row_data; - ConcurrentBoundedQueue rows_data; - bool allowed = true; -}; - -} diff --git a/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp b/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp index 5a68ef65925..7b75c42c7a8 100644 --- a/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp +++ b/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp @@ -28,13 +28,15 @@ PostgreSQLReplicationHandler::PostgreSQLReplicationHandler( const std::string & conn_str, std::shared_ptr context_, const std::string & publication_name_, - const std::string & replication_slot_name_) + const std::string & replication_slot_name_, + const size_t max_block_size_) : log(&Poco::Logger::get("PostgreSQLReplicaHandler")) , context(context_) , database_name(database_name_) , table_name(table_name_) , publication_name(publication_name_) , replication_slot(replication_slot_name_) + , max_block_size(max_block_size_) , connection(std::make_shared(conn_str)) { /// Create a replication connection, through which it is possible to execute only commands from streaming replication protocol @@ -56,7 
+58,7 @@ PostgreSQLReplicationHandler::PostgreSQLReplicationHandler( void PostgreSQLReplicationHandler::startup(StoragePtr storage) { - helper_table = storage; + nested_storage = storage; startup_task->activateAndSchedule(); } @@ -98,6 +100,7 @@ bool PostgreSQLReplicationHandler::isPublicationExist() assert(!result.empty()); bool publication_exists = (result[0][0].as() == "t"); + /// TODO: check if publication is still valid? if (publication_exists) LOG_TRACE(log, "Publication {} already exists. Using existing version", publication_name); @@ -121,7 +124,7 @@ void PostgreSQLReplicationHandler::createPublication() /// TODO: check replica identity /// Requires changed replica identity for included table to be able to receive old values of updated rows. - /// (ALTER TABLE table_name REPLICA IDENTITY FULL) + /// (ALTER TABLE table_name REPLICA IDENTITY FULL ?) } @@ -173,7 +176,9 @@ void PostgreSQLReplicationHandler::startReplication() connection->conn_str(), replication_slot, publication_name, - start_lsn); + start_lsn, + max_block_size, + nested_storage); LOG_DEBUG(log, "Commiting replication transaction"); ntx->commit(); @@ -203,12 +208,12 @@ void PostgreSQLReplicationHandler::loadFromSnapshot(std::string & snapshot_name) insert_context.makeQueryContext(); auto insert = std::make_shared(); - insert->table_id = helper_table->getStorageID(); + insert->table_id = nested_storage->getStorageID(); InterpreterInsertQuery interpreter(insert, insert_context); auto block_io = interpreter.execute(); - const StorageInMemoryMetadata & storage_metadata = helper_table->getInMemoryMetadata(); + const StorageInMemoryMetadata & storage_metadata = nested_storage->getInMemoryMetadata(); auto sample_block = storage_metadata.getSampleBlockNonMaterialized(); PostgreSQLBlockInputStream input(std::move(stx), query_str, sample_block, DEFAULT_BLOCK_SIZE); @@ -296,10 +301,18 @@ void PostgreSQLReplicationHandler::dropReplicationSlot(NontransactionPtr ntx, st } +void PostgreSQLReplicationHandler::dropPublication(NontransactionPtr ntx) +{ + std::string query_str = fmt::format("DROP PUBLICATION IF EXISTS {}", publication_name); + ntx->exec(query_str); +} + + /// Only used when MaterializePostgreSQL table is dropped. 
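The function that comment introduces, removeSlotAndPublication(), pairs the publication drop with a guarded slot drop so that dropping the table leaves nothing behind on the source server. A rough standalone equivalent of that teardown, again with placeholder names:

    #include <pqxx/pqxx>

    int main()
    {
        pqxx::connection conn{"host=localhost dbname=postgres_database user=postgres password=mysecretpassword"};
        pqxx::nontransaction ntx{conn};

        /// Publication first; IF EXISTS makes this safe to repeat.
        ntx.exec("DROP PUBLICATION IF EXISTS postgresql_replica_ch_publication");

        /// Dropping the slot releases the WAL the server has been retaining for it.
        pqxx::result r{ntx.exec(
            "SELECT 1 FROM pg_replication_slots WHERE slot_name = 'postgresql_replica_ch_replication_slot'")};
        if (!r.empty())
            ntx.exec("SELECT pg_drop_replication_slot('postgresql_replica_ch_replication_slot')");
    }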
-void PostgreSQLReplicationHandler::checkAndDropReplicationSlot() +void PostgreSQLReplicationHandler::removeSlotAndPublication() { auto ntx = std::make_shared(*replication_connection->conn()); + dropPublication(ntx); if (isReplicationSlotExist(ntx, replication_slot)) dropReplicationSlot(ntx, replication_slot, false); ntx->commit(); diff --git a/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.h b/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.h index 2e85bae5cb9..5cc4d336921 100644 --- a/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.h +++ b/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.h @@ -24,11 +24,12 @@ public: const std::string & conn_str_, std::shared_ptr context_, const std::string & publication_slot_name_, - const std::string & replication_slot_name_); + const std::string & replication_slot_name_, + const size_t max_block_size_); void startup(StoragePtr storage_); void shutdown(); - void checkAndDropReplicationSlot(); + void removeSlotAndPublication(); private: using NontransactionPtr = std::shared_ptr; @@ -41,6 +42,7 @@ private: void createTempReplicationSlot(NontransactionPtr ntx, LSNPosition & start_lsn, std::string & snapshot_name); void createReplicationSlot(NontransactionPtr ntx); void dropReplicationSlot(NontransactionPtr tx, std::string & slot_name, bool use_replication_api); + void dropPublication(NontransactionPtr ntx); void startReplication(); void loadFromSnapshot(std::string & snapshot_name); @@ -53,6 +55,7 @@ private: std::string publication_name, replication_slot; std::string temp_replication_slot; + const size_t max_block_size; PostgreSQLConnectionPtr connection; PostgreSQLConnectionPtr replication_connection; @@ -60,7 +63,7 @@ private: BackgroundSchedulePool::TaskHolder startup_task; std::shared_ptr consumer; - StoragePtr helper_table; + StoragePtr nested_storage; //LSNPosition start_lsn, final_lsn; }; diff --git a/src/Storages/PostgreSQL/StoragePostgreSQLReplica.cpp b/src/Storages/PostgreSQL/StoragePostgreSQLReplica.cpp index bca77f314cd..13cd5321737 100644 --- a/src/Storages/PostgreSQL/StoragePostgreSQLReplica.cpp +++ b/src/Storages/PostgreSQL/StoragePostgreSQLReplica.cpp @@ -27,7 +27,6 @@ #include #include "PostgreSQLReplicationSettings.h" -#include "PostgreSQLReplicaBlockInputStream.h" #include #include @@ -61,13 +60,15 @@ StoragePostgreSQLReplica::StoragePostgreSQLReplica( relative_data_path.resize(relative_data_path.size() - 1); relative_data_path += nested_storage_suffix; + replication_handler = std::make_unique( remote_database_name, remote_table_name, connection_str, global_context, global_context->getMacros()->expand(replication_settings->postgresql_replication_slot_name.value), - global_context->getMacros()->expand(replication_settings->postgresql_publication_name.value) + global_context->getMacros()->expand(replication_settings->postgresql_publication_name.value), + global_context->getSettingsRef().postgresql_replica_max_rows_to_insert.value ); } @@ -180,12 +181,13 @@ void StoragePostgreSQLReplica::startup() { Context context_copy(*global_context); const auto ast_create = getCreateHelperTableQuery(); + auto table_id = getStorageID(); Poco::File path(relative_data_path); if (!path.exists()) { LOG_TRACE(&Poco::Logger::get("StoragePostgreSQLReplica"), - "Creating helper table {}", getStorageID().table_name + nested_storage_suffix); + "Creating helper table {}", table_id.table_name + nested_storage_suffix); InterpreterCreateQuery interpreter(ast_create, context_copy); interpreter.execute(); } @@ -193,8 +195,13 @@ void 
StoragePostgreSQLReplica::startup() LOG_TRACE(&Poco::Logger::get("StoragePostgreSQLReplica"), "Directory already exists {}", relative_data_path); - nested_storage = createTableFromAST(ast_create->as(), getStorageID().database_name, relative_data_path, context_copy, false).second; - nested_storage->startup(); + nested_storage = DatabaseCatalog::instance().getTable( + StorageID(table_id.database_name, table_id.table_name + nested_storage_suffix), + *global_context); + + //nested_storage = createTableFromAST( + // ast_create->as(), getStorageID().database_name, relative_data_path, context_copy, false).second; + //nested_storage->startup(); replication_handler->startup(nested_storage); } @@ -208,8 +215,7 @@ void StoragePostgreSQLReplica::shutdown() void StoragePostgreSQLReplica::shutdownFinal() { - /// TODO: Under lock? Make sure synchronization stopped. - replication_handler->checkAndDropReplicationSlot(); + replication_handler->removeSlotAndPublication(); dropNested(); } diff --git a/src/Storages/PostgreSQL/StoragePostgreSQLReplica.h b/src/Storages/PostgreSQL/StoragePostgreSQLReplica.h index c02c9696d87..8dbfeb79bf0 100644 --- a/src/Storages/PostgreSQL/StoragePostgreSQLReplica.h +++ b/src/Storages/PostgreSQL/StoragePostgreSQLReplica.h @@ -19,7 +19,6 @@ #include #include "PostgreSQLReplicationHandler.h" #include "PostgreSQLReplicationSettings.h" -#include "buffer_fwd.h" #include "pqxx/pqxx" namespace DB @@ -46,6 +45,7 @@ public: size_t max_block_size, unsigned num_streams) override; + /// Called right after shutdown() in case of drop query void shutdownFinal(); protected: diff --git a/src/Storages/PostgreSQL/buffer_fwd.h b/src/Storages/PostgreSQL/buffer_fwd.h deleted file mode 100644 index 40ffd64aad3..00000000000 --- a/src/Storages/PostgreSQL/buffer_fwd.h +++ /dev/null @@ -1,9 +0,0 @@ -#pragma once - -namespace DB -{ - -class PostgreSQLReplicaConsumerBuffer; -using ConsumerBufferPtr = std::shared_ptr; - -} diff --git a/src/Storages/PostgreSQL/insertPostgreSQLValue.cpp b/src/Storages/PostgreSQL/insertPostgreSQLValue.cpp new file mode 100644 index 00000000000..5d4723364dc --- /dev/null +++ b/src/Storages/PostgreSQL/insertPostgreSQLValue.cpp @@ -0,0 +1,208 @@ +#include "insertPostgreSQLValue.h" +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace DB +{ + +namespace ErrorCodes +{ + extern const int BAD_ARGUMENTS; +} + +void insertPostgreSQLValue( + IColumn & column, std::string_view value, + const ExternalResultDescription::ValueType type, const DataTypePtr data_type, + std::unordered_map & array_info, size_t idx) +{ + switch (type) + { + case ExternalResultDescription::ValueType::vtUInt8: + assert_cast(column).insertValue(pqxx::from_string(value)); + break; + case ExternalResultDescription::ValueType::vtUInt16: + assert_cast(column).insertValue(pqxx::from_string(value)); + break; + case ExternalResultDescription::ValueType::vtUInt32: + assert_cast(column).insertValue(pqxx::from_string(value)); + break; + case ExternalResultDescription::ValueType::vtUInt64: + assert_cast(column).insertValue(pqxx::from_string(value)); + break; + case ExternalResultDescription::ValueType::vtInt8: + assert_cast(column).insertValue(pqxx::from_string(value)); + break; + case ExternalResultDescription::ValueType::vtInt16: + assert_cast(column).insertValue(pqxx::from_string(value)); + break; + case ExternalResultDescription::ValueType::vtInt32: + 
assert_cast(column).insertValue(pqxx::from_string(value)); + break; + case ExternalResultDescription::ValueType::vtInt64: + assert_cast(column).insertValue(pqxx::from_string(value)); + break; + case ExternalResultDescription::ValueType::vtFloat32: + assert_cast(column).insertValue(pqxx::from_string(value)); + break; + case ExternalResultDescription::ValueType::vtFloat64: + assert_cast(column).insertValue(pqxx::from_string(value)); + break; + case ExternalResultDescription::ValueType::vtFixedString:[[fallthrough]]; + case ExternalResultDescription::ValueType::vtString: + assert_cast(column).insertData(value.data(), value.size()); + break; + case ExternalResultDescription::ValueType::vtUUID: + assert_cast(column).insert(parse(value.data(), value.size())); + break; + case ExternalResultDescription::ValueType::vtDate: + assert_cast(column).insertValue(UInt16{LocalDate{std::string(value)}.getDayNum()}); + break; + case ExternalResultDescription::ValueType::vtDateTime: + assert_cast(column).insertValue(time_t{LocalDateTime{std::string(value)}}); + break; + case ExternalResultDescription::ValueType::vtDateTime64:[[fallthrough]]; + case ExternalResultDescription::ValueType::vtDecimal32: [[fallthrough]]; + case ExternalResultDescription::ValueType::vtDecimal64: [[fallthrough]]; + case ExternalResultDescription::ValueType::vtDecimal128: [[fallthrough]]; + case ExternalResultDescription::ValueType::vtDecimal256: + { + ReadBufferFromString istr(value); + data_type->deserializeAsWholeText(column, istr, FormatSettings{}); + break; + } + case ExternalResultDescription::ValueType::vtArray: + { + pqxx::array_parser parser{value}; + std::pair parsed = parser.get_next(); + + size_t dimension = 0, max_dimension = 0, expected_dimensions = array_info[idx].num_dimensions; + const auto parse_value = array_info[idx].pqxx_parser; + std::vector> dimensions(expected_dimensions + 1); + + while (parsed.first != pqxx::array_parser::juncture::done) + { + if ((parsed.first == pqxx::array_parser::juncture::row_start) && (++dimension > expected_dimensions)) + throw Exception("Got more dimensions than expected", ErrorCodes::BAD_ARGUMENTS); + + else if (parsed.first == pqxx::array_parser::juncture::string_value) + dimensions[dimension].emplace_back(parse_value(parsed.second)); + + else if (parsed.first == pqxx::array_parser::juncture::null_value) + dimensions[dimension].emplace_back(array_info[idx].default_value); + + else if (parsed.first == pqxx::array_parser::juncture::row_end) + { + max_dimension = std::max(max_dimension, dimension); + + if (--dimension == 0) + break; + + dimensions[dimension].emplace_back(Array(dimensions[dimension + 1].begin(), dimensions[dimension + 1].end())); + dimensions[dimension + 1].clear(); + } + + parsed = parser.get_next(); + } + + if (max_dimension < expected_dimensions) + throw Exception(ErrorCodes::BAD_ARGUMENTS, + "Got less dimensions than expected. 
({} instead of {})", max_dimension, expected_dimensions); + + assert_cast(column).insert(Array(dimensions[1].begin(), dimensions[1].end())); + break; + } + } +} + + +void preparePostgreSQLArrayInfo( + std::unordered_map & array_info, size_t column_idx, const DataTypePtr data_type) +{ + const auto * array_type = typeid_cast(data_type.get()); + auto nested = array_type->getNestedType(); + + size_t count_dimensions = 1; + while (isArray(nested)) + { + ++count_dimensions; + nested = typeid_cast(nested.get())->getNestedType(); + } + + Field default_value = nested->getDefault(); + if (nested->isNullable()) + nested = static_cast(nested.get())->getNestedType(); + + WhichDataType which(nested); + std::function parser; + + if (which.isUInt8() || which.isUInt16()) + parser = [](std::string & field) -> Field { return pqxx::from_string(field); }; + else if (which.isInt8() || which.isInt16()) + parser = [](std::string & field) -> Field { return pqxx::from_string(field); }; + else if (which.isUInt32()) + parser = [](std::string & field) -> Field { return pqxx::from_string(field); }; + else if (which.isInt32()) + parser = [](std::string & field) -> Field { return pqxx::from_string(field); }; + else if (which.isUInt64()) + parser = [](std::string & field) -> Field { return pqxx::from_string(field); }; + else if (which.isInt64()) + parser = [](std::string & field) -> Field { return pqxx::from_string(field); }; + else if (which.isFloat32()) + parser = [](std::string & field) -> Field { return pqxx::from_string(field); }; + else if (which.isFloat64()) + parser = [](std::string & field) -> Field { return pqxx::from_string(field); }; + else if (which.isString() || which.isFixedString()) + parser = [](std::string & field) -> Field { return field; }; + else if (which.isDate()) + parser = [](std::string & field) -> Field { return UInt16{LocalDate{field}.getDayNum()}; }; + else if (which.isDateTime()) + parser = [](std::string & field) -> Field { return time_t{LocalDateTime{field}}; }; + else if (which.isDecimal32()) + parser = [nested](std::string & field) -> Field + { + const auto & type = typeid_cast *>(nested.get()); + DataTypeDecimal res(getDecimalPrecision(*type), getDecimalScale(*type)); + return convertFieldToType(field, res); + }; + else if (which.isDecimal64()) + parser = [nested](std::string & field) -> Field + { + const auto & type = typeid_cast *>(nested.get()); + DataTypeDecimal res(getDecimalPrecision(*type), getDecimalScale(*type)); + return convertFieldToType(field, res); + }; + else if (which.isDecimal128()) + parser = [nested](std::string & field) -> Field + { + const auto & type = typeid_cast *>(nested.get()); + DataTypeDecimal res(getDecimalPrecision(*type), getDecimalScale(*type)); + return convertFieldToType(field, res); + }; + else if (which.isDecimal256()) + parser = [nested](std::string & field) -> Field + { + const auto & type = typeid_cast *>(nested.get()); + DataTypeDecimal res(getDecimalPrecision(*type), getDecimalScale(*type)); + return convertFieldToType(field, res); + }; + else + throw Exception(ErrorCodes::BAD_ARGUMENTS, "Type conversion to {} is not supported", nested->getName()); + + array_info[column_idx] = {count_dimensions, default_value, parser}; +} +} + diff --git a/src/Storages/PostgreSQL/insertPostgreSQLValue.h b/src/Storages/PostgreSQL/insertPostgreSQLValue.h new file mode 100644 index 00000000000..1582d35d096 --- /dev/null +++ b/src/Storages/PostgreSQL/insertPostgreSQLValue.h @@ -0,0 +1,29 @@ +#pragma once + +#include +#include +#include +#include +#include + + 
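An aside on the vtArray branch above: it folds the flat pqxx::array_parser event stream into nested Arrays by keeping one pending buffer per nesting depth and collapsing a buffer into its parent on every row_end. A minimal standalone sketch of the same folding, assuming a hand-rolled tokenizer and a hypothetical AValue type in place of DB::Field and pqxx:

#include <iostream>
#include <string>
#include <vector>

/// A toy stand-in for DB::Field: a scalar string or a nested array.
struct AValue
{
    std::string scalar;
    std::vector<AValue> array;
    bool is_array = false;
};

/// Fold an array literal such as {{1,2},{3,4}} into nested AValues, keeping
/// one pending buffer per depth: on every '}' the finished inner buffer
/// collapses into its parent as a single element.
AValue parseArray(const std::string & text, size_t expected_dimensions)
{
    std::vector<std::vector<AValue>> buffers(expected_dimensions + 1);
    size_t depth = 0;
    std::string token;

    auto flush = [&]
    {
        if (!token.empty())
        {
            buffers[depth].push_back({token, {}, false});
            token.clear();
        }
    };

    for (char c : text)
    {
        if (c == '{')
            ++depth;                     /// row_start: one level deeper
        else if (c == ',')
            flush();                     /// string_value boundary
        else if (c == '}')               /// row_end: collapse this level
        {
            flush();
            AValue inner{{}, buffers[depth], true};
            buffers[depth].clear();
            if (--depth == 0)
                return inner;
            buffers[depth].push_back(inner);
        }
        else
            token += c;
    }
    return {};
}

int main()
{
    AValue v = parseArray("{{1,2},{3,4}}", 2);
    std::cout << v.array.size() << " rows, first cell = " << v.array[0].array[0].scalar << "\n";
    /// prints: 2 rows, first cell = 1
}

Tracking a running depth instead of recursing is what lets the real code reduce the "more/fewer dimensions than expected" conditions to comparisons against expected_dimensions, as in the two BAD_ARGUMENTS throws above.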
+namespace DB +{ + +struct PostgreSQLArrayInfo +{ + size_t num_dimensions; + Field default_value; + std::function pqxx_parser; +}; + + +void insertPostgreSQLValue( + IColumn & column, std::string_view value, + const ExternalResultDescription::ValueType type, const DataTypePtr data_type, + std::unordered_map & array_info, size_t idx); + +void preparePostgreSQLArrayInfo( + std::unordered_map & array_info, size_t column_idx, const DataTypePtr data_type); + +} diff --git a/tests/integration/test_storage_postgresql_replica/test.py b/tests/integration/test_storage_postgresql_replica/test.py index ccf0b2eee13..487ee2a35cb 100644 --- a/tests/integration/test_storage_postgresql_replica/test.py +++ b/tests/integration/test_storage_postgresql_replica/test.py @@ -13,7 +13,7 @@ instance = cluster.add_instance('instance', main_configs=['configs/log_conf.xml' postgres_table_template = """ CREATE TABLE IF NOT EXISTS {} ( - key Integer NOT NULL, value Integer, PRIMARY KEY (key)) + key Integer NOT NULL, value Integer) """ def get_postgres_conn(database=False): @@ -108,6 +108,70 @@ def test_no_connection_at_startup(started_cluster): cursor.execute('DROP TABLE postgresql_replica;') postgresql_replica_check_result(result, True) + +def test_detach_attach_is_ok(started_cluster): + conn = get_postgres_conn(True) + cursor = conn.cursor() + create_postgres_table(cursor, 'postgresql_replica'); + instance.query("INSERT INTO postgres_database.postgresql_replica SELECT number, number from numbers(50)") + + instance.query(''' + CREATE TABLE test.postgresql_replica (key UInt64, value UInt64) + ENGINE = PostgreSQLReplica( + 'postgres1:5432', 'postgres_database', 'postgresql_replica', 'postgres', 'mysecretpassword') + PRIMARY KEY key; + ''') + + result = instance.query('SELECT * FROM test.postgresql_replica;') + postgresql_replica_check_result(result, True) + + instance.query('DETACH TABLE test.postgresql_replica') + instance.query('ATTACH TABLE test.postgresql_replica') + + result = instance.query('SELECT * FROM test.postgresql_replica;') + cursor.execute('DROP TABLE postgresql_replica;') + postgresql_replica_check_result(result, True) + + +def test_replicating_inserts(started_cluster): + conn = get_postgres_conn(True) + cursor = conn.cursor() + create_postgres_table(cursor, 'postgresql_replica'); + + instance.query("INSERT INTO postgres_database.postgresql_replica SELECT number, number from numbers(10)") + + instance.query(''' + CREATE TABLE test.postgresql_replica (key UInt64, value UInt64) + ENGINE = PostgreSQLReplica( + 'postgres1:5432', 'postgres_database', 'postgresql_replica', 'postgres', 'mysecretpassword') + PRIMARY KEY key; + ''') + + result = instance.query('SELECT count() FROM test.postgresql_replica;') + assert(int(result) == 10) + + instance.query("INSERT INTO postgres_database.postgresql_replica SELECT 10 + number, 10 + number from numbers(10)") + instance.query("INSERT INTO postgres_database.postgresql_replica SELECT 20 + number, 20 + number from numbers(10)") + + time.sleep(4) + + result = instance.query('SELECT count() FROM test.postgresql_replica;') + assert(int(result) == 30) + + instance.query("INSERT INTO postgres_database.postgresql_replica SELECT 30 + number, 30 + number from numbers(10)") + instance.query("INSERT INTO postgres_database.postgresql_replica SELECT 40 + number, 40 + number from numbers(10)") + + time.sleep(4) + + result = instance.query('SELECT count() FROM test.postgresql_replica;') + assert(int(result) == 50) + + result = instance.query('SELECT * FROM test.postgresql_replica ORDER 
BY key;') + + cursor.execute('DROP TABLE postgresql_replica;') + postgresql_replica_check_result(result, True) + + if __name__ == '__main__': cluster.start() input("Cluster created, press any key to destroy...") From 0d19992128abc82cfa0cda9fee3abf939c8809ec Mon Sep 17 00:00:00 2001 From: kssenii Date: Wed, 3 Feb 2021 16:13:18 +0000 Subject: [PATCH 020/931] Replicate delete queries --- .../PostgreSQLBlockInputStream.cpp | 2 +- src/DataStreams/PostgreSQLBlockInputStream.h | 7 -- .../PostgreSQL/PostgreSQLReplicaConsumer.cpp | 88 +++++++++++++++---- .../PostgreSQL/PostgreSQLReplicaConsumer.h | 19 +++- .../PostgreSQL/PostgreSQLReplicaMetadata.cpp | 75 ++++++++++++++++ .../PostgreSQL/PostgreSQLReplicaMetadata.h | 31 +++++++ .../PostgreSQLReplicationHandler.cpp | 19 +++- .../PostgreSQL/PostgreSQLReplicationHandler.h | 5 +- .../PostgreSQL/StoragePostgreSQLReplica.cpp | 75 +++++++++++++--- .../PostgreSQL/StoragePostgreSQLReplica.h | 2 +- .../PostgreSQL/insertPostgreSQLValue.cpp | 8 ++ .../PostgreSQL/insertPostgreSQLValue.h | 2 + src/Storages/StoragePostgreSQL.h | 1 + .../test_storage_postgresql_replica/test.py | 58 +++++++++--- 14 files changed, 333 insertions(+), 59 deletions(-) create mode 100644 src/Storages/PostgreSQL/PostgreSQLReplicaMetadata.cpp create mode 100644 src/Storages/PostgreSQL/PostgreSQLReplicaMetadata.h diff --git a/src/DataStreams/PostgreSQLBlockInputStream.cpp b/src/DataStreams/PostgreSQLBlockInputStream.cpp index a52ca1e58a4..5b43a21c6fc 100644 --- a/src/DataStreams/PostgreSQLBlockInputStream.cpp +++ b/src/DataStreams/PostgreSQLBlockInputStream.cpp @@ -90,7 +90,7 @@ Block PostgreSQLBlockInputStream::readImpl() } else { - insertDefaultValue(*columns[idx], *sample.column); + insertDefaultPostgreSQLValue(*columns[idx], *sample.column); } } diff --git a/src/DataStreams/PostgreSQLBlockInputStream.h b/src/DataStreams/PostgreSQLBlockInputStream.h index c18ccd0f55e..f51526b2eb3 100644 --- a/src/DataStreams/PostgreSQLBlockInputStream.h +++ b/src/DataStreams/PostgreSQLBlockInputStream.h @@ -15,7 +15,6 @@ namespace DB { -using ConnectionPtr = std::shared_ptr; class PostgreSQLBlockInputStream : public IBlockInputStream { @@ -34,16 +33,10 @@ private: Block readImpl() override; void readSuffix() override; - void insertDefaultValue(IColumn & column, const IColumn & sample_column) - { - column.insertFrom(sample_column, 0); - } - String query_str; const UInt64 max_block_size; ExternalResultDescription description; - ConnectionPtr connection; std::unique_ptr tx; std::unique_ptr stream; diff --git a/src/Storages/PostgreSQL/PostgreSQLReplicaConsumer.cpp b/src/Storages/PostgreSQL/PostgreSQLReplicaConsumer.cpp index c38b898fdc1..3a91e893392 100644 --- a/src/Storages/PostgreSQL/PostgreSQLReplicaConsumer.cpp +++ b/src/Storages/PostgreSQL/PostgreSQLReplicaConsumer.cpp @@ -29,6 +29,7 @@ namespace DB { + namespace ErrorCodes { extern const int LOGICAL_ERROR; @@ -36,7 +37,8 @@ namespace ErrorCodes static const auto reschedule_ms = 500; static const auto max_thread_work_duration_ms = 60000; -static const auto max_empty_slot_reads = 2; +static const auto max_empty_slot_reads = 16; + PostgreSQLReplicaConsumer::PostgreSQLReplicaConsumer( std::shared_ptr context_, @@ -44,6 +46,7 @@ PostgreSQLReplicaConsumer::PostgreSQLReplicaConsumer( const std::string & conn_str, const std::string & replication_slot_name_, const std::string & publication_name_, + const std::string & metadata_path, const LSNPosition & start_lsn, const size_t max_block_size_, StoragePtr nested_storage_) @@ -51,6 +54,7 @@ 
PostgreSQLReplicaConsumer::PostgreSQLReplicaConsumer( , context(context_) , replication_slot_name(replication_slot_name_) , publication_name(publication_name_) + , metadata(metadata_path) , table_name(table_name_) , connection(std::make_shared(conn_str)) , current_lsn(start_lsn) @@ -69,6 +73,7 @@ PostgreSQLReplicaConsumer::PostgreSQLReplicaConsumer( wal_reader_task = context->getSchedulePool().createTask("PostgreSQLReplicaWALReader", [this]{ replicationStream(); }); wal_reader_task->deactivate(); + } @@ -118,10 +123,10 @@ void PostgreSQLReplicaConsumer::replicationStream() void PostgreSQLReplicaConsumer::insertValue(std::string & value, size_t column_idx) { + LOG_TRACE(log, "INSERTING VALUE {}", value); const auto & sample = description.sample_block.getByPosition(column_idx); bool is_nullable = description.types[column_idx].second; - LOG_TRACE(log, "INSERTING VALUE {}", value); if (is_nullable) { ColumnNullable & column_nullable = assert_cast(*columns[column_idx]); @@ -141,6 +146,13 @@ void PostgreSQLReplicaConsumer::insertValue(std::string & value, size_t column_i } +void PostgreSQLReplicaConsumer::insertDefaultValue(size_t column_idx) +{ + const auto & sample = description.sample_block.getByPosition(column_idx); + insertDefaultPostgreSQLValue(*columns[column_idx], *sample.column); +} + + void PostgreSQLReplicaConsumer::readString(const char * message, size_t & pos, size_t size, String & result) { assert(size > pos + 2); @@ -198,7 +210,7 @@ Int64 PostgreSQLReplicaConsumer::readInt64(const char * message, size_t & pos) } -void PostgreSQLReplicaConsumer::readTupleData(const char * message, size_t & pos, size_t /* size */) +void PostgreSQLReplicaConsumer::readTupleData(const char * message, size_t & pos, PostgreSQLQuery type) { Int16 num_columns = readInt16(message, pos); /// 'n' means nullable, 'u' means TOASTed value, 't' means text formatted data @@ -218,20 +230,36 @@ void PostgreSQLReplicaConsumer::readTupleData(const char * message, size_t & pos LOG_DEBUG(log, "identifier {}, col_len {}, value {}", identifier, col_len, value); } - String val = "1"; - insertValue(val, num_columns); - insertValue(val, num_columns + 1); - //readString(message, pos, size, result); + switch (type) + { + case PostgreSQLQuery::INSERT: + { + columns[num_columns]->insert(Int8(1)); + columns[num_columns + 1]->insert(UInt64(metadata.version())); + //insertValueMaterialized(*columns[num_columns], 1); + //insertValueMaterialized(*columns[num_columns + 1], metadata.version()); + break; + } + case PostgreSQLQuery::DELETE: + { + columns[num_columns]->insert(Int8(-1)); + columns[num_columns + 1]->insert(UInt64(metadata.version())); + break; + } + case PostgreSQLQuery::UPDATE: + { + break; + } + } } -void PostgreSQLReplicaConsumer::decodeReplicationMessage(const char * replication_message, size_t size) +void PostgreSQLReplicaConsumer::processReplicationMessage(const char * replication_message, size_t size) { /// Skip '\x' size_t pos = 2; char type = readInt8(replication_message, pos); - LOG_TRACE(log, "TYPE: {}", type); switch (type) { @@ -292,13 +320,23 @@ void PostgreSQLReplicaConsumer::decodeReplicationMessage(const char * replicatio Int32 relation_id = readInt32(replication_message, pos); Int8 new_tuple = readInt8(replication_message, pos); LOG_DEBUG(log, "relationID {}, newTuple {}", relation_id, new_tuple); - readTupleData(replication_message, pos, size); + readTupleData(replication_message, pos, PostgreSQLQuery::INSERT); break; } case 'U': // Update break; case 'D': // Delete + { + Int32 relation_id = 
readInt32(replication_message, pos); + //Int8 index_replica_identity = readInt8(replication_message, pos); + Int8 full_replica_identity = readInt8(replication_message, pos); + LOG_DEBUG(log, "relationID {}, full replica identity {}", + relation_id, full_replica_identity); + //LOG_DEBUG(log, "relationID {}, index replica identity {} full replica identity {}", + // relation_id, index_replica_identity, full_replica_identity); + readTupleData(replication_message, pos, PostgreSQLQuery::DELETE); break; + } case 'T': // Truncate break; default: @@ -344,16 +382,18 @@ void PostgreSQLReplicaConsumer::advanceLSN(std::shared_ptr bool PostgreSQLReplicaConsumer::readFromReplicationSlot() { columns = description.sample_block.cloneEmptyColumns(); + std::shared_ptr tx; bool slot_empty = true; try { - auto tx = std::make_shared(*replication_connection->conn()); + tx = std::make_shared(*replication_connection->conn()); /// up_to_lsn is set to NULL, up_to_n_changes is set to max_block_size. std::string query_str = fmt::format( "select lsn, data FROM pg_logical_slot_peek_binary_changes(" "'{}', NULL, NULL, 'publication_names', '{}', 'proto_version', '1')", replication_slot_name, publication_name); pqxx::stream_from stream(*tx, pqxx::from_query, std::string_view(query_str)); + LOG_DEBUG(log, "Starting replication stream"); while (true) { @@ -364,14 +404,12 @@ bool PostgreSQLReplicaConsumer::readFromReplicationSlot() LOG_TRACE(log, "STREAM REPLICATION END"); stream.complete(); - Block result_rows = description.sample_block.cloneWithColumns(std::move(columns)); - if (result_rows.rows()) + if (slot_empty) { - syncIntoTable(result_rows); - advanceLSN(tx); + tx->commit(); + return false; } - tx->commit(); break; } @@ -379,7 +417,7 @@ current_lsn.lsn = (*row)[0]; LOG_TRACE(log, "Replication message: {}", (*row)[1]); - decodeReplicationMessage((*row)[1].c_str(), (*row)[1].size()); + processReplicationMessage((*row)[1].c_str(), (*row)[1].size()); } } catch (...) @@ -388,7 +426,21 @@ return false; } - return !slot_empty; + Block result_rows = description.sample_block.cloneWithColumns(std::move(columns)); + if (result_rows.rows()) + { + assert(!slot_empty); + metadata.commitVersion([&]() + { + syncIntoTable(result_rows); + advanceLSN(tx); + + /// TODO: Can transaction still be active if got exception before committing it? It must be closed if connection is ok. + tx->commit(); + }); + } + + return true; } } diff --git a/src/Storages/PostgreSQL/PostgreSQLReplicaConsumer.h b/src/Storages/PostgreSQL/PostgreSQLReplicaConsumer.h index ca357236180..b396dad80a9 100644 --- a/src/Storages/PostgreSQL/PostgreSQLReplicaConsumer.h +++ b/src/Storages/PostgreSQL/PostgreSQLReplicaConsumer.h @@ -2,6 +2,7 @@ #include "PostgreSQLConnection.h" #include +#include "PostgreSQLReplicaMetadata.h" #include #include #include @@ -47,6 +48,7 @@ public: const std::string & conn_str_, const std::string & replication_slot_name_, const std::string & publication_name_, + const std::string & metadata_path, const LSNPosition & start_lsn, const size_t max_block_size_, StoragePtr nested_storage_); @@ -61,16 +63,26 @@ private: void replicationStream(); void stopReplicationStream(); + enum class PostgreSQLQuery + { + INSERT, + UPDATE, + DELETE + }; + /// Start changes stream from WAL via copy command (up to max_block_size changes). 
bool readFromReplicationSlot(); - void decodeReplicationMessage(const char * replication_message, size_t size); + void processReplicationMessage(const char * replication_message, size_t size); void insertValue(std::string & value, size_t column_idx); + //static void insertValueMaterialized(IColumn & column, uint64_t value); + void insertDefaultValue(size_t column_idx); + void syncIntoTable(Block & block); void advanceLSN(std::shared_ptr ntx); /// Methods to parse replication message data. - void readTupleData(const char * message, size_t & pos, size_t size); + void readTupleData(const char * message, size_t & pos, PostgreSQLQuery type); void readString(const char * message, size_t & pos, size_t size, String & result); Int64 readInt64(const char * message, size_t & pos); Int32 readInt32(const char * message, size_t & pos); @@ -81,6 +93,7 @@ private: std::shared_ptr context; const std::string replication_slot_name; const std::string publication_name; + PostgreSQLReplicaMetadata metadata; const std::string table_name; PostgreSQLConnectionPtr connection, replication_connection; @@ -97,6 +110,8 @@ private: MutableColumns columns; /// Needed for insertPostgreSQLValue() method to parse array std::unordered_map array_info; + + size_t data_version = 1; }; } diff --git a/src/Storages/PostgreSQL/PostgreSQLReplicaMetadata.cpp b/src/Storages/PostgreSQL/PostgreSQLReplicaMetadata.cpp new file mode 100644 index 00000000000..81b258b61c1 --- /dev/null +++ b/src/Storages/PostgreSQL/PostgreSQLReplicaMetadata.cpp @@ -0,0 +1,75 @@ +#include "PostgreSQLReplicaMetadata.h" +#include +#include +#include +#include +#include +#include + + +namespace DB +{ + +namespace ErrorCodes +{ + extern const int LOGICAL_ERROR; +} + + +PostgreSQLReplicaMetadata::PostgreSQLReplicaMetadata(const std::string & metadata_file_path) + : metadata_file(metadata_file_path) + , tmp_metadata_file(metadata_file_path + ".tmp") + , data_version(1) +{ + readDataVersion(); +} + + +void PostgreSQLReplicaMetadata::readDataVersion() +{ + if (Poco::File(metadata_file).exists()) + { + LOG_INFO(&Poco::Logger::get("PostgreSQLReplicaMetadata"), + "PostgreSQLReplica metadata file exists. Starting version {}", data_version); + + ReadBufferFromFile in(metadata_file, DBMS_DEFAULT_BUFFER_SIZE); + + assertString("\nData version:\t", in); + readIntText(data_version, in); + + LOG_INFO(&Poco::Logger::get("PostgreSQLReplicaMetadata"), + "PostgreSQLReplica metadata file exists. Starting version {}", data_version); + } +} + + +void PostgreSQLReplicaMetadata::writeDataVersion() +{ + WriteBufferFromFile out(tmp_metadata_file, DBMS_DEFAULT_BUFFER_SIZE, O_WRONLY | O_TRUNC | O_CREAT); + writeString("\nData Version:\t" + toString(data_version), out); + + out.next(); + out.sync(); + out.close(); +} + + +/// While data is received, version is updated. Before table sync, write last version to tmp file. +/// Then sync data to table and rename tmp to non-tmp. +void PostgreSQLReplicaMetadata::commitVersion(const std::function & finalizeStreamFunc) +{ + writeDataVersion(); + + try + { + finalizeStreamFunc(); + Poco::File(tmp_metadata_file).renameTo(metadata_file); + } + catch (...) 
+ { + Poco::File(tmp_metadata_file).remove(); + throw; + } +} + +} diff --git a/src/Storages/PostgreSQL/PostgreSQLReplicaMetadata.h b/src/Storages/PostgreSQL/PostgreSQLReplicaMetadata.h new file mode 100644 index 00000000000..87750c0e007 --- /dev/null +++ b/src/Storages/PostgreSQL/PostgreSQLReplicaMetadata.h @@ -0,0 +1,31 @@ +#pragma once +#include + + +namespace DB +{ + +class PostgreSQLReplicaMetadata +{ +public: + PostgreSQLReplicaMetadata(const std::string & metadata_file_path); + + void commitVersion(const std::function & syncTableFunc); + + size_t version() + { + return data_version++; + } + +private: + void readDataVersion(); + void writeDataVersion(); + + const std::string metadata_file; + const std::string tmp_metadata_file; + + size_t data_version; + +}; + +} diff --git a/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp b/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp index 7b75c42c7a8..53c3c66c504 100644 --- a/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp +++ b/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp @@ -2,6 +2,7 @@ #include "PostgreSQLReplicaConsumer.h" #include +#include #include #include #include @@ -38,6 +39,7 @@ PostgreSQLReplicationHandler::PostgreSQLReplicationHandler( , replication_slot(replication_slot_name_) , max_block_size(max_block_size_) , connection(std::make_shared(conn_str)) + , metadata_path(DatabaseCatalog::instance().getDatabase(database_name)->getMetadataPath() + "/.metadata") { /// Create a replication connection, through which it is possible to execute only commands from streaming replication protocol /// interface. Passing 'database' as the value instructs walsender to connect to the database specified in the dbname parameter, @@ -82,6 +84,7 @@ void PostgreSQLReplicationHandler::waitConnectionAndStart() throw; } + LOG_DEBUG(log, "PostgreSQLReplica starting replication process"); startReplication(); } @@ -170,17 +173,19 @@ void PostgreSQLReplicationHandler::startReplication() createReplicationSlot(ntx); } + LOG_DEBUG(&Poco::Logger::get("StoragePostgreSQLMetadata"), "Creating replication consumer"); consumer = std::make_shared( context, table_name, connection->conn_str(), replication_slot, publication_name, + metadata_path, start_lsn, max_block_size, nested_storage); - LOG_DEBUG(log, "Commiting replication transaction"); + LOG_DEBUG(&Poco::Logger::get("StoragePostgreSQLMetadata"), "Successfully created replication consumer"); ntx->commit(); consumer->startSynchronization(); @@ -303,18 +308,28 @@ void PostgreSQLReplicationHandler::dropReplicationSlot(NontransactionPtr ntx, st void PostgreSQLReplicationHandler::dropPublication(NontransactionPtr ntx) { + if (publication_name.empty()) + return; + std::string query_str = fmt::format("DROP PUBLICATION IF EXISTS {}", publication_name); ntx->exec(query_str); } /// Only used when MaterializePostgreSQL table is dropped. -void PostgreSQLReplicationHandler::removeSlotAndPublication() +void PostgreSQLReplicationHandler::shutdownFinal() { + if (Poco::File(metadata_path).exists()) + Poco::File(metadata_path).remove(); + + /// TODO: another transaction might be active on this same connection. Need to make sure it does not happen. 
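+ /// Closing it here makes the nontransaction created below start on a fresh session (assuming conn() re-opens a closed connection on demand), so no earlier transaction can still be attached to it.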
+ replication_connection->conn()->close(); auto ntx = std::make_shared(*replication_connection->conn()); + dropPublication(ntx); if (isReplicationSlotExist(ntx, replication_slot)) dropReplicationSlot(ntx, replication_slot, false); + ntx->commit(); } diff --git a/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.h b/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.h index 5cc4d336921..afc8a4bd213 100644 --- a/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.h +++ b/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.h @@ -3,6 +3,7 @@ #include #include "PostgreSQLConnection.h" #include "PostgreSQLReplicaConsumer.h" +#include "PostgreSQLReplicaMetadata.h" #include #include #include "pqxx/pqxx" @@ -29,7 +30,7 @@ public: void startup(StoragePtr storage_); void shutdown(); - void removeSlotAndPublication(); + void shutdownFinal(); private: using NontransactionPtr = std::shared_ptr; @@ -61,10 +62,10 @@ private: PostgreSQLConnectionPtr replication_connection; std::shared_ptr tx; + const String metadata_path; BackgroundSchedulePool::TaskHolder startup_task; std::shared_ptr consumer; StoragePtr nested_storage; - //LSNPosition start_lsn, final_lsn; }; diff --git a/src/Storages/PostgreSQL/StoragePostgreSQLReplica.cpp b/src/Storages/PostgreSQL/StoragePostgreSQLReplica.cpp index 13cd5321737..825c49668bb 100644 --- a/src/Storages/PostgreSQL/StoragePostgreSQLReplica.cpp +++ b/src/Storages/PostgreSQL/StoragePostgreSQLReplica.cpp @@ -40,7 +40,8 @@ namespace ErrorCodes extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH; } -static auto nested_storage_suffix = "_ReplacingMergeTree"; +static const auto NESTED_STORAGE_SUFFIX = "_ReplacingMergeTree"; + StoragePostgreSQLReplica::StoragePostgreSQLReplica( const StorageID & table_id_, @@ -57,9 +58,9 @@ StoragePostgreSQLReplica::StoragePostgreSQLReplica( , replication_settings(std::move(replication_settings_)) { setInMemoryMetadata(storage_metadata); - relative_data_path.resize(relative_data_path.size() - 1); - relative_data_path += nested_storage_suffix; - + if (relative_data_path.ends_with("/")) + relative_data_path.resize(relative_data_path.size() - 1); + relative_data_path += NESTED_STORAGE_SUFFIX; replication_handler = std::make_unique( remote_database_name, @@ -132,7 +133,7 @@ ASTPtr StoragePostgreSQLReplica::getCreateHelperTableQuery() auto create_table_query = std::make_shared(); auto table_id = getStorageID(); - create_table_query->table = table_id.table_name + nested_storage_suffix; + create_table_query->table = table_id.table_name + NESTED_STORAGE_SUFFIX; create_table_query->database = table_id.database_name; create_table_query->if_not_exists = true; @@ -166,13 +167,63 @@ Pipe StoragePostgreSQLReplica::read( StoragePtr storage = DatabaseCatalog::instance().getTable(nested_storage->getStorageID(), *global_context); auto lock = nested_storage->lockForShare(context.getCurrentQueryId(), context.getSettingsRef().lock_acquire_timeout); - const StorageMetadataPtr & nested_metadata = storage->getInMemoryMetadataPtr(); + const StorageMetadataPtr & nested_metadata = nested_storage->getInMemoryMetadataPtr(); + + NameSet column_names_set = NameSet(column_names.begin(), column_names.end()); + + Block nested_header = nested_metadata->getSampleBlock(); + ColumnWithTypeAndName & sign_column = nested_header.getByPosition(nested_header.columns() - 2); + ColumnWithTypeAndName & version_column = nested_header.getByPosition(nested_header.columns() - 1); + + if (ASTSelectQuery * select_query = query_info.query->as(); select_query && 
!column_names_set.count(version_column.name)) + { + auto & tables_in_select_query = select_query->tables()->as(); + + if (!tables_in_select_query.children.empty()) + { + auto & tables_element = tables_in_select_query.children[0]->as(); + + if (tables_element.table_expression) + tables_element.table_expression->as().final = true; + } + } + + String filter_column_name; + Names require_columns_name = column_names; + ASTPtr expressions = std::make_shared(); + if (column_names_set.empty() || !column_names_set.count(sign_column.name)) + { + require_columns_name.emplace_back(sign_column.name); + + const auto & sign_column_name = std::make_shared(sign_column.name); + const auto & fetch_sign_value = std::make_shared(Field(Int8(1))); + + expressions->children.emplace_back(makeASTFunction("equals", sign_column_name, fetch_sign_value)); + filter_column_name = expressions->children.back()->getColumnName(); + + for (const auto & column_name : column_names) + expressions->children.emplace_back(std::make_shared(column_name)); + } + Pipe pipe = storage->read( - column_names, + require_columns_name, nested_metadata, query_info, context, processed_stage, max_block_size, num_streams); pipe.addTableLock(lock); + + if (!expressions->children.empty() && !pipe.empty()) + { + Block pipe_header = pipe.getHeader(); + auto syntax = TreeRewriter(context).analyze(expressions, pipe_header.getNamesAndTypesList()); + ExpressionActionsPtr expression_actions = ExpressionAnalyzer(expressions, syntax, context).getActions(true); + + pipe.addSimpleTransform([&](const Block & header) + { + return std::make_shared(header, expression_actions, filter_column_name, false); + }); + } + return pipe; } @@ -187,7 +238,7 @@ void StoragePostgreSQLReplica::startup() if (!path.exists()) { LOG_TRACE(&Poco::Logger::get("StoragePostgreSQLReplica"), - "Creating helper table {}", table_id.table_name + nested_storage_suffix); + "Creating helper table {}", table_id.table_name + NESTED_STORAGE_SUFFIX); InterpreterCreateQuery interpreter(ast_create, context_copy); interpreter.execute(); } @@ -196,13 +247,9 @@ void StoragePostgreSQLReplica::startup() "Directory already exists {}", relative_data_path); nested_storage = DatabaseCatalog::instance().getTable( - StorageID(table_id.database_name, table_id.table_name + nested_storage_suffix), + StorageID(table_id.database_name, table_id.table_name + NESTED_STORAGE_SUFFIX), *global_context); - //nested_storage = createTableFromAST( - // ast_create->as(), getStorageID().database_name, relative_data_path, context_copy, false).second; - //nested_storage->startup(); - replication_handler->startup(nested_storage); } @@ -215,7 +262,7 @@ void StoragePostgreSQLReplica::shutdown() void StoragePostgreSQLReplica::shutdownFinal() { - replication_handler->removeSlotAndPublication(); + replication_handler->shutdownFinal(); dropNested(); } diff --git a/src/Storages/PostgreSQL/StoragePostgreSQLReplica.h b/src/Storages/PostgreSQL/StoragePostgreSQLReplica.h index 8dbfeb79bf0..3207389c68f 100644 --- a/src/Storages/PostgreSQL/StoragePostgreSQLReplica.h +++ b/src/Storages/PostgreSQL/StoragePostgreSQLReplica.h @@ -67,7 +67,7 @@ private: ASTPtr getCreateHelperTableQuery(); void dropNested(); - String relative_data_path; + String relative_data_path, metadata_path; std::shared_ptr global_context; std::unique_ptr replication_settings; diff --git a/src/Storages/PostgreSQL/insertPostgreSQLValue.cpp b/src/Storages/PostgreSQL/insertPostgreSQLValue.cpp index 5d4723364dc..8cd17cca982 100644 --- 
a/src/Storages/PostgreSQL/insertPostgreSQLValue.cpp +++ b/src/Storages/PostgreSQL/insertPostgreSQLValue.cpp @@ -16,6 +16,7 @@ #include #include + namespace DB { @@ -24,6 +25,13 @@ namespace ErrorCodes extern const int BAD_ARGUMENTS; } + +void insertDefaultPostgreSQLValue(IColumn & column, const IColumn & sample_column) +{ + column.insertFrom(sample_column, 0); +} + + void insertPostgreSQLValue( IColumn & column, std::string_view value, const ExternalResultDescription::ValueType type, const DataTypePtr data_type, diff --git a/src/Storages/PostgreSQL/insertPostgreSQLValue.h b/src/Storages/PostgreSQL/insertPostgreSQLValue.h index 1582d35d096..d9f24247935 100644 --- a/src/Storages/PostgreSQL/insertPostgreSQLValue.h +++ b/src/Storages/PostgreSQL/insertPostgreSQLValue.h @@ -26,4 +26,6 @@ void insertPostgreSQLValue( void preparePostgreSQLArrayInfo( std::unordered_map & array_info, size_t column_idx, const DataTypePtr data_type); +void insertDefaultPostgreSQLValue(IColumn & column, const IColumn & sample_column); + } diff --git a/src/Storages/StoragePostgreSQL.h b/src/Storages/StoragePostgreSQL.h index 8aebae5896b..8fc7a93b579 100644 --- a/src/Storages/StoragePostgreSQL.h +++ b/src/Storages/StoragePostgreSQL.h @@ -17,6 +17,7 @@ namespace DB class PostgreSQLConnection; using PostgreSQLConnectionPtr = std::shared_ptr; +using ConnectionPtr = std::shared_ptr; class StoragePostgreSQL final : public ext::shared_ptr_helper, public IStorage { diff --git a/tests/integration/test_storage_postgresql_replica/test.py b/tests/integration/test_storage_postgresql_replica/test.py index 487ee2a35cb..6852b8beaf1 100644 --- a/tests/integration/test_storage_postgresql_replica/test.py +++ b/tests/integration/test_storage_postgresql_replica/test.py @@ -31,6 +31,7 @@ def create_postgres_db(cursor, name): def create_postgres_table(cursor, table_name): cursor.execute(postgres_table_template.format(table_name)) + cursor.execute('ALTER TABLE {} REPLICA IDENTITY FULL;'.format(table_name)) def postgresql_replica_check_result(result, check=False, ref_file='test_postgresql_replica.reference'): fpath = p.join(p.dirname(__file__), ref_file) @@ -71,13 +72,14 @@ def test_initial_load_from_snapshot(started_cluster): instance.query("INSERT INTO postgres_database.postgresql_replica SELECT number, number from numbers(50)") instance.query(''' - CREATE TABLE test.postgresql_replica (key UInt64, value UInt64) + CREATE TABLE test.postgresql_replica (key UInt64, value UInt64, _sign Int8 MATERIALIZED 1, _version UInt64 MATERIALIZED 1) ENGINE = PostgreSQLReplica( 'postgres1:5432', 'postgres_database', 'postgresql_replica', 'postgres', 'mysecretpassword') PRIMARY KEY key; ''') - result = instance.query('SELECT * FROM test.postgresql_replica;') + time.sleep(0.2) + result = instance.query('SELECT * FROM test.postgresql_replica ORDER BY key;') cursor.execute('DROP TABLE postgresql_replica;') postgresql_replica_check_result(result, True) @@ -90,7 +92,7 @@ def test_no_connection_at_startup(started_cluster): started_cluster.pause_container('postgres1') instance.query(''' - CREATE TABLE test.postgresql_replica (key UInt64, value UInt64) + CREATE TABLE test.postgresql_replica (key UInt64, value UInt64, _sign Int8 MATERIALIZED 1, _version UInt64 MATERIALIZED 1) ENGINE = PostgreSQLReplica( 'postgres1:5432', 'postgres_database', 'postgresql_replica', 'postgres', 'mysecretpassword') PRIMARY KEY key; @@ -102,9 +104,8 @@ def test_no_connection_at_startup(started_cluster): while int(result) == 0: result = instance.query('SELECT count() FROM 
test.postgresql_replica;') time.sleep(1); - print(result) - result = instance.query('SELECT * FROM test.postgresql_replica;') + result = instance.query('SELECT * FROM test.postgresql_replica ORDER BY key;') cursor.execute('DROP TABLE postgresql_replica;') postgresql_replica_check_result(result, True) @@ -116,24 +117,25 @@ def test_detach_attach_is_ok(started_cluster): instance.query("INSERT INTO postgres_database.postgresql_replica SELECT number, number from numbers(50)") instance.query(''' - CREATE TABLE test.postgresql_replica (key UInt64, value UInt64) + CREATE TABLE test.postgresql_replica (key UInt64, value UInt64, _sign Int8 MATERIALIZED 1, _version UInt64 MATERIALIZED 1) ENGINE = PostgreSQLReplica( 'postgres1:5432', 'postgres_database', 'postgresql_replica', 'postgres', 'mysecretpassword') PRIMARY KEY key; ''') - result = instance.query('SELECT * FROM test.postgresql_replica;') + time.sleep(0.2) + result = instance.query('SELECT * FROM test.postgresql_replica ORDER BY key;') postgresql_replica_check_result(result, True) instance.query('DETACH TABLE test.postgresql_replica') instance.query('ATTACH TABLE test.postgresql_replica') - result = instance.query('SELECT * FROM test.postgresql_replica;') + result = instance.query('SELECT * FROM test.postgresql_replica ORDER BY key;') cursor.execute('DROP TABLE postgresql_replica;') postgresql_replica_check_result(result, True) -def test_replicating_inserts(started_cluster): +def test_replicating_insert_queries(started_cluster): conn = get_postgres_conn(True) cursor = conn.cursor() create_postgres_table(cursor, 'postgresql_replica'); @@ -141,19 +143,20 @@ def test_replicating_inserts(started_cluster): instance.query("INSERT INTO postgres_database.postgresql_replica SELECT number, number from numbers(10)") instance.query(''' - CREATE TABLE test.postgresql_replica (key UInt64, value UInt64) + CREATE TABLE test.postgresql_replica (key UInt64, value UInt64, _sign Int8 MATERIALIZED 1, _version UInt64 MATERIALIZED 1) ENGINE = PostgreSQLReplica( 'postgres1:5432', 'postgres_database', 'postgresql_replica', 'postgres', 'mysecretpassword') PRIMARY KEY key; ''') + time.sleep(0.2) result = instance.query('SELECT count() FROM test.postgresql_replica;') assert(int(result) == 10) instance.query("INSERT INTO postgres_database.postgresql_replica SELECT 10 + number, 10 + number from numbers(10)") instance.query("INSERT INTO postgres_database.postgresql_replica SELECT 20 + number, 20 + number from numbers(10)") - time.sleep(4) + time.sleep(2) result = instance.query('SELECT count() FROM test.postgresql_replica;') assert(int(result) == 30) @@ -161,13 +164,44 @@ def test_replicating_inserts(started_cluster): instance.query("INSERT INTO postgres_database.postgresql_replica SELECT 30 + number, 30 + number from numbers(10)") instance.query("INSERT INTO postgres_database.postgresql_replica SELECT 40 + number, 40 + number from numbers(10)") - time.sleep(4) + time.sleep(2) result = instance.query('SELECT count() FROM test.postgresql_replica;') assert(int(result) == 50) result = instance.query('SELECT * FROM test.postgresql_replica ORDER BY key;') + cursor.execute('DROP TABLE postgresql_replica;') + postgresql_replica_check_result(result, True) + +def test_replicating_delete_queries(started_cluster): + conn = get_postgres_conn(True) + cursor = conn.cursor() + create_postgres_table(cursor, 'postgresql_replica'); + + instance.query("INSERT INTO postgres_database.postgresql_replica SELECT number, number from numbers(50)") + + instance.query(''' + CREATE TABLE 
test.postgresql_replica (key UInt64, value UInt64, _sign Int8 MATERIALIZED 1, _version UInt64 MATERIALIZED 1) + ENGINE = PostgreSQLReplica( + 'postgres1:5432', 'postgres_database', 'postgresql_replica', 'postgres', 'mysecretpassword') + PRIMARY KEY key; + ''') + + time.sleep(0.2) + result = instance.query('SELECT * FROM test.postgresql_replica ORDER BY key;') + postgresql_replica_check_result(result, True) + + instance.query("INSERT INTO postgres_database.postgresql_replica SELECT 50 + number, 50 + number from numbers(50)") + time.sleep(2) + + result = instance.query('SELECT count() FROM test.postgresql_replica;') + assert(int(result) == 100) + + cursor.execute('DELETE FROM postgresql_replica WHERE key > 49;') + time.sleep(2); + + result = instance.query('SELECT * FROM test.postgresql_replica ORDER BY key;') cursor.execute('DROP TABLE postgresql_replica;') postgresql_replica_check_result(result, True) From 8d0c28d4706c4960dba13d056e11aecdabe3d498 Mon Sep 17 00:00:00 2001 From: kssenii Date: Thu, 4 Feb 2021 09:33:31 +0000 Subject: [PATCH 021/931] Replicate update queries --- .../PostgreSQL/PostgreSQLReplicaConsumer.cpp | 28 +++++++++++++++++-- .../PostgreSQL/PostgreSQLReplicaConsumer.h | 2 +- .../PostgreSQL/PostgreSQLReplicaMetadata.cpp | 1 + .../test_storage_postgresql_replica/test.py | 27 ++++++++++++++++++ 4 files changed, 54 insertions(+), 4 deletions(-) diff --git a/src/Storages/PostgreSQL/PostgreSQLReplicaConsumer.cpp b/src/Storages/PostgreSQL/PostgreSQLReplicaConsumer.cpp index 3a91e893392..ae65a39ca1f 100644 --- a/src/Storages/PostgreSQL/PostgreSQLReplicaConsumer.cpp +++ b/src/Storages/PostgreSQL/PostgreSQLReplicaConsumer.cpp @@ -210,7 +210,7 @@ Int64 PostgreSQLReplicaConsumer::readInt64(const char * message, size_t & pos) } -void PostgreSQLReplicaConsumer::readTupleData(const char * message, size_t & pos, PostgreSQLQuery type) +void PostgreSQLReplicaConsumer::readTupleData(const char * message, size_t & pos, PostgreSQLQuery type, bool old_value) { Int16 num_columns = readInt16(message, pos); /// 'n' means nullable, 'u' means TOASTed value, 't' means text formatted data @@ -236,8 +236,6 @@ void PostgreSQLReplicaConsumer::readTupleData(const char * message, size_t & pos { columns[num_columns]->insert(Int8(1)); columns[num_columns + 1]->insert(UInt64(metadata.version())); - //insertValueMaterialized(*columns[num_columns], 1); - //insertValueMaterialized(*columns[num_columns + 1], metadata.version()); break; } case PostgreSQLQuery::DELETE: @@ -248,6 +246,12 @@ void PostgreSQLReplicaConsumer::readTupleData(const char * message, size_t & pos } case PostgreSQLQuery::UPDATE: { + if (old_value) + columns[num_columns]->insert(Int8(-1)); + else + columns[num_columns]->insert(Int8(1)); + + columns[num_columns + 1]->insert(UInt64(metadata.version())); break; } } @@ -319,17 +323,35 @@ void PostgreSQLReplicaConsumer::processReplicationMessage(const char * replicati { Int32 relation_id = readInt32(replication_message, pos); Int8 new_tuple = readInt8(replication_message, pos); + LOG_DEBUG(log, "relationID {}, newTuple {}", relation_id, new_tuple); readTupleData(replication_message, pos, PostgreSQLQuery::INSERT); break; } case 'U': // Update + { + Int32 relation_id = readInt32(replication_message, pos); + Int8 primary_key_or_old_tuple_data = readInt8(replication_message, pos); + + LOG_DEBUG(log, "relationID {}, key {}", relation_id, primary_key_or_old_tuple_data); + + readTupleData(replication_message, pos, PostgreSQLQuery::UPDATE, true); + + if (pos + 1 < size) + { + Int8 new_tuple_data = 
readInt8(replication_message, pos); + LOG_DEBUG(log, "new tuple data {}", new_tuple_data); + readTupleData(replication_message, pos, PostgreSQLQuery::UPDATE); + } + break; + } case 'D': // Delete { Int32 relation_id = readInt32(replication_message, pos); //Int8 index_replica_identity = readInt8(replication_message, pos); Int8 full_replica_identity = readInt8(replication_message, pos); + LOG_DEBUG(log, "relationID {}, full replica identity {}", relation_id, full_replica_identity); //LOG_DEBUG(log, "relationID {}, index replica identity {} full replica identity {}", diff --git a/src/Storages/PostgreSQL/PostgreSQLReplicaConsumer.h b/src/Storages/PostgreSQL/PostgreSQLReplicaConsumer.h index b396dad80a9..0eb6be143b5 100644 --- a/src/Storages/PostgreSQL/PostgreSQLReplicaConsumer.h +++ b/src/Storages/PostgreSQL/PostgreSQLReplicaConsumer.h @@ -82,7 +82,7 @@ private: void advanceLSN(std::shared_ptr ntx); /// Methods to parse replication message data. - void readTupleData(const char * message, size_t & pos, PostgreSQLQuery type); + void readTupleData(const char * message, size_t & pos, PostgreSQLQuery type, bool old_value = false); void readString(const char * message, size_t & pos, size_t size, String & result); Int64 readInt64(const char * message, size_t & pos); Int32 readInt32(const char * message, size_t & pos); diff --git a/src/Storages/PostgreSQL/PostgreSQLReplicaMetadata.cpp b/src/Storages/PostgreSQL/PostgreSQLReplicaMetadata.cpp index 81b258b61c1..f3e1086bc91 100644 --- a/src/Storages/PostgreSQL/PostgreSQLReplicaMetadata.cpp +++ b/src/Storages/PostgreSQL/PostgreSQLReplicaMetadata.cpp @@ -62,6 +62,7 @@ void PostgreSQLReplicaMetadata::commitVersion(const std::function & fina try { + /// TODO: return last actually written lsn and write it to file finalizeStreamFunc(); Poco::File(tmp_metadata_file).renameTo(metadata_file); } diff --git a/tests/integration/test_storage_postgresql_replica/test.py b/tests/integration/test_storage_postgresql_replica/test.py index 6852b8beaf1..b4549e03ced 100644 --- a/tests/integration/test_storage_postgresql_replica/test.py +++ b/tests/integration/test_storage_postgresql_replica/test.py @@ -206,6 +206,33 @@ def test_replicating_delete_queries(started_cluster): postgresql_replica_check_result(result, True) + +def test_replicating_update_queries(started_cluster): + conn = get_postgres_conn(True) + cursor = conn.cursor() + create_postgres_table(cursor, 'postgresql_replica'); + + instance.query("INSERT INTO postgres_database.postgresql_replica SELECT number, number + 10 from numbers(50)") + + instance.query(''' + CREATE TABLE test.postgresql_replica (key UInt64, value UInt64, _sign Int8 MATERIALIZED 1, _version UInt64 MATERIALIZED 1) + ENGINE = PostgreSQLReplica( + 'postgres1:5432', 'postgres_database', 'postgresql_replica', 'postgres', 'mysecretpassword') + PRIMARY KEY key; + ''') + + time.sleep(0.2) + result = instance.query('SELECT count() FROM test.postgresql_replica;') + assert(int(result) == 50) + + cursor.execute('UPDATE postgresql_replica SET value = value - 10;') + time.sleep(2); + + result = instance.query('SELECT * FROM test.postgresql_replica ORDER BY key;') + print(result) + cursor.execute('DROP TABLE postgresql_replica;') + postgresql_replica_check_result(result, True) + + if __name__ == '__main__': cluster.start() input("Cluster created, press any key to destroy...") From d255b63364232dcd3c72fd749f74d468986eb9ff Mon Sep 17 00:00:00 2001 From: kssenii Date: Thu, 4 Feb 2021 17:17:16 +0000 Subject: [PATCH 022/931] Slightly better --- 
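A note on the wire format the consumer in the two patches above decodes: pg_logical_slot_peek_binary_changes returns every pgoutput message as hex-encoded text prefixed with "\x", so the readInt8/readInt16/readInt32/readInt64 helpers each consume two hex characters per byte, big-endian. A self-contained sketch of that decoding, assuming exactly this encoding (the message constant below is illustrative):

#include <cstddef>
#include <cstdint>
#include <iostream>
#include <string>

/// Value of one hex digit (the stream is lower-case; upper-case kept for safety).
static uint8_t hexDigit(char c)
{
    if (c >= '0' && c <= '9') return c - '0';
    if (c >= 'a' && c <= 'f') return c - 'a' + 10;
    return c - 'A' + 10;
}

/// Read `bytes` big-endian bytes, advancing pos by two hex characters per byte.
static uint64_t readBigEndian(const std::string & message, size_t & pos, size_t bytes)
{
    uint64_t result = 0;
    for (size_t i = 0; i < bytes; ++i, pos += 2)
        result = (result << 8) | static_cast<uint64_t>(hexDigit(message[pos]) * 16 + hexDigit(message[pos + 1]));
    return result;
}

int main()
{
    std::string message = "\\x4900004023";   /// 'I' (Insert) then a 4-byte relation id
    size_t pos = 2;                          /// skip the leading "\x"
    char type = static_cast<char>(readBigEndian(message, pos, 1));
    auto relation_id = static_cast<uint32_t>(readBigEndian(message, pos, 4));
    std::cout << type << ", relation id " << relation_id << "\n";   /// prints: I, relation id 16419
}

The real messages continue past the relation id into the tuple data that readTupleData walks, but every fixed-width integer field comes off the stream exactly this way.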
.../compose/docker_compose_postgres.yml | 2 +- .../PostgreSQL/PostgreSQLReplicaConsumer.cpp | 12 ++-- .../PostgreSQL/PostgreSQLReplicaConsumer.h | 4 +- .../PostgreSQL/PostgreSQLReplicaMetadata.cpp | 11 +--- .../PostgreSQL/PostgreSQLReplicaMetadata.h | 2 +- .../PostgreSQLReplicationHandler.cpp | 56 +++++++------------ .../PostgreSQL/PostgreSQLReplicationHandler.h | 7 +-- .../test_storage_postgresql_replica/test.py | 34 ++++++++++- 8 files changed, 71 insertions(+), 57 deletions(-) diff --git a/docker/test/integration/runner/compose/docker_compose_postgres.yml b/docker/test/integration/runner/compose/docker_compose_postgres.yml index 7b3bee8de08..4b39623ec5e 100644 --- a/docker/test/integration/runner/compose/docker_compose_postgres.yml +++ b/docker/test/integration/runner/compose/docker_compose_postgres.yml @@ -7,7 +7,7 @@ services: POSTGRES_PASSWORD: mysecretpassword ports: - 5432:5432 - command: [ "postgres", "-c", "wal_level=logical", "-c", "max_replication_slots=5"] + command: [ "postgres", "-c", "wal_level=logical", "-c", "max_replication_slots=2"] networks: default: aliases: diff --git a/src/Storages/PostgreSQL/PostgreSQLReplicaConsumer.cpp b/src/Storages/PostgreSQL/PostgreSQLReplicaConsumer.cpp index ae65a39ca1f..b4a7344a9cd 100644 --- a/src/Storages/PostgreSQL/PostgreSQLReplicaConsumer.cpp +++ b/src/Storages/PostgreSQL/PostgreSQLReplicaConsumer.cpp @@ -43,7 +43,7 @@ static const auto max_empty_slot_reads = 16; PostgreSQLReplicaConsumer::PostgreSQLReplicaConsumer( std::shared_ptr context_, const std::string & table_name_, - const std::string & conn_str, + PostgreSQLConnectionPtr connection_, const std::string & replication_slot_name_, const std::string & publication_name_, const std::string & metadata_path, @@ -56,14 +56,12 @@ PostgreSQLReplicaConsumer::PostgreSQLReplicaConsumer( , publication_name(publication_name_) , metadata(metadata_path) , table_name(table_name_) - , connection(std::make_shared(conn_str)) + , connection(std::move(connection_)) , current_lsn(start_lsn) , max_block_size(max_block_size_) , nested_storage(nested_storage_) , sample_block(nested_storage->getInMemoryMetadata().getSampleBlock()) { - replication_connection = std::make_shared(fmt::format("{} replication=database", conn_str)); - description.init(sample_block); for (const auto idx : ext::range(0, description.sample_block.columns())) if (description.types[idx].first == ExternalResultDescription::ValueType::vtArray) @@ -94,6 +92,7 @@ void PostgreSQLReplicaConsumer::replicationStream() { size_t count_empty_slot_reads = 0; auto start_time = std::chrono::steady_clock::now(); + metadata.readDataVersion(); LOG_TRACE(log, "Starting replication stream"); @@ -406,9 +405,12 @@ bool PostgreSQLReplicaConsumer::readFromReplicationSlot() columns = description.sample_block.cloneEmptyColumns(); std::shared_ptr tx; bool slot_empty = true; + try { - tx = std::make_shared(*replication_connection->conn()); + tx = std::make_shared(*connection->conn()); + //tx->set_variable("transaction_isolation", "'repeatable read'"); + /// up_to_lsn is set to NULL, up_to_n_changes is set to max_block_size. 
std::string query_str = fmt::format( "select lsn, data FROM pg_logical_slot_peek_binary_changes(" diff --git a/src/Storages/PostgreSQL/PostgreSQLReplicaConsumer.h b/src/Storages/PostgreSQL/PostgreSQLReplicaConsumer.h index 0eb6be143b5..cbe19c4436e 100644 --- a/src/Storages/PostgreSQL/PostgreSQLReplicaConsumer.h +++ b/src/Storages/PostgreSQL/PostgreSQLReplicaConsumer.h @@ -45,7 +45,7 @@ public: PostgreSQLReplicaConsumer( std::shared_ptr context_, const std::string & table_name_, - const std::string & conn_str_, + PostgreSQLConnectionPtr connection_, const std::string & replication_slot_name_, const std::string & publication_name_, const std::string & metadata_path, @@ -96,7 +96,7 @@ private: PostgreSQLReplicaMetadata metadata; const std::string table_name; - PostgreSQLConnectionPtr connection, replication_connection; + PostgreSQLConnectionPtr connection; LSNPosition current_lsn, final_lsn; BackgroundSchedulePool::TaskHolder wal_reader_task; diff --git a/src/Storages/PostgreSQL/PostgreSQLReplicaMetadata.cpp b/src/Storages/PostgreSQL/PostgreSQLReplicaMetadata.cpp index f3e1086bc91..74804d0d93d 100644 --- a/src/Storages/PostgreSQL/PostgreSQLReplicaMetadata.cpp +++ b/src/Storages/PostgreSQL/PostgreSQLReplicaMetadata.cpp @@ -21,7 +21,6 @@ PostgreSQLReplicaMetadata::PostgreSQLReplicaMetadata(const std::string & metadat , tmp_metadata_file(metadata_file_path + ".tmp") , data_version(1) { - readDataVersion(); } @@ -29,16 +28,12 @@ void PostgreSQLReplicaMetadata::readDataVersion() { if (Poco::File(metadata_file).exists()) { - LOG_INFO(&Poco::Logger::get("PostgreSQLReplicaMetadata"), - "PostgreSQLReplica metadata file exists. Starting version {}", data_version); - ReadBufferFromFile in(metadata_file, DBMS_DEFAULT_BUFFER_SIZE); - assertString("\nData version:\t", in); readIntText(data_version, in); - LOG_INFO(&Poco::Logger::get("PostgreSQLReplicaMetadata"), - "PostgreSQLReplica metadata file exists. Starting version {}", data_version); + LOG_DEBUG(&Poco::Logger::get("PostgreSQLReplicaMetadata"), + "Last written version is {}. 
(From metadata file {})", data_version, metadata_file); } } @@ -46,7 +41,7 @@ void PostgreSQLReplicaMetadata::readDataVersion() void PostgreSQLReplicaMetadata::writeDataVersion() { WriteBufferFromFile out(tmp_metadata_file, DBMS_DEFAULT_BUFFER_SIZE, O_WRONLY | O_TRUNC | O_CREAT); - writeString("\nData Version:\t" + toString(data_version), out); + writeString("\nData version:\t" + toString(data_version), out); out.next(); out.sync(); diff --git a/src/Storages/PostgreSQL/PostgreSQLReplicaMetadata.h b/src/Storages/PostgreSQL/PostgreSQLReplicaMetadata.h index 87750c0e007..13a53746c22 100644 --- a/src/Storages/PostgreSQL/PostgreSQLReplicaMetadata.h +++ b/src/Storages/PostgreSQL/PostgreSQLReplicaMetadata.h @@ -11,6 +11,7 @@ public: PostgreSQLReplicaMetadata(const std::string & metadata_file_path); void commitVersion(const std::function & syncTableFunc); + void readDataVersion(); size_t version() { @@ -18,7 +19,6 @@ public: } private: - void readDataVersion(); void writeDataVersion(); const std::string metadata_file; diff --git a/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp b/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp index 53c3c66c504..b845f697d1c 100644 --- a/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp +++ b/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp @@ -39,19 +39,14 @@ PostgreSQLReplicationHandler::PostgreSQLReplicationHandler( , replication_slot(replication_slot_name_) , max_block_size(max_block_size_) , connection(std::make_shared(conn_str)) + , replication_connection(std::make_shared(fmt::format("{} replication=database", connection->conn_str()))) , metadata_path(DatabaseCatalog::instance().getDatabase(database_name)->getMetadataPath() + "/.metadata") { - /// Create a replication connection, through which it is possible to execute only commands from streaming replication protocol - /// interface. Passing 'database' as the value instructs walsender to connect to the database specified in the dbname parameter, - /// which will allow the connection to be used for logical replication from that database. - replication_connection = std::make_shared(fmt::format("{} replication=database", conn_str)); - - /// Non temporary replication slot. Should be the same at restart. if (replication_slot.empty()) replication_slot = fmt::format("{}_{}_ch_replication_slot", database_name, table_name); /// Temporary replication slot is used to acquire a snapshot for initial table synchronization and to determine starting lsn position. - temp_replication_slot = replication_slot + "_temp"; + tmp_replication_slot = replication_slot + "_temp"; startup_task = context->getSchedulePool().createTask("PostgreSQLReplicaStartup", [this]{ waitConnectionAndStart(); }); startup_task->deactivate(); @@ -69,8 +64,7 @@ void PostgreSQLReplicationHandler::waitConnectionAndStart() { try { - /// Used commands require a specific transaction isolation mode. - replication_connection->conn()->set_variable("default_transaction_isolation", "'repeatable read'"); + connection->conn(); } catch (pqxx::broken_connection const & pqxx_error) { @@ -133,7 +127,10 @@ void PostgreSQLReplicationHandler::createPublication() void PostgreSQLReplicationHandler::startReplication() { - tx = std::make_shared(*connection->conn()); + /// used commands require a specific transaction isolation mode. 
+ replication_connection->conn()->set_variable("default_transaction_isolation", "'repeatable read'"); + + tx = std::make_shared(*replication_connection->conn()); if (publication_name.empty()) { publication_name = fmt::format("{}_{}_ch_publication", database_name, table_name); @@ -154,8 +151,8 @@ void PostgreSQLReplicationHandler::startReplication() auto ntx = std::make_shared(*replication_connection->conn()); /// Normally temporary replication slot should not exist. - if (isReplicationSlotExist(ntx, temp_replication_slot)) - dropReplicationSlot(ntx, temp_replication_slot, true); + if (isReplicationSlotExist(ntx, tmp_replication_slot)) + dropReplicationSlot(ntx, tmp_replication_slot); std::string snapshot_name; LSNPosition start_lsn; @@ -168,16 +165,18 @@ void PostgreSQLReplicationHandler::startReplication() /// Initial table synchronization from created snapshot loadFromSnapshot(snapshot_name); /// Do not need this replication slot anymore (snapshot loaded and start lsn determined - dropReplicationSlot(ntx, temp_replication_slot, true); + dropReplicationSlot(ntx, tmp_replication_slot); /// Non-temporary replication slot createReplicationSlot(ntx); } + ntx->commit(); + LOG_DEBUG(&Poco::Logger::get("StoragePostgreSQLMetadata"), "Creating replication consumer"); consumer = std::make_shared( context, table_name, - connection->conn_str(), + std::move(connection), replication_slot, publication_name, metadata_path, @@ -186,7 +185,6 @@ void PostgreSQLReplicationHandler::startReplication() nested_storage); LOG_DEBUG(&Poco::Logger::get("StoragePostgreSQLMetadata"), "Successfully created replication consumer"); - ntx->commit(); consumer->startSynchronization(); } @@ -254,18 +252,18 @@ bool PostgreSQLReplicationHandler::isReplicationSlotExist(NontransactionPtr ntx, void PostgreSQLReplicationHandler::createTempReplicationSlot(NontransactionPtr ntx, LSNPosition & start_lsn, std::string & snapshot_name) { - std::string query_str = fmt::format("CREATE_REPLICATION_SLOT {} TEMPORARY LOGICAL pgoutput EXPORT_SNAPSHOT", temp_replication_slot); + std::string query_str = fmt::format("CREATE_REPLICATION_SLOT {} TEMPORARY LOGICAL pgoutput EXPORT_SNAPSHOT", tmp_replication_slot); try { pqxx::result result{ntx->exec(query_str)}; start_lsn.lsn = result[0][1].as(); snapshot_name = result[0][2].as(); LOG_TRACE(log, "Created temporary replication slot: {}, start lsn: {}, snapshot: {}", - temp_replication_slot, start_lsn.lsn, snapshot_name); + tmp_replication_slot, start_lsn.lsn, snapshot_name); } catch (Exception & e) { - e.addMessage("while creating PostgreSQL replication slot {}", temp_replication_slot); + e.addMessage("while creating PostgreSQL replication slot {}", tmp_replication_slot); throw; } } @@ -287,21 +285,10 @@ void PostgreSQLReplicationHandler::createReplicationSlot(NontransactionPtr ntx) } -void PostgreSQLReplicationHandler::dropReplicationSlot(NontransactionPtr ntx, std::string & slot_name, bool use_replication_api) +void PostgreSQLReplicationHandler::dropReplicationSlot(NontransactionPtr ntx, std::string & slot_name) { - if (use_replication_api) - { - std::string query_str = fmt::format("DROP_REPLICATION_SLOT {}", slot_name); - ntx->exec(query_str); - } - else - { - pqxx::work work(*connection->conn()); - std::string query_str = fmt::format("SELECT pg_drop_replication_slot('{}')", slot_name); - work.exec(query_str); - work.commit(); - } - + std::string query_str = fmt::format("DROP_REPLICATION_SLOT {}", slot_name); + ntx->exec(query_str); LOG_TRACE(log, "Replication slot {} is dropped", slot_name); } @@ 
-319,16 +306,15 @@ void PostgreSQLReplicationHandler::dropPublication(NontransactionPtr ntx) /// Only used when MaterializePostgreSQL table is dropped. void PostgreSQLReplicationHandler::shutdownFinal() { + /// TODO: check: if metadata file does not exist and replication slot does exist, then need to drop it at startup if (Poco::File(metadata_path).exists()) Poco::File(metadata_path).remove(); - /// TODO: another transaction might be active on this same connection. Need to make sure it does not happen. - replication_connection->conn()->close(); auto ntx = std::make_shared(*replication_connection->conn()); dropPublication(ntx); if (isReplicationSlotExist(ntx, replication_slot)) - dropReplicationSlot(ntx, replication_slot, false); + dropReplicationSlot(ntx, replication_slot); ntx->commit(); } diff --git a/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.h b/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.h index afc8a4bd213..594f57e0dc7 100644 --- a/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.h +++ b/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.h @@ -42,7 +42,7 @@ private: bool isReplicationSlotExist(NontransactionPtr ntx, std::string & slot_name); void createTempReplicationSlot(NontransactionPtr ntx, LSNPosition & start_lsn, std::string & snapshot_name); void createReplicationSlot(NontransactionPtr ntx); - void dropReplicationSlot(NontransactionPtr tx, std::string & slot_name, bool use_replication_api); + void dropReplicationSlot(NontransactionPtr tx, std::string & slot_name); void dropPublication(NontransactionPtr ntx); void startReplication(); @@ -55,11 +55,10 @@ private: const std::string database_name, table_name; std::string publication_name, replication_slot; - std::string temp_replication_slot; + std::string tmp_replication_slot; const size_t max_block_size; - PostgreSQLConnectionPtr connection; - PostgreSQLConnectionPtr replication_connection; + PostgreSQLConnectionPtr connection, replication_connection; std::shared_ptr tx; const String metadata_path; diff --git a/tests/integration/test_storage_postgresql_replica/test.py b/tests/integration/test_storage_postgresql_replica/test.py index b4549e03ced..44c637cc165 100644 --- a/tests/integration/test_storage_postgresql_replica/test.py +++ b/tests/integration/test_storage_postgresql_replica/test.py @@ -228,7 +228,39 @@ def test_replicating_update_queries(started_cluster): time.sleep(2); result = instance.query('SELECT * FROM test.postgresql_replica ORDER BY key;') - print(result) + cursor.execute('DROP TABLE postgresql_replica;') + postgresql_replica_check_result(result, True) + + +def test_resume_from_written_version(started_cluster): + conn = get_postgres_conn(True) + cursor = conn.cursor() + create_postgres_table(cursor, 'postgresql_replica'); + instance.query("INSERT INTO postgres_database.postgresql_replica SELECT number, number + 10 from numbers(50)") + + instance.query(''' + CREATE TABLE test.postgresql_replica (key UInt64, value UInt64, _sign Int8 MATERIALIZED 1, _version UInt64 MATERIALIZED 1) + ENGINE = PostgreSQLReplica( + 'postgres1:5432', 'postgres_database', 'postgresql_replica', 'postgres', 'mysecretpassword') + PRIMARY KEY key; + ''') + + instance.query("INSERT INTO postgres_database.postgresql_replica SELECT 50 + number, 50 + number from numbers(50)") + time.sleep(2) + + result = instance.query('SELECT count() FROM test.postgresql_replica;') + assert(int(result) == 100) + + instance.query('DETACH TABLE test.postgresql_replica') + + cursor.execute('DELETE FROM postgresql_replica WHERE key > 49;') 
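+    # Together with the DELETE above, this checks that changes made while the table is detached are replayed after ATTACH from the last written version, rather than by reloading the snapshot.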
+ cursor.execute('UPDATE postgresql_replica SET value = value - 10;') + + instance.query('ATTACH TABLE test.postgresql_replica') + + time.sleep(3) + + result = instance.query('SELECT * FROM test.postgresql_replica ORDER BY key;') cursor.execute('DROP TABLE postgresql_replica;') postgresql_replica_check_result(result, True) From 02e19f942226184cdfa7d7827c9b8bef995253e4 Mon Sep 17 00:00:00 2001 From: kssenii Date: Thu, 4 Feb 2021 21:05:43 +0000 Subject: [PATCH 023/931] Better --- .../PostgreSQL/PostgreSQLReplicaConsumer.cpp | 30 +++---- .../PostgreSQL/PostgreSQLReplicaConsumer.h | 6 +- .../PostgreSQL/PostgreSQLReplicaMetadata.cpp | 53 +++++++++--- .../PostgreSQL/PostgreSQLReplicaMetadata.h | 12 +-- .../PostgreSQLReplicationHandler.cpp | 46 ++++++---- .../PostgreSQL/PostgreSQLReplicationHandler.h | 14 ++- .../test_storage_postgresql_replica/test.py | 85 +++++++++++++++++-- 7 files changed, 176 insertions(+), 70 deletions(-) diff --git a/src/Storages/PostgreSQL/PostgreSQLReplicaConsumer.cpp b/src/Storages/PostgreSQL/PostgreSQLReplicaConsumer.cpp index b4a7344a9cd..e8e73cd2d52 100644 --- a/src/Storages/PostgreSQL/PostgreSQLReplicaConsumer.cpp +++ b/src/Storages/PostgreSQL/PostgreSQLReplicaConsumer.cpp @@ -67,8 +67,6 @@ PostgreSQLReplicaConsumer::PostgreSQLReplicaConsumer( if (description.types[idx].first == ExternalResultDescription::ValueType::vtArray) preparePostgreSQLArrayInfo(array_info, idx, description.sample_block.getByPosition(idx).type); - columns = description.sample_block.cloneEmptyColumns(); - wal_reader_task = context->getSchedulePool().createTask("PostgreSQLReplicaWALReader", [this]{ replicationStream(); }); wal_reader_task->deactivate(); @@ -92,7 +90,7 @@ void PostgreSQLReplicaConsumer::replicationStream() { size_t count_empty_slot_reads = 0; auto start_time = std::chrono::steady_clock::now(); - metadata.readDataVersion(); + metadata.readMetadata(); LOG_TRACE(log, "Starting replication stream"); @@ -384,18 +382,19 @@ void PostgreSQLReplicaConsumer::syncIntoTable(Block & block) } -void PostgreSQLReplicaConsumer::advanceLSN(std::shared_ptr ntx) +String PostgreSQLReplicaConsumer::advanceLSN(std::shared_ptr ntx) { LOG_TRACE(log, "CURRENT LSN FROM TO {}", final_lsn.lsn); - std::string query_str = fmt::format("SELECT pg_replication_slot_advance('{}', '{}')", replication_slot_name, final_lsn.lsn); - pqxx::result result{ntx->exec(query_str)}; - if (!result.empty()) - { - std::string s1 = result[0].size() > 0 && !result[0][0].is_null() ? result[0][0].as() : "NULL"; - std::string s2 = result[0].size() > 1 && !result[0][1].is_null() ? result[0][1].as() : "NULL"; - LOG_TRACE(log, "ADVANCE LSN: {} and {}", s1, s2); - } + std::string query_str = fmt::format("SELECT end_lsn FROM pg_replication_slot_advance('{}', '{}')", replication_slot_name, final_lsn.lsn); + pqxx::result result{ntx->exec(query_str)}; + + ntx->commit(); + + if (!result.empty()) + return result[0][0].as(); + + return final_lsn.lsn; } @@ -454,13 +453,10 @@ bool PostgreSQLReplicaConsumer::readFromReplicationSlot() if (result_rows.rows()) { assert(!slot_empty); - metadata.commitVersion([&]() + metadata.commitMetadata(final_lsn.lsn, [&]() { syncIntoTable(result_rows); - advanceLSN(tx); - - /// TODO: Can transaction still be active if got exception before commiting it? It must be closed if connection is ok. 
- tx->commit(); + return advanceLSN(tx); }); } diff --git a/src/Storages/PostgreSQL/PostgreSQLReplicaConsumer.h b/src/Storages/PostgreSQL/PostgreSQLReplicaConsumer.h index cbe19c4436e..efb9dabc121 100644 --- a/src/Storages/PostgreSQL/PostgreSQLReplicaConsumer.h +++ b/src/Storages/PostgreSQL/PostgreSQLReplicaConsumer.h @@ -22,9 +22,6 @@ struct LSNPosition uint64_t upper_half, lower_half, result; std::sscanf(lsn.data(), "%lX/%lX", &upper_half, &lower_half); result = (upper_half << 32) + lower_half; - //LOG_DEBUG(&Poco::Logger::get("LSNParsing"), - // "Created replication slot. upper half: {}, lower_half: {}, start lsn: {}", - // upper_half, lower_half, result); return result; } @@ -32,7 +29,6 @@ { char result[16]; std::snprintf(result, sizeof(result), "%lX/%lX", (lsn_value >> 32), lsn_value & 0xFFFFFFFF); - //assert(lsn_value == result.getValue()); std::string ans = result; return ans; } @@ -79,7 +75,7 @@ private: void insertDefaultValue(size_t column_idx); void syncIntoTable(Block & block); - void advanceLSN(std::shared_ptr ntx); + String advanceLSN(std::shared_ptr ntx); /// Methods to parse replication message data. void readTupleData(const char * message, size_t & pos, PostgreSQLQuery type, bool old_value = false); diff --git a/src/Storages/PostgreSQL/PostgreSQLReplicaMetadata.cpp b/src/Storages/PostgreSQL/PostgreSQLReplicaMetadata.cpp index 74804d0d93d..3188f271f0a 100644 --- a/src/Storages/PostgreSQL/PostgreSQLReplicaMetadata.cpp +++ b/src/Storages/PostgreSQL/PostgreSQLReplicaMetadata.cpp @@ -19,29 +19,51 @@ namespace ErrorCodes PostgreSQLReplicaMetadata::PostgreSQLReplicaMetadata(const std::string & metadata_file_path) : metadata_file(metadata_file_path) , tmp_metadata_file(metadata_file_path + ".tmp") - , data_version(1) + , last_version(1) { } -void PostgreSQLReplicaMetadata::readDataVersion() +void PostgreSQLReplicaMetadata::readMetadata() { if (Poco::File(metadata_file).exists()) { ReadBufferFromFile in(metadata_file, DBMS_DEFAULT_BUFFER_SIZE); - assertString("\nData version:\t", in); - readIntText(data_version, in); + + assertString("\nLast version:\t", in); + readIntText(last_version, in); + + assertString("\nLast LSN:\t", in); + readString(last_lsn, in); + + if (checkString("\nActual LSN:\t", in)) + { + std::string actual_lsn; + readString(actual_lsn, in); + + if (!actual_lsn.empty()) + last_lsn = actual_lsn; + } LOG_DEBUG(&Poco::Logger::get("PostgreSQLReplicaMetadata"), - "Last written version is {}. (From metadata file {})", data_version, metadata_file); + "Last written version is {}. (From metadata file {})", last_version, metadata_file); } } -void PostgreSQLReplicaMetadata::writeDataVersion() +void PostgreSQLReplicaMetadata::writeMetadata(bool append_metadata) { WriteBufferFromFile out(tmp_metadata_file, DBMS_DEFAULT_BUFFER_SIZE, O_WRONLY | O_TRUNC | O_CREAT); - writeString("\nData version:\t" + toString(data_version), out); + + if (!append_metadata) + { + writeString("\nLast version:\t" + toString(last_version), out); + writeString("\nLast LSN:\t" + toString(last_lsn), out); + } + else + { + writeString("\nActual LSN:\t" + toString(last_lsn), out); + } out.next(); out.sync(); @@ -51,14 +73,15 @@ /// While data is received, version is updated. Before table sync, write last version to tmp file. /// Then sync data to table and rename tmp to non-tmp.
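+/// The rename is what makes the commit atomic: if the sync fails midway, the tmp file is removed and the previously committed metadata file stays intact.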
-void PostgreSQLReplicaMetadata::commitVersion(const std::function & finalizeStreamFunc) +void PostgreSQLReplicaMetadata::commitMetadata(std::string & lsn, const std::function & finalizeStreamFunc) { - writeDataVersion(); + std::string actual_lsn; + last_lsn = lsn; + writeMetadata(); try { - /// TODO: return last actially written lsn and write it to file - finalizeStreamFunc(); + actual_lsn = finalizeStreamFunc(); Poco::File(tmp_metadata_file).renameTo(metadata_file); } catch (...) @@ -66,6 +89,14 @@ void PostgreSQLReplicaMetadata::commitVersion(const std::function & fina Poco::File(tmp_metadata_file).remove(); throw; } + + /// This is not supposed to happen + if (actual_lsn != last_lsn) + { + writeMetadata(true); + LOG_WARNING(&Poco::Logger::get("PostgreSQLReplicaMetadata"), + "Last written LSN {} is not equal to actual LSN {}", last_lsn, actual_lsn); + } } } diff --git a/src/Storages/PostgreSQL/PostgreSQLReplicaMetadata.h b/src/Storages/PostgreSQL/PostgreSQLReplicaMetadata.h index 13a53746c22..f93b74c8c65 100644 --- a/src/Storages/PostgreSQL/PostgreSQLReplicaMetadata.h +++ b/src/Storages/PostgreSQL/PostgreSQLReplicaMetadata.h @@ -10,22 +10,22 @@ class PostgreSQLReplicaMetadata public: PostgreSQLReplicaMetadata(const std::string & metadata_file_path); - void commitVersion(const std::function & syncTableFunc); - void readDataVersion(); + void commitMetadata(std::string & lsn, const std::function & syncTableFunc); + void readMetadata(); size_t version() { - return data_version++; + return last_version++; } private: - void writeDataVersion(); + void writeMetadata(bool append_metadata = false); const std::string metadata_file; const std::string tmp_metadata_file; - size_t data_version; - + uint64_t last_version; + std::string last_lsn; }; } diff --git a/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp b/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp index b845f697d1c..1726185ad8a 100644 --- a/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp +++ b/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp @@ -35,6 +35,7 @@ PostgreSQLReplicationHandler::PostgreSQLReplicationHandler( , context(context_) , database_name(database_name_) , table_name(table_name_) + , connection_str(conn_str) , publication_name(publication_name_) , replication_slot(replication_slot_name_) , max_block_size(max_block_size_) @@ -70,6 +71,7 @@ void PostgreSQLReplicationHandler::waitConnectionAndStart() { LOG_ERROR(log, "Unable to set up connection for table {}.{}. Reconnection attempt continues. Error message: {}", database_name, table_name, pqxx_error.what()); + startup_task->scheduleAfter(reschedule_ms); } catch (Exception & e) @@ -78,7 +80,6 @@ void PostgreSQLReplicationHandler::waitConnectionAndStart() throw; } - LOG_DEBUG(log, "PostgreSQLReplica starting replication proccess"); startReplication(); } @@ -90,7 +91,7 @@ void PostgreSQLReplicationHandler::shutdown() } -bool PostgreSQLReplicationHandler::isPublicationExist() +bool PostgreSQLReplicationHandler::isPublicationExist(std::shared_ptr tx) { std::string query_str = fmt::format("SELECT exists (SELECT 1 FROM pg_publication WHERE pubname = '{}')", publication_name); pqxx::result result{tx->exec(query_str)}; @@ -105,7 +106,7 @@ bool PostgreSQLReplicationHandler::isPublicationExist() } -void PostgreSQLReplicationHandler::createPublication() +void PostgreSQLReplicationHandler::createPublication(std::shared_ptr tx) { /// 'ONLY' means just a table, without descendants. 
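+/// For example (names illustrative): for table "t" in database "db" the default generated statement is CREATE PUBLICATION db_t_ch_publication FOR TABLE ONLY t.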
std::string query_str = fmt::format("CREATE PUBLICATION {} FOR TABLE ONLY {}", publication_name, table_name); @@ -119,28 +120,29 @@ throw Exception(fmt::format("PostgreSQL table {}.{} does not exist", database_name, table_name), ErrorCodes::UNKNOWN_TABLE); } - /// TODO: check replica identity + /// TODO: check replica identity? /// Requires changed replica identity for included table to be able to receive old values of updated rows. - /// (ALTER TABLE table_name REPLICA IDENTITY FULL ?) } void PostgreSQLReplicationHandler::startReplication() { + LOG_DEBUG(log, "PostgreSQLReplica starting replication process"); + /// Used commands require a specific transaction isolation mode. replication_connection->conn()->set_variable("default_transaction_isolation", "'repeatable read'"); - tx = std::make_shared(*replication_connection->conn()); + auto tx = std::make_shared(*replication_connection->conn()); if (publication_name.empty()) { publication_name = fmt::format("{}_{}_ch_publication", database_name, table_name); /// Publication defines what tables are included into replication stream. Should be deleted only if MaterializePostgreSQL /// table is dropped. - if (!isPublicationExist()) - createPublication(); + if (!isPublicationExist(tx)) + createPublication(tx); } - else if (!isPublicationExist()) + else if (!isPublicationExist(tx)) { throw Exception( ErrorCodes::LOGICAL_ERROR, @@ -157,8 +159,7 @@ std::string snapshot_name; LSNPosition start_lsn; - /// Non temporary replication slot should be deleted with drop table only and created only once, reused after detach. - if (!isReplicationSlotExist(ntx, replication_slot)) + auto initial_sync = [&]() { /// Temporary replication slot createTempReplicationSlot(ntx, start_lsn, snapshot_name); @@ -168,6 +169,18 @@ dropReplicationSlot(ntx, tmp_replication_slot); /// Non-temporary replication slot createReplicationSlot(ntx); + }; + + /// Non temporary replication slot should be deleted with drop table only and created only once, reused after detach. + if (!isReplicationSlotExist(ntx, replication_slot)) + { + initial_sync(); + } + else if (!Poco::File(metadata_path).exists()) + { + /// If non-temporary slot exists and metadata file (where last synced version is written) does not exist, it is not normal. + dropReplicationSlot(ntx, replication_slot); + initial_sync(); } ntx->commit(); @@ -187,6 +200,9 @@ LOG_DEBUG(&Poco::Logger::get("StoragePostgreSQLMetadata"), "Successfully created replication consumer"); consumer->startSynchronization(); + + /// Takes time to close + replication_connection->conn()->close(); } @@ -287,7 +303,7 @@ void PostgreSQLReplicationHandler::createReplicationSlot(NontransactionPtr ntx) void PostgreSQLReplicationHandler::dropReplicationSlot(NontransactionPtr ntx, std::string & slot_name) { - std::string query_str = fmt::format("DROP_REPLICATION_SLOT {}", slot_name); + std::string query_str = fmt::format("SELECT pg_drop_replication_slot('{}')", slot_name); ntx->exec(query_str); LOG_TRACE(log, "Replication slot {} is dropped", slot_name); } @@ -303,14 +319,13 @@ void PostgreSQLReplicationHandler::dropPublication(NontransactionPtr ntx) } -/// Only used when MaterializePostgreSQL table is dropped.
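+/// Final cleanup: removes the metadata file and drops both the publication and the replication slot on the PostgreSQL side.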
void PostgreSQLReplicationHandler::shutdownFinal() { - /// TODO: check: if metadata file does not exist and replication slot does exist, then need to drop it at startup if (Poco::File(metadata_path).exists()) Poco::File(metadata_path).remove(); - auto ntx = std::make_shared(*replication_connection->conn()); + connection = std::make_shared(connection_str); + auto ntx = std::make_shared(*connection->conn()); dropPublication(ntx); if (isReplicationSlotExist(ntx, replication_slot)) @@ -319,5 +334,4 @@ void PostgreSQLReplicationHandler::shutdownFinal() ntx->commit(); } - } diff --git a/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.h b/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.h index 594f57e0dc7..9d2fcf9f042 100644 --- a/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.h +++ b/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.h @@ -28,16 +28,15 @@ public: const std::string & replication_slot_name_, const size_t max_block_size_); - void startup(StoragePtr storage_); + void startup(StoragePtr storage); void shutdown(); void shutdownFinal(); private: using NontransactionPtr = std::shared_ptr; - void waitConnectionAndStart(); - bool isPublicationExist(); - void createPublication(); + bool isPublicationExist(std::shared_ptr tx); + void createPublication(std::shared_ptr tx); bool isReplicationSlotExist(NontransactionPtr ntx, std::string & slot_name); void createTempReplicationSlot(NontransactionPtr ntx, LSNPosition & start_lsn, std::string & snapshot_name); @@ -45,22 +44,19 @@ private: void dropReplicationSlot(NontransactionPtr tx, std::string & slot_name); void dropPublication(NontransactionPtr ntx); + void waitConnectionAndStart(); void startReplication(); void loadFromSnapshot(std::string & snapshot_name); - Context createQueryContext(); - void getTableOutput(const Context & query_context); Poco::Logger * log; std::shared_ptr context; - const std::string database_name, table_name; + const std::string database_name, table_name, connection_str; std::string publication_name, replication_slot; std::string tmp_replication_slot; const size_t max_block_size; PostgreSQLConnectionPtr connection, replication_connection; - std::shared_ptr tx; - const String metadata_path; BackgroundSchedulePool::TaskHolder startup_task; std::shared_ptr consumer; diff --git a/tests/integration/test_storage_postgresql_replica/test.py b/tests/integration/test_storage_postgresql_replica/test.py index 44c637cc165..5f91fd2f7b4 100644 --- a/tests/integration/test_storage_postgresql_replica/test.py +++ b/tests/integration/test_storage_postgresql_replica/test.py @@ -78,12 +78,13 @@ def test_initial_load_from_snapshot(started_cluster): PRIMARY KEY key; ''') - time.sleep(0.2) + time.sleep(1) result = instance.query('SELECT * FROM test.postgresql_replica ORDER BY key;') cursor.execute('DROP TABLE postgresql_replica;') postgresql_replica_check_result(result, True) +@pytest.mark.timeout(180) def test_no_connection_at_startup(started_cluster): conn = get_postgres_conn(True) cursor = conn.cursor() @@ -102,8 +103,8 @@ def test_no_connection_at_startup(started_cluster): result = instance.query('SELECT count() FROM test.postgresql_replica;') while int(result) == 0: + time.sleep(0.5); result = instance.query('SELECT count() FROM test.postgresql_replica;') - time.sleep(1); result = instance.query('SELECT * FROM test.postgresql_replica ORDER BY key;') cursor.execute('DROP TABLE postgresql_replica;') @@ -123,7 +124,11 @@ def test_detach_attach_is_ok(started_cluster): PRIMARY KEY key; ''') - time.sleep(0.2) + result = 
instance.query('SELECT count() FROM test.postgresql_replica;') + while (int(result) == 0): + time.sleep(0.2) + result = instance.query('SELECT count() FROM test.postgresql_replica;') + result = instance.query('SELECT * FROM test.postgresql_replica ORDER BY key;') postgresql_replica_check_result(result, True) @@ -149,7 +154,11 @@ def test_replicating_insert_queries(started_cluster): PRIMARY KEY key; ''') - time.sleep(0.2) + result = instance.query('SELECT count() FROM test.postgresql_replica;') + while (int(result) == 0): + time.sleep(0.2) + result = instance.query('SELECT count() FROM test.postgresql_replica;') + result = instance.query('SELECT count() FROM test.postgresql_replica;') assert(int(result) == 10) @@ -188,7 +197,11 @@ def test_replicating_delete_queries(started_cluster): PRIMARY KEY key; ''') - time.sleep(0.2) + result = instance.query('SELECT count() FROM test.postgresql_replica;') + while (int(result) == 0): + time.sleep(0.2) + result = instance.query('SELECT count() FROM test.postgresql_replica;') + result = instance.query('SELECT * FROM test.postgresql_replica ORDER BY key;') postgresql_replica_check_result(result, True) @@ -220,8 +233,11 @@ def test_replicating_update_queries(started_cluster): PRIMARY KEY key; ''') - time.sleep(0.2) result = instance.query('SELECT count() FROM test.postgresql_replica;') + while (int(result) == 0): + time.sleep(0.2) + result = instance.query('SELECT count() FROM test.postgresql_replica;') + assert(int(result) == 50) cursor.execute('UPDATE postgresql_replica SET value = value - 10;') @@ -245,6 +261,13 @@ def test_resume_from_written_version(started_cluster): PRIMARY KEY key; ''') + result = instance.query('SELECT count() FROM test.postgresql_replica;') + while (int(result) == 0): + time.sleep(0.2) + result = instance.query('SELECT count() FROM test.postgresql_replica;') + + assert(int(result) == 50) + instance.query("INSERT INTO postgres_database.postgresql_replica SELECT 50 + number, 50 + number from numbers(50)") time.sleep(2) @@ -265,6 +288,56 @@ postgresql_replica_check_result(result, True) +@pytest.mark.timeout(180) +def test_many_replication_messages(started_cluster): + conn = get_postgres_conn(True) + cursor = conn.cursor() + create_postgres_table(cursor, 'postgresql_replica'); + instance.query("INSERT INTO postgres_database.postgresql_replica SELECT number, number from numbers(100000)") + + instance.query(''' + CREATE TABLE test.postgresql_replica (key UInt64, value UInt64, _sign Int8 MATERIALIZED 1, _version UInt64 MATERIALIZED 1) + ENGINE = PostgreSQLReplica( + 'postgres1:5432', 'postgres_database', 'postgresql_replica', 'postgres', 'mysecretpassword') + PRIMARY KEY key; + ''') + + result = instance.query('SELECT count() FROM test.postgresql_replica;') + while (int(result) != 100000): + time.sleep(0.2) + result = instance.query('SELECT count() FROM test.postgresql_replica;') + + instance.query("INSERT INTO postgres_database.postgresql_replica SELECT number, number from numbers(100000, 100000)") + + result = instance.query('SELECT count() FROM test.postgresql_replica;') + while (int(result) != 200000): + result = instance.query('SELECT count() FROM test.postgresql_replica;') + time.sleep(1) + + result = instance.query('SELECT key FROM test.postgresql_replica ORDER BY key;') + expected = instance.query("SELECT number from numbers(200000)") + assert(result == expected) + + cursor.execute('UPDATE postgresql_replica SET value = key + 1 WHERE key < 100000;') + + result =
instance.query('SELECT key FROM test.postgresql_replica WHERE value = key + 1 ORDER BY key;') + expected = instance.query("SELECT number from numbers(100000)") + + while (result != expected): + result = instance.query('SELECT key FROM test.postgresql_replica WHERE value = key + 1 ORDER BY key;') + time.sleep(1) + + cursor.execute('DELETE FROM postgresql_replica WHERE key % 2 = 1;') + cursor.execute('DELETE FROM postgresql_replica WHERE key != value;') + + result = instance.query('SELECT count() FROM (SELECT * FROM test.postgresql_replica);') + while (int(result) != 50000): + result = instance.query('SELECT count() FROM (SELECT * FROM test.postgresql_replica);') + time.sleep(1) + + cursor.execute('DROP TABLE postgresql_replica;') + + if __name__ == '__main__': cluster.start() input("Cluster created, press any key to destroy...") From d87bfef890498b3d8c5c6c4770a981ef7540a990 Mon Sep 17 00:00:00 2001 From: kssenii Date: Sat, 6 Feb 2021 00:17:54 +0000 Subject: [PATCH 024/931] Read up to max_block_size rows --- src/Core/Settings.h | 1 - .../PostgreSQL/PostgreSQLReplicaConsumer.cpp | 11 +- .../PostgreSQLReplicationSettings.h | 1 + .../PostgreSQL/StoragePostgreSQLReplica.cpp | 5 +- .../test_storage_postgresql_replica/test.py | 131 +++++++++++++----- 5 files changed, 105 insertions(+), 44 deletions(-) diff --git a/src/Core/Settings.h b/src/Core/Settings.h index 8c830f2dac1..9bb9ad30f15 100644 --- a/src/Core/Settings.h +++ b/src/Core/Settings.h @@ -60,7 +60,6 @@ class IColumn; M(Milliseconds, replace_running_query_max_wait_ms, 5000, "The wait time for running query with the same query_id to finish when setting 'replace_running_query' is active.", 0) \ M(Milliseconds, kafka_max_wait_ms, 5000, "The wait time for reading from Kafka before retry.", 0) \ M(Milliseconds, rabbitmq_max_wait_ms, 5000, "The wait time for reading from RabbitMQ before retry.", 0) \ - M(UInt64, postgresql_replica_max_rows_to_insert, 65536, "Maximum number of rows in PostgreSQL batch insertion in PostgreSQLReplica storage engine", 0) \ M(UInt64, poll_interval, DBMS_DEFAULT_POLL_INTERVAL, "Block at the query wait loop on the server for the specified number of seconds.", 0) \ M(UInt64, idle_connection_timeout, 3600, "Close idle TCP connections after specified number of seconds.", 0) \ M(UInt64, distributed_connections_pool_size, DBMS_DEFAULT_DISTRIBUTED_CONNECTIONS_POOL_SIZE, "Maximum number of connections with one remote server in the pool.", 0) \ diff --git a/src/Storages/PostgreSQL/PostgreSQLReplicaConsumer.cpp b/src/Storages/PostgreSQL/PostgreSQLReplicaConsumer.cpp index e8e73cd2d52..3a81c4bc887 100644 --- a/src/Storages/PostgreSQL/PostgreSQLReplicaConsumer.cpp +++ b/src/Storages/PostgreSQL/PostgreSQLReplicaConsumer.cpp @@ -408,15 +408,16 @@ bool PostgreSQLReplicaConsumer::readFromReplicationSlot() try { tx = std::make_shared(*connection->conn()); - //tx->set_variable("transaction_isolation", "'repeatable read'"); - /// up_to_lsn is set to NULL, up_to_n_changes is set to max_block_size. + /// Read up to max_block_size rows changes (upto_n_changes parameter). It return larger number as the limit + /// is checked only after each transaction block. + /// Returns less than max_block_changes, if reached end of wal. Sync to table in this case. 
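+    /// Note: the "peek" variant does not consume records from the slot; the slot position is only moved forward later, in advanceLSN(), once the block has been written to the table.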
std::string query_str = fmt::format( "select lsn, data FROM pg_logical_slot_peek_binary_changes(" - "'{}', NULL, NULL, 'publication_names', '{}', 'proto_version', '1')", - replication_slot_name, publication_name); + "'{}', NULL, {}, 'publication_names', '{}', 'proto_version', '1')", + replication_slot_name, max_block_size, publication_name); + pqxx::stream_from stream(*tx, pqxx::from_query, std::string_view(query_str)); - LOG_DEBUG(log, "Starting replication stream"); while (true) { diff --git a/src/Storages/PostgreSQL/PostgreSQLReplicationSettings.h b/src/Storages/PostgreSQL/PostgreSQLReplicationSettings.h index 1c3ca6ff73d..8db4c3b3bb7 100644 --- a/src/Storages/PostgreSQL/PostgreSQLReplicationSettings.h +++ b/src/Storages/PostgreSQL/PostgreSQLReplicationSettings.h @@ -10,6 +10,7 @@ namespace DB #define LIST_OF_POSTGRESQL_REPLICATION_SETTINGS(M) \ M(String, postgresql_replication_slot_name, "", "PostgreSQL replication slot name.", 0) \ M(String, postgresql_publication_name, "", "PostgreSQL publication name.", 0) \ + M(UInt64, postgresql_max_block_size, 0, "Number of rows collected before flushing data into table.", 0) \ DECLARE_SETTINGS_TRAITS(PostgreSQLReplicationSettingsTraits, LIST_OF_POSTGRESQL_REPLICATION_SETTINGS) diff --git a/src/Storages/PostgreSQL/StoragePostgreSQLReplica.cpp b/src/Storages/PostgreSQL/StoragePostgreSQLReplica.cpp index 825c49668bb..9c24ffe8a43 100644 --- a/src/Storages/PostgreSQL/StoragePostgreSQLReplica.cpp +++ b/src/Storages/PostgreSQL/StoragePostgreSQLReplica.cpp @@ -69,7 +69,10 @@ StoragePostgreSQLReplica::StoragePostgreSQLReplica( global_context, global_context->getMacros()->expand(replication_settings->postgresql_replication_slot_name.value), global_context->getMacros()->expand(replication_settings->postgresql_publication_name.value), - global_context->getSettingsRef().postgresql_replica_max_rows_to_insert.value + replication_settings->postgresql_max_block_size.changed + ? 
replication_settings->postgresql_max_block_size.value + : (global_context->getSettingsRef().max_insert_block_size.value) + ); } diff --git a/tests/integration/test_storage_postgresql_replica/test.py b/tests/integration/test_storage_postgresql_replica/test.py index 5f91fd2f7b4..8774100af1a 100644 --- a/tests/integration/test_storage_postgresql_replica/test.py +++ b/tests/integration/test_storage_postgresql_replica/test.py @@ -65,6 +65,7 @@ def rabbitmq_setup_teardown(): instance.query('DROP TABLE IF EXISTS test.postgresql_replica') +@pytest.mark.timeout(120) def test_initial_load_from_snapshot(started_cluster): conn = get_postgres_conn(True) cursor = conn.cursor() @@ -78,13 +79,16 @@ def test_initial_load_from_snapshot(started_cluster): PRIMARY KEY key; ''') - time.sleep(1) result = instance.query('SELECT * FROM test.postgresql_replica ORDER BY key;') + while postgresql_replica_check_result(result) == False: + time.sleep(0.2) + result = instance.query('SELECT * FROM test.postgresql_replica ORDER BY key;') + cursor.execute('DROP TABLE postgresql_replica;') postgresql_replica_check_result(result, True) -@pytest.mark.timeout(180) +@pytest.mark.timeout(120) def test_no_connection_at_startup(started_cluster): conn = get_postgres_conn(True) cursor = conn.cursor() @@ -111,6 +115,7 @@ def test_no_connection_at_startup(started_cluster): postgresql_replica_check_result(result, True) +@pytest.mark.timeout(120) def test_detach_attach_is_ok(started_cluster): conn = get_postgres_conn(True) cursor = conn.cursor() @@ -140,6 +145,7 @@ def test_detach_attach_is_ok(started_cluster): postgresql_replica_check_result(result, True) +@pytest.mark.timeout(120) def test_replicating_insert_queries(started_cluster): conn = get_postgres_conn(True) cursor = conn.cursor() @@ -155,34 +161,32 @@ def test_replicating_insert_queries(started_cluster): ''') result = instance.query('SELECT count() FROM test.postgresql_replica;') - while (int(result) == 0): + while (int(result) != 10): time.sleep(0.2) result = instance.query('SELECT count() FROM test.postgresql_replica;') - result = instance.query('SELECT count() FROM test.postgresql_replica;') - assert(int(result) == 10) - instance.query("INSERT INTO postgres_database.postgresql_replica SELECT 10 + number, 10 + number from numbers(10)") instance.query("INSERT INTO postgres_database.postgresql_replica SELECT 20 + number, 20 + number from numbers(10)") - time.sleep(2) - result = instance.query('SELECT count() FROM test.postgresql_replica;') - assert(int(result) == 30) + while (int(result) != 30): + time.sleep(0.2) + result = instance.query('SELECT count() FROM test.postgresql_replica;') instance.query("INSERT INTO postgres_database.postgresql_replica SELECT 30 + number, 30 + number from numbers(10)") instance.query("INSERT INTO postgres_database.postgresql_replica SELECT 40 + number, 40 + number from numbers(10)") - time.sleep(2) - result = instance.query('SELECT count() FROM test.postgresql_replica;') - assert(int(result) == 50) + while (int(result) != 50): + time.sleep(0.2) + result = instance.query('SELECT count() FROM test.postgresql_replica;') result = instance.query('SELECT * FROM test.postgresql_replica ORDER BY key;') cursor.execute('DROP TABLE postgresql_replica;') postgresql_replica_check_result(result, True) +@pytest.mark.timeout(120) def test_replicating_delete_queries(started_cluster): conn = get_postgres_conn(True) cursor = conn.cursor() @@ -197,28 +201,34 @@ def test_replicating_delete_queries(started_cluster): PRIMARY KEY key; ''') - result = 
instance.query('SELECT count() FROM test.postgresql_replica;') - while (int(result) == 0): - time.sleep(0.2) - result = instance.query('SELECT count() FROM test.postgresql_replica;') - result = instance.query('SELECT * FROM test.postgresql_replica ORDER BY key;') + while postgresql_replica_check_result(result) == False: + time.sleep(0.2) + result = instance.query('SELECT * FROM test.postgresql_replica ORDER BY key;') + postgresql_replica_check_result(result, True) + result = instance.query('SELECT * FROM test.postgresql_replica ORDER BY key;') + instance.query("INSERT INTO postgres_database.postgresql_replica SELECT 50 + number, 50 + number from numbers(50)") - time.sleep(2) result = instance.query('SELECT count() FROM test.postgresql_replica;') - assert(int(result) == 100) + while int(result) != 100: + time.sleep(0.5) + result = instance.query('SELECT count() FROM test.postgresql_replica;') cursor.execute('DELETE FROM postgresql_replica WHERE key > 49;') - time.sleep(2); result = instance.query('SELECT * FROM test.postgresql_replica ORDER BY key;') + while postgresql_replica_check_result(result) == False: + time.sleep(0.5) + result = instance.query('SELECT * FROM test.postgresql_replica ORDER BY key;') + cursor.execute('DROP TABLE postgresql_replica;') postgresql_replica_check_result(result, True) +@pytest.mark.timeout(120) def test_replicating_update_queries(started_cluster): conn = get_postgres_conn(True) cursor = conn.cursor() @@ -234,20 +244,22 @@ def test_replicating_update_queries(started_cluster): ''') result = instance.query('SELECT count() FROM test.postgresql_replica;') - while (int(result) == 0): + while (int(result) != 50): time.sleep(0.2) result = instance.query('SELECT count() FROM test.postgresql_replica;') - assert(int(result) == 50) - cursor.execute('UPDATE postgresql_replica SET value = value - 10;') - time.sleep(2); result = instance.query('SELECT * FROM test.postgresql_replica ORDER BY key;') + while postgresql_replica_check_result(result) == False: + time.sleep(0.5) + result = instance.query('SELECT * FROM test.postgresql_replica ORDER BY key;') + cursor.execute('DROP TABLE postgresql_replica;') postgresql_replica_check_result(result, True) +@pytest.mark.timeout(120) def test_resume_from_written_version(started_cluster): conn = get_postgres_conn(True) cursor = conn.cursor() @@ -262,17 +274,16 @@ def test_resume_from_written_version(started_cluster): ''') result = instance.query('SELECT count() FROM test.postgresql_replica;') - while (int(result) == 0): + while (int(result) != 50): time.sleep(0.2) result = instance.query('SELECT count() FROM test.postgresql_replica;') - assert(int(result) == 50) - instance.query("INSERT INTO postgres_database.postgresql_replica SELECT 50 + number, 50 + number from numbers(50)") - time.sleep(2) result = instance.query('SELECT count() FROM test.postgresql_replica;') - assert(int(result) == 100) + while (int(result) != 100): + time.sleep(0.2) + result = instance.query('SELECT count() FROM test.postgresql_replica;') instance.query('DETACH TABLE test.postgresql_replica') @@ -281,14 +292,16 @@ def test_resume_from_written_version(started_cluster): instance.query('ATTACH TABLE test.postgresql_replica') - time.sleep(3) - result = instance.query('SELECT * FROM test.postgresql_replica ORDER BY key;') + while postgresql_replica_check_result(result) == False: + time.sleep(0.5) + result = instance.query('SELECT * FROM test.postgresql_replica ORDER BY key;') + cursor.execute('DROP TABLE postgresql_replica;') postgresql_replica_check_result(result, 
True) -@pytest.mark.timeout(180) +@pytest.mark.timeout(120) def test_many_replication_messages(started_cluster): conn = get_postgres_conn(True) cursor = conn.cursor() @@ -296,10 +309,14 @@ def test_many_replication_messages(started_cluster): instance.query("INSERT INTO postgres_database.postgresql_replica SELECT number, number from numbers(100000)") instance.query(''' - CREATE TABLE test.postgresql_replica (key UInt64, value UInt64, _sign Int8 MATERIALIZED 1, _version UInt64 MATERIALIZED 1) + CREATE TABLE test.postgresql_replica ( + key UInt64, value UInt64, + _sign Int8 MATERIALIZED 1, + _version UInt64 MATERIALIZED 1, + PRIMARY KEY(key)) ENGINE = PostgreSQLReplica( 'postgres1:5432', 'postgres_database', 'postgresql_replica', 'postgres', 'mysecretpassword') - PRIMARY KEY key; + SETTINGS postgresql_max_block_size = 50000; ''') result = instance.query('SELECT count() FROM test.postgresql_replica;') @@ -311,8 +328,9 @@ def test_many_replication_messages(started_cluster): result = instance.query('SELECT count() FROM test.postgresql_replica;') while (int(result) != 200000): - result = instance.query('SELECT count() FROM test.postgresql_replica;') time.sleep(1) + result = instance.query('SELECT count() FROM test.postgresql_replica;') + print("INSERT OK") result = instance.query('SELECT key FROM test.postgresql_replica ORDER BY key;') expected = instance.query("SELECT number from numbers(200000)") @@ -324,20 +342,59 @@ def test_many_replication_messages(started_cluster): expected = instance.query("SELECT number from numbers(100000)") while (result != expected): - result = instance.query('SELECT key FROM test.postgresql_replica WHERE value = key + 1 ORDER BY key;') time.sleep(1) + result = instance.query('SELECT key FROM test.postgresql_replica WHERE value = key + 1 ORDER BY key;') + print("UPDATE OK") cursor.execute('DELETE FROM postgresql_replica WHERE key % 2 = 1;') cursor.execute('DELETE FROM postgresql_replica WHERE key != value;') result = instance.query('SELECT count() FROM (SELECT * FROM test.postgresql_replica);') while (int(result) != 50000): - result = instance.query('SELECT count() FROM (SELECT * FROM test.postgresql_replica);') time.sleep(1) + result = instance.query('SELECT count() FROM (SELECT * FROM test.postgresql_replica);') + print("DELETE OK") cursor.execute('DROP TABLE postgresql_replica;') +@pytest.mark.timeout(120) +def test_flush_by_block_size(started_cluster): + conn = get_postgres_conn(True) + cursor = conn.cursor() + create_postgres_table(cursor, 'postgresql_replica'); + + instance.query("INSERT INTO postgres_database.postgresql_replica SELECT number, number from numbers(1000)") + + instance.query(''' + CREATE TABLE test.postgresql_replica ( + key UInt64, value UInt64, + _sign Int8 MATERIALIZED 1, + _version UInt64 MATERIALIZED 1, + PRIMARY KEY(key)) + ENGINE = PostgreSQLReplica( + 'postgres1:5432', 'postgres_database', 'postgresql_replica', 'postgres', 'mysecretpassword') + SETTINGS postgresql_max_block_size = 5000; + ''') + + result = instance.query('SELECT count() FROM test.postgresql_replica;') + while int(result) != 1000: + time.sleep(0.2) + result = instance.query('SELECT count() FROM test.postgresql_replica;') + + for i in range(100): + instance.query("INSERT INTO postgres_database.postgresql_replica SELECT {} * 1000 + number, number from numbers(1000)".format(i)) + + time.sleep(0.5) + + result = instance.query('SELECT count() FROM test.postgresql_replica;') + while (int(result) == 0): + result = instance.query('SELECT count() FROM test.postgresql_replica;') + 
time.sleep(0.2) + + assert(int(result) % 5000 == 0) + + if __name__ == '__main__': cluster.start() input("Cluster created, press any key to destroy...") From 7ceb784d1e187c173438cac5d072b5d34698f1d6 Mon Sep 17 00:00:00 2001 From: kssenii Date: Sat, 6 Feb 2021 12:28:42 +0000 Subject: [PATCH 025/931] Better slot usage, some fixes --- .../PostgreSQL/PostgreSQLReplicaConsumer.cpp | 82 +++++++++---------- .../PostgreSQL/PostgreSQLReplicaConsumer.h | 34 ++------ .../PostgreSQL/PostgreSQLReplicaMetadata.cpp | 1 + .../PostgreSQL/PostgreSQLReplicaMetadata.h | 6 +- .../PostgreSQLReplicationHandler.cpp | 68 ++++----------- .../PostgreSQL/PostgreSQLReplicationHandler.h | 19 ++--- .../PostgreSQL/StoragePostgreSQLReplica.cpp | 59 ++++++------- .../PostgreSQL/StoragePostgreSQLReplica.h | 13 ++- .../PostgreSQL/insertPostgreSQLValue.cpp | 4 +- .../test_storage_postgresql_replica/test.py | 44 +++++----- 10 files changed, 129 insertions(+), 201 deletions(-) diff --git a/src/Storages/PostgreSQL/PostgreSQLReplicaConsumer.cpp b/src/Storages/PostgreSQL/PostgreSQLReplicaConsumer.cpp index 3a81c4bc887..d8bee013c51 100644 --- a/src/Storages/PostgreSQL/PostgreSQLReplicaConsumer.cpp +++ b/src/Storages/PostgreSQL/PostgreSQLReplicaConsumer.cpp @@ -1,31 +1,14 @@ #include "PostgreSQLReplicaConsumer.h" -#include -#include - -#include - -#include -#include -#include - -#include +#include #include - +#include +#include +#include +#include #include #include -#include -#include - -#include -#include -#include -#include -#include - -#include -#include namespace DB { @@ -37,7 +20,6 @@ namespace ErrorCodes static const auto reschedule_ms = 500; static const auto max_thread_work_duration_ms = 60000; -static const auto max_empty_slot_reads = 16; PostgreSQLReplicaConsumer::PostgreSQLReplicaConsumer( @@ -47,7 +29,7 @@ PostgreSQLReplicaConsumer::PostgreSQLReplicaConsumer( const std::string & replication_slot_name_, const std::string & publication_name_, const std::string & metadata_path, - const LSNPosition & start_lsn, + const std::string & start_lsn, const size_t max_block_size_, StoragePtr nested_storage_) : log(&Poco::Logger::get("PostgreSQLReaplicaConsumer")) @@ -69,12 +51,21 @@ PostgreSQLReplicaConsumer::PostgreSQLReplicaConsumer( wal_reader_task = context->getSchedulePool().createTask("PostgreSQLReplicaWALReader", [this]{ replicationStream(); }); wal_reader_task->deactivate(); - } void PostgreSQLReplicaConsumer::startSynchronization() { + metadata.readMetadata(); + + if (!metadata.lsn().empty()) + { + auto tx = std::make_shared(*connection->conn()); + final_lsn = metadata.lsn(); + final_lsn = advanceLSN(tx); + tx->commit(); + } + wal_reader_task->activateAndSchedule(); } @@ -88,21 +79,14 @@ void PostgreSQLReplicaConsumer::stopSynchronization() void PostgreSQLReplicaConsumer::replicationStream() { - size_t count_empty_slot_reads = 0; auto start_time = std::chrono::steady_clock::now(); - metadata.readMetadata(); LOG_TRACE(log, "Starting replication stream"); while (!stop_synchronization) { - if (!readFromReplicationSlot() && ++count_empty_slot_reads == max_empty_slot_reads) - { - LOG_TRACE(log, "Reschedule replication stream. 
Replication slot is empty."); + if (!readFromReplicationSlot()) break; - } - else - count_empty_slot_reads = 0; auto end_time = std::chrono::steady_clock::now(); auto duration = std::chrono::duration_cast(end_time - start_time); @@ -270,7 +254,6 @@ void PostgreSQLReplicaConsumer::processReplicationMessage(const char * replicati Int64 transaction_commit_timestamp = readInt64(replication_message, pos); LOG_DEBUG(log, "transaction lsn {}, transaction commit timespamp {}", transaction_end_lsn, transaction_commit_timestamp); - //current_lsn.lsn_value = transaction_end_lsn; break; } case 'C': // Commit @@ -282,7 +265,7 @@ void PostgreSQLReplicaConsumer::processReplicationMessage(const char * replicati Int64 transaction_commit_timestamp = readInt64(replication_message, pos); LOG_DEBUG(log, "commit lsn {}, transaction lsn {}, transaction commit timestamp {}", commit_lsn, transaction_end_lsn, transaction_commit_timestamp); - final_lsn.lsn = current_lsn.lsn; + final_lsn = current_lsn; break; } case 'O': // Origin @@ -384,9 +367,9 @@ void PostgreSQLReplicaConsumer::syncIntoTable(Block & block) String PostgreSQLReplicaConsumer::advanceLSN(std::shared_ptr ntx) { - LOG_TRACE(log, "CURRENT LSN FROM TO {}", final_lsn.lsn); + LOG_TRACE(log, "CURRENT LSN FROM TO {}", final_lsn); - std::string query_str = fmt::format("SELECT end_lsn FROM pg_replication_slot_advance('{}', '{}')", replication_slot_name, final_lsn.lsn); + std::string query_str = fmt::format("SELECT end_lsn FROM pg_replication_slot_advance('{}', '{}')", replication_slot_name, final_lsn); pqxx::result result{ntx->exec(query_str)}; ntx->commit(); @@ -394,7 +377,7 @@ String PostgreSQLReplicaConsumer::advanceLSN(std::shared_ptr(); - return final_lsn.lsn; + return final_lsn; } @@ -409,9 +392,10 @@ bool PostgreSQLReplicaConsumer::readFromReplicationSlot() { tx = std::make_shared(*connection->conn()); - /// Read up to max_block_size rows changes (upto_n_changes parameter). It return larger number as the limit + /// Read up to max_block_size rows changes (upto_n_changes parameter). It might return larger number as the limit /// is checked only after each transaction block. /// Returns less than max_block_changes, if reached end of wal. Sync to table in this case. + std::string query_str = fmt::format( "select lsn, data FROM pg_logical_slot_peek_binary_changes(" "'{}', NULL, {}, 'publication_names', '{}', 'proto_version', '1')", @@ -439,11 +423,26 @@ bool PostgreSQLReplicaConsumer::readFromReplicationSlot() slot_empty = false; - current_lsn.lsn = (*row)[0]; + current_lsn = (*row)[0]; LOG_TRACE(log, "Replication message: {}", (*row)[1]); processReplicationMessage((*row)[1].c_str(), (*row)[1].size()); } } + catch (const pqxx::sql_error & e) + { + /// Currently `sql replication interface` is used and it has the problem that it registers relcache + /// callbacks on each pg_logical_slot_get_changes and there is no way to invalidate them: + /// https://github.com/postgres/postgres/blob/master/src/backend/replication/pgoutput/pgoutput.c#L1128 + /// So at some point will get out of limit and then they will be cleaned. + + std::string error_message = e.what(); + if (error_message.find("out of relcache_callback_list slots") != std::string::npos) + LOG_DEBUG(log, "Out of rel_cache_list slot"); + else + tryLogCurrentException(__PRETTY_FUNCTION__); + + return false; + } catch (...) 
{ tryLogCurrentException(__PRETTY_FUNCTION__); @@ -453,8 +452,9 @@ bool PostgreSQLReplicaConsumer::readFromReplicationSlot() Block result_rows = description.sample_block.cloneWithColumns(std::move(columns)); if (result_rows.rows()) { + LOG_TRACE(log, "SYNCING TABLE {} max_block_size {}", result_rows.rows(), max_block_size); assert(!slot_empty); - metadata.commitMetadata(final_lsn.lsn, [&]() + metadata.commitMetadata(final_lsn, [&]() { syncIntoTable(result_rows); return advanceLSN(tx); diff --git a/src/Storages/PostgreSQL/PostgreSQLReplicaConsumer.h b/src/Storages/PostgreSQL/PostgreSQLReplicaConsumer.h index efb9dabc121..41e636705b9 100644 --- a/src/Storages/PostgreSQL/PostgreSQLReplicaConsumer.h +++ b/src/Storages/PostgreSQL/PostgreSQLReplicaConsumer.h @@ -1,40 +1,18 @@ #pragma once #include "PostgreSQLConnection.h" -#include #include "PostgreSQLReplicaMetadata.h" +#include "pqxx/pqxx" + +#include #include #include -#include -#include "pqxx/pqxx" #include + namespace DB { -struct LSNPosition -{ - std::string lsn; - int64_t lsn_value; - - int64_t getValue() - { - uint64_t upper_half, lower_half, result; - std::sscanf(lsn.data(), "%lX/%lX", &upper_half, &lower_half); - result = (upper_half << 32) + lower_half; - return result; - } - - std::string getString() - { - char result[16]; - std::snprintf(result, sizeof(result), "%lX/%lX", (lsn_value >> 32), lsn_value & 0xFFFFFFFF); - std::string ans = result; - return ans; - } -}; - - class PostgreSQLReplicaConsumer { public: @@ -45,7 +23,7 @@ public: const std::string & replication_slot_name_, const std::string & publication_name_, const std::string & metadata_path, - const LSNPosition & start_lsn, + const std::string & start_lsn, const size_t max_block_size_, StoragePtr nested_storage_); @@ -94,7 +72,7 @@ private: const std::string table_name; PostgreSQLConnectionPtr connection; - LSNPosition current_lsn, final_lsn; + std::string current_lsn, final_lsn; BackgroundSchedulePool::TaskHolder wal_reader_task; //BackgroundSchedulePool::TaskHolder table_sync_task; std::atomic stop_synchronization = false; diff --git a/src/Storages/PostgreSQL/PostgreSQLReplicaMetadata.cpp b/src/Storages/PostgreSQL/PostgreSQLReplicaMetadata.cpp index 3188f271f0a..a5ae25c3f53 100644 --- a/src/Storages/PostgreSQL/PostgreSQLReplicaMetadata.cpp +++ b/src/Storages/PostgreSQL/PostgreSQLReplicaMetadata.cpp @@ -1,4 +1,5 @@ #include "PostgreSQLReplicaMetadata.h" + #include #include #include diff --git a/src/Storages/PostgreSQL/PostgreSQLReplicaMetadata.h b/src/Storages/PostgreSQL/PostgreSQLReplicaMetadata.h index f93b74c8c65..31044dc3490 100644 --- a/src/Storages/PostgreSQL/PostgreSQLReplicaMetadata.h +++ b/src/Storages/PostgreSQL/PostgreSQLReplicaMetadata.h @@ -13,10 +13,8 @@ public: void commitMetadata(std::string & lsn, const std::function & syncTableFunc); void readMetadata(); - size_t version() - { - return last_version++; - } + size_t version() { return last_version++; } + std::string lsn() { return last_lsn; } private: void writeMetadata(bool append_metadata = false); diff --git a/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp b/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp index 1726185ad8a..c7429e792ae 100644 --- a/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp +++ b/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp @@ -1,15 +1,10 @@ #include "PostgreSQLReplicationHandler.h" -#include "PostgreSQLReplicaConsumer.h" -#include -#include -#include -#include -#include -#include -#include -#include #include +#include +#include +#include + 
namespace DB { @@ -27,6 +22,7 @@ PostgreSQLReplicationHandler::PostgreSQLReplicationHandler( const std::string & database_name_, const std::string & table_name_, const std::string & conn_str, + const std::string & metadata_path_, std::shared_ptr context_, const std::string & publication_name_, const std::string & replication_slot_name_, @@ -36,19 +32,16 @@ PostgreSQLReplicationHandler::PostgreSQLReplicationHandler( , database_name(database_name_) , table_name(table_name_) , connection_str(conn_str) + , metadata_path(metadata_path_) , publication_name(publication_name_) , replication_slot(replication_slot_name_) , max_block_size(max_block_size_) , connection(std::make_shared(conn_str)) , replication_connection(std::make_shared(fmt::format("{} replication=database", connection->conn_str()))) - , metadata_path(DatabaseCatalog::instance().getDatabase(database_name)->getMetadataPath() + "/.metadata") { if (replication_slot.empty()) replication_slot = fmt::format("{}_{}_ch_replication_slot", database_name, table_name); - /// Temporary replication slot is used to acquire a snapshot for initial table synchronization and to determine starting lsn position. - tmp_replication_slot = replication_slot + "_temp"; - startup_task = context->getSchedulePool().createTask("PostgreSQLReplicaStartup", [this]{ waitConnectionAndStart(); }); startup_task->deactivate(); } @@ -56,7 +49,9 @@ PostgreSQLReplicationHandler::PostgreSQLReplicationHandler( void PostgreSQLReplicationHandler::startup(StoragePtr storage) { - nested_storage = storage; + nested_storage = std::move(storage); + + startup_task->activateAndSchedule(); } @@ -67,7 +62,7 @@ void PostgreSQLReplicationHandler::waitConnectionAndStart() { connection->conn(); } - catch (pqxx::broken_connection const & pqxx_error) + catch (const pqxx::broken_connection & pqxx_error) { LOG_ERROR(log, "Unable to set up connection for table {}.{}. Reconnection attempt continues. Error message: {}", database_name, table_name, pqxx_error.what()); @@ -152,33 +147,22 @@ void PostgreSQLReplicationHandler::startReplication() auto ntx = std::make_shared(*replication_connection->conn()); - /// Normally temporary replication slot should not exist. - if (isReplicationSlotExist(ntx, tmp_replication_slot)) - dropReplicationSlot(ntx, tmp_replication_slot); - - std::string snapshot_name; - LSNPosition start_lsn; + std::string snapshot_name, start_lsn; auto initial_sync = [&]() { - /// Temporary replication slot - createTempReplicationSlot(ntx, start_lsn, snapshot_name); - /// Initial table synchronization from created snapshot + createReplicationSlot(ntx, start_lsn, snapshot_name); loadFromSnapshot(snapshot_name); - /// Do not need this replication slot anymore (snapshot loaded and start lsn determined - dropReplicationSlot(ntx, tmp_replication_slot); - /// Non-temporary replication slot - createReplicationSlot(ntx); }; - /// Non temporary replication slot should be deleted with drop table only and created only once, reused after detach. + /// Replication slot should be deleted with drop table only and created only once, reused after detach. if (!isReplicationSlotExist(ntx, replication_slot)) { initial_sync(); } else if (!Poco::File(metadata_path).exists()) { - /// If non-temporary slot exists and metadata file (where last synced version is written) does not exist, it is not normal. + /// If replication slot exists and metadata file (where last synced version is written) does not exist, it is not normal. 
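+ /// Without the metadata file the last synced position is unknown, so the slot is dropped and the initial sync is redone from a fresh snapshot.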
dropReplicationSlot(ntx, replication_slot); initial_sync(); } @@ -266,32 +250,16 @@ bool PostgreSQLReplicationHandler::isReplicationSlotExist(NontransactionPtr ntx, } -void PostgreSQLReplicationHandler::createTempReplicationSlot(NontransactionPtr ntx, LSNPosition & start_lsn, std::string & snapshot_name) +void PostgreSQLReplicationHandler::createReplicationSlot(NontransactionPtr ntx, std::string & start_lsn, std::string & snapshot_name) { - std::string query_str = fmt::format("CREATE_REPLICATION_SLOT {} TEMPORARY LOGICAL pgoutput EXPORT_SNAPSHOT", tmp_replication_slot); + std::string query_str = fmt::format("CREATE_REPLICATION_SLOT {} LOGICAL pgoutput EXPORT_SNAPSHOT", replication_slot); try { pqxx::result result{ntx->exec(query_str)}; - start_lsn.lsn = result[0][1].as(); + start_lsn = result[0][1].as(); snapshot_name = result[0][2].as(); LOG_TRACE(log, "Created temporary replication slot: {}, start lsn: {}, snapshot: {}", - tmp_replication_slot, start_lsn.lsn, snapshot_name); - } - catch (Exception & e) - { - e.addMessage("while creating PostgreSQL replication slot {}", tmp_replication_slot); - throw; - } -} - - -void PostgreSQLReplicationHandler::createReplicationSlot(NontransactionPtr ntx) -{ - std::string query_str = fmt::format("CREATE_REPLICATION_SLOT {} LOGICAL pgoutput", replication_slot); - try - { - pqxx::result result{ntx->exec(query_str)}; - LOG_TRACE(log, "Created replication slot: {}, start lsn: {}", replication_slot, result[0][1].as()); + replication_slot, start_lsn, snapshot_name); } catch (Exception & e) { diff --git a/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.h b/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.h index 9d2fcf9f042..29e7f9b3a43 100644 --- a/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.h +++ b/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.h @@ -1,17 +1,10 @@ #pragma once -#include #include "PostgreSQLConnection.h" #include "PostgreSQLReplicaConsumer.h" #include "PostgreSQLReplicaMetadata.h" -#include -#include -#include "pqxx/pqxx" -/* Implementation of logical streaming replication protocol: https://www.postgresql.org/docs/10/protocol-logical-replication.html. 
- */ - namespace DB { @@ -23,6 +16,7 @@ public: const std::string & database_name_, const std::string & table_name_, const std::string & conn_str_, + const std::string & metadata_path_, std::shared_ptr context_, const std::string & publication_slot_name_, const std::string & replication_slot_name_, @@ -36,11 +30,11 @@ private: using NontransactionPtr = std::shared_ptr; bool isPublicationExist(std::shared_ptr tx); - void createPublication(std::shared_ptr tx); - bool isReplicationSlotExist(NontransactionPtr ntx, std::string & slot_name); - void createTempReplicationSlot(NontransactionPtr ntx, LSNPosition & start_lsn, std::string & snapshot_name); - void createReplicationSlot(NontransactionPtr ntx); + + void createPublication(std::shared_ptr tx); + void createReplicationSlot(NontransactionPtr ntx, std::string & start_lsn, std::string & snapshot_name); + void dropReplicationSlot(NontransactionPtr tx, std::string & slot_name); void dropPublication(NontransactionPtr ntx); @@ -50,14 +44,13 @@ private: Poco::Logger * log; std::shared_ptr context; - const std::string database_name, table_name, connection_str; + const std::string database_name, table_name, connection_str, metadata_path; std::string publication_name, replication_slot; std::string tmp_replication_slot; const size_t max_block_size; PostgreSQLConnectionPtr connection, replication_connection; - const String metadata_path; BackgroundSchedulePool::TaskHolder startup_task; std::shared_ptr consumer; StoragePtr nested_storage; diff --git a/src/Storages/PostgreSQL/StoragePostgreSQLReplica.cpp b/src/Storages/PostgreSQL/StoragePostgreSQLReplica.cpp index 9c24ffe8a43..dae51cac76f 100644 --- a/src/Storages/PostgreSQL/StoragePostgreSQLReplica.cpp +++ b/src/Storages/PostgreSQL/StoragePostgreSQLReplica.cpp @@ -1,36 +1,25 @@ #include "StoragePostgreSQLReplica.h" - -#include -#include -#include - -#include -#include -#include - -#include -#include +#include "PostgreSQLReplicationSettings.h" #include #include - #include #include - +#include +#include +#include +#include +#include +#include +#include +#include #include #include -#include #include #include #include - #include -#include "PostgreSQLReplicationSettings.h" -#include - -#include -#include namespace DB { @@ -62,10 +51,14 @@ StoragePostgreSQLReplica::StoragePostgreSQLReplica( relative_data_path.resize(relative_data_path.size() - 1); relative_data_path += NESTED_STORAGE_SUFFIX; + auto metadata_path = DatabaseCatalog::instance().getDatabase(getStorageID().database_name)->getMetadataPath() + + "/.metadata_" + table_id_.database_name + "_" + table_id_.table_name; + replication_handler = std::make_unique( remote_database_name, remote_table_name, connection_str, + metadata_path, global_context, global_context->getMacros()->expand(replication_settings->postgresql_replication_slot_name.value), global_context->getMacros()->expand(replication_settings->postgresql_publication_name.value), @@ -148,9 +141,6 @@ ASTPtr StoragePostgreSQLReplica::getCreateHelperTableQuery() auto primary_key_ast = getInMemoryMetadataPtr()->getPrimaryKeyAST(); if (primary_key_ast) storage->set(storage->order_by, primary_key_ast); - /// else - - //storage->set(storage->partition_by, ?); create_table_query->set(create_table_query->storage, storage); @@ -167,17 +157,24 @@ Pipe StoragePostgreSQLReplica::read( size_t max_block_size, unsigned num_streams) { - StoragePtr storage = DatabaseCatalog::instance().getTable(nested_storage->getStorageID(), *global_context); + if (!nested_storage) + { + auto table_id = getStorageID(); + 
nested_storage = DatabaseCatalog::instance().getTable( + StorageID(table_id.database_name, table_id.table_name + NESTED_STORAGE_SUFFIX), + *global_context); + } + auto lock = nested_storage->lockForShare(context.getCurrentQueryId(), context.getSettingsRef().lock_acquire_timeout); const StorageMetadataPtr & nested_metadata = nested_storage->getInMemoryMetadataPtr(); - NameSet column_names_set = NameSet(column_names.begin(), column_names.end()); - Block nested_header = nested_metadata->getSampleBlock(); ColumnWithTypeAndName & sign_column = nested_header.getByPosition(nested_header.columns() - 2); ColumnWithTypeAndName & version_column = nested_header.getByPosition(nested_header.columns() - 1); + NameSet column_names_set = NameSet(column_names.begin(), column_names.end()); + if (ASTSelectQuery * select_query = query_info.query->as(); select_query && !column_names_set.count(version_column.name)) { auto & tables_in_select_query = select_query->tables()->as(); @@ -208,7 +205,7 @@ Pipe StoragePostgreSQLReplica::read( expressions->children.emplace_back(std::make_shared(column_name)); } - Pipe pipe = storage->read( + Pipe pipe = nested_storage->read( require_columns_name, nested_metadata, query_info, context, processed_stage, max_block_size, num_streams); @@ -249,11 +246,9 @@ void StoragePostgreSQLReplica::startup() LOG_TRACE(&Poco::Logger::get("StoragePostgreSQLReplica"), "Directory already exists {}", relative_data_path); - nested_storage = DatabaseCatalog::instance().getTable( - StorageID(table_id.database_name, table_id.table_name + NESTED_STORAGE_SUFFIX), - *global_context); - - replication_handler->startup(nested_storage); + replication_handler->startup( + DatabaseCatalog::instance().getTable( + StorageID(table_id.database_name, table_id.table_name + NESTED_STORAGE_SUFFIX), *global_context)); } diff --git a/src/Storages/PostgreSQL/StoragePostgreSQLReplica.h b/src/Storages/PostgreSQL/StoragePostgreSQLReplica.h index 3207389c68f..652f948bf79 100644 --- a/src/Storages/PostgreSQL/StoragePostgreSQLReplica.h +++ b/src/Storages/PostgreSQL/StoragePostgreSQLReplica.h @@ -2,24 +2,21 @@ #include "config_core.h" +#include "PostgreSQLReplicationHandler.h" +#include "PostgreSQLReplicationSettings.h" + #include #include #include #include #include #include - #include #include #include #include - -#include #include -#include -#include "PostgreSQLReplicationHandler.h" -#include "PostgreSQLReplicationSettings.h" -#include "pqxx/pqxx" + namespace DB { @@ -67,7 +64,7 @@ private: ASTPtr getCreateHelperTableQuery(); void dropNested(); - String relative_data_path, metadata_path; + String relative_data_path; std::shared_ptr global_context; std::unique_ptr replication_settings; diff --git a/src/Storages/PostgreSQL/insertPostgreSQLValue.cpp b/src/Storages/PostgreSQL/insertPostgreSQLValue.cpp index 8cd17cca982..20b88bff499 100644 --- a/src/Storages/PostgreSQL/insertPostgreSQLValue.cpp +++ b/src/Storages/PostgreSQL/insertPostgreSQLValue.cpp @@ -1,4 +1,5 @@ #include "insertPostgreSQLValue.h" + #include #include #include @@ -10,11 +11,8 @@ #include #include #include -#include #include #include -#include -#include namespace DB diff --git a/tests/integration/test_storage_postgresql_replica/test.py b/tests/integration/test_storage_postgresql_replica/test.py index 8774100af1a..8773c484039 100644 --- a/tests/integration/test_storage_postgresql_replica/test.py +++ b/tests/integration/test_storage_postgresql_replica/test.py @@ -323,6 +323,7 @@ def test_many_replication_messages(started_cluster): while (int(result) == 100000): 
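        # Poll on a short interval; each iteration re-reads the count until the
        # materialized table reflects the expected state.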
time.sleep(0.2) result = instance.query('SELECT count() FROM test.postgresql_replica;') + print("SYNC OK") instance.query("INSERT INTO postgres_database.postgresql_replica SELECT number, number from numbers(100000, 100000)") @@ -358,41 +359,40 @@ def test_many_replication_messages(started_cluster): cursor.execute('DROP TABLE postgresql_replica;') -@pytest.mark.timeout(120) -def test_flush_by_block_size(started_cluster): +@pytest.mark.timeout(180) +def test_connection_loss(started_cluster): conn = get_postgres_conn(True) cursor = conn.cursor() create_postgres_table(cursor, 'postgresql_replica'); - - instance.query("INSERT INTO postgres_database.postgresql_replica SELECT number, number from numbers(1000)") + instance.query("INSERT INTO postgres_database.postgresql_replica SELECT number, number from numbers(50)") instance.query(''' - CREATE TABLE test.postgresql_replica ( - key UInt64, value UInt64, - _sign Int8 MATERIALIZED 1, - _version UInt64 MATERIALIZED 1, - PRIMARY KEY(key)) + CREATE TABLE test.postgresql_replica (key UInt64, value UInt64, _sign Int8 MATERIALIZED 1, _version UInt64 MATERIALIZED 1) ENGINE = PostgreSQLReplica( 'postgres1:5432', 'postgres_database', 'postgresql_replica', 'postgres', 'mysecretpassword') - SETTINGS postgresql_max_block_size = 5000; + PRIMARY KEY key; ''') - result = instance.query('SELECT count() FROM test.postgresql_replica;') - while int(result) != 1000: - time.sleep(0.2) - result = instance.query('SELECT count() FROM test.postgresql_replica;') + i = 50 + while i < 100000: + instance.query("INSERT INTO postgres_database.postgresql_replica SELECT {} + number, number from numbers(10000)".format(i)) + i += 10000 - for i in range(100): - instance.query("INSERT INTO postgres_database.postgresql_replica SELECT {} * 1000 + number, number from numbers(1000)".format(i)) - - time.sleep(0.5) + started_cluster.pause_container('postgres1') result = instance.query('SELECT count() FROM test.postgresql_replica;') - while (int(result) == 0): - result = instance.query('SELECT count() FROM test.postgresql_replica;') - time.sleep(0.2) + print(int(result)) + time.sleep(6) - assert(int(result) % 5000 == 0) + started_cluster.unpause_container('postgres1') + + result = instance.query('SELECT count() FROM test.postgresql_replica;') + while int(result) < 100050: + time.sleep(1) + result = instance.query('SELECT count() FROM test.postgresql_replica;') + + cursor.execute('DROP TABLE postgresql_replica;') + assert(int(result) == 100050) if __name__ == '__main__': From 9c2c7be4cf7710bb0b83dc169d7f2dba9aa16494 Mon Sep 17 00:00:00 2001 From: kssenii Date: Mon, 8 Feb 2021 19:32:30 +0000 Subject: [PATCH 026/931] Separate replication interface from single storage --- .../PostgreSQL/PostgreSQLReplicaConsumer.cpp | 155 +++++++++++------- .../PostgreSQL/PostgreSQLReplicaConsumer.h | 58 ++++--- ...ings.cpp => PostgreSQLReplicaSettings.cpp} | 6 +- ...Settings.h => PostgreSQLReplicaSettings.h} | 6 +- .../PostgreSQLReplicationHandler.cpp | 87 +++++----- .../PostgreSQL/PostgreSQLReplicationHandler.h | 15 +- .../PostgreSQL/StoragePostgreSQLReplica.cpp | 14 +- .../PostgreSQL/StoragePostgreSQLReplica.h | 8 +- 8 files changed, 209 insertions(+), 140 deletions(-) rename src/Storages/PostgreSQL/{PostgreSQLReplicationSettings.cpp => PostgreSQLReplicaSettings.cpp} (76%) rename src/Storages/PostgreSQL/{PostgreSQLReplicationSettings.h => PostgreSQLReplicaSettings.h} (63%) diff --git a/src/Storages/PostgreSQL/PostgreSQLReplicaConsumer.cpp b/src/Storages/PostgreSQL/PostgreSQLReplicaConsumer.cpp index 
d8bee013c51..842c5b3a5d5 100644 --- a/src/Storages/PostgreSQL/PostgreSQLReplicaConsumer.cpp +++ b/src/Storages/PostgreSQL/PostgreSQLReplicaConsumer.cpp @@ -16,6 +16,7 @@ namespace DB namespace ErrorCodes { extern const int LOGICAL_ERROR; + extern const int UNKNOWN_TABLE; } static const auto reschedule_ms = 500; @@ -24,30 +25,27 @@ static const auto max_thread_work_duration_ms = 60000; PostgreSQLReplicaConsumer::PostgreSQLReplicaConsumer( std::shared_ptr context_, - const std::string & table_name_, PostgreSQLConnectionPtr connection_, const std::string & replication_slot_name_, const std::string & publication_name_, const std::string & metadata_path, const std::string & start_lsn, const size_t max_block_size_, - StoragePtr nested_storage_) + Storages storages_) : log(&Poco::Logger::get("PostgreSQLReaplicaConsumer")) , context(context_) , replication_slot_name(replication_slot_name_) , publication_name(publication_name_) , metadata(metadata_path) - , table_name(table_name_) , connection(std::move(connection_)) , current_lsn(start_lsn) , max_block_size(max_block_size_) - , nested_storage(nested_storage_) - , sample_block(nested_storage->getInMemoryMetadata().getSampleBlock()) + , storages(storages_) { - description.init(sample_block); - for (const auto idx : ext::range(0, description.sample_block.columns())) - if (description.types[idx].first == ExternalResultDescription::ValueType::vtArray) - preparePostgreSQLArrayInfo(array_info, idx, description.sample_block.getByPosition(idx).type); + for (const auto & [table_name, storage] : storages) + { + buffers.emplace(table_name, BufferData(storage->getInMemoryMetadata().getSampleBlock())); + } wal_reader_task = context->getSchedulePool().createTask("PostgreSQLReplicaWALReader", [this]{ replicationStream(); }); wal_reader_task->deactivate(); @@ -102,35 +100,37 @@ void PostgreSQLReplicaConsumer::replicationStream() } -void PostgreSQLReplicaConsumer::insertValue(std::string & value, size_t column_idx) +void PostgreSQLReplicaConsumer::insertValue(BufferData & buffer, const std::string & value, size_t column_idx) { LOG_TRACE(log, "INSERTING VALUE {}", value); - const auto & sample = description.sample_block.getByPosition(column_idx); - bool is_nullable = description.types[column_idx].second; + const auto & sample = buffer.description.sample_block.getByPosition(column_idx); + bool is_nullable = buffer.description.types[column_idx].second; if (is_nullable) { - ColumnNullable & column_nullable = assert_cast(*columns[column_idx]); + ColumnNullable & column_nullable = assert_cast(*buffer.columns[column_idx]); const auto & data_type = assert_cast(*sample.type); insertPostgreSQLValue( column_nullable.getNestedColumn(), value, - description.types[column_idx].first, data_type.getNestedType(), array_info, column_idx); + buffer.description.types[column_idx].first, data_type.getNestedType(), buffer.array_info, column_idx); column_nullable.getNullMapData().emplace_back(0); } else { insertPostgreSQLValue( - *columns[column_idx], value, description.types[column_idx].first, sample.type, array_info, column_idx); + *buffer.columns[column_idx], value, + buffer.description.types[column_idx].first, sample.type, + buffer.array_info, column_idx); } } -void PostgreSQLReplicaConsumer::insertDefaultValue(size_t column_idx) +void PostgreSQLReplicaConsumer::insertDefaultValue(BufferData & buffer, size_t column_idx) { - const auto & sample = description.sample_block.getByPosition(column_idx); - insertDefaultPostgreSQLValue(*columns[column_idx], *sample.column); + const auto & 
sample = buffer.description.sample_block.getByPosition(column_idx); + insertDefaultPostgreSQLValue(*buffer.columns[column_idx], *sample.column); } @@ -191,7 +191,8 @@ Int64 PostgreSQLReplicaConsumer::readInt64(const char * message, size_t & pos) } -void PostgreSQLReplicaConsumer::readTupleData(const char * message, size_t & pos, PostgreSQLQuery type, bool old_value) +void PostgreSQLReplicaConsumer::readTupleData( + BufferData & buffer, const char * message, size_t & pos, PostgreSQLQuery type, bool old_value) { Int16 num_columns = readInt16(message, pos); /// 'n' means nullable, 'u' means TOASTed value, 't' means text formatted data @@ -206,7 +207,7 @@ void PostgreSQLReplicaConsumer::readTupleData(const char * message, size_t & pos value += readInt8(message, pos); } - insertValue(value, column_idx); + insertValue(buffer, value, column_idx); LOG_DEBUG(log, "identifier {}, col_len {}, value {}", identifier, col_len, value); } @@ -215,31 +216,35 @@ void PostgreSQLReplicaConsumer::readTupleData(const char * message, size_t & pos { case PostgreSQLQuery::INSERT: { - columns[num_columns]->insert(Int8(1)); - columns[num_columns + 1]->insert(UInt64(metadata.version())); + buffer.columns[num_columns]->insert(Int8(1)); + buffer.columns[num_columns + 1]->insert(UInt64(metadata.version())); + break; } case PostgreSQLQuery::DELETE: { - columns[num_columns]->insert(Int8(-1)); - columns[num_columns + 1]->insert(UInt64(metadata.version())); + buffer.columns[num_columns]->insert(Int8(-1)); + buffer.columns[num_columns + 1]->insert(UInt64(metadata.version())); + break; } case PostgreSQLQuery::UPDATE: { if (old_value) - columns[num_columns]->insert(Int8(-1)); + buffer.columns[num_columns]->insert(Int8(-1)); else - columns[num_columns]->insert(Int8(1)); + buffer.columns[num_columns]->insert(Int8(1)); + + buffer.columns[num_columns + 1]->insert(UInt64(metadata.version())); - columns[num_columns + 1]->insert(UInt64(metadata.version())); break; } } } -void PostgreSQLReplicaConsumer::processReplicationMessage(const char * replication_message, size_t size) +void PostgreSQLReplicaConsumer::processReplicationMessage( + const char * replication_message, size_t size, std::unordered_set & tables_to_sync) { /// Skip '\x' size_t pos = 2; @@ -295,6 +300,17 @@ void PostgreSQLReplicaConsumer::processReplicationMessage(const char * replicati LOG_DEBUG(log, "Key {}, column name {}, data type id {}, type modifier {}", key, column_name, data_type_id, type_modifier); } + table_to_insert = relation_name; + if (storages.find(table_to_insert) == storages.end()) + { + throw Exception(ErrorCodes::UNKNOWN_TABLE, + "Table {} does not exist, but is included in replication stream", table_to_insert); + } + [[maybe_unused]] auto buffer_iter = buffers.find(table_to_insert); + assert(buffer_iter != buffers.end()); + + tables_to_sync.insert(relation_name); + break; } case 'Y': // Type @@ -305,7 +321,13 @@ void PostgreSQLReplicaConsumer::processReplicationMessage(const char * replicati Int8 new_tuple = readInt8(replication_message, pos); LOG_DEBUG(log, "relationID {}, newTuple {}", relation_id, new_tuple); - readTupleData(replication_message, pos, PostgreSQLQuery::INSERT); + auto buffer = buffers.find(table_to_insert); + if (buffer == buffers.end()) + { + throw Exception(ErrorCodes::UNKNOWN_TABLE, + "Buffer for table {} does not exist", table_to_insert); + } + readTupleData(buffer->second, replication_message, pos, PostgreSQLQuery::INSERT); break; } case 'U': // Update @@ -315,13 +337,14 @@ void 
PostgreSQLReplicaConsumer::processReplicationMessage(const char * replicati LOG_DEBUG(log, "relationID {}, key {}", relation_id, primary_key_or_old_tuple_data); - readTupleData(replication_message, pos, PostgreSQLQuery::UPDATE, true); + auto buffer = buffers.find(table_to_insert); + readTupleData(buffer->second, replication_message, pos, PostgreSQLQuery::UPDATE, true); if (pos + 1 < size) { Int8 new_tuple_data = readInt8(replication_message, pos); LOG_DEBUG(log, "new tuple data {}", new_tuple_data); - readTupleData(replication_message, pos, PostgreSQLQuery::UPDATE); + readTupleData(buffer->second, replication_message, pos, PostgreSQLQuery::UPDATE); } break; @@ -334,9 +357,9 @@ void PostgreSQLReplicaConsumer::processReplicationMessage(const char * replicati LOG_DEBUG(log, "relationID {}, full replica identity {}", relation_id, full_replica_identity); - //LOG_DEBUG(log, "relationID {}, index replica identity {} full replica identity {}", - // relation_id, index_replica_identity, full_replica_identity); - readTupleData(replication_message, pos, PostgreSQLQuery::DELETE); + + auto buffer = buffers.find(table_to_insert); + readTupleData(buffer->second, replication_message, pos, PostgreSQLQuery::DELETE); break; } case 'T': // Truncate @@ -348,20 +371,43 @@ void PostgreSQLReplicaConsumer::processReplicationMessage(const char * replicati } -void PostgreSQLReplicaConsumer::syncIntoTable(Block & block) +void PostgreSQLReplicaConsumer::syncTables( + std::shared_ptr tx, const std::unordered_set & tables_to_sync) { - Context insert_context(*context); - insert_context.makeQueryContext(); + for (const auto & table_name : tables_to_sync) + { + auto & buffer = buffers.find(table_name)->second; + Block result_rows = buffer.description.sample_block.cloneWithColumns(std::move(buffer.columns)); - auto insert = std::make_shared(); - insert->table_id = nested_storage->getStorageID(); + if (result_rows.rows()) + { + LOG_TRACE(log, "SYNCING TABLE {} max_block_size {}", result_rows.rows(), max_block_size); - InterpreterInsertQuery interpreter(insert, insert_context); - auto block_io = interpreter.execute(); - OneBlockInputStream input(block); + metadata.commitMetadata(final_lsn, [&]() + { + Context insert_context(*context); + insert_context.makeQueryContext(); - copyData(input, *block_io.out); - LOG_TRACE(log, "TABLE SYNC END"); + auto insert = std::make_shared(); + insert->table_id = storages[table_name]->getStorageID(); + + InterpreterInsertQuery interpreter(insert, insert_context); + auto block_io = interpreter.execute(); + + /// TODO: what if one block is not enough + OneBlockInputStream input(result_rows); + + copyData(input, *block_io.out); + + LOG_TRACE(log, "TABLE SYNC END"); + + auto actual_lsn = advanceLSN(tx); + buffer.columns = buffer.description.sample_block.cloneEmptyColumns(); + + return actual_lsn; + }); + } + } } @@ -384,9 +430,9 @@ String PostgreSQLReplicaConsumer::advanceLSN(std::shared_ptr tx; bool slot_empty = true; + std::unordered_set tables_to_sync; try { @@ -425,7 +471,7 @@ bool PostgreSQLReplicaConsumer::readFromReplicationSlot() current_lsn = (*row)[0]; LOG_TRACE(log, "Replication message: {}", (*row)[1]); - processReplicationMessage((*row)[1].c_str(), (*row)[1].size()); + processReplicationMessage((*row)[1].c_str(), (*row)[1].size(), tables_to_sync); } } catch (const pqxx::sql_error & e) @@ -443,23 +489,16 @@ bool PostgreSQLReplicaConsumer::readFromReplicationSlot() return false; } - catch (...) 
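    /// UNKNOWN_TABLE is deliberately rethrown: it means the replication stream
    /// references a table this consumer has no storage for, and no amount of
    /// retrying will fix that. Everything else is logged and the slot is simply
    /// read again on the next scheduled wakeup.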
+ catch (const Exception & e) { + if (e.code() == ErrorCodes::UNKNOWN_TABLE) + throw; + tryLogCurrentException(__PRETTY_FUNCTION__); return false; } - Block result_rows = description.sample_block.cloneWithColumns(std::move(columns)); - if (result_rows.rows()) - { - LOG_TRACE(log, "SYNCING TABLE {} max_block_size {}", result_rows.rows(), max_block_size); - assert(!slot_empty); - metadata.commitMetadata(final_lsn, [&]() - { - syncIntoTable(result_rows); - return advanceLSN(tx); - }); - } + syncTables(tx, tables_to_sync); return true; } diff --git a/src/Storages/PostgreSQL/PostgreSQLReplicaConsumer.h b/src/Storages/PostgreSQL/PostgreSQLReplicaConsumer.h index 41e636705b9..817c57a99fa 100644 --- a/src/Storages/PostgreSQL/PostgreSQLReplicaConsumer.h +++ b/src/Storages/PostgreSQL/PostgreSQLReplicaConsumer.h @@ -8,6 +8,7 @@ #include #include #include +#include namespace DB @@ -16,16 +17,17 @@ namespace DB class PostgreSQLReplicaConsumer { public: + using Storages = std::unordered_map; + PostgreSQLReplicaConsumer( std::shared_ptr context_, - const std::string & table_name_, PostgreSQLConnectionPtr connection_, const std::string & replication_slot_name_, const std::string & publication_name_, const std::string & metadata_path, const std::string & start_lsn, const size_t max_block_size_, - StoragePtr nested_storage_); + Storages storages_); /// Start reading WAL from current_lsn position. Initial data sync from created snapshot already done. void startSynchronization(); @@ -44,19 +46,37 @@ private: DELETE }; - /// Start changes stream from WAL via copy command (up to max_block_size changes). bool readFromReplicationSlot(); - void processReplicationMessage(const char * replication_message, size_t size); - - void insertValue(std::string & value, size_t column_idx); - //static void insertValueMaterialized(IColumn & column, uint64_t value); - void insertDefaultValue(size_t column_idx); - - void syncIntoTable(Block & block); + void syncTables(std::shared_ptr tx, const std::unordered_set & tables_to_sync); String advanceLSN(std::shared_ptr ntx); + void processReplicationMessage( + const char * replication_message, size_t size, std::unordered_set & tables_to_sync); + + struct BufferData + { + ExternalResultDescription description; + MutableColumns columns; + /// Needed for insertPostgreSQLValue() method to parse array + std::unordered_map array_info; + + BufferData(const Block block) + { + description.init(block); + columns = description.sample_block.cloneEmptyColumns(); + for (const auto idx : ext::range(0, description.sample_block.columns())) + if (description.types[idx].first == ExternalResultDescription::ValueType::vtArray) + preparePostgreSQLArrayInfo(array_info, idx, description.sample_block.getByPosition(idx).type); + } + }; + + using Buffers = std::unordered_map; + + void insertDefaultValue(BufferData & buffer, size_t column_idx); + void insertValue(BufferData & buffer, const std::string & value, size_t column_idx); + void readTupleData(BufferData & buffer, const char * message, size_t & pos, PostgreSQLQuery type, bool old_value = false); + /// Methods to parse replication message data. 
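    /// Messages come off the slot as text over COPY, hex-encoded with a leading
    /// "\x" (two characters per byte), which is why the readers below use
    /// unhex2()/unhex4() instead of consuming raw bytes. A minimal sketch of the
    /// decoding scheme, mirroring readInt16() (names illustrative):
    ///
    ///     Int16 read_int16(const char * msg, size_t & pos)
    ///     {
    ///         Int16 result = (UInt32(unhex2(msg + pos)) << 8) | unhex2(msg + pos + 2);
    ///         pos += 4;    /// one 16-bit value == four hex characters
    ///         return result;
    ///     }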
- void readTupleData(const char * message, size_t & pos, PostgreSQLQuery type, bool old_value = false); void readString(const char * message, size_t & pos, size_t size, String & result); Int64 readInt64(const char * message, size_t & pos); Int32 readInt32(const char * message, size_t & pos); @@ -69,23 +89,17 @@ private: const std::string publication_name; PostgreSQLReplicaMetadata metadata; - const std::string table_name; PostgreSQLConnectionPtr connection; std::string current_lsn, final_lsn; + const size_t max_block_size; + std::string table_to_insert; + BackgroundSchedulePool::TaskHolder wal_reader_task; - //BackgroundSchedulePool::TaskHolder table_sync_task; std::atomic stop_synchronization = false; - const size_t max_block_size; - StoragePtr nested_storage; - Block sample_block; - ExternalResultDescription description; - MutableColumns columns; - /// Needed for insertPostgreSQLValue() method to parse array - std::unordered_map array_info; - - size_t data_version = 1; + Storages storages; + Buffers buffers; }; } diff --git a/src/Storages/PostgreSQL/PostgreSQLReplicationSettings.cpp b/src/Storages/PostgreSQL/PostgreSQLReplicaSettings.cpp similarity index 76% rename from src/Storages/PostgreSQL/PostgreSQLReplicationSettings.cpp rename to src/Storages/PostgreSQL/PostgreSQLReplicaSettings.cpp index fa5ebb0edf3..aa1fec92ef4 100644 --- a/src/Storages/PostgreSQL/PostgreSQLReplicationSettings.cpp +++ b/src/Storages/PostgreSQL/PostgreSQLReplicaSettings.cpp @@ -1,4 +1,4 @@ -#include "PostgreSQLReplicationSettings.h" +#include "PostgreSQLReplicaSettings.h" #include #include #include @@ -13,9 +13,9 @@ namespace ErrorCodes extern const int UNKNOWN_SETTING; } -IMPLEMENT_SETTINGS_TRAITS(PostgreSQLReplicationSettingsTraits, LIST_OF_POSTGRESQL_REPLICATION_SETTINGS) +IMPLEMENT_SETTINGS_TRAITS(PostgreSQLReplicaSettingsTraits, LIST_OF_POSTGRESQL_REPLICA_SETTINGS) -void PostgreSQLReplicationSettings::loadFromQuery(ASTStorage & storage_def) +void PostgreSQLReplicaSettings::loadFromQuery(ASTStorage & storage_def) { if (storage_def.settings) { diff --git a/src/Storages/PostgreSQL/PostgreSQLReplicationSettings.h b/src/Storages/PostgreSQL/PostgreSQLReplicaSettings.h similarity index 63% rename from src/Storages/PostgreSQL/PostgreSQLReplicationSettings.h rename to src/Storages/PostgreSQL/PostgreSQLReplicaSettings.h index 8db4c3b3bb7..72b7f98ea6e 100644 --- a/src/Storages/PostgreSQL/PostgreSQLReplicationSettings.h +++ b/src/Storages/PostgreSQL/PostgreSQLReplicaSettings.h @@ -7,14 +7,14 @@ namespace DB class ASTStorage; -#define LIST_OF_POSTGRESQL_REPLICATION_SETTINGS(M) \ +#define LIST_OF_POSTGRESQL_REPLICA_SETTINGS(M) \ M(String, postgresql_replication_slot_name, "", "PostgreSQL replication slot name.", 0) \ M(String, postgresql_publication_name, "", "PostgreSQL publication name.", 0) \ M(UInt64, postgresql_max_block_size, 0, "Number of row collected before flushing data into table.", 0) \ -DECLARE_SETTINGS_TRAITS(PostgreSQLReplicationSettingsTraits, LIST_OF_POSTGRESQL_REPLICATION_SETTINGS) +DECLARE_SETTINGS_TRAITS(PostgreSQLReplicaSettingsTraits, LIST_OF_POSTGRESQL_REPLICA_SETTINGS) -struct PostgreSQLReplicationSettings : public BaseSettings +struct PostgreSQLReplicaSettings : public BaseSettings { void loadFromQuery(ASTStorage & storage_def); }; diff --git a/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp b/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp index c7429e792ae..e9d7b1d9a69 100644 --- a/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp +++ 
b/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp @@ -20,7 +20,6 @@ static const auto reschedule_ms = 500; PostgreSQLReplicationHandler::PostgreSQLReplicationHandler( const std::string & database_name_, - const std::string & table_name_, const std::string & conn_str, const std::string & metadata_path_, std::shared_ptr context_, @@ -30,7 +29,6 @@ PostgreSQLReplicationHandler::PostgreSQLReplicationHandler( : log(&Poco::Logger::get("PostgreSQLReplicaHandler")) , context(context_) , database_name(database_name_) - , table_name(table_name_) , connection_str(conn_str) , metadata_path(metadata_path_) , publication_name(publication_name_) @@ -40,18 +38,21 @@ PostgreSQLReplicationHandler::PostgreSQLReplicationHandler( , replication_connection(std::make_shared(fmt::format("{} replication=database", connection->conn_str()))) { if (replication_slot.empty()) - replication_slot = fmt::format("{}_{}_ch_replication_slot", database_name, table_name); + replication_slot = fmt::format("{}_ch_replication_slot", database_name); startup_task = context->getSchedulePool().createTask("PostgreSQLReplicaStartup", [this]{ waitConnectionAndStart(); }); startup_task->deactivate(); } -void PostgreSQLReplicationHandler::startup(StoragePtr storage) +void PostgreSQLReplicationHandler::addStoragePtr(const std::string & table_name, StoragePtr storage) { - nested_storage = std::move(storage); + storages[table_name] = std::move(storage); +} +void PostgreSQLReplicationHandler::startup() +{ startup_task->activateAndSchedule(); } @@ -64,14 +65,15 @@ void PostgreSQLReplicationHandler::waitConnectionAndStart() } catch (const pqxx::broken_connection & pqxx_error) { - LOG_ERROR(log, "Unable to set up connection for table {}.{}. Reconnection attempt continues. Error message: {}", - database_name, table_name, pqxx_error.what()); + LOG_ERROR(log, + "Unable to set up connection. Reconnection attempt continue. Error message: {}", + pqxx_error.what()); startup_task->scheduleAfter(reschedule_ms); } catch (Exception & e) { - e.addMessage("while setting up connection for {}.{}", database_name, table_name); + e.addMessage("while setting up connection for PostgreSQLReplica engine"); throw; } @@ -103,16 +105,25 @@ bool PostgreSQLReplicationHandler::isPublicationExist(std::shared_ptr tx) { + String table_names; + for (const auto & storage_data : storages) + { + if (!table_names.empty()) + table_names += ", "; + table_names += storage_data.first; + } + /// 'ONLY' means just a table, without descendants. - std::string query_str = fmt::format("CREATE PUBLICATION {} FOR TABLE ONLY {}", publication_name, table_name); + std::string query_str = fmt::format("CREATE PUBLICATION {} FOR TABLE ONLY {}", publication_name, table_names); try { tx->exec(query_str); LOG_TRACE(log, "Created publication {}", publication_name); } - catch (pqxx::undefined_table const &) + catch (Exception & e) { - throw Exception(fmt::format("PostgreSQL table {}.{} does not exist", database_name, table_name), ErrorCodes::UNKNOWN_TABLE); + e.addMessage("while creating pg_publication"); + throw; } /// TODO: check replica identity? @@ -130,7 +141,7 @@ void PostgreSQLReplicationHandler::startReplication() auto tx = std::make_shared(*replication_connection->conn()); if (publication_name.empty()) { - publication_name = fmt::format("{}_{}_ch_publication", database_name, table_name); + publication_name = fmt::format("{}_ch_publication", database_name); /// Publication defines what tables are included into replication stream. 
Should be deleted only if MaterializePostgreSQL /// table is dropped. @@ -172,14 +183,13 @@ void PostgreSQLReplicationHandler::startReplication() LOG_DEBUG(&Poco::Logger::get("StoragePostgreSQLMetadata"), "Creating replication consumer"); consumer = std::make_shared( context, - table_name, std::move(connection), replication_slot, publication_name, metadata_path, start_lsn, max_block_size, - nested_storage); + storages); LOG_DEBUG(&Poco::Logger::get("StoragePostgreSQLMetadata"), "Successfully created replication consumer"); @@ -194,39 +204,42 @@ void PostgreSQLReplicationHandler::loadFromSnapshot(std::string & snapshot_name) { LOG_DEBUG(log, "Creating transaction snapshot"); - try + for (const auto & [table_name, storage] : storages) { - auto stx = std::make_unique(*connection->conn()); + try + { + auto stx = std::make_unique(*connection->conn()); - /// Specific isolation level is required to read from snapshot. - stx->set_variable("transaction_isolation", "'repeatable read'"); + /// Specific isolation level is required to read from snapshot. + stx->set_variable("transaction_isolation", "'repeatable read'"); - std::string query_str = fmt::format("SET TRANSACTION SNAPSHOT '{}'", snapshot_name); - stx->exec(query_str); + std::string query_str = fmt::format("SET TRANSACTION SNAPSHOT '{}'", snapshot_name); + stx->exec(query_str); - /// Load from snapshot, which will show table state before creation of replication slot. - query_str = fmt::format("SELECT * FROM {}", table_name); + /// Load from snapshot, which will show table state before creation of replication slot. + query_str = fmt::format("SELECT * FROM {}", table_name); - Context insert_context(*context); - insert_context.makeQueryContext(); + Context insert_context(*context); + insert_context.makeQueryContext(); - auto insert = std::make_shared(); - insert->table_id = nested_storage->getStorageID(); + auto insert = std::make_shared(); + insert->table_id = storage->getStorageID(); - InterpreterInsertQuery interpreter(insert, insert_context); - auto block_io = interpreter.execute(); + InterpreterInsertQuery interpreter(insert, insert_context); + auto block_io = interpreter.execute(); - const StorageInMemoryMetadata & storage_metadata = nested_storage->getInMemoryMetadata(); - auto sample_block = storage_metadata.getSampleBlockNonMaterialized(); + const StorageInMemoryMetadata & storage_metadata = storage->getInMemoryMetadata(); + auto sample_block = storage_metadata.getSampleBlockNonMaterialized(); - PostgreSQLBlockInputStream input(std::move(stx), query_str, sample_block, DEFAULT_BLOCK_SIZE); + PostgreSQLBlockInputStream input(std::move(stx), query_str, sample_block, DEFAULT_BLOCK_SIZE); - copyData(input, *block_io.out); - } - catch (Exception & e) - { - e.addMessage("while initial data sync for table {}.{}", database_name, table_name); - throw; + copyData(input, *block_io.out); + } + catch (Exception & e) + { + e.addMessage("while initial data synchronization"); + throw; + } } LOG_DEBUG(log, "Done loading from snapshot"); diff --git a/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.h b/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.h index 29e7f9b3a43..f4118c4aed8 100644 --- a/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.h +++ b/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.h @@ -14,7 +14,6 @@ public: friend class PGReplicaLSN; PostgreSQLReplicationHandler( const std::string & database_name_, - const std::string & table_name_, const std::string & conn_str_, const std::string & metadata_path_, std::shared_ptr 
context_, @@ -22,12 +21,14 @@ public: const std::string & replication_slot_name_, const size_t max_block_size_); - void startup(StoragePtr storage); + void startup(); + void addStoragePtr(const std::string & table_name, StoragePtr storage); void shutdown(); void shutdownFinal(); private: using NontransactionPtr = std::shared_ptr; + using Storages = std::unordered_map; bool isPublicationExist(std::shared_ptr tx); bool isReplicationSlotExist(NontransactionPtr ntx, std::string & slot_name); @@ -44,16 +45,16 @@ private: Poco::Logger * log; std::shared_ptr context; - const std::string database_name, table_name, connection_str, metadata_path; - + const std::string database_name, connection_str, metadata_path; std::string publication_name, replication_slot; - std::string tmp_replication_slot; const size_t max_block_size; PostgreSQLConnectionPtr connection, replication_connection; - BackgroundSchedulePool::TaskHolder startup_task; std::shared_ptr consumer; - StoragePtr nested_storage; + + BackgroundSchedulePool::TaskHolder startup_task; + + Storages storages; }; diff --git a/src/Storages/PostgreSQL/StoragePostgreSQLReplica.cpp b/src/Storages/PostgreSQL/StoragePostgreSQLReplica.cpp index dae51cac76f..27ab6c7adaf 100644 --- a/src/Storages/PostgreSQL/StoragePostgreSQLReplica.cpp +++ b/src/Storages/PostgreSQL/StoragePostgreSQLReplica.cpp @@ -1,5 +1,4 @@ #include "StoragePostgreSQLReplica.h" -#include "PostgreSQLReplicationSettings.h" #include #include @@ -35,13 +34,14 @@ static const auto NESTED_STORAGE_SUFFIX = "_ReplacingMergeTree"; StoragePostgreSQLReplica::StoragePostgreSQLReplica( const StorageID & table_id_, const String & remote_database_name, - const String & remote_table_name, + const String & remote_table_name_, const String & connection_str, const String & relative_data_path_, const StorageInMemoryMetadata & storage_metadata, const Context & context_, - std::unique_ptr replication_settings_) + std::unique_ptr replication_settings_) : IStorage(table_id_) + , remote_table_name(remote_table_name_) , relative_data_path(relative_data_path_) , global_context(std::make_shared(context_.getGlobalContext())) , replication_settings(std::move(replication_settings_)) @@ -56,7 +56,6 @@ StoragePostgreSQLReplica::StoragePostgreSQLReplica( replication_handler = std::make_unique( remote_database_name, - remote_table_name, connection_str, metadata_path, global_context, @@ -246,9 +245,12 @@ void StoragePostgreSQLReplica::startup() LOG_TRACE(&Poco::Logger::get("StoragePostgreSQLReplica"), "Directory already exists {}", relative_data_path); - replication_handler->startup( + replication_handler->addStoragePtr( + remote_table_name, DatabaseCatalog::instance().getTable( StorageID(table_id.database_name, table_id.table_name + NESTED_STORAGE_SUFFIX), *global_context)); + + replication_handler->startup(); } @@ -295,7 +297,7 @@ void registerStoragePostgreSQLReplica(StorageFactory & factory) { ASTs & engine_args = args.engine_args; bool has_settings = args.storage_def->settings; - auto postgresql_replication_settings = std::make_unique(); + auto postgresql_replication_settings = std::make_unique(); if (has_settings) postgresql_replication_settings->loadFromQuery(*args.storage_def); diff --git a/src/Storages/PostgreSQL/StoragePostgreSQLReplica.h b/src/Storages/PostgreSQL/StoragePostgreSQLReplica.h index 652f948bf79..c1e4b319187 100644 --- a/src/Storages/PostgreSQL/StoragePostgreSQLReplica.h +++ b/src/Storages/PostgreSQL/StoragePostgreSQLReplica.h @@ -3,7 +3,7 @@ #include "config_core.h" #include 
"PostgreSQLReplicationHandler.h" -#include "PostgreSQLReplicationSettings.h" +#include "PostgreSQLReplicaSettings.h" #include #include @@ -54,7 +54,7 @@ protected: const String & relative_data_path_, const StorageInMemoryMetadata & storage_metadata, const Context & context_, - std::unique_ptr replication_settings_); + std::unique_ptr replication_settings_); private: std::shared_ptr getMaterializedColumnsDeclaration( @@ -64,10 +64,10 @@ private: ASTPtr getCreateHelperTableQuery(); void dropNested(); - String relative_data_path; + std::string remote_table_name, relative_data_path; std::shared_ptr global_context; - std::unique_ptr replication_settings; + std::unique_ptr replication_settings; std::unique_ptr replication_handler; /// ReplacingMergeTree table From ed6d19b8a6f4cfa9334b2601c5af58944b65f766 Mon Sep 17 00:00:00 2001 From: kssenii Date: Mon, 8 Feb 2021 23:23:51 +0000 Subject: [PATCH 027/931] Add PostgreSQLReplica database engine --- src/Databases/DatabaseFactory.cpp | 52 +++- .../PostgreSQL/DatabasePostgreSQL.cpp | 20 +- src/Databases/PostgreSQL/DatabasePostgreSQL.h | 1 - .../PostgreSQL/DatabasePostgreSQLReplica.cpp | 253 +++++++++++++++++ .../PostgreSQL/DatabasePostgreSQLReplica.h | 88 ++++++ .../fetchPostgreSQLTableStructure.cpp | 14 + .../fetchPostgreSQLTableStructure.h | 4 +- .../PostgreSQL/PostgreSQLConnection.h | 3 +- .../PostgreSQL/PostgreSQLReplicaConsumer.cpp | 188 +++++++------ .../PostgreSQL/PostgreSQLReplicaConsumer.h | 47 ++-- .../PostgreSQL/PostgreSQLReplicaMetadata.cpp | 1 + .../PostgreSQL/PostgreSQLReplicaMetadata.h | 2 + .../PostgreSQLReplicationHandler.cpp | 110 ++++++-- .../PostgreSQL/PostgreSQLReplicationHandler.h | 20 +- .../PostgreSQL/StoragePostgreSQLReplica.cpp | 262 ++++++++++++------ .../PostgreSQL/StoragePostgreSQLReplica.h | 46 ++- .../__init__.py | 0 .../configs/log_conf.xml | 11 + .../test.py | 138 +++++++++ 19 files changed, 1003 insertions(+), 257 deletions(-) create mode 100644 src/Databases/PostgreSQL/DatabasePostgreSQLReplica.cpp create mode 100644 src/Databases/PostgreSQL/DatabasePostgreSQLReplica.h create mode 100644 tests/integration/test_postgresql_replica_database_engine/__init__.py create mode 100644 tests/integration/test_postgresql_replica_database_engine/configs/log_conf.xml create mode 100644 tests/integration/test_postgresql_replica_database_engine/test.py diff --git a/src/Databases/DatabaseFactory.cpp b/src/Databases/DatabaseFactory.cpp index 5166e15b7b4..d4b7674f73b 100644 --- a/src/Databases/DatabaseFactory.cpp +++ b/src/Databases/DatabaseFactory.cpp @@ -34,7 +34,9 @@ #if USE_LIBPQXX #include // Y_IGNORE +#include #include +#include #endif namespace DB @@ -96,7 +98,9 @@ DatabasePtr DatabaseFactory::getImpl(const ASTCreateQuery & create, const String const String & engine_name = engine_define->engine->name; const UUID & uuid = create.uuid; - if (engine_name != "MySQL" && engine_name != "MaterializeMySQL" && engine_name != "Lazy" && engine_name != "PostgreSQL" && engine_define->engine->arguments) + if (engine_name != "MySQL" && engine_name != "MaterializeMySQL" + && engine_name != "PostgreSQL" && engine_name != "PostgreSQLReplica" + && engine_name != "Lazy" && engine_define->engine->arguments) throw Exception("Database engine " + engine_name + " cannot have arguments", ErrorCodes::BAD_ARGUMENTS); if (engine_define->engine->parameters || engine_define->partition_by || engine_define->primary_key || engine_define->order_by || @@ -219,6 +223,52 @@ DatabasePtr DatabaseFactory::getImpl(const ASTCreateQuery & create, const String return 
std::make_shared( context, metadata_path, engine_define, database_name, postgres_database_name, connection, use_table_cache); } + else if (engine_name == "PostgreSQLReplica") + { + const ASTFunction * engine = engine_define->engine; + + if (!engine->arguments || engine->arguments->children.size() != 4) + { + throw Exception( + fmt::format("{} Database require host:port, database_name, username, password arguments ", engine_name), + ErrorCodes::BAD_ARGUMENTS); + } + + ASTs & engine_args = engine->arguments->children; + + for (auto & engine_arg : engine_args) + engine_arg = evaluateConstantExpressionOrIdentifierAsLiteral(engine_arg, context); + + const auto & host_port = safeGetLiteralValue(engine_args[0], engine_name); + const auto & postgres_database_name = safeGetLiteralValue(engine_args[1], engine_name); + const auto & username = safeGetLiteralValue(engine_args[2], engine_name); + const auto & password = safeGetLiteralValue(engine_args[3], engine_name); + + auto parsed_host_port = parseAddress(host_port, 5432); + auto connection = std::make_shared( + postgres_database_name, parsed_host_port.first, parsed_host_port.second, username, password); + + auto postgresql_replica_settings = std::make_unique(); + + if (engine_define->settings) + postgresql_replica_settings->loadFromQuery(*engine_define); + + if (create.uuid == UUIDHelpers::Nil) + { + return std::make_shared>( + context, metadata_path, uuid, engine_define, + database_name, postgres_database_name, connection, + std::move(postgresql_replica_settings)); + } + else + { + return std::make_shared>( + context, metadata_path, uuid, engine_define, + database_name, postgres_database_name, connection, + std::move(postgresql_replica_settings)); + } + } + #endif diff --git a/src/Databases/PostgreSQL/DatabasePostgreSQL.cpp b/src/Databases/PostgreSQL/DatabasePostgreSQL.cpp index 722b9c64edb..ebe5ba107bd 100644 --- a/src/Databases/PostgreSQL/DatabasePostgreSQL.cpp +++ b/src/Databases/PostgreSQL/DatabasePostgreSQL.cpp @@ -58,7 +58,7 @@ bool DatabasePostgreSQL::empty() const { std::lock_guard lock(mutex); - auto tables_list = fetchTablesList(); + auto tables_list = fetchPostgreSQLTablesList(connection->conn()); for (const auto & table_name : tables_list) if (!detached_or_dropped.count(table_name)) @@ -74,7 +74,7 @@ DatabaseTablesIteratorPtr DatabasePostgreSQL::getTablesIterator( std::lock_guard lock(mutex); Tables tables; - auto table_names = fetchTablesList(); + auto table_names = fetchPostgreSQLTablesList(connection->conn()); for (const auto & table_name : table_names) if (!detached_or_dropped.count(table_name)) @@ -84,20 +84,6 @@ DatabaseTablesIteratorPtr DatabasePostgreSQL::getTablesIterator( } -std::unordered_set DatabasePostgreSQL::fetchTablesList() const -{ - std::unordered_set tables; - std::string query = "SELECT tablename FROM pg_catalog.pg_tables " - "WHERE schemaname != 'pg_catalog' AND schemaname != 'information_schema'"; - pqxx::read_transaction tx(*connection->conn()); - - for (auto table_name : tx.stream(query)) - tables.insert(std::get<0>(table_name)); - - return tables; -} - - bool DatabasePostgreSQL::checkPostgresTable(const String & table_name) const { if (table_name.find('\'') != std::string::npos @@ -299,7 +285,7 @@ void DatabasePostgreSQL::loadStoredObjects(Context & /* context */, bool, bool / void DatabasePostgreSQL::removeOutdatedTables() { std::lock_guard lock{mutex}; - auto actual_tables = fetchTablesList(); + auto actual_tables = fetchPostgreSQLTablesList(connection->conn()); if (cache_tables) { diff --git 
a/src/Databases/PostgreSQL/DatabasePostgreSQL.h b/src/Databases/PostgreSQL/DatabasePostgreSQL.h index 56ea6645f15..79dbd993a0b 100644 --- a/src/Databases/PostgreSQL/DatabasePostgreSQL.h +++ b/src/Databases/PostgreSQL/DatabasePostgreSQL.h @@ -80,7 +80,6 @@ private: BackgroundSchedulePool::TaskHolder cleaner_task; bool checkPostgresTable(const String & table_name) const; - std::unordered_set fetchTablesList() const; StoragePtr fetchTable(const String & table_name, const Context & context, const bool table_checked) const; void removeOutdatedTables(); ASTPtr getColumnDeclaration(const DataTypePtr & data_type) const; diff --git a/src/Databases/PostgreSQL/DatabasePostgreSQLReplica.cpp b/src/Databases/PostgreSQL/DatabasePostgreSQLReplica.cpp new file mode 100644 index 00000000000..d279d7e5c5c --- /dev/null +++ b/src/Databases/PostgreSQL/DatabasePostgreSQLReplica.cpp @@ -0,0 +1,253 @@ +#include + +#if USE_LIBPQXX + +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace DB +{ + +namespace ErrorCodes +{ + extern const int BAD_ARGUMENTS; + extern const int NOT_IMPLEMENTED; + extern const int UNKNOWN_TABLE; + extern const int TABLE_IS_DROPPED; + extern const int TABLE_ALREADY_EXISTS; +} + + +static const auto METADATA_SUFFIX = ".postgresql_replica_metadata"; + +/// TODO: add detach, after which table structure is updated, need to update StoragePtr and recreate nested_storage. +/// Also pass new storagePtr to replication Handler. Stop replication stream mean while? + +template<> +DatabasePostgreSQLReplica::DatabasePostgreSQLReplica( + const Context & context, + const String & metadata_path_, + UUID /* uuid */, + const ASTStorage * database_engine_define_, + const String & database_name_, + const String & postgres_database_name, + PostgreSQLConnectionPtr connection_, + std::unique_ptr settings_) + : DatabaseOrdinary( + database_name_, metadata_path_, "data/" + escapeForFileName(database_name_) + "/", + "DatabasePostgreSQLReplica (" + database_name_ + ")", context) + , global_context(context.getGlobalContext()) + , metadata_path(metadata_path_) + , database_engine_define(database_engine_define_->clone()) + , database_name(database_name_) + , remote_database_name(postgres_database_name) + , connection(std::move(connection_)) + , settings(std::move(settings_)) +{ +} + + +template<> +DatabasePostgreSQLReplica::DatabasePostgreSQLReplica( + const Context & context, + const String & metadata_path_, + UUID uuid, + const ASTStorage * database_engine_define_, + const String & database_name_, + const String & postgres_database_name, + PostgreSQLConnectionPtr connection_, + std::unique_ptr settings_) + : DatabaseAtomic(database_name_, metadata_path_, uuid, "DatabasePostgreSQLReplica (" + database_name_ + ")", context) + , global_context(context.getGlobalContext()) + , metadata_path(metadata_path_) + , database_engine_define(database_engine_define_->clone()) + , remote_database_name(postgres_database_name) + , connection(std::move(connection_)) + , settings(std::move(settings_)) +{ +} + + +template +void DatabasePostgreSQLReplica::startSynchronization() +{ + auto publication_name = global_context.getMacros()->expand(settings->postgresql_publication_name.value); + auto replication_slot = global_context.getMacros()->expand(settings->postgresql_replication_slot_name.value); + + replication_handler = std::make_unique( + remote_database_name, + connection->conn_str(), + 
metadata_path + METADATA_SUFFIX, + std::make_shared(global_context), + replication_slot, + publication_name, + settings->postgresql_max_block_size.changed + ? settings->postgresql_max_block_size.value + : (global_context.getSettingsRef().max_insert_block_size.value)); + + std::unordered_set tables_to_replicate = replication_handler->fetchRequiredTables(connection->conn()); + + for (const auto & table_name : tables_to_replicate) + { + auto storage = getStorage(table_name); + + if (storage) + { + replication_handler->addStorage(table_name, storage.get()->template as()); + tables[table_name] = storage; + } + } + + LOG_TRACE(&Poco::Logger::get("PostgreSQLReplicaDatabaseEngine"), "Loaded {} tables. Starting synchronization", tables.size()); + replication_handler->startup(); +} + + +template +StoragePtr DatabasePostgreSQLReplica::getStorage(const String & name) +{ + auto storage = tryGetTable(name, global_context); + + if (storage) + return storage; + + auto use_nulls = global_context.getSettingsRef().external_table_functions_use_nulls; + auto columns = fetchPostgreSQLTableStructure(connection->conn(), name, use_nulls); + + if (!columns) + return StoragePtr{}; + + StorageInMemoryMetadata metadata; + metadata.setColumns(ColumnsDescription(*columns)); + + storage = StoragePostgreSQLReplica::create(StorageID(database_name, name), metadata_path, metadata, global_context); + + return storage; +} + + +template +void DatabasePostgreSQLReplica::shutdown() +{ + if (replication_handler) + replication_handler->shutdown(); +} + + +template +void DatabasePostgreSQLReplica::loadStoredObjects( + Context & context, bool has_force_restore_data_flag, bool force_attach) +{ + Base::loadStoredObjects(context, has_force_restore_data_flag, force_attach); + startSynchronization(); + +} + + +template +StoragePtr DatabasePostgreSQLReplica::tryGetTable(const String & name, const Context & context) const +{ + if (context.hasQueryContext()) + { + auto storage_set = context.getQueryContext().getQueryFactoriesInfo().storages; + if (storage_set.find("ReplacingMergeTree") != storage_set.end()) + { + return Base::tryGetTable(name, context); + } + } + + auto table = tables.find(name); + if (table != tables.end()) + return table->second; + + return StoragePtr{}; + +} + + +/// TODO: assert called from sync thread +template +void DatabasePostgreSQLReplica::createTable(const Context & context, const String & name, const StoragePtr & table, const ASTPtr & query) +{ + Base::createTable(context, name, table, query); +} + + +template +void DatabasePostgreSQLReplica::dropTable(const Context & context, const String & name, bool no_delay) +{ + Base::dropTable(context, name, no_delay); +} + + +template +void DatabasePostgreSQLReplica::attachTable(const String & name, const StoragePtr & table, const String & relative_table_path) +{ + Base::attachTable(name, table, relative_table_path); +} + + +template +StoragePtr DatabasePostgreSQLReplica::detachTable(const String & name) +{ + return Base::detachTable(name); +} + + +template +void DatabasePostgreSQLReplica::drop(const Context & context) +{ + if (replication_handler) + { + replication_handler->shutdown(); + replication_handler->shutdownFinal(); + } + + /// Remove metadata + Poco::File metadata(Base::getMetadataPath() + METADATA_SUFFIX); + + if (metadata.exists()) + metadata.remove(false); + + Base::drop(context); +} + + +template +DatabaseTablesIteratorPtr DatabasePostgreSQLReplica::getTablesIterator( + const Context & /* context */, const DatabaseOnDisk::FilterByNameFunction & /* 
filter_by_table_name */) +{ + Tables nested_tables; + for (const auto & [table_name, storage] : tables) + { + auto nested_storage = storage->as()->tryGetNested(); + + if (nested_storage) + nested_tables[table_name] = nested_storage; + } + + return std::make_unique(nested_tables, database_name); +} + +} + +#endif diff --git a/src/Databases/PostgreSQL/DatabasePostgreSQLReplica.h b/src/Databases/PostgreSQL/DatabasePostgreSQLReplica.h new file mode 100644 index 00000000000..a73acd7b27b --- /dev/null +++ b/src/Databases/PostgreSQL/DatabasePostgreSQLReplica.h @@ -0,0 +1,88 @@ +#pragma once + +#if !defined(ARCADIA_BUILD) +#include "config_core.h" +#endif + +#if USE_LIBPQXX + +#include +#include + +#include +#include +#include +#include +#include + + +namespace DB +{ + +class Context; +class PostgreSQLConnection; +using PostgreSQLConnectionPtr = std::shared_ptr; + + +template +class DatabasePostgreSQLReplica : public Base +{ + +public: + DatabasePostgreSQLReplica( + const Context & context, + const String & metadata_path_, + UUID uuid, + const ASTStorage * database_engine_define, + const String & dbname_, + const String & postgres_dbname, + PostgreSQLConnectionPtr connection_, + std::unique_ptr settings_); + + String getEngineName() const override { return "PostgreSQLReplica"; } + String getMetadataPath() const override { return metadata_path; } + + void loadStoredObjects(Context &, bool, bool force_attach) override; + + DatabaseTablesIteratorPtr getTablesIterator( + const Context & context, const DatabaseOnDisk::FilterByNameFunction & filter_by_table_name) override; + + StoragePtr tryGetTable(const String & name, const Context & context) const override; + + void createTable(const Context & context, const String & name, const StoragePtr & table, const ASTPtr & query) override; + + void dropTable(const Context & context, const String & name, bool no_delay) override; + + void attachTable(const String & name, const StoragePtr & table, const String & relative_table_path) override; + + StoragePtr detachTable(const String & name) override; + + void drop(const Context & context) override; + + void shutdown() override; + + +private: + void startSynchronization(); + StoragePtr getStorage(const String & name); + + const Context global_context; + String metadata_path; + ASTPtr database_engine_define; + String database_name, remote_database_name; + PostgreSQLConnectionPtr connection; + std::unique_ptr settings; + + std::shared_ptr replication_handler; + std::map tables; + + bool checkPostgresTable(const String & table_name) const; + std::unordered_set fetchTablesList() const; + StoragePtr fetchTable(const String & table_name, const Context & context, const bool table_checked) const; + void removeOutdatedTables(); + ASTPtr getColumnDeclaration(const DataTypePtr & data_type) const; +}; + +} + +#endif diff --git a/src/Databases/PostgreSQL/fetchPostgreSQLTableStructure.cpp b/src/Databases/PostgreSQL/fetchPostgreSQLTableStructure.cpp index ec23cfc8794..28f698b3da5 100644 --- a/src/Databases/PostgreSQL/fetchPostgreSQLTableStructure.cpp +++ b/src/Databases/PostgreSQL/fetchPostgreSQLTableStructure.cpp @@ -25,6 +25,20 @@ namespace ErrorCodes } +std::unordered_set fetchPostgreSQLTablesList(ConnectionPtr connection) +{ + std::unordered_set tables; + std::string query = "SELECT tablename FROM pg_catalog.pg_tables " + "WHERE schemaname != 'pg_catalog' AND schemaname != 'information_schema'"; + pqxx::read_transaction tx(*connection); + + for (auto table_name : tx.stream(query)) + tables.insert(std::get<0>(table_name)); + + 
return tables; +} + + static DataTypePtr convertPostgreSQLDataType(std::string & type, bool is_nullable, uint16_t dimensions) { DataTypePtr res; diff --git a/src/Databases/PostgreSQL/fetchPostgreSQLTableStructure.h b/src/Databases/PostgreSQL/fetchPostgreSQLTableStructure.h index bbbb379541b..a507514e92d 100644 --- a/src/Databases/PostgreSQL/fetchPostgreSQLTableStructure.h +++ b/src/Databases/PostgreSQL/fetchPostgreSQLTableStructure.h @@ -11,8 +11,10 @@ namespace DB { +std::unordered_set fetchPostgreSQLTablesList(ConnectionPtr connection); + std::shared_ptr fetchPostgreSQLTableStructure( - std::shared_ptr connection, const String & postgres_table_name, bool use_nulls); + ConnectionPtr connection, const String & postgres_table_name, bool use_nulls); } diff --git a/src/Storages/PostgreSQL/PostgreSQLConnection.h b/src/Storages/PostgreSQL/PostgreSQLConnection.h index ae79a3436e0..6ffbfe2d20a 100644 --- a/src/Storages/PostgreSQL/PostgreSQLConnection.h +++ b/src/Storages/PostgreSQL/PostgreSQLConnection.h @@ -16,9 +16,10 @@ namespace DB /// Connection is not made until actually used. class PostgreSQLConnection { - using ConnectionPtr = std::shared_ptr; public: + using ConnectionPtr = std::shared_ptr; + PostgreSQLConnection(std::string dbname, std::string host, UInt16 port, std::string user, std::string password) : connection_str(formatConnectionString(std::move(dbname), std::move(host), port, std::move(user), std::move(password))) {} diff --git a/src/Storages/PostgreSQL/PostgreSQLReplicaConsumer.cpp b/src/Storages/PostgreSQL/PostgreSQLReplicaConsumer.cpp index 842c5b3a5d5..3435abc1fa9 100644 --- a/src/Storages/PostgreSQL/PostgreSQLReplicaConsumer.cpp +++ b/src/Storages/PostgreSQL/PostgreSQLReplicaConsumer.cpp @@ -47,21 +47,28 @@ PostgreSQLReplicaConsumer::PostgreSQLReplicaConsumer( buffers.emplace(table_name, BufferData(storage->getInMemoryMetadata().getSampleBlock())); } - wal_reader_task = context->getSchedulePool().createTask("PostgreSQLReplicaWALReader", [this]{ replicationStream(); }); + wal_reader_task = context->getSchedulePool().createTask("PostgreSQLReplicaWALReader", [this]{ synchronizationStream(); }); wal_reader_task->deactivate(); } void PostgreSQLReplicaConsumer::startSynchronization() { - metadata.readMetadata(); - - if (!metadata.lsn().empty()) + try { - auto tx = std::make_shared(*connection->conn()); - final_lsn = metadata.lsn(); - final_lsn = advanceLSN(tx); - tx->commit(); + metadata.readMetadata(); + + if (!metadata.lsn().empty()) + { + auto tx = std::make_shared(*connection->conn()); + final_lsn = metadata.lsn(); + final_lsn = advanceLSN(tx); + tx->commit(); + } + } + catch (...) + { + tryLogCurrentException(__PRETTY_FUNCTION__); } wal_reader_task->activateAndSchedule(); @@ -75,11 +82,10 @@ void PostgreSQLReplicaConsumer::stopSynchronization() } -void PostgreSQLReplicaConsumer::replicationStream() +void PostgreSQLReplicaConsumer::synchronizationStream() { auto start_time = std::chrono::steady_clock::now(); - - LOG_TRACE(log, "Starting replication stream"); + LOG_TRACE(log, "Starting synchronization stream"); while (!stop_synchronization) { @@ -89,10 +95,7 @@ void PostgreSQLReplicaConsumer::replicationStream() auto end_time = std::chrono::steady_clock::now(); auto duration = std::chrono::duration_cast(end_time - start_time); if (duration.count() > max_thread_work_duration_ms) - { - LOG_TRACE(log, "Reschedule replication_stream. 
Thread work duration limit exceeded."); break; - } } if (!stop_synchronization) @@ -148,7 +151,7 @@ void PostgreSQLReplicaConsumer::readString(const char * message, size_t & pos, s } -Int32 PostgreSQLReplicaConsumer::readInt32(const char * message, size_t & pos) +Int32 PostgreSQLReplicaConsumer::readInt32(const char * message, size_t & pos, [[maybe_unused]] size_t size) { assert(size > pos + 8); Int32 result = (UInt32(unhex2(message + pos)) << 24) @@ -160,7 +163,7 @@ Int32 PostgreSQLReplicaConsumer::readInt32(const char * message, size_t & pos) } -Int16 PostgreSQLReplicaConsumer::readInt16(const char * message, size_t & pos) +Int16 PostgreSQLReplicaConsumer::readInt16(const char * message, size_t & pos, [[maybe_unused]] size_t size) { assert(size > pos + 4); Int16 result = (UInt32(unhex2(message + pos)) << 8) @@ -170,7 +173,7 @@ Int16 PostgreSQLReplicaConsumer::readInt16(const char * message, size_t & pos) } -Int8 PostgreSQLReplicaConsumer::readInt8(const char * message, size_t & pos) +Int8 PostgreSQLReplicaConsumer::readInt8(const char * message, size_t & pos, [[maybe_unused]] size_t size) { assert(size > pos + 2); Int8 result = unhex2(message + pos); @@ -179,7 +182,7 @@ Int8 PostgreSQLReplicaConsumer::readInt8(const char * message, size_t & pos) } -Int64 PostgreSQLReplicaConsumer::readInt64(const char * message, size_t & pos) +Int64 PostgreSQLReplicaConsumer::readInt64(const char * message, size_t & pos, [[maybe_unused]] size_t size) { assert(size > pos + 16); Int64 result = (UInt64(unhex4(message + pos)) << 48) @@ -192,19 +195,19 @@ Int64 PostgreSQLReplicaConsumer::readInt64(const char * message, size_t & pos) void PostgreSQLReplicaConsumer::readTupleData( - BufferData & buffer, const char * message, size_t & pos, PostgreSQLQuery type, bool old_value) + BufferData & buffer, const char * message, size_t & pos, [[maybe_unused]] size_t size, PostgreSQLQuery type, bool old_value) { - Int16 num_columns = readInt16(message, pos); + Int16 num_columns = readInt16(message, pos, size); /// 'n' means nullable, 'u' means TOASTed value, 't' means text formatted data LOG_DEBUG(log, "num_columns {}", num_columns); for (int column_idx = 0; column_idx < num_columns; ++column_idx) { - char identifier = readInt8(message, pos); - Int32 col_len = readInt32(message, pos); + char identifier = readInt8(message, pos, size); + Int32 col_len = readInt32(message, pos, size); String value; for (int i = 0; i < col_len; ++i) { - value += readInt8(message, pos); + value += readInt8(message, pos, size); } insertValue(buffer, value, column_idx); @@ -242,32 +245,31 @@ void PostgreSQLReplicaConsumer::readTupleData( } } - -void PostgreSQLReplicaConsumer::processReplicationMessage( - const char * replication_message, size_t size, std::unordered_set & tables_to_sync) +/// test relation id can be shuffled ? 
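/// pgoutput message layout: after the skipped "\x" prefix the first decoded
/// byte is a type tag. 'B' is Begin (transaction end LSN plus commit
/// timestamp), 'C' is Commit, 'R' is Relation and describes the schema of one
/// replicated table (it also sets table_to_insert for the row messages that
/// follow), 'I'/'U'/'D' carry tuple data for a single row, and 'T' is Truncate.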
+void PostgreSQLReplicaConsumer::processReplicationMessage(const char * replication_message, size_t size) { /// Skip '\x' size_t pos = 2; - char type = readInt8(replication_message, pos); + char type = readInt8(replication_message, pos, size); LOG_TRACE(log, "TYPE: {}", type); switch (type) { case 'B': // Begin { - Int64 transaction_end_lsn = readInt64(replication_message, pos); - Int64 transaction_commit_timestamp = readInt64(replication_message, pos); + Int64 transaction_end_lsn = readInt64(replication_message, pos, size); + Int64 transaction_commit_timestamp = readInt64(replication_message, pos, size); LOG_DEBUG(log, "transaction lsn {}, transaction commit timespamp {}", transaction_end_lsn, transaction_commit_timestamp); break; } case 'C': // Commit { - readInt8(replication_message, pos); - Int64 commit_lsn = readInt64(replication_message, pos); - Int64 transaction_end_lsn = readInt64(replication_message, pos); + readInt8(replication_message, pos, size); + Int64 commit_lsn = readInt64(replication_message, pos, size); + Int64 transaction_end_lsn = readInt64(replication_message, pos, size); /// Since postgres epoch - Int64 transaction_commit_timestamp = readInt64(replication_message, pos); + Int64 transaction_commit_timestamp = readInt64(replication_message, pos, size); LOG_DEBUG(log, "commit lsn {}, transaction lsn {}, transaction commit timestamp {}", commit_lsn, transaction_end_lsn, transaction_commit_timestamp); final_lsn = current_lsn; @@ -277,12 +279,17 @@ void PostgreSQLReplicaConsumer::processReplicationMessage( break; case 'R': // Relation { - Int32 relation_id = readInt32(replication_message, pos); + Int32 relation_id = readInt32(replication_message, pos, size); String relation_namespace, relation_name; readString(replication_message, pos, size, relation_namespace); + readString(replication_message, pos, size, relation_name); - Int8 replica_identity = readInt8(replication_message, pos); - Int16 num_columns = readInt16(replication_message, pos); + table_to_insert = relation_name; + tables_to_sync.insert(table_to_insert); + LOG_DEBUG(log, "INSERTING TABLE {}", table_to_insert); + + Int8 replica_identity = readInt8(replication_message, pos, size); + Int16 num_columns = readInt16(replication_message, pos, size); LOG_DEBUG(log, "Replication message type 'R', relation_id: {}, namespace: {}, relation name {}, replica identity {}, columns number {}", @@ -293,14 +300,13 @@ void PostgreSQLReplicaConsumer::processReplicationMessage( for (uint16_t i = 0; i < num_columns; ++i) { String column_name; - key = readInt8(replication_message, pos); + key = readInt8(replication_message, pos, size); readString(replication_message, pos, size, column_name); - data_type_id = readInt32(replication_message, pos); - type_modifier = readInt32(replication_message, pos); + data_type_id = readInt32(replication_message, pos, size); + type_modifier = readInt32(replication_message, pos, size); LOG_DEBUG(log, "Key {}, column name {}, data type id {}, type modifier {}", key, column_name, data_type_id, type_modifier); } - table_to_insert = relation_name; if (storages.find(table_to_insert) == storages.end()) { throw Exception(ErrorCodes::UNKNOWN_TABLE, @@ -309,57 +315,55 @@ void PostgreSQLReplicaConsumer::processReplicationMessage( [[maybe_unused]] auto buffer_iter = buffers.find(table_to_insert); assert(buffer_iter != buffers.end()); - tables_to_sync.insert(relation_name); - break; } case 'Y': // Type break; case 'I': // Insert { - Int32 relation_id = readInt32(replication_message, pos); - Int8 new_tuple = 
readInt8(replication_message, pos);
+ Int32 relation_id = readInt32(replication_message, pos, size);
+ Int8 new_tuple = readInt8(replication_message, pos, size);
- LOG_DEBUG(log, "relationID {}, newTuple {}", relation_id, new_tuple);
+ LOG_DEBUG(log, "relationID {}, newTuple {} current insert table {}", relation_id, new_tuple, table_to_insert);
auto buffer = buffers.find(table_to_insert);
if (buffer == buffers.end())
{
throw Exception(ErrorCodes::UNKNOWN_TABLE, "Buffer for table {} does not exist", table_to_insert);
}
- readTupleData(buffer->second, replication_message, pos, PostgreSQLQuery::INSERT);
+ readTupleData(buffer->second, replication_message, pos, size, PostgreSQLQuery::INSERT);
break;
}
case 'U': // Update
{
- Int32 relation_id = readInt32(replication_message, pos);
- Int8 primary_key_or_old_tuple_data = readInt8(replication_message, pos);
+ Int32 relation_id = readInt32(replication_message, pos, size);
+ Int8 primary_key_or_old_tuple_data = readInt8(replication_message, pos, size);
- LOG_DEBUG(log, "relationID {}, key {}", relation_id, primary_key_or_old_tuple_data);
+ LOG_DEBUG(log, "relationID {}, key {} current insert table {}", relation_id, primary_key_or_old_tuple_data, table_to_insert);
auto buffer = buffers.find(table_to_insert);
- readTupleData(buffer->second, replication_message, pos, PostgreSQLQuery::UPDATE, true);
+ readTupleData(buffer->second, replication_message, pos, size, PostgreSQLQuery::UPDATE, true);
if (pos + 1 < size)
{
- Int8 new_tuple_data = readInt8(replication_message, pos);
+ Int8 new_tuple_data = readInt8(replication_message, pos, size);
LOG_DEBUG(log, "new tuple data {}", new_tuple_data);
- readTupleData(buffer->second, replication_message, pos, PostgreSQLQuery::UPDATE);
+ readTupleData(buffer->second, replication_message, pos, size, PostgreSQLQuery::UPDATE);
}
break;
}
case 'D': // Delete
{
- Int32 relation_id = readInt32(replication_message, pos);
+ Int32 relation_id = readInt32(replication_message, pos, size);
//Int8 index_replica_identity = readInt8(replication_message, pos);
- Int8 full_replica_identity = readInt8(replication_message, pos);
+ Int8 full_replica_identity = readInt8(replication_message, pos, size);
LOG_DEBUG(log, "relationID {}, full replica identity {}", relation_id, full_replica_identity);
auto buffer = buffers.find(table_to_insert);
- readTupleData(buffer->second, replication_message, pos, PostgreSQLQuery::DELETE);
+ readTupleData(buffer->second, replication_message, pos, size, PostgreSQLQuery::DELETE);
break;
}
case 'T': // Truncate
@@ -371,54 +375,64 @@ void PostgreSQLReplicaConsumer::processReplicationMessage(
}
-void PostgreSQLReplicaConsumer::syncTables(
- std::shared_ptr tx, const std::unordered_set & tables_to_sync)
+void PostgreSQLReplicaConsumer::syncTables(std::shared_ptr tx)
{
+ LOG_TRACE(log, "AVAILABLE TABLES {}", tables_to_sync.size());
for (const auto & table_name : tables_to_sync)
{
- auto & buffer = buffers.find(table_name)->second;
- Block result_rows = buffer.description.sample_block.cloneWithColumns(std::move(buffer.columns));
-
- if (result_rows.rows())
+ try
{
- LOG_TRACE(log, "SYNCING TABLE {} max_block_size {}", result_rows.rows(), max_block_size);
+ LOG_TRACE(log, "ATTEMPT SYNCING TABLE {}", table_name);
+ auto & buffer = buffers.find(table_name)->second;
+ Block result_rows = buffer.description.sample_block.cloneWithColumns(std::move(buffer.columns));
- metadata.commitMetadata(final_lsn, [&]()
+ if (result_rows.rows())
{
- Context insert_context(*context);
- insert_context.makeQueryContext();
+
LOG_TRACE(log, "SYNCING TABLE {} rows {} max_block_size {}", table_name, result_rows.rows(), max_block_size); - auto insert = std::make_shared(); - insert->table_id = storages[table_name]->getStorageID(); + metadata.commitMetadata(final_lsn, [&]() + { + Context insert_context(*context); + insert_context.makeQueryContext(); + insert_context.addQueryFactoriesInfo(Context::QueryLogFactories::Storage, "ReplacingMergeTree"); - InterpreterInsertQuery interpreter(insert, insert_context); - auto block_io = interpreter.execute(); + auto insert = std::make_shared(); + insert->table_id = storages[table_name]->getStorageID(); - /// TODO: what if one block is not enough - OneBlockInputStream input(result_rows); + InterpreterInsertQuery interpreter(insert, insert_context); + auto block_io = interpreter.execute(); - copyData(input, *block_io.out); + /// TODO: what if one block is not enough + OneBlockInputStream input(result_rows); - LOG_TRACE(log, "TABLE SYNC END"); + copyData(input, *block_io.out); - auto actual_lsn = advanceLSN(tx); - buffer.columns = buffer.description.sample_block.cloneEmptyColumns(); + LOG_TRACE(log, "TABLE SYNC END"); - return actual_lsn; - }); + auto actual_lsn = advanceLSN(tx); + buffer.columns = buffer.description.sample_block.cloneEmptyColumns(); + + return actual_lsn; + }); + } + } + catch (...) + { + tryLogCurrentException(__PRETTY_FUNCTION__); } } + + tables_to_sync.clear(); + tx->commit(); } -String PostgreSQLReplicaConsumer::advanceLSN(std::shared_ptr ntx) +String PostgreSQLReplicaConsumer::advanceLSN(std::shared_ptr tx) { LOG_TRACE(log, "CURRENT LSN FROM TO {}", final_lsn); std::string query_str = fmt::format("SELECT end_lsn FROM pg_replication_slot_advance('{}', '{}')", replication_slot_name, final_lsn); - pqxx::result result{ntx->exec(query_str)}; - - ntx->commit(); + pqxx::result result{tx->exec(query_str)}; if (!result.empty()) return result[0][0].as(); @@ -432,7 +446,6 @@ bool PostgreSQLReplicaConsumer::readFromReplicationSlot() { std::shared_ptr tx; bool slot_empty = true; - std::unordered_set tables_to_sync; try { @@ -471,20 +484,18 @@ bool PostgreSQLReplicaConsumer::readFromReplicationSlot() current_lsn = (*row)[0]; LOG_TRACE(log, "Replication message: {}", (*row)[1]); - processReplicationMessage((*row)[1].c_str(), (*row)[1].size(), tables_to_sync); + processReplicationMessage((*row)[1].c_str(), (*row)[1].size()); } } catch (const pqxx::sql_error & e) { - /// Currently `sql replication interface` is used and it has the problem that it registers relcache + /// sql replication interface has the problem that it registers relcache /// callbacks on each pg_logical_slot_get_changes and there is no way to invalidate them: /// https://github.com/postgres/postgres/blob/master/src/backend/replication/pgoutput/pgoutput.c#L1128 /// So at some point will get out of limit and then they will be cleaned. 
std::string error_message = e.what(); - if (error_message.find("out of relcache_callback_list slots") != std::string::npos) - LOG_DEBUG(log, "Out of rel_cache_list slot"); - else + if (error_message.find("out of relcache_callback_list slots") == std::string::npos) tryLogCurrentException(__PRETTY_FUNCTION__); return false; @@ -498,8 +509,7 @@ bool PostgreSQLReplicaConsumer::readFromReplicationSlot() return false; } - syncTables(tx, tables_to_sync); - + syncTables(tx); return true; } diff --git a/src/Storages/PostgreSQL/PostgreSQLReplicaConsumer.h b/src/Storages/PostgreSQL/PostgreSQLReplicaConsumer.h index 817c57a99fa..0973ba7f785 100644 --- a/src/Storages/PostgreSQL/PostgreSQLReplicaConsumer.h +++ b/src/Storages/PostgreSQL/PostgreSQLReplicaConsumer.h @@ -29,29 +29,20 @@ public: const size_t max_block_size_, Storages storages_); - /// Start reading WAL from current_lsn position. Initial data sync from created snapshot already done. void startSynchronization(); + void stopSynchronization(); private: - /// Executed by wal_reader_task. A separate thread reads wal and advances lsn to last commited position - /// after rows were written via copyData. - void replicationStream(); - void stopReplicationStream(); - - enum class PostgreSQLQuery - { - INSERT, - UPDATE, - DELETE - }; + void synchronizationStream(); bool readFromReplicationSlot(); - void syncTables(std::shared_ptr tx, const std::unordered_set & tables_to_sync); + + void syncTables(std::shared_ptr tx); + String advanceLSN(std::shared_ptr ntx); - void processReplicationMessage( - const char * replication_message, size_t size, std::unordered_set & tables_to_sync); + void processReplicationMessage(const char * replication_message, size_t size); struct BufferData { @@ -74,26 +65,34 @@ private: void insertDefaultValue(BufferData & buffer, size_t column_idx); void insertValue(BufferData & buffer, const std::string & value, size_t column_idx); - void readTupleData(BufferData & buffer, const char * message, size_t & pos, PostgreSQLQuery type, bool old_value = false); - /// Methods to parse replication message data. 
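Threading `size` through every reader lets each helper bounds-check the hex buffer before decoding; the asserts only fire in debug builds. A hardened variant (a sketch, not part of this patch) could throw instead, reusing the Exception style already used in the consumer:

/// Hypothetical release-build bounds check; the use of LOGICAL_ERROR is an assumption.
static void checkBounds(size_t size, size_t pos, size_t need)
{
    if (size < pos + need)
        throw Exception(ErrorCodes::LOGICAL_ERROR,
            "Replication message too short: need {} more hex chars at offset {}", need, pos);
}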
+ enum class PostgreSQLQuery + { + INSERT, + UPDATE, + DELETE + }; +
+ void readTupleData(BufferData & buffer, const char * message, size_t & pos, size_t size, PostgreSQLQuery type, bool old_value = false);
+ void readString(const char * message, size_t & pos, size_t size, String & result);
- Int64 readInt64(const char * message, size_t & pos);
- Int32 readInt32(const char * message, size_t & pos);
- Int16 readInt16(const char * message, size_t & pos);
- Int8 readInt8(const char * message, size_t & pos);
+ Int64 readInt64(const char * message, size_t & pos, size_t size);
+ Int32 readInt32(const char * message, size_t & pos, size_t size);
+ Int16 readInt16(const char * message, size_t & pos, size_t size);
+ Int8 readInt8(const char * message, size_t & pos, size_t size);
Poco::Logger * log;
std::shared_ptr context;
- const std::string replication_slot_name;
- const std::string publication_name;
- PostgreSQLReplicaMetadata metadata;
+ const std::string replication_slot_name, publication_name;
+ PostgreSQLReplicaMetadata metadata;
PostgreSQLConnectionPtr connection;
std::string current_lsn, final_lsn;
const size_t max_block_size;
+ std::string table_to_insert;
+ std::unordered_set tables_to_sync;
BackgroundSchedulePool::TaskHolder wal_reader_task;
std::atomic stop_synchronization = false;
diff --git a/src/Storages/PostgreSQL/PostgreSQLReplicaMetadata.cpp b/src/Storages/PostgreSQL/PostgreSQLReplicaMetadata.cpp
index a5ae25c3f53..9cd5f368a6d 100644
--- a/src/Storages/PostgreSQL/PostgreSQLReplicaMetadata.cpp
+++ b/src/Storages/PostgreSQL/PostgreSQLReplicaMetadata.cpp
@@ -27,6 +27,7 @@ PostgreSQLReplicaMetadata::PostgreSQLReplicaMetadata(const std::string & metadat
void PostgreSQLReplicaMetadata::readMetadata()
{
+ LOG_DEBUG(&Poco::Logger::get("PostgreSQLReplicaMetadata"), "Reading replication metadata file {}", metadata_file);
if (Poco::File(metadata_file).exists())
{
ReadBufferFromFile in(metadata_file, DBMS_DEFAULT_BUFFER_SIZE);
diff --git a/src/Storages/PostgreSQL/PostgreSQLReplicaMetadata.h b/src/Storages/PostgreSQL/PostgreSQLReplicaMetadata.h
index 31044dc3490..ca7a258e24c 100644
--- a/src/Storages/PostgreSQL/PostgreSQLReplicaMetadata.h
+++ b/src/Storages/PostgreSQL/PostgreSQLReplicaMetadata.h
@@ -11,9 +11,11 @@ public:
PostgreSQLReplicaMetadata(const std::string & metadata_file_path);
void commitMetadata(std::string & lsn, const std::function & syncTableFunc);
+
void readMetadata();
size_t version() { return last_version++; }
+
std::string lsn() { return last_lsn; }
private:
diff --git a/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp b/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp
index e9d7b1d9a69..c803f0aa411 100644
--- a/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp
+++ b/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp
@@ -1,7 +1,11 @@
#include "PostgreSQLReplicationHandler.h"
-#include
#include
+#include
+#include
+
+#include
+#include
#include
#include
@@ -18,6 +22,8 @@ namespace ErrorCodes
static const auto reschedule_ms = 500;
+/// TODO: context should be const
+
PostgreSQLReplicationHandler::PostgreSQLReplicationHandler(
const std::string & database_name_,
const std::string & conn_str,
@@ -45,9 +51,9 @@ PostgreSQLReplicationHandler::PostgreSQLReplicationHandler(
}
-void PostgreSQLReplicationHandler::addStoragePtr(const std::string & table_name, StoragePtr storage)
+void PostgreSQLReplicationHandler::addStorage(const std::string & table_name, const StoragePostgreSQLReplica * storage)
{
- storages[table_name] = std::move(storage);
+ storages[table_name] = storage;
}
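addStorage() only records the table-to-storage mapping; nothing touches PostgreSQL until startup() schedules waitConnectionAndStart() on the background pool. Reduced to its essentials, the retry pattern looks like this (a sketch of the flow, not the exact handler code):

/// The task re-arms itself until the first successful connection, so server startup
/// never blocks on an unreachable PostgreSQL instance.
void waitConnectionAndStart()
{
    try
    {
        connection->conn();        /// throws pqxx::broken_connection while unreachable
        startSynchronization();    /// reached exactly once, on the first success
    }
    catch (const pqxx::broken_connection & e)
    {
        LOG_ERROR(log, "Connection attempt failed, will retry: {}", e.what());
        startup_task->scheduleAfter(reschedule_ms);   /// re-arm the task (500 ms)
    }
}

Scheduling the retry instead of looping in place is what keeps the caller's startup path non-blocking.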
@@ -61,23 +67,22 @@ void PostgreSQLReplicationHandler::waitConnectionAndStart()
{
try
{
+ /// Will throw pqxx::broken_connection if no connection at the moment
connection->conn();
+
+ startSynchronization();
}
catch (const pqxx::broken_connection & pqxx_error)
{
- LOG_ERROR(log,
- "Unable to set up connection. Reconnection attempt continue. Error message: {}",
+ LOG_ERROR(log, "Unable to set up connection. Reconnection attempts will continue. Error message: {}",
pqxx_error.what());
startup_task->scheduleAfter(reschedule_ms);
}
- catch (Exception & e)
+ catch (...)
{
- e.addMessage("while setting up connection for PostgreSQLReplica engine");
- throw;
+ tryLogCurrentException(__PRETTY_FUNCTION__);
}
-
- startReplication();
}
@@ -118,7 +123,7 @@ void PostgreSQLReplicationHandler::createPublication(std::shared_ptr
try
{
tx->exec(query_str);
- LOG_TRACE(log, "Created publication {}", publication_name);
+ LOG_TRACE(log, "Created publication {} with tables list: {}", publication_name, table_names);
}
catch (Exception & e)
{
@@ -131,10 +136,8 @@
}
-void PostgreSQLReplicationHandler::startReplication()
+void PostgreSQLReplicationHandler::startSynchronization()
{
- LOG_DEBUG(log, "PostgreSQLReplica starting replication proccess");
-
/// used commands require a specific transaction isolation mode.
replication_connection->conn()->set_variable("default_transaction_isolation", "'repeatable read'");
@@ -177,10 +180,17 @@
dropReplicationSlot(ntx, replication_slot);
initial_sync();
}
+ else
+ {
+ for (const auto & [table_name, storage] : storages)
+ {
+ nested_storages[table_name] = storage->getNested();
+ storage->setNestedLoaded();
+ }
+ }
ntx->commit();
- LOG_DEBUG(&Poco::Logger::get("StoragePostgreSQLMetadata"), "Creating replication consumer");
consumer = std::make_shared(
context,
std::move(connection),
@@ -189,13 +199,10 @@
metadata_path,
start_lsn,
max_block_size,
- storages);
-
- LOG_DEBUG(&Poco::Logger::get("StoragePostgreSQLMetadata"), "Successfully created replication consumer");
+ nested_storages);
consumer->startSynchronization();
-
/// Takes time to close
replication_connection->conn()->close();
}
@@ -206,6 +213,12 @@ void PostgreSQLReplicationHandler::loadFromSnapshot(std::string & snapshot_name)
for (const auto & [table_name, storage] : storages)
{
+ storage->createNestedIfNeeded();
+ auto nested_storage = storage->tryGetNested();
+
+ if (!nested_storage)
+ throw Exception(ErrorCodes::LOGICAL_ERROR, "Unable to create nested storage");
+
try
{
auto stx = std::make_unique(*connection->conn());
@@ -221,19 +234,23 @@
Context insert_context(*context);
insert_context.makeQueryContext();
+ insert_context.addQueryFactoriesInfo(Context::QueryLogFactories::Storage, "ReplacingMergeTree");
auto insert = std::make_shared();
- insert->table_id = storage->getStorageID();
+ insert->table_id = nested_storage->getStorageID();
InterpreterInsertQuery interpreter(insert, insert_context);
auto block_io = interpreter.execute();
- const StorageInMemoryMetadata & storage_metadata = storage->getInMemoryMetadata();
+ const StorageInMemoryMetadata & storage_metadata = nested_storage->getInMemoryMetadata();
auto sample_block = storage_metadata.getSampleBlockNonMaterialized();
PostgreSQLBlockInputStream input(std::move(stx), query_str, sample_block, DEFAULT_BLOCK_SIZE);
copyData(input, *block_io.out); + + storage->setNestedLoaded(); + nested_storages[table_name] = nested_storage; } catch (Exception & e) { @@ -271,7 +288,7 @@ void PostgreSQLReplicationHandler::createReplicationSlot(NontransactionPtr ntx, pqxx::result result{ntx->exec(query_str)}; start_lsn = result[0][1].as(); snapshot_name = result[0][2].as(); - LOG_TRACE(log, "Created temporary replication slot: {}, start lsn: {}, snapshot: {}", + LOG_TRACE(log, "Created replication slot: {}, start lsn: {}, snapshot: {}", replication_slot, start_lsn, snapshot_name); } catch (Exception & e) @@ -286,7 +303,7 @@ void PostgreSQLReplicationHandler::dropReplicationSlot(NontransactionPtr ntx, st { std::string query_str = fmt::format("SELECT pg_drop_replication_slot('{}')", slot_name); ntx->exec(query_str); - LOG_TRACE(log, "Replication slot {} is dropped", slot_name); + LOG_TRACE(log, "Dropped replication slot {}", slot_name); } @@ -306,13 +323,50 @@ void PostgreSQLReplicationHandler::shutdownFinal() Poco::File(metadata_path).remove(); connection = std::make_shared(connection_str); - auto ntx = std::make_shared(*connection->conn()); + auto tx = std::make_shared(*connection->conn()); - dropPublication(ntx); - if (isReplicationSlotExist(ntx, replication_slot)) - dropReplicationSlot(ntx, replication_slot); + dropPublication(tx); + if (isReplicationSlotExist(tx, replication_slot)) + dropReplicationSlot(tx, replication_slot); - ntx->commit(); + tx->commit(); +} + + +/// TODO: publication can be created with option `whole_database`. Check this case. +std::unordered_set PostgreSQLReplicationHandler::fetchRequiredTables(PostgreSQLConnection::ConnectionPtr connection_) +{ + auto publication_exist = [&]() + { + auto tx = std::make_shared(*connection_); + bool exist = isPublicationExist(tx); + tx->commit(); + return exist; + }; + + if (publication_name.empty() || !publication_exist()) + { + /// Replicate the whole database and create our own pg_publication + return fetchPostgreSQLTablesList(connection_); + } + else + { + /// Replicate only tables, which are included in a pg_publication + return fetchTablesFromPublication(connection_); + } +} + + +std::unordered_set PostgreSQLReplicationHandler::fetchTablesFromPublication(PostgreSQLConnection::ConnectionPtr connection_) +{ + std::string query = fmt::format("SELECT tablename FROM pg_publication_tables WHERE pubname = '{}'", publication_name); + std::unordered_set tables; + pqxx::read_transaction tx(*connection_); + + for (auto table_name : tx.stream(query)) + tables.insert(std::get<0>(table_name)); + + return tables; } } diff --git a/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.h b/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.h index f4118c4aed8..ba6014aed1f 100644 --- a/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.h +++ b/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.h @@ -8,6 +8,8 @@ namespace DB { +class StoragePostgreSQLReplica; + class PostgreSQLReplicationHandler { public: @@ -22,13 +24,17 @@ public: const size_t max_block_size_); void startup(); - void addStoragePtr(const std::string & table_name, StoragePtr storage); + void shutdown(); + void shutdownFinal(); + void addStorage(const std::string & table_name, const StoragePostgreSQLReplica * storage); + + std::unordered_set fetchRequiredTables(PostgreSQLConnection::ConnectionPtr connection_); + private: using NontransactionPtr = std::shared_ptr; - using Storages = std::unordered_map; bool isPublicationExist(std::shared_ptr tx); bool isReplicationSlotExist(NontransactionPtr ntx, 
std::string & slot_name); @@ -40,9 +46,13 @@ private: void dropPublication(NontransactionPtr ntx); void waitConnectionAndStart(); - void startReplication(); + + void startSynchronization(); + void loadFromSnapshot(std::string & snapshot_name); + std::unordered_set fetchTablesFromPublication(PostgreSQLConnection::ConnectionPtr connection_); + Poco::Logger * log; std::shared_ptr context; const std::string database_name, connection_str, metadata_path; @@ -53,8 +63,10 @@ private: std::shared_ptr consumer; BackgroundSchedulePool::TaskHolder startup_task; + std::atomic tables_loaded = false; - Storages storages; + std::unordered_map storages; + std::unordered_map nested_storages; }; diff --git a/src/Storages/PostgreSQL/StoragePostgreSQLReplica.cpp b/src/Storages/PostgreSQL/StoragePostgreSQLReplica.cpp index 27ab6c7adaf..490da9ce322 100644 --- a/src/Storages/PostgreSQL/StoragePostgreSQLReplica.cpp +++ b/src/Storages/PostgreSQL/StoragePostgreSQLReplica.cpp @@ -18,6 +18,7 @@ #include #include #include +#include namespace DB @@ -26,6 +27,7 @@ namespace DB namespace ErrorCodes { extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH; + extern const int LOGICAL_ERROR; } static const auto NESTED_STORAGE_SUFFIX = "_ReplacingMergeTree"; @@ -36,20 +38,18 @@ StoragePostgreSQLReplica::StoragePostgreSQLReplica( const String & remote_database_name, const String & remote_table_name_, const String & connection_str, - const String & relative_data_path_, const StorageInMemoryMetadata & storage_metadata, const Context & context_, std::unique_ptr replication_settings_) : IStorage(table_id_) , remote_table_name(remote_table_name_) - , relative_data_path(relative_data_path_) , global_context(std::make_shared(context_.getGlobalContext())) , replication_settings(std::move(replication_settings_)) { setInMemoryMetadata(storage_metadata); - if (relative_data_path.ends_with("/")) - relative_data_path.resize(relative_data_path.size() - 1); - relative_data_path += NESTED_STORAGE_SUFFIX; + + is_postgresql_replica_database = DatabaseCatalog::instance().getDatabase( + getStorageID().database_name)->getEngineName() == "PostgreSQLReplica"; auto metadata_path = DatabaseCatalog::instance().getDatabase(getStorageID().database_name)->getMetadataPath() + "/.metadata_" + table_id_.database_name + "_" + table_id_.table_name; @@ -69,8 +69,47 @@ StoragePostgreSQLReplica::StoragePostgreSQLReplica( } +StoragePostgreSQLReplica::StoragePostgreSQLReplica( + const StorageID & table_id_, + const String & /* metadata_path_ */, + const StorageInMemoryMetadata & storage_metadata, + const Context & context_) + : IStorage(table_id_) + , global_context(std::make_shared(context_)) +{ + setInMemoryMetadata(storage_metadata); + is_postgresql_replica_database = DatabaseCatalog::instance().getDatabase( + getStorageID().database_name)->getEngineName() == "PostgreSQLReplica"; +} + + +StoragePostgreSQLReplica::StoragePostgreSQLReplica( + const StorageID & table_id_, + StoragePtr nested_storage_, + const Context & context_) + : IStorage(table_id_) + , global_context(std::make_shared(context_)) + , nested_storage(nested_storage_) +{ + is_postgresql_replica_database = DatabaseCatalog::instance().getDatabase( + getStorageID().database_name)->getEngineName() == "PostgreSQLReplica"; + +} + + +std::string StoragePostgreSQLReplica::getNestedTableName() const +{ + auto table_name = getStorageID().table_name; + + if (!is_postgresql_replica_database) + table_name += NESTED_STORAGE_SUFFIX; + + return table_name; +} + + std::shared_ptr 
StoragePostgreSQLReplica::getMaterializedColumnsDeclaration( - const String name, const String type, UInt64 default_value) + const String name, const String type, UInt64 default_value) const { auto column_declaration = std::make_shared(); @@ -87,7 +126,7 @@ std::shared_ptr StoragePostgreSQLReplica::getMaterializedC } -ASTPtr StoragePostgreSQLReplica::getColumnDeclaration(const DataTypePtr & data_type) +ASTPtr StoragePostgreSQLReplica::getColumnDeclaration(const DataTypePtr & data_type) const { WhichDataType which(data_type); @@ -101,12 +140,13 @@ ASTPtr StoragePostgreSQLReplica::getColumnDeclaration(const DataTypePtr & data_t } -std::shared_ptr StoragePostgreSQLReplica::getColumnsListFromStorage() +std::shared_ptr StoragePostgreSQLReplica::getColumnsListFromStorage() const { auto columns_declare_list = std::make_shared(); auto columns_expression_list = std::make_shared(); auto metadata_snapshot = getInMemoryMetadataPtr(); + for (const auto & column_type_and_name : metadata_snapshot->getColumns().getOrdinary()) { const auto & column_declaration = std::make_shared(); @@ -114,6 +154,7 @@ std::shared_ptr StoragePostgreSQLReplica::getColumnsListFromStorage( column_declaration->type = getColumnDeclaration(column_type_and_name.type); columns_expression_list->children.emplace_back(column_declaration); } + columns_declare_list->set(columns_declare_list->columns, columns_expression_list); columns_declare_list->columns->children.emplace_back(getMaterializedColumnsDeclaration("_sign", "Int8", UInt64(1))); @@ -123,14 +164,14 @@ std::shared_ptr StoragePostgreSQLReplica::getColumnsListFromStorage( } -ASTPtr StoragePostgreSQLReplica::getCreateHelperTableQuery() +ASTPtr StoragePostgreSQLReplica::getCreateNestedTableQuery() const { auto create_table_query = std::make_shared(); auto table_id = getStorageID(); - create_table_query->table = table_id.table_name + NESTED_STORAGE_SUFFIX; + create_table_query->table = getNestedTableName(); create_table_query->database = table_id.database_name; - create_table_query->if_not_exists = true; + //create_table_query->if_not_exists = true; create_table_query->set(create_table_query->columns_list, getColumnsListFromStorage()); @@ -138,8 +179,10 @@ ASTPtr StoragePostgreSQLReplica::getCreateHelperTableQuery() storage->set(storage->engine, makeASTFunction("ReplacingMergeTree", std::make_shared("_version"))); auto primary_key_ast = getInMemoryMetadataPtr()->getPrimaryKeyAST(); - if (primary_key_ast) - storage->set(storage->order_by, primary_key_ast); + if (!primary_key_ast) + primary_key_ast = std::make_shared("key"); + + storage->set(storage->order_by, primary_key_ast); create_table_query->set(create_table_query->storage, storage); @@ -147,6 +190,120 @@ ASTPtr StoragePostgreSQLReplica::getCreateHelperTableQuery() } +void StoragePostgreSQLReplica::createNestedIfNeeded() const +{ + nested_storage = tryGetNested(); + + if (nested_storage) + return; + + Context context_copy(*global_context); + const auto ast_create = getCreateNestedTableQuery(); + + InterpreterCreateQuery interpreter(ast_create, context_copy); + try + { + interpreter.execute(); + } + catch (...) 
+ { + throw; + } + + nested_storage = getNested(); +} + + +Context StoragePostgreSQLReplica::makeGetNestedTableContext() const +{ + auto get_context(*global_context); + get_context.makeQueryContext(); + get_context.addQueryFactoriesInfo(Context::QueryLogFactories::Storage, "ReplacingMergeTree"); + + return get_context; +} + + +StoragePtr StoragePostgreSQLReplica::getNested() const +{ + if (nested_storage) + return nested_storage; + + auto context = makeGetNestedTableContext(); + nested_storage = DatabaseCatalog::instance().getTable( + StorageID(getStorageID().database_name, getNestedTableName()), context); + + return nested_storage; +} + + +StoragePtr StoragePostgreSQLReplica::tryGetNested() const +{ + if (nested_storage) + return nested_storage; + + auto context = makeGetNestedTableContext(); + nested_storage = DatabaseCatalog::instance().tryGetTable( + StorageID(getStorageID().database_name, getNestedTableName()), context); + + return nested_storage; +} + + +void StoragePostgreSQLReplica::startup() +{ + if (!is_postgresql_replica_database) + { + replication_handler->addStorage(remote_table_name, this); + replication_handler->startup(); + } +} + + +void StoragePostgreSQLReplica::shutdown() +{ + if (replication_handler) + replication_handler->shutdown(); +} + + +void StoragePostgreSQLReplica::shutdownFinal() +{ + if (is_postgresql_replica_database) + return; + + if (replication_handler) + replication_handler->shutdownFinal(); + + if (nested_storage) + dropNested(); +} + + +void StoragePostgreSQLReplica::dropNested() +{ + auto table_id = nested_storage->getStorageID(); + auto ast_drop = std::make_shared(); + + ast_drop->kind = ASTDropQuery::Drop; + ast_drop->table = table_id.table_name; + ast_drop->database = table_id.database_name; + ast_drop->if_exists = true; + + auto drop_context(*global_context); + drop_context.makeQueryContext(); + + auto interpreter = InterpreterDropQuery(ast_drop, drop_context); + interpreter.execute(); +} + + +NamesAndTypesList StoragePostgreSQLReplica::getVirtuals() const +{ + return NamesAndTypesList{}; +} + + Pipe StoragePostgreSQLReplica::read( const Names & column_names, const StorageMetadataPtr & /* metadata_snapshot */, @@ -156,14 +313,17 @@ Pipe StoragePostgreSQLReplica::read( size_t max_block_size, unsigned num_streams) { - if (!nested_storage) + /// If initial table sync has not yet finished, nested tables might not be created yet. 
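+    /// (Returning an empty pipe with a warning, rather than throwing, keeps SELECT queries
+    /// from failing while the snapshot import is still running; once setNestedLoaded() flips
+    /// the atomic flag, reads are served from the nested ReplacingMergeTree table.)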
+ if (!nested_loaded) { - auto table_id = getStorageID(); - nested_storage = DatabaseCatalog::instance().getTable( - StorageID(table_id.database_name, table_id.table_name + NESTED_STORAGE_SUFFIX), - *global_context); + LOG_WARNING(&Poco::Logger::get("StoragePostgreSQLReplica"), "Table {} is not loaded yet", getNestedTableName()); + return Pipe(); } + /// Should throw if there is no nested storage + if (!nested_storage) + getNested(); + auto lock = nested_storage->lockForShare(context.getCurrentQueryId(), context.getSettingsRef().lock_acquire_timeout); const StorageMetadataPtr & nested_metadata = nested_storage->getInMemoryMetadataPtr(); @@ -227,70 +387,6 @@ Pipe StoragePostgreSQLReplica::read( } -void StoragePostgreSQLReplica::startup() -{ - Context context_copy(*global_context); - const auto ast_create = getCreateHelperTableQuery(); - auto table_id = getStorageID(); - - Poco::File path(relative_data_path); - if (!path.exists()) - { - LOG_TRACE(&Poco::Logger::get("StoragePostgreSQLReplica"), - "Creating helper table {}", table_id.table_name + NESTED_STORAGE_SUFFIX); - InterpreterCreateQuery interpreter(ast_create, context_copy); - interpreter.execute(); - } - else - LOG_TRACE(&Poco::Logger::get("StoragePostgreSQLReplica"), - "Directory already exists {}", relative_data_path); - - replication_handler->addStoragePtr( - remote_table_name, - DatabaseCatalog::instance().getTable( - StorageID(table_id.database_name, table_id.table_name + NESTED_STORAGE_SUFFIX), *global_context)); - - replication_handler->startup(); -} - - -void StoragePostgreSQLReplica::shutdown() -{ - replication_handler->shutdown(); -} - - -void StoragePostgreSQLReplica::shutdownFinal() -{ - replication_handler->shutdownFinal(); - dropNested(); -} - - -void StoragePostgreSQLReplica::dropNested() -{ - auto table_id = nested_storage->getStorageID(); - auto ast_drop = std::make_shared(); - - ast_drop->kind = ASTDropQuery::Drop; - ast_drop->table = table_id.table_name; - ast_drop->database = table_id.database_name; - ast_drop->if_exists = true; - - auto drop_context(*global_context); - drop_context.makeQueryContext(); - - auto interpreter = InterpreterDropQuery(ast_drop, drop_context); - interpreter.execute(); -} - - -NamesAndTypesList StoragePostgreSQLReplica::getVirtuals() const -{ - return NamesAndTypesList{}; -} - - void registerStoragePostgreSQLReplica(StorageFactory & factory) { auto creator_fn = [](const StorageFactory::Arguments & args) @@ -339,7 +435,7 @@ void registerStoragePostgreSQLReplica(StorageFactory & factory) return StoragePostgreSQLReplica::create( args.table_id, remote_database, remote_table, connection.conn_str(), - args.relative_data_path, metadata, args.context, + metadata, args.context, std::move(postgresql_replication_settings)); }; diff --git a/src/Storages/PostgreSQL/StoragePostgreSQLReplica.h b/src/Storages/PostgreSQL/StoragePostgreSQLReplica.h index c1e4b319187..277d6fc9313 100644 --- a/src/Storages/PostgreSQL/StoragePostgreSQLReplica.h +++ b/src/Storages/PostgreSQL/StoragePostgreSQLReplica.h @@ -26,6 +26,17 @@ class StoragePostgreSQLReplica final : public ext::shared_ptr_helper; public: + StoragePostgreSQLReplica( + const StorageID & table_id_, + const String & metadata_path_, + const StorageInMemoryMetadata & storage_metadata, + const Context & context_); + + StoragePostgreSQLReplica( + const StorageID & table_id_, + StoragePtr nested_storage_, + const Context & context_); + String getName() const override { return "PostgreSQLReplica"; } void startup() override; @@ -45,33 +56,52 @@ public: /// Called 
right after shutdown() in case of drop query void shutdownFinal(); + void createNestedIfNeeded() const; + + /// Can be nullptr + StoragePtr tryGetNested() const; + + /// Throw if impossible to get + StoragePtr getNested() const; + + void setNestedLoaded() const { nested_loaded.store(true); } + protected: StoragePostgreSQLReplica( const StorageID & table_id_, const String & remote_database_name, const String & remote_table_name, const String & connection_str, - const String & relative_data_path_, const StorageInMemoryMetadata & storage_metadata, const Context & context_, std::unique_ptr replication_settings_); private: std::shared_ptr getMaterializedColumnsDeclaration( - const String name, const String type, UInt64 default_value); - std::shared_ptr getColumnsListFromStorage(); - ASTPtr getColumnDeclaration(const DataTypePtr & data_type); - ASTPtr getCreateHelperTableQuery(); + const String name, const String type, UInt64 default_value) const; + + std::shared_ptr getColumnsListFromStorage() const; + + ASTPtr getColumnDeclaration(const DataTypePtr & data_type) const; + + ASTPtr getCreateNestedTableQuery() const; + + std::string getNestedTableName() const; + + Context makeGetNestedTableContext() const; + void dropNested(); - std::string remote_table_name, relative_data_path; + std::string remote_table_name; std::shared_ptr global_context; std::unique_ptr replication_settings; std::unique_ptr replication_handler; - /// ReplacingMergeTree table - StoragePtr nested_storage; + bool is_postgresql_replica_database = false; + + mutable std::atomic nested_loaded = false; + mutable StoragePtr nested_storage; }; } diff --git a/tests/integration/test_postgresql_replica_database_engine/__init__.py b/tests/integration/test_postgresql_replica_database_engine/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/integration/test_postgresql_replica_database_engine/configs/log_conf.xml b/tests/integration/test_postgresql_replica_database_engine/configs/log_conf.xml new file mode 100644 index 00000000000..f9d15e572aa --- /dev/null +++ b/tests/integration/test_postgresql_replica_database_engine/configs/log_conf.xml @@ -0,0 +1,11 @@ + + + trace + /var/log/clickhouse-server/log.log + /var/log/clickhouse-server/log.err.log + 1000M + 10 + /var/log/clickhouse-server/stderr.log + /var/log/clickhouse-server/stdout.log + + diff --git a/tests/integration/test_postgresql_replica_database_engine/test.py b/tests/integration/test_postgresql_replica_database_engine/test.py new file mode 100644 index 00000000000..7d5c36a83c9 --- /dev/null +++ b/tests/integration/test_postgresql_replica_database_engine/test.py @@ -0,0 +1,138 @@ +import pytest +import time +import psycopg2 +import os.path as p + +from helpers.cluster import ClickHouseCluster +from helpers.test_tools import assert_eq_with_retry +from psycopg2.extensions import ISOLATION_LEVEL_AUTOCOMMIT +from helpers.test_tools import TSV + +cluster = ClickHouseCluster(__file__) +instance = cluster.add_instance('instance', main_configs=['configs/log_conf.xml'], with_postgres=True) + +postgres_table_template = """ + CREATE TABLE IF NOT EXISTS {} ( + key Integer NOT NULL, value Integer) + """ + +def get_postgres_conn(database=False): + if database == True: + conn_string = "host='localhost' dbname='postgres_database' user='postgres' password='mysecretpassword'" + else: + conn_string = "host='localhost' user='postgres' password='mysecretpassword'" + conn = psycopg2.connect(conn_string) + conn.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT) + 
conn.autocommit = True + return conn + + +def create_postgres_db(cursor, name): + cursor.execute("CREATE DATABASE {}".format(name)) + + +def create_postgres_table(cursor, table_name): + cursor.execute(postgres_table_template.format(table_name)) + cursor.execute('ALTER TABLE {} REPLICA IDENTITY FULL;'.format(table_name)) + + +def check_tables_are_synchronized(table_name): + expected = instance.query('select * from postgres_database.{} order by key;'.format(table_name)) + result = instance.query('select * from test_database.{} order by key;'.format(table_name)) + + while result != expected: + time.sleep(0.5) + result = instance.query('select * from test_database.{} order by key;'.format(table_name)) + + assert(result == expected) + + +@pytest.fixture(scope="module") +def started_cluster(): + try: + cluster.start() + conn = get_postgres_conn() + cursor = conn.cursor() + create_postgres_db(cursor, 'postgres_database') + instance.query("DROP DATABASE IF EXISTS test_database") + instance.query(''' + CREATE DATABASE postgres_database + ENGINE = PostgreSQL('postgres1:5432', 'postgres_database', 'postgres', 'mysecretpassword')''') + + yield cluster + + finally: + cluster.shutdown() + + +@pytest.fixture(autouse=True) +def rabbitmq_setup_teardown(): + yield # run test + instance.query('DROP TABLE IF EXISTS test.postgresql_replica') + + +def test_load_and_sync_all_database(started_cluster): + conn = get_postgres_conn(True) + cursor = conn.cursor() + NUM_TABLES = 5 + + for i in range(NUM_TABLES): + create_postgres_table(cursor, 'postgresql_replica_{}'.format(i)); + instance.query("INSERT INTO postgres_database.postgresql_replica_{} SELECT number, number from numbers(50)".format(i)) + + instance.query("CREATE DATABASE test_database ENGINE = PostgreSQLReplica('postgres1:5432', 'postgres_database', 'postgres', 'mysecretpassword')") + assert 'test_database' in instance.query('SHOW DATABASES') + + for i in range(NUM_TABLES): + check_tables_are_synchronized('postgresql_replica_{}'.format(i)); + cursor.execute('drop table postgresql_replica_{};'.format(i)) + + result = instance.query('''SELECT count() FROM system.tables WHERE database = 'test_database';''') + assert(int(result) == NUM_TABLES) + + instance.query("DROP DATABASE test_database") + assert 'test_database' not in instance.query('SHOW DATABASES') + + +def test_replicating_dml(started_cluster): + conn = get_postgres_conn(True) + cursor = conn.cursor() + NUM_TABLES = 5 + + for i in range(NUM_TABLES): + create_postgres_table(cursor, 'postgresql_replica_{}'.format(i)); + instance.query("INSERT INTO postgres_database.postgresql_replica_{} SELECT number, {} from numbers(50)".format(i, i)) + + instance.query( + "CREATE DATABASE test_database ENGINE = PostgreSQLReplica('postgres1:5432', 'postgres_database', 'postgres', 'mysecretpassword')") + + for i in range(NUM_TABLES): + instance.query("INSERT INTO postgres_database.postgresql_replica_{} SELECT number, {} from numbers(50, 50)".format(i, i)) + + for i in range(NUM_TABLES): + check_tables_are_synchronized('postgresql_replica_{}'.format(i)); + + for i in range(NUM_TABLES): + cursor.execute('UPDATE postgresql_replica_{} SET value = {} * {} WHERE key < 50;'.format(i, i, i)) + cursor.execute('UPDATE postgresql_replica_{} SET value = {} * {} * {} WHERE key >= 50;'.format(i, i, i, i)) + + for i in range(NUM_TABLES): + check_tables_are_synchronized('postgresql_replica_{}'.format(i)); + + for i in range(NUM_TABLES): + cursor.execute('DELETE FROM postgresql_replica_{} WHERE (value*value + {}) % 2 = 0;'.format(i, i)) 
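+    # The UPDATE/DELETE statements above replicate only because create_postgres_table()
+    # sets REPLICA IDENTITY FULL: without it the WAL would carry just the old primary-key
+    # values, not the full old rows the consumer needs to apply the changes.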
+ + for i in range(NUM_TABLES): + check_tables_are_synchronized('postgresql_replica_{}'.format(i)); + + for i in range(NUM_TABLES): + cursor.execute('drop table postgresql_replica_{};'.format(i)) + + instance.query("DROP DATABASE test_database") + assert 'test_database' not in instance.query('SHOW DATABASES') + + +if __name__ == '__main__': + cluster.start() + input("Cluster created, press any key to destroy...") + cluster.shutdown() From 5bc0010f94178bea02d7b7eece124b3894aaeafc Mon Sep 17 00:00:00 2001 From: kssenii Date: Thu, 11 Feb 2021 21:59:58 +0000 Subject: [PATCH 028/931] Better --- .../PostgreSQLBlockInputStream.cpp | 48 +++++-- src/DataStreams/PostgreSQLBlockInputStream.h | 13 +- .../PostgreSQL/DatabasePostgreSQL.cpp | 2 +- .../PostgreSQL/DatabasePostgreSQLReplica.cpp | 15 +- .../fetchPostgreSQLTableStructure.cpp | 136 +++++++++++++----- .../fetchPostgreSQLTableStructure.h | 18 ++- .../PostgreSQLDictionarySource.cpp | 22 +-- .../PostgreSQLReplicationHandler.cpp | 40 +++--- .../PostgreSQL/PostgreSQLReplicationHandler.h | 7 +- .../PostgreSQL/StoragePostgreSQLReplica.cpp | 131 ++++++++++------- .../PostgreSQL/StoragePostgreSQLReplica.h | 25 ++-- src/Storages/StoragePostgreSQL.cpp | 8 +- .../TableFunctionPostgreSQL.cpp | 7 +- .../test.py | 6 +- 14 files changed, 300 insertions(+), 178 deletions(-) diff --git a/src/DataStreams/PostgreSQLBlockInputStream.cpp b/src/DataStreams/PostgreSQLBlockInputStream.cpp index 5b43a21c6fc..9e3c6b1bb89 100644 --- a/src/DataStreams/PostgreSQLBlockInputStream.cpp +++ b/src/DataStreams/PostgreSQLBlockInputStream.cpp @@ -22,32 +22,55 @@ namespace DB { -PostgreSQLBlockInputStream::PostgreSQLBlockInputStream( - std::unique_ptr tx_, + +template<> +PostgreSQLBlockInputStream::PostgreSQLBlockInputStream( + std::shared_ptr tx_, const std::string & query_str_, const Block & sample_block, - const UInt64 max_block_size_) + const UInt64 max_block_size_, + bool auto_commit_) : query_str(query_str_) , max_block_size(max_block_size_) - , tx(std::move(tx_)) + , auto_commit(auto_commit_) + , tx(tx_) { description.init(sample_block); +} + + +template<> +PostgreSQLBlockInputStream::PostgreSQLBlockInputStream( + std::shared_ptr tx_, + const std::string & query_str_, + const Block & sample_block, + const UInt64 max_block_size_, + bool auto_commit_) + : query_str(query_str_) + , max_block_size(max_block_size_) + , auto_commit(auto_commit_) + , tx(tx_) +{ + description.init(sample_block); +} + + +template +void PostgreSQLBlockInputStream::readPrefix() +{ for (const auto idx : ext::range(0, description.sample_block.columns())) if (description.types[idx].first == ExternalResultDescription::ValueType::vtArray) preparePostgreSQLArrayInfo(array_info, idx, description.sample_block.getByPosition(idx).type); /// pqxx::stream_from uses COPY command, will get error if ';' is present if (query_str.ends_with(';')) query_str.resize(query_str.size() - 1); -} - -void PostgreSQLBlockInputStream::readPrefix() -{ stream = std::make_unique(*tx, pqxx::from_query, std::string_view(query_str)); } -Block PostgreSQLBlockInputStream::readImpl() +template +Block PostgreSQLBlockInputStream::readImpl() { /// Check if pqxx::stream_from is finished if (!stream || !(*stream)) @@ -103,12 +126,15 @@ Block PostgreSQLBlockInputStream::readImpl() } -void PostgreSQLBlockInputStream::readSuffix() +template +void PostgreSQLBlockInputStream::readSuffix() { if (stream) { stream->complete(); - tx->commit(); + + if (auto_commit) + tx->commit(); } } diff --git a/src/DataStreams/PostgreSQLBlockInputStream.h 
b/src/DataStreams/PostgreSQLBlockInputStream.h index f51526b2eb3..a558a46d153 100644 --- a/src/DataStreams/PostgreSQLBlockInputStream.h +++ b/src/DataStreams/PostgreSQLBlockInputStream.h @@ -11,19 +11,22 @@ #include #include #include +#include namespace DB { +template class PostgreSQLBlockInputStream : public IBlockInputStream { public: PostgreSQLBlockInputStream( - std::unique_ptr tx_, - const std::string & query_str, + std::shared_ptr tx_, + const std::string & query_str_, const Block & sample_block, - const UInt64 max_block_size_); + const UInt64 max_block_size_, + bool auto_commit_ = true); String getName() const override { return "PostgreSQL"; } Block getHeader() const override { return description.sample_block.cloneEmpty(); } @@ -35,9 +38,11 @@ private: String query_str; const UInt64 max_block_size; + const bool auto_commit; ExternalResultDescription description; - std::unique_ptr tx; + PostgreSQLConnection::ConnectionPtr connection; + std::shared_ptr tx; std::unique_ptr stream; std::unordered_map array_info; diff --git a/src/Databases/PostgreSQL/DatabasePostgreSQL.cpp b/src/Databases/PostgreSQL/DatabasePostgreSQL.cpp index ebe5ba107bd..c226540cedc 100644 --- a/src/Databases/PostgreSQL/DatabasePostgreSQL.cpp +++ b/src/Databases/PostgreSQL/DatabasePostgreSQL.cpp @@ -148,7 +148,7 @@ StoragePtr DatabasePostgreSQL::fetchTable(const String & table_name, const Conte return StoragePtr{}; auto use_nulls = context.getSettingsRef().external_table_functions_use_nulls; - auto columns = fetchPostgreSQLTableStructure(connection->conn(), table_name, use_nulls); + auto columns = fetchPostgreSQLTableStructure(connection->conn(), table_name, use_nulls).columns; if (!columns) return StoragePtr{}; diff --git a/src/Databases/PostgreSQL/DatabasePostgreSQLReplica.cpp b/src/Databases/PostgreSQL/DatabasePostgreSQLReplica.cpp index d279d7e5c5c..a9b07a0c65b 100644 --- a/src/Databases/PostgreSQL/DatabasePostgreSQLReplica.cpp +++ b/src/Databases/PostgreSQL/DatabasePostgreSQLReplica.cpp @@ -130,18 +130,7 @@ StoragePtr DatabasePostgreSQLReplica::getStorage(const String & name) if (storage) return storage; - auto use_nulls = global_context.getSettingsRef().external_table_functions_use_nulls; - auto columns = fetchPostgreSQLTableStructure(connection->conn(), name, use_nulls); - - if (!columns) - return StoragePtr{}; - - StorageInMemoryMetadata metadata; - metadata.setColumns(ColumnsDescription(*columns)); - - storage = StoragePostgreSQLReplica::create(StorageID(database_name, name), metadata_path, metadata, global_context); - - return storage; + return StoragePostgreSQLReplica::create(StorageID(database_name, name), StoragePtr{}, global_context); } @@ -176,7 +165,7 @@ StoragePtr DatabasePostgreSQLReplica::tryGetTable(const String & name, con } auto table = tables.find(name); - if (table != tables.end()) + if (table != tables.end() && table->second->as()->isNestedLoaded()) return table->second; return StoragePtr{}; diff --git a/src/Databases/PostgreSQL/fetchPostgreSQLTableStructure.cpp b/src/Databases/PostgreSQL/fetchPostgreSQLTableStructure.cpp index 28f698b3da5..a3624236f76 100644 --- a/src/Databases/PostgreSQL/fetchPostgreSQLTableStructure.cpp +++ b/src/Databases/PostgreSQL/fetchPostgreSQLTableStructure.cpp @@ -14,6 +14,8 @@ #include #include +#include + namespace DB { @@ -25,7 +27,7 @@ namespace ErrorCodes } -std::unordered_set fetchPostgreSQLTablesList(ConnectionPtr connection) +std::unordered_set fetchPostgreSQLTablesList(PostgreSQLConnection::ConnectionPtr connection) { std::unordered_set tables; std::string 
query = "SELECT tablename FROM pg_catalog.pg_tables " @@ -39,7 +41,7 @@ std::unordered_set fetchPostgreSQLTablesList(ConnectionPtr connecti } -static DataTypePtr convertPostgreSQLDataType(std::string & type, bool is_nullable, uint16_t dimensions) +static DataTypePtr convertPostgreSQLDataType(std::string & type, bool is_nullable = false, uint16_t dimensions = 0) { DataTypePtr res; @@ -94,16 +96,66 @@ static DataTypePtr convertPostgreSQLDataType(std::string & type, bool is_nullabl } -std::shared_ptr fetchPostgreSQLTableStructure( - std::shared_ptr connection, const String & postgres_table_name, bool use_nulls) +template +std::shared_ptr readNamesAndTypesList( + std::shared_ptr tx, const String & postgres_table_name, const String & query, bool use_nulls, bool only_names_and_types) { auto columns = NamesAndTypesList(); + try + { + pqxx::stream_from stream(*tx, pqxx::from_query, std::string_view(query)); + + if (only_names_and_types) + { + std::tuple row; + while (stream >> row) + columns.push_back(NameAndTypePair(std::get<0>(row), convertPostgreSQLDataType(std::get<1>(row)))); + } + else + { + std::tuple row; + while (stream >> row) + { + columns.push_back(NameAndTypePair( + std::get<0>(row), /// column name + convertPostgreSQLDataType( + std::get<1>(row), /// data type + use_nulls && (std::get<2>(row) == "f"), /// 'f' means that postgres `not_null` is false + std::get<3>(row)))); /// number of dimensions if data type is array + } + } + + stream.complete(); + } + catch (const pqxx::undefined_table &) + { + throw Exception(fmt::format( + "PostgreSQL table {} does not exist", postgres_table_name), ErrorCodes::UNKNOWN_TABLE); + } + catch (Exception & e) + { + e.addMessage("while fetching postgresql table structure"); + throw; + } + + return !columns.empty() ? std::make_shared(columns) : nullptr; +} + + +template +PostgreSQLTableStructure fetchPostgreSQLTableStructureImpl( + std::shared_ptr tx, const String & postgres_table_name, bool use_nulls, bool with_primary_key) +{ + PostgreSQLTableStructure table; + if (postgres_table_name.find('\'') != std::string::npos || postgres_table_name.find('\\') != std::string::npos) { - throw Exception(ErrorCodes::BAD_ARGUMENTS, "PostgreSQL table name cannot contain single quote or backslash characters, passed {}", - postgres_table_name); + throw Exception( + ErrorCodes::BAD_ARGUMENTS, + "PostgreSQL table name cannot contain single quote or backslash characters, passed {}", + postgres_table_name); } std::string query = fmt::format( @@ -112,42 +164,52 @@ std::shared_ptr fetchPostgreSQLTableStructure( "FROM pg_attribute " "WHERE attrelid = '{}'::regclass " "AND NOT attisdropped AND attnum > 0", postgres_table_name); - try - { - pqxx::read_transaction tx(*connection); - pqxx::stream_from stream(tx, pqxx::from_query, std::string_view(query)); - std::tuple row; - while (stream >> row) - { - columns.push_back(NameAndTypePair( - std::get<0>(row), - convertPostgreSQLDataType( - std::get<1>(row), - use_nulls && (std::get<2>(row) == "f"), /// 'f' means that postgres `not_null` is false, i.e. 
value is nullable - std::get<3>(row)))); - } - stream.complete(); - tx.commit(); - } - catch (const pqxx::undefined_table &) - { - throw Exception(fmt::format( - "PostgreSQL table {}.{} does not exist", - connection->dbname(), postgres_table_name), ErrorCodes::UNKNOWN_TABLE); - } - catch (Exception & e) - { - e.addMessage("while fetching postgresql table structure"); - throw; - } + table.columns = readNamesAndTypesList(tx, postgres_table_name, query, use_nulls, false); - if (columns.empty()) - return nullptr; + if (!with_primary_key) + return table; - return std::make_shared(columns); + /// wiki.postgresql.org/wiki/Retrieve_primary_key_columns + query = fmt::format( + "SELECT a.attname, format_type(a.atttypid, a.atttypmod) AS data_type " + "FROM pg_index i " + "JOIN pg_attribute a ON a.attrelid = i.indrelid " + "AND a.attnum = ANY(i.indkey) " + "WHERE i.indrelid = '{}'::regclass AND i.indisprimary", postgres_table_name); + + table.primary_key_columns = readNamesAndTypesList(tx, postgres_table_name, query, use_nulls, true); + + return table; } + +PostgreSQLTableStructure fetchPostgreSQLTableStructure( + std::shared_ptr tx, const String & postgres_table_name, bool use_nulls, bool with_primary_key) +{ + return fetchPostgreSQLTableStructureImpl(tx, postgres_table_name, use_nulls, with_primary_key); +} + + +/// For the case when several operations are made on the transaction object before it can be used (like export snapshot and isolation level) +PostgreSQLTableStructure fetchPostgreSQLTableStructure( + std::shared_ptr tx, const String & postgres_table_name, bool use_nulls, bool with_primary_key) +{ + return fetchPostgreSQLTableStructureImpl(tx, postgres_table_name, use_nulls, with_primary_key); +} + + +PostgreSQLTableStructure fetchPostgreSQLTableStructure( + PostgreSQLConnection::ConnectionPtr connection, const String & postgres_table_name, bool use_nulls, bool with_primary_key) +{ + auto tx = std::make_shared(*connection); + auto table = fetchPostgreSQLTableStructure(tx, postgres_table_name, use_nulls, with_primary_key); + tx->commit(); + + return table; +} + + } #endif diff --git a/src/Databases/PostgreSQL/fetchPostgreSQLTableStructure.h b/src/Databases/PostgreSQL/fetchPostgreSQLTableStructure.h index a507514e92d..bf3c8ead422 100644 --- a/src/Databases/PostgreSQL/fetchPostgreSQLTableStructure.h +++ b/src/Databases/PostgreSQL/fetchPostgreSQLTableStructure.h @@ -5,16 +5,26 @@ #endif #if USE_LIBPQXX -#include +#include +#include namespace DB { -std::unordered_set fetchPostgreSQLTablesList(ConnectionPtr connection); +std::unordered_set fetchPostgreSQLTablesList(PostgreSQLConnection::ConnectionPtr connection); -std::shared_ptr fetchPostgreSQLTableStructure( - ConnectionPtr connection, const String & postgres_table_name, bool use_nulls); +struct PostgreSQLTableStructure +{ + std::shared_ptr columns; + std::shared_ptr primary_key_columns; +}; + +PostgreSQLTableStructure fetchPostgreSQLTableStructure( + PostgreSQLConnection::ConnectionPtr connection, const String & postgres_table_name, bool use_nulls, bool with_primary_key = false); + +PostgreSQLTableStructure fetchPostgreSQLTableStructure( + std::shared_ptr tx, const String & postgres_table_name, bool use_nulls, bool with_primary_key = false); } diff --git a/src/Dictionaries/PostgreSQLDictionarySource.cpp b/src/Dictionaries/PostgreSQLDictionarySource.cpp index 8ede0bc8813..954e5f4e187 100644 --- a/src/Dictionaries/PostgreSQLDictionarySource.cpp +++ b/src/Dictionaries/PostgreSQLDictionarySource.cpp @@ -68,9 +68,8 @@ 
PostgreSQLDictionarySource::PostgreSQLDictionarySource(const PostgreSQLDictionar BlockInputStreamPtr PostgreSQLDictionarySource::loadAll() { LOG_TRACE(log, load_all_query); - auto tx = std::make_unique(*connection->conn()); - return std::make_shared( - std::move(tx), load_all_query, sample_block, max_block_size); + auto tx = std::make_shared(*connection->conn()); + return std::make_shared>(tx, load_all_query, sample_block, max_block_size); } @@ -78,23 +77,23 @@ BlockInputStreamPtr PostgreSQLDictionarySource::loadUpdatedAll() { auto load_update_query = getUpdateFieldAndDate(); LOG_TRACE(log, load_update_query); - auto tx = std::make_unique(*connection->conn()); - return std::make_shared(std::move(tx), load_update_query, sample_block, max_block_size); + auto tx = std::make_shared(*connection->conn()); + return std::make_shared>(tx, load_update_query, sample_block, max_block_size); } BlockInputStreamPtr PostgreSQLDictionarySource::loadIds(const std::vector & ids) { const auto query = query_builder.composeLoadIdsQuery(ids); - auto tx = std::make_unique(*connection->conn()); - return std::make_shared(std::move(tx), query, sample_block, max_block_size); + auto tx = std::make_shared(*connection->conn()); + return std::make_shared>(tx, query, sample_block, max_block_size); } BlockInputStreamPtr PostgreSQLDictionarySource::loadKeys(const Columns & key_columns, const std::vector & requested_rows) { const auto query = query_builder.composeLoadKeysQuery(key_columns, requested_rows, ExternalQueryBuilder::AND_OR_CHAIN); - auto tx = std::make_unique(*connection->conn()); - return std::make_shared(std::move(tx), query, sample_block, max_block_size); + auto tx = std::make_shared(*connection->conn()); + return std::make_shared>(tx, query, sample_block, max_block_size); } @@ -116,8 +115,9 @@ std::string PostgreSQLDictionarySource::doInvalidateQuery(const std::string & re Block invalidate_sample_block; ColumnPtr column(ColumnString::create()); invalidate_sample_block.insert(ColumnWithTypeAndName(column, std::make_shared(), "Sample Block")); - auto tx = std::make_unique(*connection->conn()); - PostgreSQLBlockInputStream block_input_stream(std::move(tx), request, invalidate_sample_block, 1); + auto tx = std::make_shared(*connection->conn()); + PostgreSQLBlockInputStream block_input_stream(tx, request, invalidate_sample_block, 1); + return readInvalidateQuery(block_input_stream); } diff --git a/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp b/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp index c803f0aa411..dd961186494 100644 --- a/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp +++ b/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp @@ -51,7 +51,7 @@ PostgreSQLReplicationHandler::PostgreSQLReplicationHandler( } -void PostgreSQLReplicationHandler::addStorage(const std::string & table_name, const StoragePostgreSQLReplica * storage) +void PostgreSQLReplicationHandler::addStorage(const std::string & table_name, StoragePostgreSQLReplica * storage) { storages[table_name] = storage; } @@ -81,6 +81,7 @@ void PostgreSQLReplicationHandler::waitConnectionAndStart() } catch (...) 
{ + /// TODO: throw tryLogCurrentException(__PRETTY_FUNCTION__); } } @@ -211,26 +212,24 @@ void PostgreSQLReplicationHandler::loadFromSnapshot(std::string & snapshot_name) { LOG_DEBUG(log, "Creating transaction snapshot"); - for (const auto & [table_name, storage] : storages) + for (const auto & storage_data : storages) { - storage->createNestedIfNeeded(); - auto nested_storage = storage->tryGetNested(); - - if (!nested_storage) - throw Exception(ErrorCodes::LOGICAL_ERROR, "Unable to create nested storage"); - try { - auto stx = std::make_unique(*connection->conn()); + auto tx = std::make_shared(*connection->conn()); /// Specific isolation level is required to read from snapshot. - stx->set_variable("transaction_isolation", "'repeatable read'"); + tx->set_variable("transaction_isolation", "'repeatable read'"); std::string query_str = fmt::format("SET TRANSACTION SNAPSHOT '{}'", snapshot_name); - stx->exec(query_str); + tx->exec(query_str); + + storage_data.second->createNestedIfNeeded([&]() { return fetchTableStructure(tx, storage_data.first); }); + auto nested_storage = storage_data.second->getNested(); /// Load from snapshot, which will show table state before creation of replication slot. - query_str = fmt::format("SELECT * FROM {}", table_name); + /// Already connected to needed database, no need to add it to query. + query_str = fmt::format("SELECT * FROM {}", storage_data.first); Context insert_context(*context); insert_context.makeQueryContext(); @@ -245,16 +244,17 @@ void PostgreSQLReplicationHandler::loadFromSnapshot(std::string & snapshot_name) const StorageInMemoryMetadata & storage_metadata = nested_storage->getInMemoryMetadata(); auto sample_block = storage_metadata.getSampleBlockNonMaterialized(); - PostgreSQLBlockInputStream input(std::move(stx), query_str, sample_block, DEFAULT_BLOCK_SIZE); + PostgreSQLBlockInputStream input(tx, query_str, sample_block, DEFAULT_BLOCK_SIZE); copyData(input, *block_io.out); - storage->setNestedLoaded(); - nested_storages[table_name] = nested_storage; + storage_data.second->setNestedLoaded(); + nested_storages[storage_data.first] = nested_storage; } catch (Exception & e) { - e.addMessage("while initial data synchronization"); + tryLogCurrentException(__PRETTY_FUNCTION__); + e.addMessage("while initial data synchronization for table {}", storage_data.first); throw; } } @@ -369,4 +369,12 @@ std::unordered_set PostgreSQLReplicationHandler::fetchTablesFromPub return tables; } + +PostgreSQLTableStructure PostgreSQLReplicationHandler::fetchTableStructure( + std::shared_ptr tx, const std::string & table_name) +{ + auto use_nulls = context->getSettingsRef().external_table_functions_use_nulls; + return fetchPostgreSQLTableStructure(tx, table_name, use_nulls, true); +} + } diff --git a/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.h b/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.h index ba6014aed1f..5a44215a612 100644 --- a/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.h +++ b/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.h @@ -3,6 +3,7 @@ #include "PostgreSQLConnection.h" #include "PostgreSQLReplicaConsumer.h" #include "PostgreSQLReplicaMetadata.h" +#include namespace DB @@ -29,10 +30,12 @@ public: void shutdownFinal(); - void addStorage(const std::string & table_name, const StoragePostgreSQLReplica * storage); + void addStorage(const std::string & table_name, StoragePostgreSQLReplica * storage); std::unordered_set fetchRequiredTables(PostgreSQLConnection::ConnectionPtr connection_); + PostgreSQLTableStructure 
fetchTableStructure(std::shared_ptr tx, const std::string & table_name); + private: using NontransactionPtr = std::shared_ptr; @@ -65,7 +68,7 @@ private: BackgroundSchedulePool::TaskHolder startup_task; std::atomic tables_loaded = false; - std::unordered_map storages; + std::unordered_map storages; std::unordered_map nested_storages; }; diff --git a/src/Storages/PostgreSQL/StoragePostgreSQLReplica.cpp b/src/Storages/PostgreSQL/StoragePostgreSQLReplica.cpp index 490da9ce322..2eb156ad9f9 100644 --- a/src/Storages/PostgreSQL/StoragePostgreSQLReplica.cpp +++ b/src/Storages/PostgreSQL/StoragePostgreSQLReplica.cpp @@ -45,6 +45,8 @@ StoragePostgreSQLReplica::StoragePostgreSQLReplica( , remote_table_name(remote_table_name_) , global_context(std::make_shared(context_.getGlobalContext())) , replication_settings(std::move(replication_settings_)) + , is_postgresql_replica_database( + DatabaseCatalog::instance().getDatabase(getStorageID().database_name)->getEngineName() == "PostgreSQLReplica") { setInMemoryMetadata(storage_metadata); @@ -69,20 +71,6 @@ StoragePostgreSQLReplica::StoragePostgreSQLReplica( } -StoragePostgreSQLReplica::StoragePostgreSQLReplica( - const StorageID & table_id_, - const String & /* metadata_path_ */, - const StorageInMemoryMetadata & storage_metadata, - const Context & context_) - : IStorage(table_id_) - , global_context(std::make_shared(context_)) -{ - setInMemoryMetadata(storage_metadata); - is_postgresql_replica_database = DatabaseCatalog::instance().getDatabase( - getStorageID().database_name)->getEngineName() == "PostgreSQLReplica"; -} - - StoragePostgreSQLReplica::StoragePostgreSQLReplica( const StorageID & table_id_, StoragePtr nested_storage_, @@ -90,9 +78,9 @@ StoragePostgreSQLReplica::StoragePostgreSQLReplica( : IStorage(table_id_) , global_context(std::make_shared(context_)) , nested_storage(nested_storage_) + , is_postgresql_replica_database( + DatabaseCatalog::instance().getDatabase(getStorageID().database_name)->getEngineName() == "PostgreSQLReplica") { - is_postgresql_replica_database = DatabaseCatalog::instance().getDatabase( - getStorageID().database_name)->getEngineName() == "PostgreSQLReplica"; } @@ -140,18 +128,70 @@ ASTPtr StoragePostgreSQLReplica::getColumnDeclaration(const DataTypePtr & data_t } -std::shared_ptr StoragePostgreSQLReplica::getColumnsListFromStorage() const +/// For single storage PostgreSQLReplica get columns and primary key columns from storage definition. +/// For database engine PostgreSQLReplica get columns and primary key columns by fetching from PostgreSQL, also using the same +/// transaction with snapshot, which is used for initial tables dump. 
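/// A minimal sketch of the caller side (an assumption mirroring loadFromSnapshot() in
/// PostgreSQLReplicationHandler.cpp, not code added by this patch): the handler binds the
/// snapshot transaction into the fetch_table_structure callback, so the nested table is
/// created from the very snapshot that is used for the initial data dump. pqxx::work stands
/// in for the transaction type whose template parameter was lost in this listing.
static void exampleCreateNestedFromSnapshot(
    PostgreSQLConnection::ConnectionPtr connection, StoragePostgreSQLReplica * storage,
    const std::string & snapshot_name, const std::string & table_name,
    PostgreSQLReplicationHandler & handler)
{
    auto tx = std::make_shared<pqxx::work>(*connection);
    tx->set_variable("transaction_isolation", "'repeatable read'");
    tx->exec(fmt::format("SET TRANSACTION SNAPSHOT '{}'", snapshot_name));

    /// The lambda defers the fetch until the storage discovers it has no column list yet.
    storage->createNestedIfNeeded([&]() { return handler.fetchTableStructure(tx, table_name); });
}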
+ASTPtr StoragePostgreSQLReplica::getCreateNestedTableQuery(const std::function & fetch_table_structure) { + auto create_table_query = std::make_shared(); + + auto table_id = getStorageID(); + create_table_query->table = getNestedTableName(); + create_table_query->database = table_id.database_name; + auto columns_declare_list = std::make_shared(); - auto columns_expression_list = std::make_shared(); - auto metadata_snapshot = getInMemoryMetadataPtr(); + auto order_by_expression = std::make_shared(); - for (const auto & column_type_and_name : metadata_snapshot->getColumns().getOrdinary()) + auto metadata_snapshot = getInMemoryMetadataPtr(); + const auto & columns = metadata_snapshot->getColumns(); + NamesAndTypesList ordinary_columns_and_types; + + if (!columns.empty()) + { + ordinary_columns_and_types = columns.getOrdinary(); + } + else + { + auto table_structure = fetch_table_structure(); + + if (!table_structure.columns) + { + throw Exception(ErrorCodes::LOGICAL_ERROR, + "No columns returned for table {}.{}", table_id.database_name, table_id.table_name); + } + + StorageInMemoryMetadata storage_metadata; + + ordinary_columns_and_types = *table_structure.columns; + storage_metadata.setColumns(ColumnsDescription(ordinary_columns_and_types)); + setInMemoryMetadata(storage_metadata); + + if (!table_structure.primary_key_columns) + { + throw Exception(ErrorCodes::LOGICAL_ERROR, + "No primary key columns returned for table {}.{}", table_id.database_name, table_id.table_name); + } + + auto primary_key_columns = *table_structure.primary_key_columns; + + order_by_expression->name = "tuple"; + order_by_expression->arguments = std::make_shared(); + + for (const auto & column : primary_key_columns) + { + LOG_WARNING(&Poco::Logger::get("StoragePostgreSQLReplica"), "kssenii table columns {}", column.name); + order_by_expression->arguments->children.emplace_back(std::make_shared(column.name)); + } + } + + for (const auto & [name, type] : ordinary_columns_and_types) { const auto & column_declaration = std::make_shared(); - column_declaration->name = column_type_and_name.name; - column_declaration->type = getColumnDeclaration(column_type_and_name.type); + + column_declaration->name = name; + column_declaration->type = getColumnDeclaration(type); + columns_expression_list->children.emplace_back(column_declaration); } @@ -160,29 +200,18 @@ std::shared_ptr StoragePostgreSQLReplica::getColumnsListFromStorage( columns_declare_list->columns->children.emplace_back(getMaterializedColumnsDeclaration("_sign", "Int8", UInt64(1))); columns_declare_list->columns->children.emplace_back(getMaterializedColumnsDeclaration("_version", "UInt64", UInt64(1))); - return columns_declare_list; -} + create_table_query->set(create_table_query->columns_list, columns_declare_list); - -ASTPtr StoragePostgreSQLReplica::getCreateNestedTableQuery() const -{ - auto create_table_query = std::make_shared(); - - auto table_id = getStorageID(); - create_table_query->table = getNestedTableName(); - create_table_query->database = table_id.database_name; - //create_table_query->if_not_exists = true; - - create_table_query->set(create_table_query->columns_list, getColumnsListFromStorage()); + /// Not nullptr for single storage (because throws exception if not specified), nullptr otherwise. 
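    /// For orientation, the DDL this function ends up producing looks roughly like the
    /// following (shape inferred from the code around this point; the column names are
    /// invented examples, not taken from the patch):
    ///
    ///     CREATE TABLE db.table_nested
    ///     (
    ///         key Int32, value Int32,
    ///         _sign Int8 MATERIALIZED 1,
    ///         _version UInt64 MATERIALIZED 1
    ///     )
    ///     ENGINE = ReplacingMergeTree(_version)
    ///     ORDER BY tuple(key);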
+ auto primary_key_ast = getInMemoryMetadataPtr()->getPrimaryKeyAST(); auto storage = std::make_shared(); storage->set(storage->engine, makeASTFunction("ReplacingMergeTree", std::make_shared("_version"))); - auto primary_key_ast = getInMemoryMetadataPtr()->getPrimaryKeyAST(); - if (!primary_key_ast) - primary_key_ast = std::make_shared("key"); - - storage->set(storage->order_by, primary_key_ast); + if (primary_key_ast) + storage->set(storage->order_by, primary_key_ast); + else + storage->set(storage->order_by, order_by_expression); create_table_query->set(create_table_query->storage, storage); @@ -190,7 +219,7 @@ ASTPtr StoragePostgreSQLReplica::getCreateNestedTableQuery() const } -void StoragePostgreSQLReplica::createNestedIfNeeded() const +void StoragePostgreSQLReplica::createNestedIfNeeded(const std::function & fetch_table_structure) { nested_storage = tryGetNested(); @@ -198,17 +227,10 @@ void StoragePostgreSQLReplica::createNestedIfNeeded() const return; Context context_copy(*global_context); - const auto ast_create = getCreateNestedTableQuery(); + const auto ast_create = getCreateNestedTableQuery(fetch_table_structure); InterpreterCreateQuery interpreter(ast_create, context_copy); - try - { - interpreter.execute(); - } - catch (...) - { - throw; - } + interpreter.execute(); nested_storage = getNested(); } @@ -224,7 +246,7 @@ Context StoragePostgreSQLReplica::makeGetNestedTableContext() const } -StoragePtr StoragePostgreSQLReplica::getNested() const +StoragePtr StoragePostgreSQLReplica::getNested() { if (nested_storage) return nested_storage; @@ -237,7 +259,7 @@ StoragePtr StoragePostgreSQLReplica::getNested() const } -StoragePtr StoragePostgreSQLReplica::tryGetNested() const +StoragePtr StoragePostgreSQLReplica::tryGetNested() { if (nested_storage) return nested_storage; @@ -442,7 +464,10 @@ void registerStoragePostgreSQLReplica(StorageFactory & factory) factory.registerStorage( "PostgreSQLReplica", creator_fn, - StorageFactory::StorageFeatures{ .supports_settings = true, .supports_sort_order = true, .source_access_type = AccessType::POSTGRES, + StorageFactory::StorageFeatures{ + .supports_settings = true, + .supports_sort_order = true, + .source_access_type = AccessType::POSTGRES, }); } diff --git a/src/Storages/PostgreSQL/StoragePostgreSQLReplica.h b/src/Storages/PostgreSQL/StoragePostgreSQLReplica.h index 277d6fc9313..e8de30afeb2 100644 --- a/src/Storages/PostgreSQL/StoragePostgreSQLReplica.h +++ b/src/Storages/PostgreSQL/StoragePostgreSQLReplica.h @@ -26,12 +26,6 @@ class StoragePostgreSQLReplica final : public ext::shared_ptr_helper; public: - StoragePostgreSQLReplica( - const StorageID & table_id_, - const String & metadata_path_, - const StorageInMemoryMetadata & storage_metadata, - const Context & context_); - StoragePostgreSQLReplica( const StorageID & table_id_, StoragePtr nested_storage_, @@ -56,15 +50,16 @@ public: /// Called right after shutdown() in case of drop query void shutdownFinal(); - void createNestedIfNeeded() const; + void createNestedIfNeeded(const std::function & fetch_table_structure); /// Can be nullptr - StoragePtr tryGetNested() const; + StoragePtr tryGetNested(); /// Throw if impossible to get - StoragePtr getNested() const; + StoragePtr getNested(); - void setNestedLoaded() const { nested_loaded.store(true); } + void setNestedLoaded() { nested_loaded.store(true); } + bool isNestedLoaded() { return nested_loaded.load(); } protected: StoragePostgreSQLReplica( @@ -80,11 +75,9 @@ private: std::shared_ptr getMaterializedColumnsDeclaration( const String 
name, const String type, UInt64 default_value) const; - std::shared_ptr getColumnsListFromStorage() const; - ASTPtr getColumnDeclaration(const DataTypePtr & data_type) const; - ASTPtr getCreateNestedTableQuery() const; + ASTPtr getCreateNestedTableQuery(const std::function & fetch_table_structure); std::string getNestedTableName() const; @@ -98,10 +91,10 @@ private: std::unique_ptr replication_settings; std::unique_ptr replication_handler; - bool is_postgresql_replica_database = false; + std::atomic nested_loaded = false; + StoragePtr nested_storage; - mutable std::atomic nested_loaded = false; - mutable StoragePtr nested_storage; + bool is_postgresql_replica_database = false; }; } diff --git a/src/Storages/StoragePostgreSQL.cpp b/src/Storages/StoragePostgreSQL.cpp index 08affa518e7..5312448b98b 100644 --- a/src/Storages/StoragePostgreSQL.cpp +++ b/src/Storages/StoragePostgreSQL.cpp @@ -1,10 +1,11 @@ #include "StoragePostgreSQL.h" #if USE_LIBPQXX +#include +#include #include #include -#include #include #include #include @@ -17,7 +18,6 @@ #include #include #include -#include #include #include #include @@ -83,9 +83,9 @@ Pipe StoragePostgreSQL::read( sample_block.insert({ column_data.type, column_data.name }); } - auto tx = std::make_unique(*connection->conn()); + auto tx = std::make_shared(*connection->conn()); return Pipe(std::make_shared( - std::make_shared(std::move(tx), query, sample_block, max_block_size_))); + std::make_shared>(tx, query, sample_block, max_block_size_))); } diff --git a/src/TableFunctions/TableFunctionPostgreSQL.cpp b/src/TableFunctions/TableFunctionPostgreSQL.cpp index eefdff1fa87..2e3f1f6385c 100644 --- a/src/TableFunctions/TableFunctionPostgreSQL.cpp +++ b/src/TableFunctions/TableFunctionPostgreSQL.cpp @@ -1,6 +1,9 @@ #include #if USE_LIBPQXX +#include +#include + #include #include #include @@ -9,8 +12,6 @@ #include #include #include "registerTableFunctions.h" -#include -#include namespace DB @@ -39,7 +40,7 @@ StoragePtr TableFunctionPostgreSQL::executeImpl(const ASTPtr & /*ast_function*/, ColumnsDescription TableFunctionPostgreSQL::getActualTableStructure(const Context & context) const { const bool use_nulls = context.getSettingsRef().external_table_functions_use_nulls; - auto columns = fetchPostgreSQLTableStructure(connection->conn(), remote_table_name, use_nulls); + auto columns = fetchPostgreSQLTableStructure(connection->conn(), remote_table_name, use_nulls).columns; return ColumnsDescription{*columns}; } diff --git a/tests/integration/test_postgresql_replica_database_engine/test.py b/tests/integration/test_postgresql_replica_database_engine/test.py index 7d5c36a83c9..aa11b9419b4 100644 --- a/tests/integration/test_postgresql_replica_database_engine/test.py +++ b/tests/integration/test_postgresql_replica_database_engine/test.py @@ -13,7 +13,7 @@ instance = cluster.add_instance('instance', main_configs=['configs/log_conf.xml' postgres_table_template = """ CREATE TABLE IF NOT EXISTS {} ( - key Integer NOT NULL, value Integer) + key Integer NOT NULL, value Integer, PRIMARY KEY(key)) """ def get_postgres_conn(database=False): @@ -66,7 +66,7 @@ def started_cluster(): @pytest.fixture(autouse=True) -def rabbitmq_setup_teardown(): +def postgresql_setup_teardown(): yield # run test instance.query('DROP TABLE IF EXISTS test.postgresql_replica') @@ -107,7 +107,7 @@ def test_replicating_dml(started_cluster): "CREATE DATABASE test_database ENGINE = PostgreSQLReplica('postgres1:5432', 'postgres_database', 'postgres', 'mysecretpassword')") for i in range(NUM_TABLES): - 
instance.query("INSERT INTO postgres_database.postgresql_replica_{} SELECT number, {} from numbers(50, 50)".format(i, i)) + instance.query("INSERT INTO postgres_database.postgresql_replica_{} SELECT 50 + number, {} from numbers(1000)".format(i, i)) for i in range(NUM_TABLES): check_tables_are_synchronized('postgresql_replica_{}'.format(i)); From 010a640ed8f92bf42ee110d0e42a2ecd38406718 Mon Sep 17 00:00:00 2001 From: kssenii Date: Fri, 12 Feb 2021 10:05:13 +0000 Subject: [PATCH 029/931] Fix and test different data types --- .../PostgreSQL/StoragePostgreSQLReplica.cpp | 32 ++++++- .../test.py | 85 ++++++++++++++++++- .../test_storage_postgresql_replica/test.py | 6 +- 3 files changed, 111 insertions(+), 12 deletions(-) diff --git a/src/Storages/PostgreSQL/StoragePostgreSQLReplica.cpp b/src/Storages/PostgreSQL/StoragePostgreSQLReplica.cpp index 2eb156ad9f9..7def1a317be 100644 --- a/src/Storages/PostgreSQL/StoragePostgreSQLReplica.cpp +++ b/src/Storages/PostgreSQL/StoragePostgreSQLReplica.cpp @@ -7,6 +7,7 @@ #include #include #include +#include #include #include #include @@ -124,6 +125,33 @@ ASTPtr StoragePostgreSQLReplica::getColumnDeclaration(const DataTypePtr & data_t if (which.isArray()) return makeASTFunction("Array", getColumnDeclaration(typeid_cast(data_type.get())->getNestedType())); + /// getName() for decimal returns 'Decimal(precison, scale)', will get an error with it + if (which.isDecimal()) + { + auto make_decimal_expression = [&](std::string type_name) + { + auto ast_expression = std::make_shared(); + + ast_expression->name = type_name; + ast_expression->arguments = std::make_shared(); + ast_expression->arguments->children.emplace_back(std::make_shared(getDecimalScale(*data_type))); + + return ast_expression; + }; + + if (which.isDecimal32()) + return make_decimal_expression("Decimal32"); + + if (which.isDecimal64()) + return make_decimal_expression("Decimal64"); + + if (which.isDecimal128()) + return make_decimal_expression("Decimal128"); + + if (which.isDecimal256()) + return make_decimal_expression("Decimal256"); + } + return std::make_shared(data_type->getName()); } @@ -178,11 +206,9 @@ ASTPtr StoragePostgreSQLReplica::getCreateNestedTableQuery(const std::function

name = "tuple"; order_by_expression->arguments = std::make_shared(); + //TODO: check for nullable for (const auto & column : primary_key_columns) - { - LOG_WARNING(&Poco::Logger::get("StoragePostgreSQLReplica"), "kssenii table columns {}", column.name); order_by_expression->arguments->children.emplace_back(std::make_shared(column.name)); - } } for (const auto & [name, type] : ordinary_columns_and_types) diff --git a/tests/integration/test_postgresql_replica_database_engine/test.py b/tests/integration/test_postgresql_replica_database_engine/test.py index aa11b9419b4..a9e4fd9ee30 100644 --- a/tests/integration/test_postgresql_replica_database_engine/test.py +++ b/tests/integration/test_postgresql_replica_database_engine/test.py @@ -36,13 +36,13 @@ def create_postgres_table(cursor, table_name): cursor.execute('ALTER TABLE {} REPLICA IDENTITY FULL;'.format(table_name)) -def check_tables_are_synchronized(table_name): - expected = instance.query('select * from postgres_database.{} order by key;'.format(table_name)) - result = instance.query('select * from test_database.{} order by key;'.format(table_name)) +def check_tables_are_synchronized(table_name, order_by='key'): + expected = instance.query('select * from postgres_database.{} order by {};'.format(table_name, order_by)) + result = instance.query('select * from test_database.{} order by {};'.format(table_name, order_by)) while result != expected: time.sleep(0.5) - result = instance.query('select * from test_database.{} order by key;'.format(table_name)) + result = instance.query('select * from test_database.{} order by {};'.format(table_name, order_by)) assert(result == expected) @@ -121,6 +121,8 @@ def test_replicating_dml(started_cluster): for i in range(NUM_TABLES): cursor.execute('DELETE FROM postgresql_replica_{} WHERE (value*value + {}) % 2 = 0;'.format(i, i)) + cursor.execute('UPDATE postgresql_replica_{} SET value = value - (value % 7) WHERE key > 128 AND key < 512;'.format(i)) + cursor.execute('DELETE FROM postgresql_replica_{} WHERE key % 7 = 1;'.format(i, i)) for i in range(NUM_TABLES): check_tables_are_synchronized('postgresql_replica_{}'.format(i)); @@ -132,6 +134,81 @@ def test_replicating_dml(started_cluster): assert 'test_database' not in instance.query('SHOW DATABASES') +def test_different_data_types(started_cluster): + conn = get_postgres_conn(True) + cursor = conn.cursor() + cursor.execute('drop table if exists test_data_types;') + cursor.execute('drop table if exists test_array_data_type;') + + cursor.execute( + '''CREATE TABLE test_data_types ( + id integer PRIMARY KEY, a smallint, b integer, c bigint, d real, e double precision, f serial, g bigserial, + h timestamp, i date, j decimal(5, 5), k numeric(5, 5))''') + + cursor.execute( + '''CREATE TABLE test_array_data_type + ( + key Integer NOT NULL PRIMARY KEY, + a Date[] NOT NULL, -- Date + b Timestamp[] NOT NULL, -- DateTime + c real[][] NOT NULL, -- Float32 + d double precision[][] NOT NULL, -- Float64 + e decimal(5, 5)[][][] NOT NULL, -- Decimal32 + f integer[][][] NOT NULL, -- Int32 + g Text[][][][][] NOT NULL, -- String + h Integer[][][], -- Nullable(Int32) + i Char(2)[][][][], -- Nullable(String) + k Char(2)[] -- Nullable(String) + )''') + + instance.query( + "CREATE DATABASE test_database ENGINE = PostgreSQLReplica('postgres1:5432', 'postgres_database', 'postgres', 'mysecretpassword')") + + for i in range(10): + instance.query(''' + INSERT INTO postgres_database.test_data_types VALUES + ({}, -32768, -2147483648, -9223372036854775808, 1.12345, 1.1234567890, 
2147483647, 9223372036854775807, '2000-05-12 12:12:12', '2000-05-12', 0.2, 0.2)'''.format(i)) + + check_tables_are_synchronized('test_data_types', 'id'); + result = instance.query('SELECT * FROM test_database.test_data_types ORDER BY id LIMIT 1;') + assert(result == '0\t-32768\t-2147483648\t-9223372036854775808\t1.12345\t1.123456789\t2147483647\t9223372036854775807\t2000-05-12 12:12:12\t2000-05-12\t0.20000\t0.20000\n') + cursor.execute('drop table test_data_types;') + + instance.query("INSERT INTO postgres_database.test_array_data_type " + "VALUES (" + "0, " + "['2000-05-12', '2000-05-12'], " + "['2000-05-12 12:12:12', '2000-05-12 12:12:12'], " + "[[1.12345], [1.12345], [1.12345]], " + "[[1.1234567891], [1.1234567891], [1.1234567891]], " + "[[[0.11111, 0.11111]], [[0.22222, 0.22222]], [[0.33333, 0.33333]]], " + "[[[1, 1], [1, 1]], [[3, 3], [3, 3]], [[4, 4], [5, 5]]], " + "[[[[['winx', 'winx', 'winx']]]]], " + "[[[1, NULL], [NULL, 1]], [[NULL, NULL], [NULL, NULL]], [[4, 4], [5, 5]]], " + "[[[[NULL]]]], " + "[]" + ")") + + expected = ( + "0\t" + + "['2000-05-12','2000-05-12']\t" + + "['2000-05-12 12:12:12','2000-05-12 12:12:12']\t" + + "[[1.12345],[1.12345],[1.12345]]\t" + + "[[1.1234567891],[1.1234567891],[1.1234567891]]\t" + + "[[[0.11111,0.11111]],[[0.22222,0.22222]],[[0.33333,0.33333]]]\t" + "[[[1,1],[1,1]],[[3,3],[3,3]],[[4,4],[5,5]]]\t" + "[[[[['winx','winx','winx']]]]]\t" + "[[[1,NULL],[NULL,1]],[[NULL,NULL],[NULL,NULL]],[[4,4],[5,5]]]\t" + "[[[[NULL]]]]\t" + "[]\n" + ) + + check_tables_are_synchronized('test_array_data_type'); + result = instance.query('SELECT * FROM test_database.test_array_data_type ORDER BY key;') + instance.query("DROP DATABASE test_database") + assert(result == expected) + + if __name__ == '__main__': cluster.start() input("Cluster created, press any key to destroy...") diff --git a/tests/integration/test_storage_postgresql_replica/test.py b/tests/integration/test_storage_postgresql_replica/test.py index 8773c484039..646364a7ca3 100644 --- a/tests/integration/test_storage_postgresql_replica/test.py +++ b/tests/integration/test_storage_postgresql_replica/test.py @@ -52,7 +52,6 @@ def started_cluster(): instance.query(''' CREATE DATABASE postgres_database ENGINE = PostgreSQL('postgres1:5432', 'postgres_database', 'postgres', 'mysecretpassword')''') - instance.query('CREATE DATABASE test') yield cluster @@ -140,6 +139,7 @@ def test_detach_attach_is_ok(started_cluster): instance.query('DETACH TABLE test.postgresql_replica') instance.query('ATTACH TABLE test.postgresql_replica') + time.sleep(0.5) result = instance.query('SELECT * FROM test.postgresql_replica ORDER BY key;') cursor.execute('DROP TABLE postgresql_replica;') postgresql_replica_check_result(result, True) @@ -206,10 +206,6 @@ def test_replicating_delete_queries(started_cluster): time.sleep(0.2) result = instance.query('SELECT * FROM test.postgresql_replica ORDER BY key;') - postgresql_replica_check_result(result, True) - - result = instance.query('SELECT * FROM test.postgresql_replica ORDER BY key;') - instance.query("INSERT INTO postgres_database.postgresql_replica SELECT 50 + number, 50 + number from numbers(50)") result = instance.query('SELECT count() FROM test.postgresql_replica;') From 219dece1d0c732a941f0b1ec1b8cfed9b3d1c276 Mon Sep 17 00:00:00 2001 From: kssenii Date: Fri, 12 Feb 2021 15:48:01 +0000 Subject: [PATCH 030/931] Slightly better --- .../PostgreSQL/DatabasePostgreSQLReplica.cpp | 30 +++-- .../PostgreSQL/DatabasePostgreSQLReplica.h | 5 +- .../PostgreSQL/PostgreSQLReplicaConsumer.cpp | 104 
+++++++++--------- .../PostgreSQL/PostgreSQLReplicaMetadata.cpp | 1 - .../PostgreSQLReplicationHandler.cpp | 31 +++--- .../PostgreSQL/PostgreSQLReplicationHandler.h | 3 + .../PostgreSQL/StoragePostgreSQLReplica.cpp | 24 ++-- .../PostgreSQL/StoragePostgreSQLReplica.h | 5 +- 8 files changed, 99 insertions(+), 104 deletions(-) diff --git a/src/Databases/PostgreSQL/DatabasePostgreSQLReplica.cpp b/src/Databases/PostgreSQL/DatabasePostgreSQLReplica.cpp index a9b07a0c65b..049c17eaf8a 100644 --- a/src/Databases/PostgreSQL/DatabasePostgreSQLReplica.cpp +++ b/src/Databases/PostgreSQL/DatabasePostgreSQLReplica.cpp @@ -55,6 +55,7 @@ DatabasePostgreSQLReplica::DatabasePostgreSQLReplica( : DatabaseOrdinary( database_name_, metadata_path_, "data/" + escapeForFileName(database_name_) + "/", "DatabasePostgreSQLReplica (" + database_name_ + ")", context) + , log(&Poco::Logger::get("PostgreSQLReplicaDatabaseEngine")) , global_context(context.getGlobalContext()) , metadata_path(metadata_path_) , database_engine_define(database_engine_define_->clone()) @@ -117,7 +118,7 @@ void DatabasePostgreSQLReplica::startSynchronization() } } - LOG_TRACE(&Poco::Logger::get("PostgreSQLReplicaDatabaseEngine"), "Loaded {} tables. Starting synchronization", tables.size()); + LOG_TRACE(log, "Loaded {} tables. Starting synchronization", tables.size()); replication_handler->startup(); } @@ -173,11 +174,20 @@ StoragePtr DatabasePostgreSQLReplica::tryGetTable(const String & name, con } -/// TODO: assert called from sync thread template void DatabasePostgreSQLReplica::createTable(const Context & context, const String & name, const StoragePtr & table, const ASTPtr & query) { - Base::createTable(context, name, table, query); + if (context.hasQueryContext()) + { + auto storage_set = context.getQueryContext().getQueryFactoriesInfo().storages; + if (storage_set.find("ReplacingMergeTree") != storage_set.end()) + { + Base::createTable(context, name, table, query); + return; + } + } + + LOG_WARNING(log, "Create table query allowed only for ReplacingMergeTree engine and from synchronization thread"); } @@ -188,20 +198,6 @@ void DatabasePostgreSQLReplica::dropTable(const Context & context, const S } -template -void DatabasePostgreSQLReplica::attachTable(const String & name, const StoragePtr & table, const String & relative_table_path) -{ - Base::attachTable(name, table, relative_table_path); -} - - -template -StoragePtr DatabasePostgreSQLReplica::detachTable(const String & name) -{ - return Base::detachTable(name); -} - - template void DatabasePostgreSQLReplica::drop(const Context & context) { diff --git a/src/Databases/PostgreSQL/DatabasePostgreSQLReplica.h b/src/Databases/PostgreSQL/DatabasePostgreSQLReplica.h index a73acd7b27b..5847f47ebef 100644 --- a/src/Databases/PostgreSQL/DatabasePostgreSQLReplica.h +++ b/src/Databases/PostgreSQL/DatabasePostgreSQLReplica.h @@ -53,10 +53,6 @@ public: void dropTable(const Context & context, const String & name, bool no_delay) override; - void attachTable(const String & name, const StoragePtr & table, const String & relative_table_path) override; - - StoragePtr detachTable(const String & name) override; - void drop(const Context & context) override; void shutdown() override; @@ -66,6 +62,7 @@ private: void startSynchronization(); StoragePtr getStorage(const String & name); + Poco::Logger * log; const Context global_context; String metadata_path; ASTPtr database_engine_define; diff --git a/src/Storages/PostgreSQL/PostgreSQLReplicaConsumer.cpp b/src/Storages/PostgreSQL/PostgreSQLReplicaConsumer.cpp index 
3435abc1fa9..50896fa8394 100644
--- a/src/Storages/PostgreSQL/PostgreSQLReplicaConsumer.cpp
+++ b/src/Storages/PostgreSQL/PostgreSQLReplicaConsumer.cpp
@@ -1,4 +1,5 @@
 #include "PostgreSQLReplicaConsumer.h"
+#include "StoragePostgreSQLReplica.h"

 #include
 #include
@@ -85,7 +86,6 @@ void PostgreSQLReplicaConsumer::stopSynchronization()
 void PostgreSQLReplicaConsumer::synchronizationStream()
 {
     auto start_time = std::chrono::steady_clock::now();
-    LOG_TRACE(log, "Starting synchronization stream");

     while (!stop_synchronization)
     {
@@ -105,7 +105,6 @@ void PostgreSQLReplicaConsumer::synchronizationStream()
 void PostgreSQLReplicaConsumer::insertValue(BufferData & buffer, const std::string & value, size_t column_idx)
 {
-    LOG_TRACE(log, "INSERTING VALUE {}", value);
     const auto & sample = buffer.description.sample_block.getByPosition(column_idx);
     bool is_nullable = buffer.description.types[column_idx].second;

@@ -198,21 +197,24 @@ void PostgreSQLReplicaConsumer::readTupleData(
     BufferData & buffer, const char * message, size_t & pos, [[maybe_unused]] size_t size, PostgreSQLQuery type, bool old_value)
 {
     Int16 num_columns = readInt16(message, pos, size);
-    /// 'n' means nullable, 'u' means TOASTed value, 't' means text formatted data
-    LOG_DEBUG(log, "num_columns {}", num_columns);
+    LOG_DEBUG(log, "number of columns {}", num_columns);
+
     for (int column_idx = 0; column_idx < num_columns; ++column_idx)
     {
+        /// 'n' means nullable, 'u' means TOASTed value, 't' means text formatted data
         char identifier = readInt8(message, pos, size);
         Int32 col_len = readInt32(message, pos, size);
         String value;
+
         for (int i = 0; i < col_len; ++i)
         {
             value += readInt8(message, pos, size);
         }
+
+        /// TODO: Check for null values and use insertDefaultValue
         insertValue(buffer, value, column_idx);

-        LOG_DEBUG(log, "identifier {}, col_len {}, value {}", identifier, col_len, value);
+        LOG_DEBUG(log, "Identifier: {}, column length: {}, value: {}", identifier, col_len, value);
     }

     switch (type)
@@ -233,6 +235,9 @@ void PostgreSQLReplicaConsumer::readTupleData(
         }
         case PostgreSQLQuery::UPDATE:
         {
+            /// TODO: If table has primary key, skip old value and remove first insert with -1.
+            /// Otherwise use replica identity full (with check) and use first insert.
+
             if (old_value)
                 buffer.columns[num_columns]->insert(Int8(-1));
             else
@@ -245,33 +250,31 @@ void PostgreSQLReplicaConsumer::readTupleData(
     }
 }

-/// test relation id can be shuffled ?
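/// A hedged illustration of the wire format handled below (field layout taken from the
/// pgoutput documentation linked in the next comment; the helper names mirror this file,
/// while the struct and the Int32 xid field, which processReplicationMessage() does not
/// read, are assumptions added for the example):
struct BeginMessageExample { Int64 final_lsn; Int64 commit_timestamp; Int32 xid; };

static BeginMessageExample parseBeginExample(const char * message, size_t & pos, size_t size)
{
    BeginMessageExample begin;
    begin.final_lsn = readInt64(message, pos, size);        /// LSN of the transaction end
    begin.commit_timestamp = readInt64(message, pos, size); /// microseconds since the PostgreSQL epoch
    begin.xid = readInt32(message, pos, size);              /// transaction id
    return begin;
}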
+ +/// https://www.postgresql.org/docs/13/protocol-logicalrep-message-formats.html void PostgreSQLReplicaConsumer::processReplicationMessage(const char * replication_message, size_t size) { /// Skip '\x' size_t pos = 2; char type = readInt8(replication_message, pos, size); - LOG_TRACE(log, "TYPE: {}", type); + LOG_DEBUG(log, "Type of replication message: {}", type); + switch (type) { case 'B': // Begin { - Int64 transaction_end_lsn = readInt64(replication_message, pos, size); - Int64 transaction_commit_timestamp = readInt64(replication_message, pos, size); - LOG_DEBUG(log, "transaction lsn {}, transaction commit timespamp {}", - transaction_end_lsn, transaction_commit_timestamp); + readInt64(replication_message, pos, size); /// Int64 transaction end lsn + readInt64(replication_message, pos, size); /// Int64 transaction commit timestamp break; } case 'C': // Commit { - readInt8(replication_message, pos, size); - Int64 commit_lsn = readInt64(replication_message, pos, size); - Int64 transaction_end_lsn = readInt64(replication_message, pos, size); - /// Since postgres epoch - Int64 transaction_commit_timestamp = readInt64(replication_message, pos, size); - LOG_DEBUG(log, "commit lsn {}, transaction lsn {}, transaction commit timestamp {}", - commit_lsn, transaction_end_lsn, transaction_commit_timestamp); + readInt8(replication_message, pos, size); /// unused flags + readInt64(replication_message, pos, size); /// Int64 commit lsn + readInt64(replication_message, pos, size); /// Int64 transaction end lsn + readInt64(replication_message, pos, size); /// Int64 transaction commit timestamp + final_lsn = current_lsn; break; } @@ -280,38 +283,49 @@ void PostgreSQLReplicaConsumer::processReplicationMessage(const char * replicati case 'R': // Relation { Int32 relation_id = readInt32(replication_message, pos, size); - String relation_namespace, relation_name; - readString(replication_message, pos, size, relation_namespace); + String relation_namespace, relation_name; + + readString(replication_message, pos, size, relation_namespace); readString(replication_message, pos, size, relation_name); + + /// TODO: Save relation id (unique to tables) and check if they might be shuffled in current block. + /// If shuffled, store tables based on those id's and insert accordingly. table_to_insert = relation_name; tables_to_sync.insert(table_to_insert); - LOG_DEBUG(log, "INSERTING TABLE {}", table_to_insert); Int8 replica_identity = readInt8(replication_message, pos, size); Int16 num_columns = readInt16(replication_message, pos, size); + /// TODO: If replica identity is not full, check if there will be a full columns list. LOG_DEBUG(log, - "Replication message type 'R', relation_id: {}, namespace: {}, relation name {}, replica identity {}, columns number {}", + "INFO: relation id: {}, namespace: {}, relation name: {}, replica identity: {}, columns number: {}", relation_id, relation_namespace, relation_name, replica_identity, num_columns); Int8 key; Int32 data_type_id, type_modifier; + + /// TODO: Check here if table structure has changed and, if possible, change table structure and redump table. 
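            /// One possible shape for the TODO above (an assumption, not part of the patch):
            /// compare the relation's column names against the buffer's sample block and
            /// trigger a redump when they diverge.
            auto structure_changed = [](const Block & sample_block, const std::vector<String> & relation_columns)
            {
                if (sample_block.columns() != relation_columns.size())
                    return true;
                for (size_t i = 0; i < relation_columns.size(); ++i)
                    if (sample_block.getByPosition(i).name != relation_columns[i])
                        return true;
                return false;
            };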
for (uint16_t i = 0; i < num_columns; ++i) { String column_name; key = readInt8(replication_message, pos, size); readString(replication_message, pos, size, column_name); + data_type_id = readInt32(replication_message, pos, size); type_modifier = readInt32(replication_message, pos, size); - LOG_DEBUG(log, "Key {}, column name {}, data type id {}, type modifier {}", key, column_name, data_type_id, type_modifier); + + LOG_DEBUG(log, + "Key: {}, column name: {}, data type id: {}, type modifier: {}", + key, column_name, data_type_id, type_modifier); } if (storages.find(table_to_insert) == storages.end()) { - throw Exception(ErrorCodes::UNKNOWN_TABLE, - "Table {} does not exist, but is included in replication stream", table_to_insert); + throw Exception(ErrorCodes::LOGICAL_ERROR, + "Storage for table {} does not exist, but is included in replication stream", table_to_insert); } + [[maybe_unused]] auto buffer_iter = buffers.find(table_to_insert); assert(buffer_iter != buffers.end()); @@ -324,7 +338,8 @@ void PostgreSQLReplicaConsumer::processReplicationMessage(const char * replicati Int32 relation_id = readInt32(replication_message, pos, size); Int8 new_tuple = readInt8(replication_message, pos, size); - LOG_DEBUG(log, "relationID {}, newTuple {} current insert tabel {}", relation_id, new_tuple, table_to_insert); + LOG_DEBUG(log, "relationID: {}, newTuple: {}, current insert table: {}", relation_id, new_tuple, table_to_insert); + auto buffer = buffers.find(table_to_insert); if (buffer == buffers.end()) { @@ -341,14 +356,17 @@ void PostgreSQLReplicaConsumer::processReplicationMessage(const char * replicati LOG_DEBUG(log, "relationID {}, key {} current insert table {}", relation_id, primary_key_or_old_tuple_data, table_to_insert); + /// TODO: Two cases: based on primary keys and based on replica identity full. + /// Add first one and add a check for second one. 
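            /// For the TODO above: per the pgoutput format, primary_key_or_old_tuple_data read
            /// earlier in this case is 'K' (old key, replica identity via an index) or 'O'
            /// (old row, REPLICA IDENTITY FULL); only the FULL path is handled below. A hedged
            /// sketch of the missing key-based branch:
            ///
            ///     if (primary_key_or_old_tuple_data == 'K')
            ///         /// old values carry only the key columns; match rows by key instead of
            ///         /// inserting a full old row with _sign = -1
            ///         readTupleData(buffer->second, replication_message, pos, size, PostgreSQLQuery::UPDATE, true);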
+ auto buffer = buffers.find(table_to_insert); readTupleData(buffer->second, replication_message, pos, size, PostgreSQLQuery::UPDATE, true); if (pos + 1 < size) { Int8 new_tuple_data = readInt8(replication_message, pos, size); - LOG_DEBUG(log, "new tuple data {}", new_tuple_data); readTupleData(buffer->second, replication_message, pos, size, PostgreSQLQuery::UPDATE); + LOG_DEBUG(log, "new tuple data: {}", new_tuple_data); } break; @@ -356,11 +374,9 @@ void PostgreSQLReplicaConsumer::processReplicationMessage(const char * replicati case 'D': // Delete { Int32 relation_id = readInt32(replication_message, pos, size); - //Int8 index_replica_identity = readInt8(replication_message, pos); Int8 full_replica_identity = readInt8(replication_message, pos, size); - LOG_DEBUG(log, "relationID {}, full replica identity {}", - relation_id, full_replica_identity); + LOG_DEBUG(log, "relationID: {}, full replica identity: {}", relation_id, full_replica_identity); auto buffer = buffers.find(table_to_insert); readTupleData(buffer->second, replication_message, pos, size, PostgreSQLQuery::DELETE); @@ -377,38 +393,32 @@ void PostgreSQLReplicaConsumer::processReplicationMessage(const char * replicati void PostgreSQLReplicaConsumer::syncTables(std::shared_ptr tx) { - LOG_TRACE(log, "AVAILABLE TABLES {}", tables_to_sync.size()); for (const auto & table_name : tables_to_sync) { try { - LOG_TRACE(log, "ATTEMPT SYNCING TABLE {}", table_name); auto & buffer = buffers.find(table_name)->second; Block result_rows = buffer.description.sample_block.cloneWithColumns(std::move(buffer.columns)); if (result_rows.rows()) { - LOG_TRACE(log, "SYNCING TABLE {} rows {} max_block_size {}", table_name, result_rows.rows(), max_block_size); - metadata.commitMetadata(final_lsn, [&]() { - Context insert_context(*context); + auto storage = storages[table_name]; + + auto insert = std::make_shared(); + insert->table_id = storage->getStorageID(); + + auto insert_context(*context); insert_context.makeQueryContext(); insert_context.addQueryFactoriesInfo(Context::QueryLogFactories::Storage, "ReplacingMergeTree"); - auto insert = std::make_shared(); - insert->table_id = storages[table_name]->getStorageID(); - InterpreterInsertQuery interpreter(insert, insert_context); auto block_io = interpreter.execute(); - - /// TODO: what if one block is not enough OneBlockInputStream input(result_rows); copyData(input, *block_io.out); - LOG_TRACE(log, "TABLE SYNC END"); - auto actual_lsn = advanceLSN(tx); buffer.columns = buffer.description.sample_block.cloneEmptyColumns(); @@ -422,6 +432,7 @@ void PostgreSQLReplicaConsumer::syncTables(std::shared_ptr } } + LOG_DEBUG(log, "Table sync end for {} tables", tables_to_sync.size()); tables_to_sync.clear(); tx->commit(); } @@ -429,8 +440,6 @@ void PostgreSQLReplicaConsumer::syncTables(std::shared_ptr String PostgreSQLReplicaConsumer::advanceLSN(std::shared_ptr tx) { - LOG_TRACE(log, "CURRENT LSN FROM TO {}", final_lsn); - std::string query_str = fmt::format("SELECT end_lsn FROM pg_replication_slot_advance('{}', '{}')", replication_slot_name, final_lsn); pqxx::result result{tx->exec(query_str)}; @@ -468,7 +477,6 @@ bool PostgreSQLReplicaConsumer::readFromReplicationSlot() if (!row) { - LOG_TRACE(log, "STREAM REPLICATION END"); stream.complete(); if (slot_empty) @@ -481,19 +489,17 @@ bool PostgreSQLReplicaConsumer::readFromReplicationSlot() } slot_empty = false; - current_lsn = (*row)[0]; - LOG_TRACE(log, "Replication message: {}", (*row)[1]); + processReplicationMessage((*row)[1].c_str(), (*row)[1].size()); } } 
catch (const pqxx::sql_error & e) { - /// sql replication interface has the problem that it registers relcache + /// For now sql replication interface is used and it has the problem that it registers relcache /// callbacks on each pg_logical_slot_get_changes and there is no way to invalidate them: /// https://github.com/postgres/postgres/blob/master/src/backend/replication/pgoutput/pgoutput.c#L1128 /// So at some point will get out of limit and then they will be cleaned. - std::string error_message = e.what(); if (error_message.find("out of relcache_callback_list slots") == std::string::npos) tryLogCurrentException(__PRETTY_FUNCTION__); diff --git a/src/Storages/PostgreSQL/PostgreSQLReplicaMetadata.cpp b/src/Storages/PostgreSQL/PostgreSQLReplicaMetadata.cpp index 9cd5f368a6d..a5ae25c3f53 100644 --- a/src/Storages/PostgreSQL/PostgreSQLReplicaMetadata.cpp +++ b/src/Storages/PostgreSQL/PostgreSQLReplicaMetadata.cpp @@ -27,7 +27,6 @@ PostgreSQLReplicaMetadata::PostgreSQLReplicaMetadata(const std::string & metadat void PostgreSQLReplicaMetadata::readMetadata() { - LOG_DEBUG(&Poco::Logger::get("PostgreSQLReplicaMetadata"), "kssenii 1 {}", metadata_file); if (Poco::File(metadata_file).exists()) { ReadBufferFromFile in(metadata_file, DBMS_DEFAULT_BUFFER_SIZE); diff --git a/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp b/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp index dd961186494..5a1ab778382 100644 --- a/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp +++ b/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp @@ -22,7 +22,7 @@ namespace ErrorCodes static const auto reschedule_ms = 500; -/// TODO: context should be const +/// TODO: add test for syncing only subset of databse tables PostgreSQLReplicationHandler::PostgreSQLReplicationHandler( const std::string & database_name_, @@ -81,7 +81,6 @@ void PostgreSQLReplicationHandler::waitConnectionAndStart() } catch (...) { - /// TODO: throw tryLogCurrentException(__PRETTY_FUNCTION__); } } @@ -101,7 +100,6 @@ bool PostgreSQLReplicationHandler::isPublicationExist(std::shared_ptr() == "t"); - /// TODO: check if publication is still valid? if (publication_exists) LOG_TRACE(log, "Publication {} already exists. Using existing version", publication_name); @@ -131,26 +129,26 @@ void PostgreSQLReplicationHandler::createPublication(std::shared_ptr e.addMessage("while creating pg_publication"); throw; } - - /// TODO: check replica identity? - /// Requires changed replica identity for included table to be able to receive old values of updated rows. } void PostgreSQLReplicationHandler::startSynchronization() { - /// used commands require a specific transaction isolation mode. + /// Used commands require a specific transaction isolation mode. replication_connection->conn()->set_variable("default_transaction_isolation", "'repeatable read'"); auto tx = std::make_shared(*replication_connection->conn()); + bool new_publication = false; + if (publication_name.empty()) { publication_name = fmt::format("{}_ch_publication", database_name); - /// Publication defines what tables are included into replication stream. Should be deleted only if MaterializePostgreSQL - /// table is dropped. 
if (!isPublicationExist(tx)) + { createPublication(tx); + new_publication = true; + } } else if (!isPublicationExist(tx)) { @@ -175,9 +173,11 @@ void PostgreSQLReplicationHandler::startSynchronization() { initial_sync(); } - else if (!Poco::File(metadata_path).exists()) + else if (!Poco::File(metadata_path).exists() || new_publication) { - /// If replication slot exists and metadata file (where last synced version is written) does not exist, it is not normal. + /// In case of some failure, the following cases are possible (since publication and replication slot are reused): + /// 1. If replication slot exists and metadata file (where last synced version is written) does not exist, it is not ok. + /// 2. If created a new publication and replication slot existed before it was created, it is not ok. dropReplicationSlot(ntx, replication_slot); initial_sync(); } @@ -210,8 +210,6 @@ void PostgreSQLReplicationHandler::startSynchronization() void PostgreSQLReplicationHandler::loadFromSnapshot(std::string & snapshot_name) { - LOG_DEBUG(log, "Creating transaction snapshot"); - for (const auto & storage_data : storages) { try @@ -231,12 +229,9 @@ void PostgreSQLReplicationHandler::loadFromSnapshot(std::string & snapshot_name) /// Already connected to needed database, no need to add it to query. query_str = fmt::format("SELECT * FROM {}", storage_data.first); - Context insert_context(*context); - insert_context.makeQueryContext(); - insert_context.addQueryFactoriesInfo(Context::QueryLogFactories::Storage, "ReplacingMergeTree"); - auto insert = std::make_shared(); insert->table_id = nested_storage->getStorageID(); + auto insert_context = storage_data.second->makeNestedTableContext(); InterpreterInsertQuery interpreter(insert, insert_context); auto block_io = interpreter.execute(); @@ -259,7 +254,7 @@ void PostgreSQLReplicationHandler::loadFromSnapshot(std::string & snapshot_name) } } - LOG_DEBUG(log, "Done loading from snapshot"); + LOG_DEBUG(log, "Table dump end"); } diff --git a/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.h b/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.h index 5a44215a612..e36e1cd0490 100644 --- a/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.h +++ b/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.h @@ -40,12 +40,15 @@ private: using NontransactionPtr = std::shared_ptr; bool isPublicationExist(std::shared_ptr tx); + bool isReplicationSlotExist(NontransactionPtr ntx, std::string & slot_name); void createPublication(std::shared_ptr tx); + void createReplicationSlot(NontransactionPtr ntx, std::string & start_lsn, std::string & snapshot_name); void dropReplicationSlot(NontransactionPtr tx, std::string & slot_name); + void dropPublication(NontransactionPtr ntx); void waitConnectionAndStart(); diff --git a/src/Storages/PostgreSQL/StoragePostgreSQLReplica.cpp b/src/Storages/PostgreSQL/StoragePostgreSQLReplica.cpp index 7def1a317be..e45d678ba0b 100644 --- a/src/Storages/PostgreSQL/StoragePostgreSQLReplica.cpp +++ b/src/Storages/PostgreSQL/StoragePostgreSQLReplica.cpp @@ -252,23 +252,23 @@ void StoragePostgreSQLReplica::createNestedIfNeeded(const std::functiondatabase = table_id.database_name; ast_drop->if_exists = true; - auto drop_context(*global_context); - drop_context.makeQueryContext(); - - auto interpreter = InterpreterDropQuery(ast_drop, drop_context); + auto context = makeNestedTableContext(); + auto interpreter = InterpreterDropQuery(ast_drop, context); interpreter.execute(); } diff --git a/src/Storages/PostgreSQL/StoragePostgreSQLReplica.h 
b/src/Storages/PostgreSQL/StoragePostgreSQLReplica.h index e8de30afeb2..4059cb744e6 100644 --- a/src/Storages/PostgreSQL/StoragePostgreSQLReplica.h +++ b/src/Storages/PostgreSQL/StoragePostgreSQLReplica.h @@ -58,7 +58,10 @@ public: /// Throw if impossible to get StoragePtr getNested(); + Context makeNestedTableContext() const; + void setNestedLoaded() { nested_loaded.store(true); } + bool isNestedLoaded() { return nested_loaded.load(); } protected: @@ -81,8 +84,6 @@ private: std::string getNestedTableName() const; - Context makeGetNestedTableContext() const; - void dropNested(); std::string remote_table_name; From 44f4f1a41208bc1b65062b21301ca95134b591cf Mon Sep 17 00:00:00 2001 From: kssenii Date: Fri, 12 Feb 2021 18:21:55 +0000 Subject: [PATCH 031/931] Allow to replicate a subset of database tables --- src/Databases/DatabaseFactory.cpp | 5 +- .../PostgreSQL/DatabasePostgreSQLReplica.cpp | 10 +- .../PostgreSQL/DatabasePostgreSQLReplica.h | 8 +- .../PostgreSQL/PostgreSQLReplicaSettings.h | 3 +- .../PostgreSQLReplicationHandler.cpp | 146 ++++++++---------- .../PostgreSQL/PostgreSQLReplicationHandler.h | 12 +- .../PostgreSQL/StoragePostgreSQLReplica.cpp | 7 +- .../test.py | 54 ++++++- .../test_storage_postgresql_replica/test.py | 1 + 9 files changed, 140 insertions(+), 106 deletions(-) diff --git a/src/Databases/DatabaseFactory.cpp b/src/Databases/DatabaseFactory.cpp index d4b7674f73b..f92c0157e74 100644 --- a/src/Databases/DatabaseFactory.cpp +++ b/src/Databases/DatabaseFactory.cpp @@ -103,8 +103,9 @@ DatabasePtr DatabaseFactory::getImpl(const ASTCreateQuery & create, const String && engine_name != "Lazy" && engine_define->engine->arguments) throw Exception("Database engine " + engine_name + " cannot have arguments", ErrorCodes::BAD_ARGUMENTS); - if (engine_define->engine->parameters || engine_define->partition_by || engine_define->primary_key || engine_define->order_by || - engine_define->sample_by || (!endsWith(engine_name, "MySQL") && engine_define->settings)) + if (engine_define->engine->parameters || engine_define->partition_by || engine_define->primary_key || + engine_define->order_by || engine_define->sample_by || + (!endsWith(engine_name, "MySQL") && (engine_name != "PostgreSQLReplica") && engine_define->settings)) throw Exception("Database engine " + engine_name + " cannot have parameters, primary_key, order_by, sample_by, settings", ErrorCodes::UNKNOWN_ELEMENT_IN_AST); diff --git a/src/Databases/PostgreSQL/DatabasePostgreSQLReplica.cpp b/src/Databases/PostgreSQL/DatabasePostgreSQLReplica.cpp index 049c17eaf8a..0052f8fe10d 100644 --- a/src/Databases/PostgreSQL/DatabasePostgreSQLReplica.cpp +++ b/src/Databases/PostgreSQL/DatabasePostgreSQLReplica.cpp @@ -39,8 +39,6 @@ namespace ErrorCodes static const auto METADATA_SUFFIX = ".postgresql_replica_metadata"; -/// TODO: add detach, after which table structure is updated, need to update StoragePtr and recreate nested_storage. -/// Also pass new storagePtr to replication Handler. Stop replication stream mean while? 
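/// Usage sketch for this commit (the statement shape is an assumption based on the new
/// postgresql_tables_list setting and the DatabaseFactory change above; the list is taken
/// to be comma-separated, matching how createPublicationIfNeeded() composes it):
///
///     CREATE DATABASE pg_db
///     ENGINE = PostgreSQLReplica('postgres1:5432', 'postgres_database', 'postgres', 'mysecretpassword')
///     SETTINGS postgresql_tables_list = 'table1,table2';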
template<> DatabasePostgreSQLReplica::DatabasePostgreSQLReplica( @@ -91,19 +89,15 @@ DatabasePostgreSQLReplica::DatabasePostgreSQLReplica( template void DatabasePostgreSQLReplica::startSynchronization() { - auto publication_name = global_context.getMacros()->expand(settings->postgresql_publication_name.value); - auto replication_slot = global_context.getMacros()->expand(settings->postgresql_replication_slot_name.value); - replication_handler = std::make_unique( remote_database_name, connection->conn_str(), metadata_path + METADATA_SUFFIX, std::make_shared(global_context), - replication_slot, - publication_name, settings->postgresql_max_block_size.changed ? settings->postgresql_max_block_size.value - : (global_context.getSettingsRef().max_insert_block_size.value)); + : (global_context.getSettingsRef().max_insert_block_size.value), + global_context.getMacros()->expand(settings->postgresql_tables_list.value)); std::unordered_set tables_to_replicate = replication_handler->fetchRequiredTables(connection->conn()); diff --git a/src/Databases/PostgreSQL/DatabasePostgreSQLReplica.h b/src/Databases/PostgreSQL/DatabasePostgreSQLReplica.h index 5847f47ebef..24763e697e6 100644 --- a/src/Databases/PostgreSQL/DatabasePostgreSQLReplica.h +++ b/src/Databases/PostgreSQL/DatabasePostgreSQLReplica.h @@ -40,6 +40,7 @@ public: std::unique_ptr settings_); String getEngineName() const override { return "PostgreSQLReplica"; } + String getMetadataPath() const override { return metadata_path; } void loadStoredObjects(Context &, bool, bool force_attach) override; @@ -60,6 +61,7 @@ public: private: void startSynchronization(); + StoragePtr getStorage(const String & name); Poco::Logger * log; @@ -72,12 +74,6 @@ private: std::shared_ptr replication_handler; std::map tables; - - bool checkPostgresTable(const String & table_name) const; - std::unordered_set fetchTablesList() const; - StoragePtr fetchTable(const String & table_name, const Context & context, const bool table_checked) const; - void removeOutdatedTables(); - ASTPtr getColumnDeclaration(const DataTypePtr & data_type) const; }; } diff --git a/src/Storages/PostgreSQL/PostgreSQLReplicaSettings.h b/src/Storages/PostgreSQL/PostgreSQLReplicaSettings.h index 72b7f98ea6e..1dbd6b0a65b 100644 --- a/src/Storages/PostgreSQL/PostgreSQLReplicaSettings.h +++ b/src/Storages/PostgreSQL/PostgreSQLReplicaSettings.h @@ -8,9 +8,8 @@ namespace DB #define LIST_OF_POSTGRESQL_REPLICA_SETTINGS(M) \ - M(String, postgresql_replication_slot_name, "", "PostgreSQL replication slot name.", 0) \ - M(String, postgresql_publication_name, "", "PostgreSQL publication name.", 0) \ M(UInt64, postgresql_max_block_size, 0, "Number of row collected before flushing data into table.", 0) \ + M(String, postgresql_tables_list, "", "List of tables for PostgreSQLReplica database engine", 0) \ DECLARE_SETTINGS_TRAITS(PostgreSQLReplicaSettingsTraits, LIST_OF_POSTGRESQL_REPLICA_SETTINGS) diff --git a/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp b/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp index 5a1ab778382..76dede3ff4c 100644 --- a/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp +++ b/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp @@ -29,22 +29,19 @@ PostgreSQLReplicationHandler::PostgreSQLReplicationHandler( const std::string & conn_str, const std::string & metadata_path_, std::shared_ptr context_, - const std::string & publication_name_, - const std::string & replication_slot_name_, - const size_t max_block_size_) + const size_t max_block_size_, + const String 
tables_list_) : log(&Poco::Logger::get("PostgreSQLReplicaHandler")) , context(context_) , database_name(database_name_) , connection_str(conn_str) , metadata_path(metadata_path_) - , publication_name(publication_name_) - , replication_slot(replication_slot_name_) , max_block_size(max_block_size_) + , tables_list(tables_list_) , connection(std::make_shared(conn_str)) - , replication_connection(std::make_shared(fmt::format("{} replication=database", connection->conn_str()))) { - if (replication_slot.empty()) - replication_slot = fmt::format("{}_ch_replication_slot", database_name); + replication_slot = fmt::format("{}_ch_replication_slot", database_name); + publication_name = fmt::format("{}_ch_publication", database_name); startup_task = context->getSchedulePool().createTask("PostgreSQLReplicaStartup", [this]{ waitConnectionAndStart(); }); startup_task->deactivate(); @@ -93,71 +90,12 @@ void PostgreSQLReplicationHandler::shutdown() } -bool PostgreSQLReplicationHandler::isPublicationExist(std::shared_ptr tx) -{ - std::string query_str = fmt::format("SELECT exists (SELECT 1 FROM pg_publication WHERE pubname = '{}')", publication_name); - pqxx::result result{tx->exec(query_str)}; - assert(!result.empty()); - bool publication_exists = (result[0][0].as() == "t"); - - if (publication_exists) - LOG_TRACE(log, "Publication {} already exists. Using existing version", publication_name); - - return publication_exists; -} - - -void PostgreSQLReplicationHandler::createPublication(std::shared_ptr tx) -{ - String table_names; - for (const auto & storage_data : storages) - { - if (!table_names.empty()) - table_names += ", "; - table_names += storage_data.first; - } - - /// 'ONLY' means just a table, without descendants. - std::string query_str = fmt::format("CREATE PUBLICATION {} FOR TABLE ONLY {}", publication_name, table_names); - try - { - tx->exec(query_str); - LOG_TRACE(log, "Created publication {} with tables list: {}", publication_name, table_names); - } - catch (Exception & e) - { - e.addMessage("while creating pg_publication"); - throw; - } -} - - void PostgreSQLReplicationHandler::startSynchronization() { - /// Used commands require a specific transaction isolation mode. + createPublicationIfNeeded(connection->conn()); + + auto replication_connection = std::make_shared(fmt::format("{} replication=database", connection->conn_str())); replication_connection->conn()->set_variable("default_transaction_isolation", "'repeatable read'"); - - auto tx = std::make_shared(*replication_connection->conn()); - bool new_publication = false; - - if (publication_name.empty()) - { - publication_name = fmt::format("{}_ch_publication", database_name); - - if (!isPublicationExist(tx)) - { - createPublication(tx); - new_publication = true; - } - } - else if (!isPublicationExist(tx)) - { - throw Exception( - ErrorCodes::LOGICAL_ERROR, - "Publication name '{}' is spesified in table arguments, but it does not exist", publication_name); - } - tx->commit(); - auto ntx = std::make_shared(*replication_connection->conn()); std::string snapshot_name, start_lsn; @@ -173,7 +111,7 @@ void PostgreSQLReplicationHandler::startSynchronization() { initial_sync(); } - else if (!Poco::File(metadata_path).exists() || new_publication) + else if (!Poco::File(metadata_path).exists() || new_publication_created) { /// In case of some failure, the following cases are possible (since publication and replication slot are reused): /// 1. 
If replication slot exists and metadata file (where last synced version is written) does not exist, it is not ok. @@ -258,6 +196,59 @@ void PostgreSQLReplicationHandler::loadFromSnapshot(std::string & snapshot_name) } +bool PostgreSQLReplicationHandler::isPublicationExist(std::shared_ptr tx) +{ + std::string query_str = fmt::format("SELECT exists (SELECT 1 FROM pg_publication WHERE pubname = '{}')", publication_name); + pqxx::result result{tx->exec(query_str)}; + assert(!result.empty()); + bool publication_exists = (result[0][0].as() == "t"); + + if (publication_exists) + LOG_INFO(log, "Publication {} already exists. Using existing version", publication_name); + + return publication_exists; +} + + +void PostgreSQLReplicationHandler::createPublicationIfNeeded( + PostgreSQLConnection::ConnectionPtr connection_) +{ + if (new_publication_created) + return; + + auto tx = std::make_shared(*connection_); + + if (!isPublicationExist(tx)) + { + if (tables_list.empty()) + { + for (const auto & storage_data : storages) + { + if (!tables_list.empty()) + tables_list += ", "; + tables_list += storage_data.first; + } + } + + /// 'ONLY' means just a table, without descendants. + std::string query_str = fmt::format("CREATE PUBLICATION {} FOR TABLE ONLY {}", publication_name, tables_list); + try + { + tx->exec(query_str); + new_publication_created = true; + LOG_TRACE(log, "Created publication {} with tables list: {}", publication_name, tables_list); + } + catch (Exception & e) + { + e.addMessage("while creating pg_publication"); + throw; + } + } + + tx->commit(); +} + + bool PostgreSQLReplicationHandler::isReplicationSlotExist(NontransactionPtr ntx, std::string & slot_name) { std::string query_str = fmt::format("SELECT active, restart_lsn FROM pg_replication_slots WHERE slot_name = '{}'", slot_name); @@ -304,9 +295,6 @@ void PostgreSQLReplicationHandler::dropReplicationSlot(NontransactionPtr ntx, st void PostgreSQLReplicationHandler::dropPublication(NontransactionPtr ntx) { - if (publication_name.empty()) - return; - std::string query_str = fmt::format("DROP PUBLICATION IF EXISTS {}", publication_name); ntx->exec(query_str); } @@ -328,7 +316,6 @@ void PostgreSQLReplicationHandler::shutdownFinal() } -/// TODO: publication can be created with option `whole_database`. Check this case. 
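A minimal standalone sketch of the check-then-create publication flow above, using the same libpqxx calls the handler uses; the connection string, publication name and table list here are illustrative placeholders, not the handler's actual values:

    #include <pqxx/pqxx>
    #include <iostream>

    /// Returns true if the publication had to be created on this call.
    static bool ensurePublication(pqxx::connection & conn, const std::string & publication, const std::string & tables)
    {
        pqxx::work tx(conn);
        pqxx::result result = tx.exec(
            "SELECT exists (SELECT 1 FROM pg_publication WHERE pubname = " + tx.quote(publication) + ")");
        const bool exists = result[0][0].as<bool>();
        if (!exists)
            /// 'ONLY' limits the publication to the listed tables, without their descendants.
            tx.exec("CREATE PUBLICATION " + publication + " FOR TABLE ONLY " + tables);
        tx.commit();
        return !exists;
    }

    int main()
    {
        pqxx::connection conn("host=localhost dbname=postgres_database user=postgres password=mysecretpassword");
        if (ensurePublication(conn, "test_database_ch_publication", "postgresql_replica_0, postgresql_replica_1"))
            std::cout << "created publication" << std::endl;
    }

Note that CREATE PUBLICATION does not accept bound parameters for identifiers, which is why both the handler and this sketch splice the names directly into the query string.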
std::unordered_set PostgreSQLReplicationHandler::fetchRequiredTables(PostgreSQLConnection::ConnectionPtr connection_) { auto publication_exist = [&]() @@ -339,14 +326,17 @@ std::unordered_set PostgreSQLReplicationHandler::fetchRequiredTable return exist; }; - if (publication_name.empty() || !publication_exist()) + if (publication_exist()) + { + return fetchTablesFromPublication(connection_); + } + else if (tables_list.empty()) { - /// Replicate the whole database and create our own pg_publication return fetchPostgreSQLTablesList(connection_); } else { - /// Replicate only tables, which are included in a pg_publication + createPublicationIfNeeded(connection_); return fetchTablesFromPublication(connection_); } } diff --git a/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.h b/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.h index e36e1cd0490..a51c497c21d 100644 --- a/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.h +++ b/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.h @@ -20,9 +20,8 @@ public: const std::string & conn_str_, const std::string & metadata_path_, std::shared_ptr context_, - const std::string & publication_slot_name_, - const std::string & replication_slot_name_, - const size_t max_block_size_); + const size_t max_block_size_, + const String tables_list = ""); void startup(); @@ -43,7 +42,7 @@ private: bool isReplicationSlotExist(NontransactionPtr ntx, std::string & slot_name); - void createPublication(std::shared_ptr tx); + void createPublicationIfNeeded(PostgreSQLConnection::ConnectionPtr connection_); void createReplicationSlot(NontransactionPtr ntx, std::string & start_lsn, std::string & snapshot_name); @@ -62,14 +61,15 @@ private: Poco::Logger * log; std::shared_ptr context; const std::string database_name, connection_str, metadata_path; - std::string publication_name, replication_slot; const size_t max_block_size; + std::string tables_list, replication_slot, publication_name; - PostgreSQLConnectionPtr connection, replication_connection; + PostgreSQLConnectionPtr connection; std::shared_ptr consumer; BackgroundSchedulePool::TaskHolder startup_task; std::atomic tables_loaded = false; + bool new_publication_created = false; std::unordered_map storages; std::unordered_map nested_storages; diff --git a/src/Storages/PostgreSQL/StoragePostgreSQLReplica.cpp b/src/Storages/PostgreSQL/StoragePostgreSQLReplica.cpp index e45d678ba0b..30855e1b2a8 100644 --- a/src/Storages/PostgreSQL/StoragePostgreSQLReplica.cpp +++ b/src/Storages/PostgreSQL/StoragePostgreSQLReplica.cpp @@ -62,8 +62,6 @@ StoragePostgreSQLReplica::StoragePostgreSQLReplica( connection_str, metadata_path, global_context, - global_context->getMacros()->expand(replication_settings->postgresql_replication_slot_name.value), - global_context->getMacros()->expand(replication_settings->postgresql_publication_name.value), replication_settings->postgresql_max_block_size.changed ? 
replication_settings->postgresql_max_block_size.value : (global_context->getSettingsRef().max_insert_block_size.value) @@ -346,7 +344,10 @@ void StoragePostgreSQLReplica::dropNested() NamesAndTypesList StoragePostgreSQLReplica::getVirtuals() const { - return NamesAndTypesList{}; + if (nested_storage) + return nested_storage->getVirtuals(); + + return {}; } diff --git a/tests/integration/test_postgresql_replica_database_engine/test.py b/tests/integration/test_postgresql_replica_database_engine/test.py index a9e4fd9ee30..10cd5d2a263 100644 --- a/tests/integration/test_postgresql_replica_database_engine/test.py +++ b/tests/integration/test_postgresql_replica_database_engine/test.py @@ -32,6 +32,7 @@ def create_postgres_db(cursor, name): def create_postgres_table(cursor, table_name): + cursor.execute("DROP TABLE IF EXISTS {}".format(table_name)) cursor.execute(postgres_table_template.format(table_name)) cursor.execute('ALTER TABLE {} REPLICA IDENTITY FULL;'.format(table_name)) @@ -71,7 +72,8 @@ def postgresql_setup_teardown(): instance.query('DROP TABLE IF EXISTS test.postgresql_replica') -def test_load_and_sync_all_database(started_cluster): +def test_load_and_sync_all_database_tables(started_cluster): + instance.query("DROP DATABASE IF EXISTS test_database") conn = get_postgres_conn(True) cursor = conn.cursor() NUM_TABLES = 5 @@ -95,6 +97,7 @@ def test_load_and_sync_all_database(started_cluster): def test_replicating_dml(started_cluster): + instance.query("DROP DATABASE IF EXISTS test_database") conn = get_postgres_conn(True) cursor = conn.cursor() NUM_TABLES = 5 @@ -135,6 +138,7 @@ def test_replicating_dml(started_cluster): def test_different_data_types(started_cluster): + instance.query("DROP DATABASE IF EXISTS test_database") conn = get_postgres_conn(True) cursor = conn.cursor() cursor.execute('drop table if exists test_data_types;') @@ -209,6 +213,54 @@ def test_different_data_types(started_cluster): assert(result == expected) +def test_load_and_sync_subset_of_database_tables(started_cluster): + instance.query("DROP DATABASE IF EXISTS test_database") + conn = get_postgres_conn(True) + cursor = conn.cursor() + NUM_TABLES = 10 + + publication_tables = '' + for i in range(NUM_TABLES): + table_name = 'postgresql_replica_{}'.format(i) + create_postgres_table(cursor, 'postgresql_replica_{}'.format(i)); + instance.query("INSERT INTO postgres_database.postgresql_replica_{} SELECT number, number from numbers(50)".format(i)) + + if i < NUM_TABLES/2: + if publication_tables != '': + publication_tables += ', ' + publication_tables += table_name + + instance.query(''' + CREATE DATABASE test_database + ENGINE = PostgreSQLReplica('postgres1:5432', 'postgres_database', 'postgres', 'mysecretpassword') + SETTINGS postgresql_tables_list = '{}'; + '''.format(publication_tables)) + assert 'test_database' in instance.query('SHOW DATABASES') + + time.sleep(1) + + result = instance.query('''SELECT count() FROM system.tables WHERE database = 'test_database';''') + assert(int(result) == NUM_TABLES/2) + + database_tables = instance.query('SHOW TABLES FROM test_database') + for i in range(NUM_TABLES): + table_name = 'postgresql_replica_{}'.format(i) + if i < NUM_TABLES/2: + assert table_name in database_tables + else: + assert table_name not in database_tables + instance.query("INSERT INTO postgres_database.{} SELECT 50 + number, {} from numbers(100)".format(table_name, i)) + + for i in range(NUM_TABLES): + table_name = 'postgresql_replica_{}'.format(i) + if i < NUM_TABLES/2: + 
check_tables_are_synchronized(table_name); + cursor.execute('drop table {};'.format(table_name)) + + instance.query("DROP DATABASE test_database") + assert 'test_database' not in instance.query('SHOW DATABASES') + + if __name__ == '__main__': cluster.start() input("Cluster created, press any key to destroy...") diff --git a/tests/integration/test_storage_postgresql_replica/test.py b/tests/integration/test_storage_postgresql_replica/test.py index 646364a7ca3..57d3b5288fb 100644 --- a/tests/integration/test_storage_postgresql_replica/test.py +++ b/tests/integration/test_storage_postgresql_replica/test.py @@ -30,6 +30,7 @@ def create_postgres_db(cursor, name): cursor.execute("CREATE DATABASE {}".format(name)) def create_postgres_table(cursor, table_name): + cursor.execute("DROP TABLE IF EXISTS {}".format(table_name)) cursor.execute(postgres_table_template.format(table_name)) cursor.execute('ALTER TABLE {} REPLICA IDENTITY FULL;'.format(table_name)) From 95c93aca41f39875f3080ce29d2b0fdfc9d67f66 Mon Sep 17 00:00:00 2001 From: kssenii Date: Sat, 13 Feb 2021 20:46:52 +0000 Subject: [PATCH 032/931] Better table sync --- .../PostgreSQLBlockInputStream.cpp | 2 -- .../PostgreSQL/DatabasePostgreSQLReplica.cpp | 11 ---------- .../PostgreSQL/PostgreSQLReplicaConsumer.cpp | 6 ++++-- .../PostgreSQL/PostgreSQLReplicaConsumer.h | 21 ++++++++++++++++--- .../PostgreSQL/PostgreSQLReplicaMetadata.cpp | 6 ------ .../PostgreSQLReplicationHandler.cpp | 9 -------- .../PostgreSQL/StoragePostgreSQLReplica.cpp | 4 +--- 7 files changed, 23 insertions(+), 36 deletions(-) diff --git a/src/DataStreams/PostgreSQLBlockInputStream.cpp b/src/DataStreams/PostgreSQLBlockInputStream.cpp index 9e3c6b1bb89..7e9aa40e904 100644 --- a/src/DataStreams/PostgreSQLBlockInputStream.cpp +++ b/src/DataStreams/PostgreSQLBlockInputStream.cpp @@ -138,8 +138,6 @@ void PostgreSQLBlockInputStream::readSuffix() } } - - } #endif diff --git a/src/Databases/PostgreSQL/DatabasePostgreSQLReplica.cpp b/src/Databases/PostgreSQL/DatabasePostgreSQLReplica.cpp index 0052f8fe10d..7960270391f 100644 --- a/src/Databases/PostgreSQL/DatabasePostgreSQLReplica.cpp +++ b/src/Databases/PostgreSQL/DatabasePostgreSQLReplica.cpp @@ -27,19 +27,8 @@ namespace DB { -namespace ErrorCodes -{ - extern const int BAD_ARGUMENTS; - extern const int NOT_IMPLEMENTED; - extern const int UNKNOWN_TABLE; - extern const int TABLE_IS_DROPPED; - extern const int TABLE_ALREADY_EXISTS; -} - - static const auto METADATA_SUFFIX = ".postgresql_replica_metadata"; - template<> DatabasePostgreSQLReplica::DatabasePostgreSQLReplica( const Context & context, diff --git a/src/Storages/PostgreSQL/PostgreSQLReplicaConsumer.cpp b/src/Storages/PostgreSQL/PostgreSQLReplicaConsumer.cpp index 50896fa8394..dd915197093 100644 --- a/src/Storages/PostgreSQL/PostgreSQLReplicaConsumer.cpp +++ b/src/Storages/PostgreSQL/PostgreSQLReplicaConsumer.cpp @@ -45,7 +45,7 @@ PostgreSQLReplicaConsumer::PostgreSQLReplicaConsumer( { for (const auto & [table_name, storage] : storages) { - buffers.emplace(table_name, BufferData(storage->getInMemoryMetadata().getSampleBlock())); + buffers.emplace(table_name, BufferData(storage)); } wal_reader_task = context->getSchedulePool().createTask("PostgreSQLReplicaWALReader", [this]{ synchronizationStream(); }); @@ -408,15 +408,17 @@ void PostgreSQLReplicaConsumer::syncTables(std::shared_ptr auto insert = std::make_shared(); insert->table_id = storage->getStorageID(); + insert->columns = buffer.columnsAST; auto insert_context(*context); insert_context.makeQueryContext(); 
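/// The explicit column list pins this INSERT to the buffer's columns by name, so
/// positional matching cannot drift, and the `true` passed to InterpreterInsertQuery
/// below (allow_materialized) permits writing the nested table's materialized
/// _sign/_version columns; the structure assertion then fails fast instead of
/// silently misaligning data. A rough sketch of building such a list (the column
/// names are illustrative):
///
///     auto columns = std::make_shared<ASTExpressionList>();
///     for (const auto & name : {"key", "value", "_sign", "_version"})
///         columns->children.emplace_back(std::make_shared<ASTIdentifier>(name));
///     insert->columns = columns;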
insert_context.addQueryFactoriesInfo(Context::QueryLogFactories::Storage, "ReplacingMergeTree"); - InterpreterInsertQuery interpreter(insert, insert_context); + InterpreterInsertQuery interpreter(insert, insert_context, true); auto block_io = interpreter.execute(); OneBlockInputStream input(result_rows); + assertBlocksHaveEqualStructure(input.getHeader(), block_io.out->getHeader(), "postgresql replica table sync"); copyData(input, *block_io.out); auto actual_lsn = advanceLSN(tx); diff --git a/src/Storages/PostgreSQL/PostgreSQLReplicaConsumer.h b/src/Storages/PostgreSQL/PostgreSQLReplicaConsumer.h index 0973ba7f785..cb1c76829f1 100644 --- a/src/Storages/PostgreSQL/PostgreSQLReplicaConsumer.h +++ b/src/Storages/PostgreSQL/PostgreSQLReplicaConsumer.h @@ -9,6 +9,7 @@ #include #include #include +#include namespace DB @@ -48,16 +49,30 @@ private: { ExternalResultDescription description; MutableColumns columns; + std::shared_ptr columnsAST; /// Needed for insertPostgreSQLValue() method to parse array std::unordered_map array_info; - BufferData(const Block block) + BufferData(StoragePtr storage) { - description.init(block); + const auto storage_metadata = storage->getInMemoryMetadataPtr(); + description.init(storage_metadata->getSampleBlock()); columns = description.sample_block.cloneEmptyColumns(); - for (const auto idx : ext::range(0, description.sample_block.columns())) + const auto & storage_columns = storage_metadata->getColumns().getAllPhysical(); + auto insert_columns = std::make_shared(); + size_t idx = 0; + assert(description.sample_block.columns() == storage_columns.size()); + + for (const auto & column : storage_columns) + { if (description.types[idx].first == ExternalResultDescription::ValueType::vtArray) preparePostgreSQLArrayInfo(array_info, idx, description.sample_block.getByPosition(idx).type); + idx++; + + insert_columns->children.emplace_back(std::make_shared(column.name)); + } + + columnsAST = std::move(insert_columns); } }; diff --git a/src/Storages/PostgreSQL/PostgreSQLReplicaMetadata.cpp b/src/Storages/PostgreSQL/PostgreSQLReplicaMetadata.cpp index a5ae25c3f53..1eb4abb6a6d 100644 --- a/src/Storages/PostgreSQL/PostgreSQLReplicaMetadata.cpp +++ b/src/Storages/PostgreSQL/PostgreSQLReplicaMetadata.cpp @@ -11,12 +11,6 @@ namespace DB { -namespace ErrorCodes -{ - extern const int LOGICAL_ERROR; -} - - PostgreSQLReplicaMetadata::PostgreSQLReplicaMetadata(const std::string & metadata_file_path) : metadata_file(metadata_file_path) , tmp_metadata_file(metadata_file_path + ".tmp") diff --git a/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp b/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp index 76dede3ff4c..bc50e5ab270 100644 --- a/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp +++ b/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp @@ -13,17 +13,8 @@ namespace DB { -namespace ErrorCodes -{ - extern const int BAD_ARGUMENTS; - extern const int UNKNOWN_TABLE; - extern const int LOGICAL_ERROR; -} - static const auto reschedule_ms = 500; -/// TODO: add test for syncing only subset of databse tables - PostgreSQLReplicationHandler::PostgreSQLReplicationHandler( const std::string & database_name_, const std::string & conn_str, diff --git a/src/Storages/PostgreSQL/StoragePostgreSQLReplica.cpp b/src/Storages/PostgreSQL/StoragePostgreSQLReplica.cpp index 30855e1b2a8..9c514595ccc 100644 --- a/src/Storages/PostgreSQL/StoragePostgreSQLReplica.cpp +++ b/src/Storages/PostgreSQL/StoragePostgreSQLReplica.cpp @@ -29,6 +29,7 @@ namespace ErrorCodes { extern const int 
NUMBER_OF_ARGUMENTS_DOESNT_MATCH; extern const int LOGICAL_ERROR; + extern const int BAD_ARGUMENTS; } static const auto NESTED_STORAGE_SUFFIX = "_ReplacingMergeTree"; @@ -51,9 +52,6 @@ StoragePostgreSQLReplica::StoragePostgreSQLReplica( { setInMemoryMetadata(storage_metadata); - is_postgresql_replica_database = DatabaseCatalog::instance().getDatabase( - getStorageID().database_name)->getEngineName() == "PostgreSQLReplica"; - auto metadata_path = DatabaseCatalog::instance().getDatabase(getStorageID().database_name)->getMetadataPath() + "/.metadata_" + table_id_.database_name + "_" + table_id_.table_name; From 272431bcae2f1fee8eaedc231eb1b4f1054159a2 Mon Sep 17 00:00:00 2001 From: kssenii Date: Mon, 15 Feb 2021 22:49:13 +0000 Subject: [PATCH 033/931] Fix types check, better tests, try fix build --- .../PostgreSQL/DatabasePostgreSQLReplica.cpp | 15 ++++++-- .../PostgreSQL/PostgreSQLReplicaConsumer.cpp | 2 +- .../PostgreSQL/PostgreSQLReplicaMetadata.cpp | 2 +- .../PostgreSQL/StoragePostgreSQLReplica.cpp | 2 +- .../test.py | 34 ++++++++++++++----- 5 files changed, 41 insertions(+), 14 deletions(-) diff --git a/src/Databases/PostgreSQL/DatabasePostgreSQLReplica.cpp b/src/Databases/PostgreSQL/DatabasePostgreSQLReplica.cpp index 7960270391f..29c3ac7491a 100644 --- a/src/Databases/PostgreSQL/DatabasePostgreSQLReplica.cpp +++ b/src/Databases/PostgreSQL/DatabasePostgreSQLReplica.cpp @@ -131,7 +131,18 @@ void DatabasePostgreSQLReplica::loadStoredObjects( Context & context, bool has_force_restore_data_flag, bool force_attach) { Base::loadStoredObjects(context, has_force_restore_data_flag, force_attach); - startSynchronization(); + + try + { + startSynchronization(); + } + catch (...) + { + tryLogCurrentException(Base::log, "Cannot load nested database objects for PostgreSQL database engine."); + + if (!force_attach) + throw; + } } @@ -207,7 +218,7 @@ DatabaseTablesIteratorPtr DatabasePostgreSQLReplica::getTablesIterator( Tables nested_tables; for (const auto & [table_name, storage] : tables) { - auto nested_storage = storage->as()->tryGetNested(); + auto nested_storage = storage->template as()->tryGetNested(); if (nested_storage) nested_tables[table_name] = nested_storage; diff --git a/src/Storages/PostgreSQL/PostgreSQLReplicaConsumer.cpp b/src/Storages/PostgreSQL/PostgreSQLReplicaConsumer.cpp index dd915197093..c2d59ddf516 100644 --- a/src/Storages/PostgreSQL/PostgreSQLReplicaConsumer.cpp +++ b/src/Storages/PostgreSQL/PostgreSQLReplicaConsumer.cpp @@ -236,7 +236,7 @@ void PostgreSQLReplicaConsumer::readTupleData( case PostgreSQLQuery::UPDATE: { /// TODO: If table has primary key, skip old value and remove first insert with -1. - // Otherwise use replica identity full (with check) and use fisrt insert. + // Otherwise use replica identity full (with check) and use first insert. if (old_value) buffer.columns[num_columns]->insert(Int8(-1)); diff --git a/src/Storages/PostgreSQL/PostgreSQLReplicaMetadata.cpp b/src/Storages/PostgreSQL/PostgreSQLReplicaMetadata.cpp index 1eb4abb6a6d..8a45b415ad0 100644 --- a/src/Storages/PostgreSQL/PostgreSQLReplicaMetadata.cpp +++ b/src/Storages/PostgreSQL/PostgreSQLReplicaMetadata.cpp @@ -66,7 +66,7 @@ void PostgreSQLReplicaMetadata::writeMetadata(bool append_metadata) } -/// While data is recieved, version is updated. Before table sync, write last version to tmp file. +/// While data is received, version is updated. Before table sync, write last version to tmp file. /// Then sync data to table and rename tmp to non-tmp. 
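/// The tmp-then-rename sequence documented above is the classic crash-safe commit:
/// staging the new LSN in a side file and renaming it over the old one relies on
/// rename(2) being atomic, so a crash leaves either the previous or the new
/// committed position, never a torn metadata file. Roughly:
///
///     writeTmp(metadata_file + ".tmp", lsn);   /// 1. stage the new position
///     finalizeStreamFunc();                    /// 2. flush buffered rows into tables
///     rename(tmp_file, metadata_file);         /// 3. atomically publish it
///
/// If step 2 throws, the staged file is abandoned and replication resumes from the
/// last committed LSN. (writeTmp and the file names here are pseudocode, not the
/// actual helper names.)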
void PostgreSQLReplicaMetadata::commitMetadata(std::string & lsn, const std::function & finalizeStreamFunc) { diff --git a/src/Storages/PostgreSQL/StoragePostgreSQLReplica.cpp b/src/Storages/PostgreSQL/StoragePostgreSQLReplica.cpp index 9c514595ccc..4348330c832 100644 --- a/src/Storages/PostgreSQL/StoragePostgreSQLReplica.cpp +++ b/src/Storages/PostgreSQL/StoragePostgreSQLReplica.cpp @@ -121,7 +121,7 @@ ASTPtr StoragePostgreSQLReplica::getColumnDeclaration(const DataTypePtr & data_t if (which.isArray()) return makeASTFunction("Array", getColumnDeclaration(typeid_cast(data_type.get())->getNestedType())); - /// getName() for decimal returns 'Decimal(precison, scale)', will get an error with it + /// getName() for decimal returns 'Decimal(precision, scale)', will get an error with it if (which.isDecimal()) { auto make_decimal_expression = [&](std::string type_name) diff --git a/tests/integration/test_postgresql_replica_database_engine/test.py b/tests/integration/test_postgresql_replica_database_engine/test.py index 10cd5d2a263..50f7ded4354 100644 --- a/tests/integration/test_postgresql_replica_database_engine/test.py +++ b/tests/integration/test_postgresql_replica_database_engine/test.py @@ -37,10 +37,19 @@ def create_postgres_table(cursor, table_name): cursor.execute('ALTER TABLE {} REPLICA IDENTITY FULL;'.format(table_name)) +def assert_nested_table_is_created(table_name): + database_tables = instance.query('SHOW TABLES FROM test_database') + while table_name not in database_tables: + time.sleep(0.2) + database_tables = instance.query('SHOW TABLES FROM test_database') + assert(table_name in database_tables) + + def check_tables_are_synchronized(table_name, order_by='key'): expected = instance.query('select * from postgres_database.{} order by {};'.format(table_name, order_by)) result = instance.query('select * from test_database.{} order by {};'.format(table_name, order_by)) + assert_nested_table_is_created(table_name) while result != expected: time.sleep(0.5) result = instance.query('select * from test_database.{} order by {};'.format(table_name, order_by)) @@ -79,15 +88,17 @@ def test_load_and_sync_all_database_tables(started_cluster): NUM_TABLES = 5 for i in range(NUM_TABLES): - create_postgres_table(cursor, 'postgresql_replica_{}'.format(i)); - instance.query("INSERT INTO postgres_database.postgresql_replica_{} SELECT number, number from numbers(50)".format(i)) + table_name = 'postgresql_replica_{}'.format(i) + create_postgres_table(cursor, table_name); + instance.query("INSERT INTO postgres_database.{} SELECT number, number from numbers(50)".format(table_name)) instance.query("CREATE DATABASE test_database ENGINE = PostgreSQLReplica('postgres1:5432', 'postgres_database', 'postgres', 'mysecretpassword')") assert 'test_database' in instance.query('SHOW DATABASES') for i in range(NUM_TABLES): - check_tables_are_synchronized('postgresql_replica_{}'.format(i)); - cursor.execute('drop table postgresql_replica_{};'.format(i)) + table_name = 'postgresql_replica_{}'.format(i) + check_tables_are_synchronized(table_name); + cursor.execute('drop table {};'.format(table_name)) result = instance.query('''SELECT count() FROM system.tables WHERE database = 'test_database';''') assert(int(result) == NUM_TABLES) @@ -113,7 +124,8 @@ def test_replicating_dml(started_cluster): instance.query("INSERT INTO postgres_database.postgresql_replica_{} SELECT 50 + number, {} from numbers(1000)".format(i, i)) for i in range(NUM_TABLES): - check_tables_are_synchronized('postgresql_replica_{}'.format(i)); + table_name 
= 'postgresql_replica_{}'.format(i) + check_tables_are_synchronized(table_name); for i in range(NUM_TABLES): cursor.execute('UPDATE postgresql_replica_{} SET value = {} * {} WHERE key < 50;'.format(i, i, i)) @@ -225,7 +237,7 @@ def test_load_and_sync_subset_of_database_tables(started_cluster): create_postgres_table(cursor, 'postgresql_replica_{}'.format(i)); instance.query("INSERT INTO postgres_database.postgresql_replica_{} SELECT number, number from numbers(50)".format(i)) - if i < NUM_TABLES/2: + if i < int(NUM_TABLES/2): if publication_tables != '': publication_tables += ', ' publication_tables += table_name @@ -239,13 +251,17 @@ def test_load_and_sync_subset_of_database_tables(started_cluster): time.sleep(1) + for i in range(int(NUM_TABLES/2)): + table_name = 'postgresql_replica_{}'.format(i) + assert_nested_table_is_created(table_name) + result = instance.query('''SELECT count() FROM system.tables WHERE database = 'test_database';''') - assert(int(result) == NUM_TABLES/2) + assert(int(result) == int(NUM_TABLES/2)) database_tables = instance.query('SHOW TABLES FROM test_database') for i in range(NUM_TABLES): table_name = 'postgresql_replica_{}'.format(i) - if i < NUM_TABLES/2: + if i < int(NUM_TABLES/2): assert table_name in database_tables else: assert table_name not in database_tables @@ -253,7 +269,7 @@ def test_load_and_sync_subset_of_database_tables(started_cluster): for i in range(NUM_TABLES): table_name = 'postgresql_replica_{}'.format(i) - if i < NUM_TABLES/2: + if i < int(NUM_TABLES/2): check_tables_are_synchronized(table_name); cursor.execute('drop table {};'.format(table_name)) From 44c39d4609186bea6ffb469187270ec769b3e0cc Mon Sep 17 00:00:00 2001 From: kssenii Date: Tue, 16 Feb 2021 19:00:28 +0000 Subject: [PATCH 034/931] better --- .../PostgreSQL/PostgreSQLReplicationHandler.cpp | 14 +------------- 1 file changed, 1 insertion(+), 13 deletions(-) diff --git a/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp b/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp index bc50e5ab270..2b1b0e03649 100644 --- a/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp +++ b/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp @@ -309,19 +309,7 @@ void PostgreSQLReplicationHandler::shutdownFinal() std::unordered_set PostgreSQLReplicationHandler::fetchRequiredTables(PostgreSQLConnection::ConnectionPtr connection_) { - auto publication_exist = [&]() - { - auto tx = std::make_shared(*connection_); - bool exist = isPublicationExist(tx); - tx->commit(); - return exist; - }; - - if (publication_exist()) - { - return fetchTablesFromPublication(connection_); - } - else if (tables_list.empty()) + if (tables_list.empty()) { return fetchPostgreSQLTablesList(connection_); } From 46dd137a5a4f7efe9349ae275f810c44ff618b0b Mon Sep 17 00:00:00 2001 From: kssenii Date: Wed, 17 Feb 2021 20:42:18 +0000 Subject: [PATCH 035/931] Fix tests --- src/Storages/PostgreSQL/PostgreSQLReplicaConsumer.cpp | 7 +++++-- src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp | 2 +- src/Storages/PostgreSQL/StoragePostgreSQLReplica.cpp | 1 - .../test_postgresql_replica_database_engine/test.py | 3 ++- 4 files changed, 8 insertions(+), 5 deletions(-) diff --git a/src/Storages/PostgreSQL/PostgreSQLReplicaConsumer.cpp b/src/Storages/PostgreSQL/PostgreSQLReplicaConsumer.cpp index c2d59ddf516..35590a92709 100644 --- a/src/Storages/PostgreSQL/PostgreSQLReplicaConsumer.cpp +++ b/src/Storages/PostgreSQL/PostgreSQLReplicaConsumer.cpp @@ -211,8 +211,11 @@ void PostgreSQLReplicaConsumer::readTupleData( value += 
readInt8(message, pos, size); } - /// TODO: Check for null values and use insertDefaultValue - insertValue(buffer, value, column_idx); + /// For arrays default for null is inserted when converted to clickhouse array + if (value == "NULL") + insertDefaultValue(buffer, column_idx); + else + insertValue(buffer, value, column_idx); LOG_DEBUG(log, "Identifier: {}, column length: {}, value: {}", identifier, col_len, value); } diff --git a/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp b/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp index 2b1b0e03649..ba86978cdf0 100644 --- a/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp +++ b/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp @@ -62,7 +62,7 @@ void PostgreSQLReplicationHandler::waitConnectionAndStart() } catch (const pqxx::broken_connection & pqxx_error) { - LOG_ERROR(log, "Unable to set up connection. Reconnection attempt continue. Error message: {}", + LOG_ERROR(log, "Unable to set up connection. Reconnection attempt will continue. Error message: {}", pqxx_error.what()); startup_task->scheduleAfter(reschedule_ms); diff --git a/src/Storages/PostgreSQL/StoragePostgreSQLReplica.cpp b/src/Storages/PostgreSQL/StoragePostgreSQLReplica.cpp index 4348330c832..fe5b0223ef4 100644 --- a/src/Storages/PostgreSQL/StoragePostgreSQLReplica.cpp +++ b/src/Storages/PostgreSQL/StoragePostgreSQLReplica.cpp @@ -202,7 +202,6 @@ ASTPtr StoragePostgreSQLReplica::getCreateNestedTableQuery(const std::function

name = "tuple"; order_by_expression->arguments = std::make_shared(); - //TODO: check for nullable for (const auto & column : primary_key_columns) order_by_expression->arguments->children.emplace_back(std::make_shared(column.name)); } diff --git a/tests/integration/test_postgresql_replica_database_engine/test.py b/tests/integration/test_postgresql_replica_database_engine/test.py index 50f7ded4354..8fc2f5c9c69 100644 --- a/tests/integration/test_postgresql_replica_database_engine/test.py +++ b/tests/integration/test_postgresql_replica_database_engine/test.py @@ -46,10 +46,11 @@ def assert_nested_table_is_created(table_name): def check_tables_are_synchronized(table_name, order_by='key'): + assert_nested_table_is_created(table_name) + expected = instance.query('select * from postgres_database.{} order by {};'.format(table_name, order_by)) result = instance.query('select * from test_database.{} order by {};'.format(table_name, order_by)) - assert_nested_table_is_created(table_name) while result != expected: time.sleep(0.5) result = instance.query('select * from test_database.{} order by {};'.format(table_name, order_by)) From 1480e951798f14a56786d78b839074e35552a6b8 Mon Sep 17 00:00:00 2001 From: kssenii Date: Thu, 18 Feb 2021 06:06:37 +0000 Subject: [PATCH 036/931] Fix build, fix arcadia --- .../PostgreSQL/fetchPostgreSQLTableStructure.cpp | 2 -- src/Storages/PostgreSQL/PostgreSQLConnection.cpp | 5 +---- src/Storages/PostgreSQL/PostgreSQLReplicaConsumer.cpp | 4 +++- src/Storages/PostgreSQL/PostgreSQLReplicaConsumer.h | 10 ++++++++-- src/Storages/PostgreSQL/PostgreSQLReplicaMetadata.cpp | 3 +++ src/Storages/PostgreSQL/PostgreSQLReplicaMetadata.h | 2 +- src/Storages/PostgreSQL/PostgreSQLReplicaSettings.cpp | 5 +++++ src/Storages/PostgreSQL/PostgreSQLReplicaSettings.h | 8 ++++++++ .../PostgreSQL/PostgreSQLReplicationHandler.cpp | 3 +++ src/Storages/PostgreSQL/PostgreSQLReplicationHandler.h | 8 +++++++- src/Storages/PostgreSQL/StoragePostgreSQLReplica.cpp | 2 ++ src/Storages/PostgreSQL/StoragePostgreSQLReplica.h | 4 ++++ src/Storages/PostgreSQL/insertPostgreSQLValue.h | 2 +- 13 files changed, 46 insertions(+), 12 deletions(-) diff --git a/src/Databases/PostgreSQL/fetchPostgreSQLTableStructure.cpp b/src/Databases/PostgreSQL/fetchPostgreSQLTableStructure.cpp index a3624236f76..8b8b13d031a 100644 --- a/src/Databases/PostgreSQL/fetchPostgreSQLTableStructure.cpp +++ b/src/Databases/PostgreSQL/fetchPostgreSQLTableStructure.cpp @@ -14,8 +14,6 @@ #include #include -#include - namespace DB { diff --git a/src/Storages/PostgreSQL/PostgreSQLConnection.cpp b/src/Storages/PostgreSQL/PostgreSQLConnection.cpp index 668550ec721..d65f98591d5 100644 --- a/src/Storages/PostgreSQL/PostgreSQLConnection.cpp +++ b/src/Storages/PostgreSQL/PostgreSQLConnection.cpp @@ -1,9 +1,6 @@ -#if !defined(ARCADIA_BUILD) -#include "config_core.h" -#endif +#include #if USE_LIBPQXX -#include #include #include diff --git a/src/Storages/PostgreSQL/PostgreSQLReplicaConsumer.cpp b/src/Storages/PostgreSQL/PostgreSQLReplicaConsumer.cpp index 35590a92709..64e201106d8 100644 --- a/src/Storages/PostgreSQL/PostgreSQLReplicaConsumer.cpp +++ b/src/Storages/PostgreSQL/PostgreSQLReplicaConsumer.cpp @@ -1,4 +1,6 @@ #include "PostgreSQLReplicaConsumer.h" + +#if USE_LIBPQXX #include "StoragePostgreSQLReplica.h" #include @@ -526,4 +528,4 @@ bool PostgreSQLReplicaConsumer::readFromReplicationSlot() } - +#endif diff --git a/src/Storages/PostgreSQL/PostgreSQLReplicaConsumer.h b/src/Storages/PostgreSQL/PostgreSQLReplicaConsumer.h index 
cb1c76829f1..11bba9c8c14 100644 --- a/src/Storages/PostgreSQL/PostgreSQLReplicaConsumer.h +++ b/src/Storages/PostgreSQL/PostgreSQLReplicaConsumer.h @@ -1,15 +1,20 @@ #pragma once +#if !defined(ARCADIA_BUILD) +#include "config_core.h" +#endif + +#if USE_LIBPQXX #include "PostgreSQLConnection.h" #include "PostgreSQLReplicaMetadata.h" -#include "pqxx/pqxx" +#include "insertPostgreSQLValue.h" #include #include #include -#include #include #include +#include "pqxx/pqxx" // Y_IGNORE namespace DB @@ -118,3 +123,4 @@ private: } +#endif diff --git a/src/Storages/PostgreSQL/PostgreSQLReplicaMetadata.cpp b/src/Storages/PostgreSQL/PostgreSQLReplicaMetadata.cpp index 8a45b415ad0..ad9ef4b22d3 100644 --- a/src/Storages/PostgreSQL/PostgreSQLReplicaMetadata.cpp +++ b/src/Storages/PostgreSQL/PostgreSQLReplicaMetadata.cpp @@ -1,5 +1,6 @@ #include "PostgreSQLReplicaMetadata.h" +#if USE_LIBPQXX #include #include #include @@ -95,3 +96,5 @@ void PostgreSQLReplicaMetadata::commitMetadata(std::string & lsn, const std::fun } } + +#endif diff --git a/src/Storages/PostgreSQL/PostgreSQLReplicaMetadata.h b/src/Storages/PostgreSQL/PostgreSQLReplicaMetadata.h index ca7a258e24c..f7e566cce90 100644 --- a/src/Storages/PostgreSQL/PostgreSQLReplicaMetadata.h +++ b/src/Storages/PostgreSQL/PostgreSQLReplicaMetadata.h @@ -10,7 +10,7 @@ class PostgreSQLReplicaMetadata public: PostgreSQLReplicaMetadata(const std::string & metadata_file_path); - void commitMetadata(std::string & lsn, const std::function & syncTableFunc); + void commitMetadata(std::string & lsn, const std::function & finalizeStreamFunc); void readMetadata(); diff --git a/src/Storages/PostgreSQL/PostgreSQLReplicaSettings.cpp b/src/Storages/PostgreSQL/PostgreSQLReplicaSettings.cpp index aa1fec92ef4..dc714cb5488 100644 --- a/src/Storages/PostgreSQL/PostgreSQLReplicaSettings.cpp +++ b/src/Storages/PostgreSQL/PostgreSQLReplicaSettings.cpp @@ -1,4 +1,6 @@ #include "PostgreSQLReplicaSettings.h" + +#if USE_LIBPQXX #include #include #include @@ -37,4 +39,7 @@ void PostgreSQLReplicaSettings::loadFromQuery(ASTStorage & storage_def) storage_def.set(storage_def.settings, settings_ast); } } + } + +#endif diff --git a/src/Storages/PostgreSQL/PostgreSQLReplicaSettings.h b/src/Storages/PostgreSQL/PostgreSQLReplicaSettings.h index 1dbd6b0a65b..5ea2a5cd1f6 100644 --- a/src/Storages/PostgreSQL/PostgreSQLReplicaSettings.h +++ b/src/Storages/PostgreSQL/PostgreSQLReplicaSettings.h @@ -1,7 +1,13 @@ #pragma once +#if !defined(ARCADIA_BUILD) +#include "config_core.h" +#endif + +#if USE_LIBPQXX #include + namespace DB { class ASTStorage; @@ -19,3 +25,5 @@ struct PostgreSQLReplicaSettings : public BaseSettings #include #include @@ -342,3 +343,5 @@ PostgreSQLTableStructure PostgreSQLReplicationHandler::fetchTableStructure( } } + +#endif diff --git a/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.h b/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.h index a51c497c21d..5d973ca34fe 100644 --- a/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.h +++ b/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.h @@ -1,5 +1,10 @@ #pragma once +#if !defined(ARCADIA_BUILD) +#include "config_core.h" +#endif + +#if USE_LIBPQXX #include "PostgreSQLConnection.h" #include "PostgreSQLReplicaConsumer.h" #include "PostgreSQLReplicaMetadata.h" @@ -75,6 +80,7 @@ private: std::unordered_map nested_storages; }; - } +#endif + diff --git a/src/Storages/PostgreSQL/StoragePostgreSQLReplica.cpp b/src/Storages/PostgreSQL/StoragePostgreSQLReplica.cpp index fe5b0223ef4..86c1a717905 100644 --- 
a/src/Storages/PostgreSQL/StoragePostgreSQLReplica.cpp +++ b/src/Storages/PostgreSQL/StoragePostgreSQLReplica.cpp @@ -1,5 +1,6 @@ #include "StoragePostgreSQLReplica.h" +#if USE_LIBPQXX #include #include #include @@ -495,3 +496,4 @@ void registerStoragePostgreSQLReplica(StorageFactory & factory) } +#endif diff --git a/src/Storages/PostgreSQL/StoragePostgreSQLReplica.h b/src/Storages/PostgreSQL/StoragePostgreSQLReplica.h index 4059cb744e6..56fc059aa39 100644 --- a/src/Storages/PostgreSQL/StoragePostgreSQLReplica.h +++ b/src/Storages/PostgreSQL/StoragePostgreSQLReplica.h @@ -1,7 +1,10 @@ #pragma once +#if !defined(ARCADIA_BUILD) #include "config_core.h" +#endif +#if USE_LIBPQXX #include "PostgreSQLReplicationHandler.h" #include "PostgreSQLReplicaSettings.h" @@ -100,3 +103,4 @@ private: } +#endif diff --git a/src/Storages/PostgreSQL/insertPostgreSQLValue.h b/src/Storages/PostgreSQL/insertPostgreSQLValue.h index d9f24247935..dd093cd4c5b 100644 --- a/src/Storages/PostgreSQL/insertPostgreSQLValue.h +++ b/src/Storages/PostgreSQL/insertPostgreSQLValue.h @@ -4,7 +4,7 @@ #include #include #include -#include +#include // Y_IGNORE namespace DB From 9cc63780de2dca946ff012e6d507ed2860241df9 Mon Sep 17 00:00:00 2001 From: kssenii Date: Thu, 18 Feb 2021 18:14:05 +0000 Subject: [PATCH 037/931] More correct update query --- .../PostgreSQL/PostgreSQLReplicaConsumer.cpp | 96 ++++++++++++------- .../test.py | 5 +- .../test_storage_postgresql_replica/test.py | 11 ++- 3 files changed, 73 insertions(+), 39 deletions(-) diff --git a/src/Storages/PostgreSQL/PostgreSQLReplicaConsumer.cpp b/src/Storages/PostgreSQL/PostgreSQLReplicaConsumer.cpp index 64e201106d8..2dd21268c1e 100644 --- a/src/Storages/PostgreSQL/PostgreSQLReplicaConsumer.cpp +++ b/src/Storages/PostgreSQL/PostgreSQLReplicaConsumer.cpp @@ -201,26 +201,34 @@ void PostgreSQLReplicaConsumer::readTupleData( Int16 num_columns = readInt16(message, pos, size); LOG_DEBUG(log, "number of columns {}", num_columns); - for (int column_idx = 0; column_idx < num_columns; ++column_idx) + auto proccess_column_value = [&](Int8 identifier, Int16 column_idx) { - /// 'n' means nullable, 'u' means TOASTed value, 't' means text formatted data - char identifier = readInt8(message, pos, size); - Int32 col_len = readInt32(message, pos, size); - String value; - - for (int i = 0; i < col_len; ++i) + LOG_DEBUG(log, "Identifier: {}", identifier); + switch (identifier) { - value += readInt8(message, pos, size); + case 'n': /// NULL + { + insertDefaultValue(buffer, column_idx); + break; + } + case 't': /// Text formatted value + { + Int32 col_len = readInt32(message, pos, size); + String value; + + for (int i = 0; i < col_len; ++i) + value += readInt8(message, pos, size); + + insertValue(buffer, value, column_idx); + break; + } + case 'u': /// Toasted (unchanged) value TODO:! + break; } + }; - /// For arrays default for null is inserted when converted to clickhouse array - if (value == "NULL") - insertDefaultValue(buffer, column_idx); - else - insertValue(buffer, value, column_idx); - - LOG_DEBUG(log, "Identifier: {}, column length: {}, value: {}", identifier, col_len, value); - } + for (int column_idx = 0; column_idx < num_columns; ++column_idx) + proccess_column_value(readInt8(message, pos, size), column_idx); switch (type) { @@ -240,10 +248,7 @@ void PostgreSQLReplicaConsumer::readTupleData( } case PostgreSQLQuery::UPDATE: { - /// TODO: If table has primary key, skip old value and remove first insert with -1. 
- // Otherwise use replica identity full (with check) and use first insert. - - if (old_value) + if (old_value) /// Only if replica identity is set to full buffer.columns[num_columns]->insert(Int8(-1)); else buffer.columns[num_columns]->insert(Int8(1)); @@ -302,7 +307,6 @@ void PostgreSQLReplicaConsumer::processReplicationMessage(const char * replicati Int8 replica_identity = readInt8(replication_message, pos, size); Int16 num_columns = readInt16(replication_message, pos, size); - /// TODO: If replica identity is not full, check if there will be a full columns list. LOG_DEBUG(log, "INFO: relation id: {}, namespace: {}, relation name: {}, replica identity: {}, columns number: {}", relation_id, relation_namespace, relation_name, replica_identity, num_columns); @@ -351,28 +355,52 @@ void PostgreSQLReplicaConsumer::processReplicationMessage(const char * replicati throw Exception(ErrorCodes::UNKNOWN_TABLE, "Buffer for table {} does not exist", table_to_insert); } + readTupleData(buffer->second, replication_message, pos, size, PostgreSQLQuery::INSERT); break; } case 'U': // Update { Int32 relation_id = readInt32(replication_message, pos, size); - Int8 primary_key_or_old_tuple_data = readInt8(replication_message, pos, size); - - LOG_DEBUG(log, "relationID {}, key {} current insert table {}", relation_id, primary_key_or_old_tuple_data, table_to_insert); - - /// TODO: Two cases: based on primary keys and based on replica identity full. - /// Add first one and add a check for second one. + LOG_DEBUG(log, "relationID {}, current insert table {}", relation_id, table_to_insert); auto buffer = buffers.find(table_to_insert); - readTupleData(buffer->second, replication_message, pos, size, PostgreSQLQuery::UPDATE, true); - - if (pos + 1 < size) + auto proccess_identifier = [&](Int8 identifier) -> bool { - Int8 new_tuple_data = readInt8(replication_message, pos, size); - readTupleData(buffer->second, replication_message, pos, size, PostgreSQLQuery::UPDATE); - LOG_DEBUG(log, "new tuple data: {}", new_tuple_data); - } + LOG_DEBUG(log, "Identifier: {}", identifier); + bool read_next = true; + switch (identifier) + { + case 'K': /// TODO:! + { + /// Only if changed column(s) are part of replica identity index + break; + } + case 'O': + { + /// Old row. Only of replica identity is set to full. + /// (For the case when a table does not have any primary key.) + /// TODO: Need to find suitable order_by for nested table (Now it throws if no primary key) + readTupleData(buffer->second, replication_message, pos, size, PostgreSQLQuery::UPDATE, true); + break; + } + case 'N': + { + readTupleData(buffer->second, replication_message, pos, size, PostgreSQLQuery::UPDATE); + read_next = false; + break; + } + } + + return read_next; + }; + + /// Read either 'K' or 'O'. Never both of them. Also possible not to get both of them. + bool read_next = proccess_identifier(readInt8(replication_message, pos, size)); + + /// 'N'. Always present, but could come in place of 'K' and 'O'. 
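/// For reference, the pgoutput Update message is laid out as: 'U', the relation
/// OID, then at most one of 'K' (old key columns, sent when the replica identity
/// index changed) or 'O' (the full old row, sent only under REPLICA IDENTITY FULL),
/// and finally 'N' with the new row. Each tuple is an Int16 column count followed
/// by one marker per column: 'n' = null, 'u' = unchanged TOASTed value,
/// 't' = text-formatted value, which is exactly what readTupleData() consumes.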
+ if (read_next) + proccess_identifier(readInt8(replication_message, pos, size)); break; } diff --git a/tests/integration/test_postgresql_replica_database_engine/test.py b/tests/integration/test_postgresql_replica_database_engine/test.py index 8fc2f5c9c69..1faf4924685 100644 --- a/tests/integration/test_postgresql_replica_database_engine/test.py +++ b/tests/integration/test_postgresql_replica_database_engine/test.py @@ -31,10 +31,11 @@ def create_postgres_db(cursor, name): cursor.execute("CREATE DATABASE {}".format(name)) -def create_postgres_table(cursor, table_name): +def create_postgres_table(cursor, table_name, replica_identity_full=False): cursor.execute("DROP TABLE IF EXISTS {}".format(table_name)) cursor.execute(postgres_table_template.format(table_name)) - cursor.execute('ALTER TABLE {} REPLICA IDENTITY FULL;'.format(table_name)) + if replica_identity_full: + cursor.execute('ALTER TABLE {} REPLICA IDENTITY FULL;'.format(table_name)) def assert_nested_table_is_created(table_name): diff --git a/tests/integration/test_storage_postgresql_replica/test.py b/tests/integration/test_storage_postgresql_replica/test.py index 57d3b5288fb..bb7ff709b6d 100644 --- a/tests/integration/test_storage_postgresql_replica/test.py +++ b/tests/integration/test_storage_postgresql_replica/test.py @@ -13,9 +13,10 @@ instance = cluster.add_instance('instance', main_configs=['configs/log_conf.xml' postgres_table_template = """ CREATE TABLE IF NOT EXISTS {} ( - key Integer NOT NULL, value Integer) + key Integer NOT NULL, value Integer, PRIMARY KEY(key)) """ + def get_postgres_conn(database=False): if database == True: conn_string = "host='localhost' dbname='postgres_database' user='postgres' password='mysecretpassword'" @@ -26,13 +27,17 @@ def get_postgres_conn(database=False): conn.autocommit = True return conn + def create_postgres_db(cursor, name): cursor.execute("CREATE DATABASE {}".format(name)) -def create_postgres_table(cursor, table_name): + +def create_postgres_table(cursor, table_name, replica_identity_full=False): cursor.execute("DROP TABLE IF EXISTS {}".format(table_name)) cursor.execute(postgres_table_template.format(table_name)) - cursor.execute('ALTER TABLE {} REPLICA IDENTITY FULL;'.format(table_name)) + if replica_identity_full: + cursor.execute('ALTER TABLE {} REPLICA IDENTITY FULL;'.format(table_name)) + def postgresql_replica_check_result(result, check=False, ref_file='test_postgresql_replica.reference'): fpath = p.join(p.dirname(__file__), ref_file) From 3107f82a3b8da5c4e3f7f4b30f890d083eb3b689 Mon Sep 17 00:00:00 2001 From: kssenii Date: Thu, 18 Feb 2021 18:20:52 +0000 Subject: [PATCH 038/931] Fix clang tidy --- .../PostgreSQL/DatabasePostgreSQLReplica.cpp | 2 +- .../PostgreSQL/PostgreSQLReplicaConsumer.h | 14 +++++++------- .../PostgreSQL/StoragePostgreSQLReplica.cpp | 2 +- src/Storages/PostgreSQL/StoragePostgreSQLReplica.h | 4 ++-- 4 files changed, 11 insertions(+), 11 deletions(-) diff --git a/src/Databases/PostgreSQL/DatabasePostgreSQLReplica.cpp b/src/Databases/PostgreSQL/DatabasePostgreSQLReplica.cpp index 29c3ac7491a..58fedc01e36 100644 --- a/src/Databases/PostgreSQL/DatabasePostgreSQLReplica.cpp +++ b/src/Databases/PostgreSQL/DatabasePostgreSQLReplica.cpp @@ -96,7 +96,7 @@ void DatabasePostgreSQLReplica::startSynchronization() if (storage) { - replication_handler->addStorage(table_name, storage.get()->template as()); + replication_handler->addStorage(table_name, storage->template as()); tables[table_name] = storage; } } diff --git 
a/src/Storages/PostgreSQL/PostgreSQLReplicaConsumer.h b/src/Storages/PostgreSQL/PostgreSQLReplicaConsumer.h index 11bba9c8c14..23ab7e22f3e 100644 --- a/src/Storages/PostgreSQL/PostgreSQLReplicaConsumer.h +++ b/src/Storages/PostgreSQL/PostgreSQLReplicaConsumer.h @@ -83,8 +83,8 @@ private: using Buffers = std::unordered_map; - void insertDefaultValue(BufferData & buffer, size_t column_idx); - void insertValue(BufferData & buffer, const std::string & value, size_t column_idx); + static void insertDefaultValue(BufferData & buffer, size_t column_idx); + static void insertValue(BufferData & buffer, const std::string & value, size_t column_idx); enum class PostgreSQLQuery { @@ -95,11 +95,11 @@ private: void readTupleData(BufferData & buffer, const char * message, size_t & pos, size_t size, PostgreSQLQuery type, bool old_value = false); - void readString(const char * message, size_t & pos, size_t size, String & result); - Int64 readInt64(const char * message, size_t & pos, size_t size); - Int32 readInt32(const char * message, size_t & pos, size_t size); - Int16 readInt16(const char * message, size_t & pos, size_t size); - Int8 readInt8(const char * message, size_t & pos, size_t size); + static void readString(const char * message, size_t & pos, size_t size, String & result); + static Int64 readInt64(const char * message, size_t & pos, size_t size); + static Int32 readInt32(const char * message, size_t & pos, size_t size); + static Int16 readInt16(const char * message, size_t & pos, size_t size); + static Int8 readInt8(const char * message, size_t & pos, size_t size); Poco::Logger * log; std::shared_ptr context; diff --git a/src/Storages/PostgreSQL/StoragePostgreSQLReplica.cpp b/src/Storages/PostgreSQL/StoragePostgreSQLReplica.cpp index 86c1a717905..b4b25c3eae9 100644 --- a/src/Storages/PostgreSQL/StoragePostgreSQLReplica.cpp +++ b/src/Storages/PostgreSQL/StoragePostgreSQLReplica.cpp @@ -95,7 +95,7 @@ std::string StoragePostgreSQLReplica::getNestedTableName() const std::shared_ptr StoragePostgreSQLReplica::getMaterializedColumnsDeclaration( - const String name, const String type, UInt64 default_value) const + const String name, const String type, UInt64 default_value) { auto column_declaration = std::make_shared(); diff --git a/src/Storages/PostgreSQL/StoragePostgreSQLReplica.h b/src/Storages/PostgreSQL/StoragePostgreSQLReplica.h index 56fc059aa39..a0e27ef046d 100644 --- a/src/Storages/PostgreSQL/StoragePostgreSQLReplica.h +++ b/src/Storages/PostgreSQL/StoragePostgreSQLReplica.h @@ -78,8 +78,8 @@ protected: std::unique_ptr replication_settings_); private: - std::shared_ptr getMaterializedColumnsDeclaration( - const String name, const String type, UInt64 default_value) const; + static std::shared_ptr getMaterializedColumnsDeclaration( + const String name, const String type, UInt64 default_value); ASTPtr getColumnDeclaration(const DataTypePtr & data_type) const; From ff8b54ffd42750e4646dcccb35e7a43cb7f6f139 Mon Sep 17 00:00:00 2001 From: kssenii Date: Thu, 18 Feb 2021 23:33:01 +0000 Subject: [PATCH 039/931] Make sure postgres table schema changes do not break replication --- .../PostgreSQL/PostgreSQLReplicaConsumer.cpp | 73 +++++++++++++++++-- .../PostgreSQL/PostgreSQLReplicaConsumer.h | 16 ++++ .../test.py | 48 +++++++++++- 3 files changed, 127 insertions(+), 10 deletions(-) diff --git a/src/Storages/PostgreSQL/PostgreSQLReplicaConsumer.cpp b/src/Storages/PostgreSQL/PostgreSQLReplicaConsumer.cpp index 2dd21268c1e..f0970ff994e 100644 --- a/src/Storages/PostgreSQL/PostgreSQLReplicaConsumer.cpp +++ 
b/src/Storages/PostgreSQL/PostgreSQLReplicaConsumer.cpp @@ -203,7 +203,8 @@ void PostgreSQLReplicaConsumer::readTupleData( auto proccess_column_value = [&](Int8 identifier, Int16 column_idx) { - LOG_DEBUG(log, "Identifier: {}", identifier); + char id = identifier; + LOG_DEBUG(log, "Identifier: {}", id); switch (identifier) { case 'n': /// NULL @@ -216,7 +217,7 @@ void PostgreSQLReplicaConsumer::readTupleData( Int32 col_len = readInt32(message, pos, size); String value; - for (int i = 0; i < col_len; ++i) + for (Int16 i = 0; i < col_len; ++i) value += readInt8(message, pos, size); insertValue(buffer, value, column_idx); @@ -299,11 +300,10 @@ void PostgreSQLReplicaConsumer::processReplicationMessage(const char * replicati readString(replication_message, pos, size, relation_namespace); readString(replication_message, pos, size, relation_name); - /// TODO: Save relation id (unique to tables) and check if they might be shuffled in current block. - /// If shuffled, store tables based on those id's and insert accordingly. table_to_insert = relation_name; tables_to_sync.insert(table_to_insert); + /// TODO: Add replica identity settings to metadata (needed for update) Int8 replica_identity = readInt8(replication_message, pos, size); Int16 num_columns = readInt16(replication_message, pos, size); @@ -311,10 +311,30 @@ void PostgreSQLReplicaConsumer::processReplicationMessage(const char * replicati "INFO: relation id: {}, namespace: {}, relation name: {}, replica identity: {}, columns number: {}", relation_id, relation_namespace, relation_name, replica_identity, num_columns); - Int8 key; - Int32 data_type_id, type_modifier; + /// Cache table schema data to be able to detect schema changes, because ddl is not + /// replicated with postgresql logical replication protocol, but some table schema info + /// is received if it is the first time we received dml message for given relation in current session or + /// if relation definition has changed since the last relation definition message. + Int8 key; /// Flags. 0 or 1 (if part of the key). Not needed for now. + Int32 data_type_id; + Int32 type_modifier; /// For example, n in varchar(n) + + bool new_relation_definition = false; + if (relation_id_to_name.find(relation_id) == relation_id_to_name.end()) + { + relation_id_to_name.emplace(relation_id, relation_name); + schema_data.emplace(relation_id, SchemaData(num_columns)); + new_relation_definition = true; + } + + auto & current_schema_data = schema_data.find(relation_id)->second; + + if (current_schema_data.number_of_columns != num_columns) + { + markTableAsSkippedUntilReload(relation_id, relation_name); + break; + } - /// TODO: Check here if table structure has changed and, if possible, change table structure and redump table. 
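/// Logical replication does not forward DDL. Instead, pgoutput resends a Relation
/// ('R') message before the first row for a table in the current session and again
/// whenever the table's definition changes, so caching the column count plus each
/// column's (type OID, type modifier) pair is enough to notice schema drift here
/// and quarantine the table rather than ingest rows against a stale structure.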
for (uint16_t i = 0; i < num_columns; ++i) { String column_name; @@ -327,6 +347,20 @@ void PostgreSQLReplicaConsumer::processReplicationMessage(const char * replicati LOG_DEBUG(log, "Key: {}, column name: {}, data type id: {}, type modifier: {}", key, column_name, data_type_id, type_modifier); + + if (new_relation_definition) + { + current_schema_data.column_identifiers.emplace_back(std::make_tuple(data_type_id, type_modifier)); + } + else + { + if (current_schema_data.column_identifiers[i].first != data_type_id + || current_schema_data.column_identifiers[i].second != type_modifier) + { + markTableAsSkippedUntilReload(relation_id, relation_name); + break; + } + } } if (storages.find(table_to_insert) == storages.end()) @@ -345,6 +379,10 @@ void PostgreSQLReplicaConsumer::processReplicationMessage(const char * replicati case 'I': // Insert { Int32 relation_id = readInt32(replication_message, pos, size); + + if (skip_until_reload.find(relation_id) != skip_until_reload.end()) + break; + Int8 new_tuple = readInt8(replication_message, pos, size); LOG_DEBUG(log, "relationID: {}, newTuple: {}, current insert table: {}", relation_id, new_tuple, table_to_insert); @@ -362,12 +400,17 @@ void PostgreSQLReplicaConsumer::processReplicationMessage(const char * replicati case 'U': // Update { Int32 relation_id = readInt32(replication_message, pos, size); + + if (skip_until_reload.find(relation_id) != skip_until_reload.end()) + break; + LOG_DEBUG(log, "relationID {}, current insert table {}", relation_id, table_to_insert); auto buffer = buffers.find(table_to_insert); auto proccess_identifier = [&](Int8 identifier) -> bool { - LOG_DEBUG(log, "Identifier: {}", identifier); + char id = identifier; + LOG_DEBUG(log, "Identifier: {}", id); bool read_next = true; switch (identifier) { @@ -407,6 +450,10 @@ void PostgreSQLReplicaConsumer::processReplicationMessage(const char * replicati case 'D': // Delete { Int32 relation_id = readInt32(replication_message, pos, size); + + if (skip_until_reload.find(relation_id) != skip_until_reload.end()) + break; + Int8 full_replica_identity = readInt8(replication_message, pos, size); LOG_DEBUG(log, "relationID: {}, full replica identity: {}", relation_id, full_replica_identity); @@ -424,6 +471,16 @@ void PostgreSQLReplicaConsumer::processReplicationMessage(const char * replicati } +/// TODO: If some table has a changed structure, we can stop current stream (after remembering last valid WAL position) +/// and advance lsn up to this position. Then make changes to nested table and continue the same way. 
+void PostgreSQLReplicaConsumer::markTableAsSkippedUntilReload(Int32 relation_id, const String & relation_name) +{ + skip_until_reload.insert(relation_id); + auto & buffer = buffers.find(relation_name)->second; + buffer.columns = buffer.description.sample_block.cloneEmptyColumns(); +} + + void PostgreSQLReplicaConsumer::syncTables(std::shared_ptr tx) { for (const auto & table_name : tables_to_sync) diff --git a/src/Storages/PostgreSQL/PostgreSQLReplicaConsumer.h b/src/Storages/PostgreSQL/PostgreSQLReplicaConsumer.h index 23ab7e22f3e..170f85aef7a 100644 --- a/src/Storages/PostgreSQL/PostgreSQLReplicaConsumer.h +++ b/src/Storages/PostgreSQL/PostgreSQLReplicaConsumer.h @@ -101,6 +101,8 @@ private: static Int16 readInt16(const char * message, size_t & pos, size_t size); static Int8 readInt8(const char * message, size_t & pos, size_t size); + void markTableAsSkippedUntilReload(Int32 relation_id, const String & relation_name); + Poco::Logger * log; std::shared_ptr context; const std::string replication_slot_name, publication_name; @@ -119,6 +121,20 @@ private: Storages storages; Buffers buffers; + + std::unordered_map relation_id_to_name; + + struct SchemaData + { + Int16 number_of_columns; + /// data_type_id and type_modifier + std::vector> column_identifiers; + + SchemaData(Int16 number_of_columns_) : number_of_columns(number_of_columns_) {} + }; + + std::unordered_map schema_data; + std::unordered_set skip_until_reload; }; } diff --git a/tests/integration/test_postgresql_replica_database_engine/test.py b/tests/integration/test_postgresql_replica_database_engine/test.py index 1faf4924685..f6995355758 100644 --- a/tests/integration/test_postgresql_replica_database_engine/test.py +++ b/tests/integration/test_postgresql_replica_database_engine/test.py @@ -2,6 +2,7 @@ import pytest import time import psycopg2 import os.path as p +import random from helpers.cluster import ClickHouseCluster from helpers.test_tools import assert_eq_with_retry @@ -15,6 +16,10 @@ postgres_table_template = """ CREATE TABLE IF NOT EXISTS {} ( key Integer NOT NULL, value Integer, PRIMARY KEY(key)) """ +postgres_table_template_2 = """ + CREATE TABLE IF NOT EXISTS {} ( + key Integer NOT NULL, value1 Integer, value2 Integer, value3 Integer, PRIMARY KEY(key)) + """ def get_postgres_conn(database=False): if database == True: @@ -31,9 +36,9 @@ def create_postgres_db(cursor, name): cursor.execute("CREATE DATABASE {}".format(name)) -def create_postgres_table(cursor, table_name, replica_identity_full=False): +def create_postgres_table(cursor, table_name, replica_identity_full=False, template=postgres_table_template): cursor.execute("DROP TABLE IF EXISTS {}".format(table_name)) - cursor.execute(postgres_table_template.format(table_name)) + cursor.execute(template.format(table_name)) if replica_identity_full: cursor.execute('ALTER TABLE {} REPLICA IDENTITY FULL;'.format(table_name)) @@ -83,6 +88,7 @@ def postgresql_setup_teardown(): instance.query('DROP TABLE IF EXISTS test.postgresql_replica') +@pytest.mark.timeout(120) def test_load_and_sync_all_database_tables(started_cluster): instance.query("DROP DATABASE IF EXISTS test_database") conn = get_postgres_conn(True) @@ -109,6 +115,7 @@ def test_load_and_sync_all_database_tables(started_cluster): assert 'test_database' not in instance.query('SHOW DATABASES') +@pytest.mark.timeout(120) def test_replicating_dml(started_cluster): instance.query("DROP DATABASE IF EXISTS test_database") conn = get_postgres_conn(True) @@ -151,6 +158,7 @@ def test_replicating_dml(started_cluster): assert 
'test_database' not in instance.query('SHOW DATABASES') +@pytest.mark.timeout(120) def test_different_data_types(started_cluster): instance.query("DROP DATABASE IF EXISTS test_database") conn = get_postgres_conn(True) @@ -190,6 +198,13 @@ def test_different_data_types(started_cluster): check_tables_are_synchronized('test_data_types', 'id'); result = instance.query('SELECT * FROM test_database.test_data_types ORDER BY id LIMIT 1;') assert(result == '0\t-32768\t-2147483648\t-9223372036854775808\t1.12345\t1.123456789\t2147483647\t9223372036854775807\t2000-05-12 12:12:12\t2000-05-12\t0.20000\t0.20000\n') + + for i in range(10): + col = random.choice(['a', 'b', 'c']) + cursor.execute('UPDATE test_data_types SET {} = {};'.format(col, i)) + cursor.execute('''UPDATE test_data_types SET i = '2020-12-12';'''.format(col, i)) + + check_tables_are_synchronized('test_data_types', 'id'); cursor.execute('drop table test_data_types;') instance.query("INSERT INTO postgres_database.test_array_data_type " @@ -227,6 +242,7 @@ def test_different_data_types(started_cluster): assert(result == expected) +@pytest.mark.timeout(120) def test_load_and_sync_subset_of_database_tables(started_cluster): instance.query("DROP DATABASE IF EXISTS test_database") conn = get_postgres_conn(True) @@ -279,6 +295,34 @@ def test_load_and_sync_subset_of_database_tables(started_cluster): assert 'test_database' not in instance.query('SHOW DATABASES') +@pytest.mark.timeout(120) +def test_table_schema_changes(started_cluster): + instance.query("DROP DATABASE IF EXISTS test_database") + conn = get_postgres_conn(True) + cursor = conn.cursor() + NUM_TABLES = 5 + + for i in range(NUM_TABLES): + create_postgres_table(cursor, 'postgresql_replica_{}'.format(i), template=postgres_table_template_2); + instance.query("INSERT INTO postgres_database.postgresql_replica_{} SELECT number, {}, {}, {} from numbers(50)".format(i, i, i, i)) + + instance.query( + "CREATE DATABASE test_database ENGINE = PostgreSQLReplica('postgres1:5432', 'postgres_database', 'postgres', 'mysecretpassword')") + + for i in range(NUM_TABLES): + check_tables_are_synchronized('postgresql_replica_{}'.format(i)); + + cursor.execute("ALTER TABLE postgresql_replica_3 DROP COLUMN value2") + + for i in range(NUM_TABLES): + cursor.execute("INSERT INTO postgresql_replica_{} VALUES (50, {}, {})".format(i, i, i)) + cursor.execute("UPDATE postgresql_replica_{} SET value3 = 12 WHERE key%2=0".format(i)) + + # Wait to check nothing breaks + time.sleep(5) + # TODO + + if __name__ == '__main__': cluster.start() input("Cluster created, press any key to destroy...") From 0424770f6805fe6a2176fd16675d13620e3d3e21 Mon Sep 17 00:00:00 2001 From: kssenii Date: Fri, 19 Feb 2021 10:40:59 +0000 Subject: [PATCH 040/931] Handle ddl part 1 --- .../PostgreSQL/DatabasePostgreSQLReplica.cpp | 3 +- .../PostgreSQL/PostgreSQLReplicaConsumer.cpp | 73 +++++------- .../PostgreSQL/PostgreSQLReplicaConsumer.h | 13 +-- .../PostgreSQLReplicationHandler.cpp | 105 ++++++++++++++++-- .../PostgreSQL/PostgreSQLReplicationHandler.h | 15 ++- .../PostgreSQL/StoragePostgreSQLReplica.cpp | 25 ++++- .../PostgreSQL/StoragePostgreSQLReplica.h | 4 +- .../test.py | 15 ++- 8 files changed, 167 insertions(+), 86 deletions(-) diff --git a/src/Databases/PostgreSQL/DatabasePostgreSQLReplica.cpp b/src/Databases/PostgreSQL/DatabasePostgreSQLReplica.cpp index 58fedc01e36..7ce2e47bb02 100644 --- a/src/Databases/PostgreSQL/DatabasePostgreSQLReplica.cpp +++ b/src/Databases/PostgreSQL/DatabasePostgreSQLReplica.cpp @@ -164,7 +164,6 @@ StoragePtr 
DatabasePostgreSQLReplica::tryGetTable(const String & name, con return table->second; return StoragePtr{}; - } @@ -177,6 +176,7 @@ void DatabasePostgreSQLReplica::createTable(const Context & context, const if (storage_set.find("ReplacingMergeTree") != storage_set.end()) { Base::createTable(context, name, table, query); + /// TODO: Update table cached tables list or not return; } } @@ -188,6 +188,7 @@ void DatabasePostgreSQLReplica::createTable(const Context & context, const template void DatabasePostgreSQLReplica::dropTable(const Context & context, const String & name, bool no_delay) { + /// TODO: If called from non sync thread, add dropped storage to skip list Base::dropTable(context, name, no_delay); } diff --git a/src/Storages/PostgreSQL/PostgreSQLReplicaConsumer.cpp b/src/Storages/PostgreSQL/PostgreSQLReplicaConsumer.cpp index f0970ff994e..efc5c4614e7 100644 --- a/src/Storages/PostgreSQL/PostgreSQLReplicaConsumer.cpp +++ b/src/Storages/PostgreSQL/PostgreSQLReplicaConsumer.cpp @@ -22,10 +22,6 @@ namespace ErrorCodes extern const int UNKNOWN_TABLE; } -static const auto reschedule_ms = 500; -static const auto max_thread_work_duration_ms = 60000; - - PostgreSQLReplicaConsumer::PostgreSQLReplicaConsumer( std::shared_ptr context_, PostgreSQLConnectionPtr connection_, @@ -49,13 +45,10 @@ PostgreSQLReplicaConsumer::PostgreSQLReplicaConsumer( { buffers.emplace(table_name, BufferData(storage)); } - - wal_reader_task = context->getSchedulePool().createTask("PostgreSQLReplicaWALReader", [this]{ synchronizationStream(); }); - wal_reader_task->deactivate(); } -void PostgreSQLReplicaConsumer::startSynchronization() +void PostgreSQLReplicaConsumer::readMetadata() { try { @@ -73,35 +66,6 @@ void PostgreSQLReplicaConsumer::startSynchronization() { tryLogCurrentException(__PRETTY_FUNCTION__); } - - wal_reader_task->activateAndSchedule(); -} - - -void PostgreSQLReplicaConsumer::stopSynchronization() -{ - stop_synchronization.store(true); - wal_reader_task->deactivate(); -} - - -void PostgreSQLReplicaConsumer::synchronizationStream() -{ - auto start_time = std::chrono::steady_clock::now(); - - while (!stop_synchronization) - { - if (!readFromReplicationSlot()) - break; - - auto end_time = std::chrono::steady_clock::now(); - auto duration = std::chrono::duration_cast(end_time - start_time); - if (duration.count() > max_thread_work_duration_ms) - break; - } - - if (!stop_synchronization) - wal_reader_task->scheduleAfter(reschedule_ms); } @@ -331,8 +295,8 @@ void PostgreSQLReplicaConsumer::processReplicationMessage(const char * replicati if (current_schema_data.number_of_columns != num_columns) { - markTableAsSkippedUntilReload(relation_id, relation_name); - break; + markTableAsSkipped(relation_id, relation_name); + return; } for (uint16_t i = 0; i < num_columns; ++i) @@ -357,8 +321,8 @@ void PostgreSQLReplicaConsumer::processReplicationMessage(const char * replicati if (current_schema_data.column_identifiers[i].first != data_type_id || current_schema_data.column_identifiers[i].second != type_modifier) { - markTableAsSkippedUntilReload(relation_id, relation_name); - break; + markTableAsSkipped(relation_id, relation_name); + return; } } } @@ -380,7 +344,7 @@ void PostgreSQLReplicaConsumer::processReplicationMessage(const char * replicati { Int32 relation_id = readInt32(replication_message, pos, size); - if (skip_until_reload.find(relation_id) != skip_until_reload.end()) + if (skip_list.find(relation_id) != skip_list.end()) break; Int8 new_tuple = readInt8(replication_message, pos, size); @@ -401,7 +365,7 @@ 
void PostgreSQLReplicaConsumer::processReplicationMessage(const char * replicati { Int32 relation_id = readInt32(replication_message, pos, size); - if (skip_until_reload.find(relation_id) != skip_until_reload.end()) + if (skip_list.find(relation_id) != skip_list.end()) break; LOG_DEBUG(log, "relationID {}, current insert table {}", relation_id, table_to_insert); @@ -451,7 +415,7 @@ void PostgreSQLReplicaConsumer::processReplicationMessage(const char * replicati { Int32 relation_id = readInt32(replication_message, pos, size); - if (skip_until_reload.find(relation_id) != skip_until_reload.end()) + if (skip_list.find(relation_id) != skip_list.end()) break; Int8 full_replica_identity = readInt8(replication_message, pos, size); @@ -471,13 +435,12 @@ void PostgreSQLReplicaConsumer::processReplicationMessage(const char * replicati } -/// TODO: If some table has a changed structure, we can stop current stream (after remembering last valid WAL position) -/// and advance lsn up to this position. Then make changes to nested table and continue the same way. -void PostgreSQLReplicaConsumer::markTableAsSkippedUntilReload(Int32 relation_id, const String & relation_name) +void PostgreSQLReplicaConsumer::markTableAsSkipped(Int32 relation_id, const String & relation_name) { - skip_until_reload.insert(relation_id); + skip_list.insert(relation_id); auto & buffer = buffers.find(relation_name)->second; buffer.columns = buffer.description.sample_block.cloneEmptyColumns(); + LOG_DEBUG(log, "Table {} is skipped temporarily", relation_name); } @@ -611,6 +574,20 @@ bool PostgreSQLReplicaConsumer::readFromReplicationSlot() return true; } + +bool PostgreSQLReplicaConsumer::consume(NameSet & skipped_tables) +{ + if (!readFromReplicationSlot() || !skip_list.empty()) + { + for (const auto & relation_id : skip_list) + skipped_tables.insert(relation_id_to_name[relation_id]); + + return false; + } + + return true; +} + } #endif diff --git a/src/Storages/PostgreSQL/PostgreSQLReplicaConsumer.h b/src/Storages/PostgreSQL/PostgreSQLReplicaConsumer.h index 170f85aef7a..0f2062214c1 100644 --- a/src/Storages/PostgreSQL/PostgreSQLReplicaConsumer.h +++ b/src/Storages/PostgreSQL/PostgreSQLReplicaConsumer.h @@ -10,6 +10,7 @@ #include "insertPostgreSQLValue.h" #include +#include #include #include #include @@ -19,6 +20,7 @@ namespace DB { +using NestedReloadFunc = std::function; class PostgreSQLReplicaConsumer { @@ -35,9 +37,9 @@ public: const size_t max_block_size_, Storages storages_); - void startSynchronization(); + void readMetadata(); - void stopSynchronization(); + bool consume(NameSet & skipped_tables); private: void synchronizationStream(); @@ -101,7 +103,7 @@ private: static Int16 readInt16(const char * message, size_t & pos, size_t size); static Int8 readInt8(const char * message, size_t & pos, size_t size); - void markTableAsSkippedUntilReload(Int32 relation_id, const String & relation_name); + void markTableAsSkipped(Int32 relation_id, const String & relation_name); Poco::Logger * log; std::shared_ptr context; @@ -116,9 +118,6 @@ private: std::string table_to_insert; std::unordered_set tables_to_sync; - BackgroundSchedulePool::TaskHolder wal_reader_task; - std::atomic stop_synchronization = false; - Storages storages; Buffers buffers; @@ -134,7 +133,7 @@ private: }; std::unordered_map schema_data; - std::unordered_set skip_until_reload; + std::unordered_set skip_list; }; } diff --git a/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp b/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp index 
440fda0347b..a6b1ca64330 100644 --- a/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp +++ b/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp @@ -15,6 +15,8 @@ namespace DB { static const auto reschedule_ms = 500; +static const auto max_thread_work_duration_ms = 60000; + PostgreSQLReplicationHandler::PostgreSQLReplicationHandler( const std::string & database_name_, @@ -37,6 +39,9 @@ PostgreSQLReplicationHandler::PostgreSQLReplicationHandler( startup_task = context->getSchedulePool().createTask("PostgreSQLReplicaStartup", [this]{ waitConnectionAndStart(); }); startup_task->deactivate(); + + consumer_task = context->getSchedulePool().createTask("PostgreSQLReplicaStartup", [this]{ consumerFunc(); }); + consumer_task->deactivate(); } @@ -77,8 +82,8 @@ void PostgreSQLReplicationHandler::waitConnectionAndStart() void PostgreSQLReplicationHandler::shutdown() { - if (consumer) - consumer->stopSynchronization(); + stop_synchronization.store(true); + consumer_task->deactivate(); } @@ -95,7 +100,7 @@ void PostgreSQLReplicationHandler::startSynchronization() auto initial_sync = [&]() { createReplicationSlot(ntx, start_lsn, snapshot_name); - loadFromSnapshot(snapshot_name); + loadFromSnapshot(snapshot_name, storages); }; /// Replication slot should be deleted with drop table only and created only once, reused after detach. @@ -124,7 +129,7 @@ void PostgreSQLReplicationHandler::startSynchronization() consumer = std::make_shared( context, - std::move(connection), + connection, replication_slot, publication_name, metadata_path, @@ -132,15 +137,15 @@ void PostgreSQLReplicationHandler::startSynchronization() max_block_size, nested_storages); - consumer->startSynchronization(); + consumer_task->activateAndSchedule(); replication_connection->conn()->close(); } -void PostgreSQLReplicationHandler::loadFromSnapshot(std::string & snapshot_name) +void PostgreSQLReplicationHandler::loadFromSnapshot(std::string & snapshot_name, Storages & sync_storages) { - for (const auto & storage_data : storages) + for (const auto & storage_data : sync_storages) { try { @@ -159,18 +164,19 @@ void PostgreSQLReplicationHandler::loadFromSnapshot(std::string & snapshot_name) /// Already connected to needed database, no need to add it to query. 
query_str = fmt::format("SELECT * FROM {}", storage_data.first); + const StorageInMemoryMetadata & storage_metadata = nested_storage->getInMemoryMetadata(); + auto insert_context = storage_data.second->makeNestedTableContext(); + auto insert = std::make_shared(); insert->table_id = nested_storage->getStorageID(); - auto insert_context = storage_data.second->makeNestedTableContext(); InterpreterInsertQuery interpreter(insert, insert_context); auto block_io = interpreter.execute(); - const StorageInMemoryMetadata & storage_metadata = nested_storage->getInMemoryMetadata(); auto sample_block = storage_metadata.getSampleBlockNonMaterialized(); - PostgreSQLBlockInputStream input(tx, query_str, sample_block, DEFAULT_BLOCK_SIZE); + assertBlocksHaveEqualStructure(input.getHeader(), block_io.out->getHeader(), "postgresql replica load from snapshot"); copyData(input, *block_io.out); storage_data.second->setNestedLoaded(); @@ -188,6 +194,35 @@ void PostgreSQLReplicationHandler::loadFromSnapshot(std::string & snapshot_name) } +void PostgreSQLReplicationHandler::consumerFunc() +{ + auto start_time = std::chrono::steady_clock::now(); + NameSet skipped_tables; + + while (!stop_synchronization) + { + bool reschedule = !consumer->consume(skipped_tables); + + if (!skipped_tables.empty()) + { + reloadFromSnapshot(skipped_tables); + skipped_tables.clear(); + } + + if (reschedule) + break; + + auto end_time = std::chrono::steady_clock::now(); + auto duration = std::chrono::duration_cast(end_time - start_time); + if (duration.count() > max_thread_work_duration_ms) + break; + } + + if (!stop_synchronization) + consumer_task->scheduleAfter(reschedule_ms); +} + + bool PostgreSQLReplicationHandler::isPublicationExist(std::shared_ptr tx) { std::string query_str = fmt::format("SELECT exists (SELECT 1 FROM pg_publication WHERE pubname = '{}')", publication_name); @@ -258,9 +293,16 @@ bool PostgreSQLReplicationHandler::isReplicationSlotExist(NontransactionPtr ntx, } -void PostgreSQLReplicationHandler::createReplicationSlot(NontransactionPtr ntx, std::string & start_lsn, std::string & snapshot_name) +void PostgreSQLReplicationHandler::createReplicationSlot( + NontransactionPtr ntx, std::string & start_lsn, std::string & snapshot_name, bool temporary) { - std::string query_str = fmt::format("CREATE_REPLICATION_SLOT {} LOGICAL pgoutput EXPORT_SNAPSHOT", replication_slot); + std::string query_str; + + if (!temporary) + query_str = fmt::format("CREATE_REPLICATION_SLOT {} LOGICAL pgoutput EXPORT_SNAPSHOT", replication_slot); + else + query_str = fmt::format("CREATE_REPLICATION_SLOT {} TEMPORARY LOGICAL pgoutput EXPORT_SNAPSHOT", replication_slot + "_tmp"); + try { pqxx::result result{ntx->exec(query_str)}; @@ -342,6 +384,45 @@ PostgreSQLTableStructure PostgreSQLReplicationHandler::fetchTableStructure( return fetchPostgreSQLTableStructure(tx, table_name, use_nulls, true); } + +/// TODO: After temporary replication slot is created, we have a start lsn. In replication stream +/// when get message for a table and this table turn out to be in a skip list, check +/// if current lsn position is >= start lsn position for skipped table. If so, we can +/// remove this table fromm skip list and consume changes without any loss. 
+std::string PostgreSQLReplicationHandler::reloadFromSnapshot(NameSet & table_names) +{ + String start_lsn; + try + { + auto tx = std::make_shared(*connection->conn()); + Storages sync_storages; + for (const auto & table_name : table_names) + { + auto storage = storages[table_name]; + sync_storages[table_name] = storage; + storage->dropNested(); + } + tx->commit(); + + auto replication_connection = std::make_shared(fmt::format("{} replication=database", connection_str)); + replication_connection->conn()->set_variable("default_transaction_isolation", "'repeatable read'"); + + auto ntx = std::make_shared(*replication_connection->conn()); + std::string snapshot_name; + createReplicationSlot(ntx, start_lsn, snapshot_name, true); + ntx->commit(); + + loadFromSnapshot(snapshot_name, sync_storages); + + } + catch (...) + { + tryLogCurrentException(__PRETTY_FUNCTION__); + } + + return start_lsn; +} + } #endif diff --git a/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.h b/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.h index 5d973ca34fe..bf7b80bbe9b 100644 --- a/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.h +++ b/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.h @@ -42,6 +42,7 @@ public: private: using NontransactionPtr = std::shared_ptr; + using Storages = std::unordered_map; bool isPublicationExist(std::shared_ptr tx); @@ -49,7 +50,7 @@ private: void createPublicationIfNeeded(PostgreSQLConnection::ConnectionPtr connection_); - void createReplicationSlot(NontransactionPtr ntx, std::string & start_lsn, std::string & snapshot_name); + void createReplicationSlot(NontransactionPtr ntx, std::string & start_lsn, std::string & snapshot_name, bool temporary = false); void dropReplicationSlot(NontransactionPtr tx, std::string & slot_name); @@ -59,10 +60,14 @@ private: void startSynchronization(); - void loadFromSnapshot(std::string & snapshot_name); + void consumerFunc(); + + void loadFromSnapshot(std::string & snapshot_name, Storages & sync_storages); std::unordered_set fetchTablesFromPublication(PostgreSQLConnection::ConnectionPtr connection_); + std::string reloadFromSnapshot(NameSet & table_names); + Poco::Logger * log; std::shared_ptr context; const std::string database_name, connection_str, metadata_path; @@ -72,11 +77,11 @@ private: PostgreSQLConnectionPtr connection; std::shared_ptr consumer; - BackgroundSchedulePool::TaskHolder startup_task; - std::atomic tables_loaded = false; + BackgroundSchedulePool::TaskHolder startup_task, consumer_task; + std::atomic tables_loaded = false, stop_synchronization = false; bool new_publication_created = false; - std::unordered_map storages; + Storages storages; std::unordered_map nested_storages; }; diff --git a/src/Storages/PostgreSQL/StoragePostgreSQLReplica.cpp b/src/Storages/PostgreSQL/StoragePostgreSQLReplica.cpp index b4b25c3eae9..db3b9afed07 100644 --- a/src/Storages/PostgreSQL/StoragePostgreSQLReplica.cpp +++ b/src/Storages/PostgreSQL/StoragePostgreSQLReplica.cpp @@ -172,7 +172,7 @@ ASTPtr StoragePostgreSQLReplica::getCreateNestedTableQuery(const std::function
     const auto & columns = metadata_snapshot->
getColumns(); NamesAndTypesList ordinary_columns_and_types; - if (!columns.empty()) + if (!is_postgresql_replica_database) { ordinary_columns_and_types = columns.getOrdinary(); } @@ -243,16 +243,26 @@ ASTPtr StoragePostgreSQLReplica::getCreateNestedTableQuery(const std::function
 void StoragePostgreSQLReplica::createNestedIfNeeded(const std::function<PostgreSQLTableStructure()>
& fetch_table_structure) { - nested_storage = tryGetNested(); + if (nested_loaded) + { + nested_storage = tryGetNested(); - if (nested_storage) - return; + if (nested_storage) + return; + } auto context = makeNestedTableContext(); const auto ast_create = getCreateNestedTableQuery(fetch_table_structure); - InterpreterCreateQuery interpreter(ast_create, context); - interpreter.execute(); + try + { + InterpreterCreateQuery interpreter(ast_create, context); + interpreter.execute(); + } + catch (...) + { + tryLogCurrentException(__PRETTY_FUNCTION__); + } nested_storage = getNested(); } @@ -337,6 +347,9 @@ void StoragePostgreSQLReplica::dropNested() auto context = makeNestedTableContext(); auto interpreter = InterpreterDropQuery(ast_drop, context); interpreter.execute(); + + nested_loaded.store(false); + nested_storage = nullptr; } diff --git a/src/Storages/PostgreSQL/StoragePostgreSQLReplica.h b/src/Storages/PostgreSQL/StoragePostgreSQLReplica.h index a0e27ef046d..d2bb80307fc 100644 --- a/src/Storages/PostgreSQL/StoragePostgreSQLReplica.h +++ b/src/Storages/PostgreSQL/StoragePostgreSQLReplica.h @@ -67,6 +67,8 @@ public: bool isNestedLoaded() { return nested_loaded.load(); } + void dropNested(); + protected: StoragePostgreSQLReplica( const StorageID & table_id_, @@ -87,8 +89,6 @@ private: std::string getNestedTableName() const; - void dropNested(); - std::string remote_table_name; std::shared_ptr global_context; diff --git a/tests/integration/test_postgresql_replica_database_engine/test.py b/tests/integration/test_postgresql_replica_database_engine/test.py index f6995355758..b7ba810f7a2 100644 --- a/tests/integration/test_postgresql_replica_database_engine/test.py +++ b/tests/integration/test_postgresql_replica_database_engine/test.py @@ -304,23 +304,28 @@ def test_table_schema_changes(started_cluster): for i in range(NUM_TABLES): create_postgres_table(cursor, 'postgresql_replica_{}'.format(i), template=postgres_table_template_2); - instance.query("INSERT INTO postgres_database.postgresql_replica_{} SELECT number, {}, {}, {} from numbers(50)".format(i, i, i, i)) + instance.query("INSERT INTO postgres_database.postgresql_replica_{} SELECT number, {}, {}, {} from numbers(25)".format(i, i, i, i)) instance.query( "CREATE DATABASE test_database ENGINE = PostgreSQLReplica('postgres1:5432', 'postgres_database', 'postgres', 'mysecretpassword')") + for i in range(NUM_TABLES): + instance.query("INSERT INTO postgres_database.postgresql_replica_{} SELECT 25 + number, {}, {}, {} from numbers(25)".format(i, i, i, i)) + for i in range(NUM_TABLES): check_tables_are_synchronized('postgresql_replica_{}'.format(i)); - cursor.execute("ALTER TABLE postgresql_replica_3 DROP COLUMN value2") + expected = instance.query("SELECT key, value1, value3 FROM test_database.postgresql_replica_3 ORDER BY key"); + cursor.execute("ALTER TABLE postgresql_replica_4 DROP COLUMN value2") for i in range(NUM_TABLES): cursor.execute("INSERT INTO postgresql_replica_{} VALUES (50, {}, {})".format(i, i, i)) cursor.execute("UPDATE postgresql_replica_{} SET value3 = 12 WHERE key%2=0".format(i)) - # Wait to check nothing breaks - time.sleep(5) - # TODO + time.sleep(4) + print("Sync check") + for i in range(NUM_TABLES): + check_tables_are_synchronized('postgresql_replica_{}'.format(i)); if __name__ == '__main__': From d0d90538ea8d3d7405791a2d3c5699472f0f76f2 Mon Sep 17 00:00:00 2001 From: kssenii Date: Sat, 20 Feb 2021 18:04:10 +0000 Subject: [PATCH 041/931] Tiny fix --- src/Storages/PostgreSQL/StoragePostgreSQLReplica.cpp | 3 ++- 1 file 
changed, 2 insertions(+), 1 deletion(-) diff --git a/src/Storages/PostgreSQL/StoragePostgreSQLReplica.cpp b/src/Storages/PostgreSQL/StoragePostgreSQLReplica.cpp index db3b9afed07..f85b9510f50 100644 --- a/src/Storages/PostgreSQL/StoragePostgreSQLReplica.cpp +++ b/src/Storages/PostgreSQL/StoragePostgreSQLReplica.cpp @@ -336,6 +336,8 @@ void StoragePostgreSQLReplica::shutdownFinal() void StoragePostgreSQLReplica::dropNested() { + nested_loaded.store(false); + auto table_id = nested_storage->getStorageID(); auto ast_drop = std::make_shared(); @@ -348,7 +350,6 @@ void StoragePostgreSQLReplica::dropNested() auto interpreter = InterpreterDropQuery(ast_drop, context); interpreter.execute(); - nested_loaded.store(false); nested_storage = nullptr; } From 883cc2c0efdca82fc9a324ec003f274c9aeaba83 Mon Sep 17 00:00:00 2001 From: kssenii Date: Sat, 20 Feb 2021 18:37:54 +0000 Subject: [PATCH 042/931] Fixes --- src/Storages/PostgreSQL/PostgreSQLReplicaConsumer.cpp | 2 +- .../PostgreSQL/PostgreSQLReplicationHandler.cpp | 2 +- src/Storages/PostgreSQL/StoragePostgreSQLReplica.cpp | 11 +++++++---- src/Storages/PostgreSQL/StoragePostgreSQLReplica.h | 1 + 4 files changed, 10 insertions(+), 6 deletions(-) diff --git a/src/Storages/PostgreSQL/PostgreSQLReplicaConsumer.cpp b/src/Storages/PostgreSQL/PostgreSQLReplicaConsumer.cpp index efc5c4614e7..aecbb7b3bec 100644 --- a/src/Storages/PostgreSQL/PostgreSQLReplicaConsumer.cpp +++ b/src/Storages/PostgreSQL/PostgreSQLReplicaConsumer.cpp @@ -181,7 +181,7 @@ void PostgreSQLReplicaConsumer::readTupleData( Int32 col_len = readInt32(message, pos, size); String value; - for (Int16 i = 0; i < col_len; ++i) + for (Int32 i = 0; i < col_len; ++i) value += readInt8(message, pos, size); insertValue(buffer, value, column_idx); diff --git a/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp b/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp index a6b1ca64330..c95b4bacad9 100644 --- a/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp +++ b/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp @@ -398,7 +398,7 @@ std::string PostgreSQLReplicationHandler::reloadFromSnapshot(NameSet & table_nam Storages sync_storages; for (const auto & table_name : table_names) { - auto storage = storages[table_name]; + auto * storage = storages[table_name]; sync_storages[table_name] = storage; storage->dropNested(); } diff --git a/src/Storages/PostgreSQL/StoragePostgreSQLReplica.cpp b/src/Storages/PostgreSQL/StoragePostgreSQLReplica.cpp index f85b9510f50..c54f9a31cd4 100644 --- a/src/Storages/PostgreSQL/StoragePostgreSQLReplica.cpp +++ b/src/Storages/PostgreSQL/StoragePostgreSQLReplica.cpp @@ -336,7 +336,8 @@ void StoragePostgreSQLReplica::shutdownFinal() void StoragePostgreSQLReplica::dropNested() { - nested_loaded.store(false); + std::lock_guard lock(nested_mutex); + nested_loaded = false; auto table_id = nested_storage->getStorageID(); auto ast_drop = std::make_shared(); @@ -373,7 +374,9 @@ Pipe StoragePostgreSQLReplica::read( unsigned num_streams) { /// If initial table sync has not yet finished, nested tables might not be created yet. - if (!nested_loaded) + /// Or nested table might be attempted to get dropped. (Second mutex lock in dropNested()). 
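/// The defer_lock/try_lock pattern used on the next lines keeps reads non-blocking:
/// instead of waiting while dropNested() holds nested_mutex during a reload, the read
/// bails out early with an empty pipe. A simplified illustration (not the patch itself):
///
///     std::unique_lock<std::mutex> lock(mutex, std::defer_lock); /// constructed unlocked
///     if (!lock.try_lock())
///         return Pipe(); /// the nested table is being rebuilt right now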
+ std::unique_lock lock(nested_mutex, std::defer_lock); + if (!nested_loaded || !lock.try_lock()) { LOG_WARNING(&Poco::Logger::get("StoragePostgreSQLReplica"), "Table {} is not loaded yet", getNestedTableName()); return Pipe(); @@ -383,7 +386,7 @@ Pipe StoragePostgreSQLReplica::read( if (!nested_storage) getNested(); - auto lock = nested_storage->lockForShare(context.getCurrentQueryId(), context.getSettingsRef().lock_acquire_timeout); + auto storage_lock = nested_storage->lockForShare(context.getCurrentQueryId(), context.getSettingsRef().lock_acquire_timeout); const StorageMetadataPtr & nested_metadata = nested_storage->getInMemoryMetadataPtr(); @@ -428,7 +431,7 @@ Pipe StoragePostgreSQLReplica::read( nested_metadata, query_info, context, processed_stage, max_block_size, num_streams); - pipe.addTableLock(lock); + pipe.addTableLock(storage_lock); if (!expressions->children.empty() && !pipe.empty()) { diff --git a/src/Storages/PostgreSQL/StoragePostgreSQLReplica.h b/src/Storages/PostgreSQL/StoragePostgreSQLReplica.h index d2bb80307fc..4d407f337ad 100644 --- a/src/Storages/PostgreSQL/StoragePostgreSQLReplica.h +++ b/src/Storages/PostgreSQL/StoragePostgreSQLReplica.h @@ -97,6 +97,7 @@ private: std::atomic nested_loaded = false; StoragePtr nested_storage; + std::mutex nested_mutex; bool is_postgresql_replica_database = false; }; From 427aad80a14734e52f79a4ee3ba4e8607bef092c Mon Sep 17 00:00:00 2001 From: kssenii Date: Sat, 20 Feb 2021 21:28:14 +0000 Subject: [PATCH 043/931] Avoid table does not exist errors if nested is unavailable --- .../PostgreSQL/DatabasePostgreSQLReplica.cpp | 5 +- .../PostgreSQL/DatabasePostgreSQLReplica.h | 1 - .../PostgreSQL/StoragePostgreSQLReplica.cpp | 118 +++++++++--------- .../test.py | 4 +- 4 files changed, 64 insertions(+), 64 deletions(-) diff --git a/src/Databases/PostgreSQL/DatabasePostgreSQLReplica.cpp b/src/Databases/PostgreSQL/DatabasePostgreSQLReplica.cpp index 7ce2e47bb02..35d808c0dec 100644 --- a/src/Databases/PostgreSQL/DatabasePostgreSQLReplica.cpp +++ b/src/Databases/PostgreSQL/DatabasePostgreSQLReplica.cpp @@ -88,6 +88,7 @@ void DatabasePostgreSQLReplica::startSynchronization() : (global_context.getSettingsRef().max_insert_block_size.value), global_context.getMacros()->expand(settings->postgresql_tables_list.value)); + /// TODO: may be no need to always fetch std::unordered_set tables_to_replicate = replication_handler->fetchRequiredTables(connection->conn()); for (const auto & table_name : tables_to_replicate) @@ -160,7 +161,9 @@ StoragePtr DatabasePostgreSQLReplica::tryGetTable(const String & name, con } auto table = tables.find(name); - if (table != tables.end() && table->second->as()->isNestedLoaded()) + /// Here it is possible that nested table is temporarily out of reach, but return storage anyway, + /// it will not allow to read if nested is unavailable at the moment + if (table != tables.end()) return table->second; return StoragePtr{}; diff --git a/src/Databases/PostgreSQL/DatabasePostgreSQLReplica.h b/src/Databases/PostgreSQL/DatabasePostgreSQLReplica.h index 24763e697e6..d8cb2ff5a6d 100644 --- a/src/Databases/PostgreSQL/DatabasePostgreSQLReplica.h +++ b/src/Databases/PostgreSQL/DatabasePostgreSQLReplica.h @@ -58,7 +58,6 @@ public: void shutdown() override; - private: void startSynchronization(); diff --git a/src/Storages/PostgreSQL/StoragePostgreSQLReplica.cpp b/src/Storages/PostgreSQL/StoragePostgreSQLReplica.cpp index c54f9a31cd4..0f895c21ae4 100644 --- a/src/Storages/PostgreSQL/StoragePostgreSQLReplica.cpp +++ 
b/src/Storages/PostgreSQL/StoragePostgreSQLReplica.cpp @@ -352,6 +352,7 @@ void StoragePostgreSQLReplica::dropNested() interpreter.execute(); nested_storage = nullptr; + LOG_WARNING(&Poco::Logger::get("StoragePostgreSQLReplica"), "Dropped (or temporarily) nested table {}", getNestedTableName()); } @@ -373,79 +374,78 @@ Pipe StoragePostgreSQLReplica::read( size_t max_block_size, unsigned num_streams) { - /// If initial table sync has not yet finished, nested tables might not be created yet. - /// Or nested table might be attempted to get dropped. (Second mutex lock in dropNested()). + /// TODO: are there other places where this lock is needed std::unique_lock lock(nested_mutex, std::defer_lock); - if (!nested_loaded || !lock.try_lock()) + + if (nested_loaded && lock.try_lock()) { - LOG_WARNING(&Poco::Logger::get("StoragePostgreSQLReplica"), "Table {} is not loaded yet", getNestedTableName()); - return Pipe(); - } + if (!nested_storage) + getNested(); - /// Should throw if there is no nested storage - if (!nested_storage) - getNested(); + auto storage_lock = nested_storage->lockForShare(context.getCurrentQueryId(), context.getSettingsRef().lock_acquire_timeout); - auto storage_lock = nested_storage->lockForShare(context.getCurrentQueryId(), context.getSettingsRef().lock_acquire_timeout); + const StorageMetadataPtr & nested_metadata = nested_storage->getInMemoryMetadataPtr(); + Block nested_header = nested_metadata->getSampleBlock(); - const StorageMetadataPtr & nested_metadata = nested_storage->getInMemoryMetadataPtr(); + ColumnWithTypeAndName & sign_column = nested_header.getByPosition(nested_header.columns() - 2); + ColumnWithTypeAndName & version_column = nested_header.getByPosition(nested_header.columns() - 1); - Block nested_header = nested_metadata->getSampleBlock(); - ColumnWithTypeAndName & sign_column = nested_header.getByPosition(nested_header.columns() - 2); - ColumnWithTypeAndName & version_column = nested_header.getByPosition(nested_header.columns() - 1); + NameSet column_names_set = NameSet(column_names.begin(), column_names.end()); - NameSet column_names_set = NameSet(column_names.begin(), column_names.end()); - - if (ASTSelectQuery * select_query = query_info.query->as(); select_query && !column_names_set.count(version_column.name)) - { - auto & tables_in_select_query = select_query->tables()->as(); - - if (!tables_in_select_query.children.empty()) + if (ASTSelectQuery * select_query = query_info.query->as(); select_query && !column_names_set.count(version_column.name)) { - auto & tables_element = tables_in_select_query.children[0]->as(); + auto & tables_in_select_query = select_query->tables()->as(); - if (tables_element.table_expression) - tables_element.table_expression->as().final = true; + if (!tables_in_select_query.children.empty()) + { + auto & tables_element = tables_in_select_query.children[0]->as(); + + if (tables_element.table_expression) + tables_element.table_expression->as().final = true; + } } - } - String filter_column_name; - Names require_columns_name = column_names; - ASTPtr expressions = std::make_shared(); - if (column_names_set.empty() || !column_names_set.count(sign_column.name)) - { - require_columns_name.emplace_back(sign_column.name); - - const auto & sign_column_name = std::make_shared(sign_column.name); - const auto & fetch_sign_value = std::make_shared(Field(Int8(1))); - - expressions->children.emplace_back(makeASTFunction("equals", sign_column_name, fetch_sign_value)); - filter_column_name = expressions->children.back()->getColumnName(); - - for 
(const auto & column_name : column_names) - expressions->children.emplace_back(std::make_shared(column_name)); - } - - Pipe pipe = nested_storage->read( - require_columns_name, - nested_metadata, query_info, context, - processed_stage, max_block_size, num_streams); - - pipe.addTableLock(storage_lock); - - if (!expressions->children.empty() && !pipe.empty()) - { - Block pipe_header = pipe.getHeader(); - auto syntax = TreeRewriter(context).analyze(expressions, pipe_header.getNamesAndTypesList()); - ExpressionActionsPtr expression_actions = ExpressionAnalyzer(expressions, syntax, context).getActions(true); - - pipe.addSimpleTransform([&](const Block & header) + String filter_column_name; + Names require_columns_name = column_names; + ASTPtr expressions = std::make_shared(); + if (column_names_set.empty() || !column_names_set.count(sign_column.name)) { - return std::make_shared(header, expression_actions, filter_column_name, false); - }); + require_columns_name.emplace_back(sign_column.name); + + const auto & sign_column_name = std::make_shared(sign_column.name); + const auto & fetch_sign_value = std::make_shared(Field(Int8(1))); + + expressions->children.emplace_back(makeASTFunction("equals", sign_column_name, fetch_sign_value)); + filter_column_name = expressions->children.back()->getColumnName(); + + for (const auto & column_name : column_names) + expressions->children.emplace_back(std::make_shared(column_name)); + } + + Pipe pipe = nested_storage->read( + require_columns_name, + nested_metadata, query_info, context, + processed_stage, max_block_size, num_streams); + + pipe.addTableLock(storage_lock); + + if (!expressions->children.empty() && !pipe.empty()) + { + Block pipe_header = pipe.getHeader(); + auto syntax = TreeRewriter(context).analyze(expressions, pipe_header.getNamesAndTypesList()); + ExpressionActionsPtr expression_actions = ExpressionAnalyzer(expressions, syntax, context).getActions(true); + + pipe.addSimpleTransform([&](const Block & header) + { + return std::make_shared(header, expression_actions, filter_column_name, false); + }); + } + + return pipe; } - return pipe; + LOG_WARNING(&Poco::Logger::get("StoragePostgreSQLReplica"), "Nested table {} is unavailable or is not loaded yet", getNestedTableName()); + return Pipe(); } diff --git a/tests/integration/test_postgresql_replica_database_engine/test.py b/tests/integration/test_postgresql_replica_database_engine/test.py index b7ba810f7a2..1b6887ace51 100644 --- a/tests/integration/test_postgresql_replica_database_engine/test.py +++ b/tests/integration/test_postgresql_replica_database_engine/test.py @@ -316,14 +316,12 @@ def test_table_schema_changes(started_cluster): check_tables_are_synchronized('postgresql_replica_{}'.format(i)); expected = instance.query("SELECT key, value1, value3 FROM test_database.postgresql_replica_3 ORDER BY key"); - cursor.execute("ALTER TABLE postgresql_replica_4 DROP COLUMN value2") + cursor.execute("ALTER TABLE postgresql_replica_{} DROP COLUMN value2".format(random.randint(0, 4))) for i in range(NUM_TABLES): cursor.execute("INSERT INTO postgresql_replica_{} VALUES (50, {}, {})".format(i, i, i)) cursor.execute("UPDATE postgresql_replica_{} SET value3 = 12 WHERE key%2=0".format(i)) - time.sleep(4) - print("Sync check") for i in range(NUM_TABLES): check_tables_are_synchronized('postgresql_replica_{}'.format(i)); From 2e3bdd662e87c2e87970acd1aa2655773117802f Mon Sep 17 00:00:00 2001 From: kssenii Date: Sun, 21 Feb 2021 22:41:18 +0000 Subject: [PATCH 044/931] Handle ddl part 2 --- 
.../PostgreSQL/PostgreSQLReplicaConsumer.cpp | 130 ++++++++++++++---- .../PostgreSQL/PostgreSQLReplicaConsumer.h | 73 ++++++---- .../PostgreSQLReplicationHandler.cpp | 59 +++++--- .../PostgreSQL/PostgreSQLReplicationHandler.h | 8 +- .../test.py | 13 +- 5 files changed, 197 insertions(+), 86 deletions(-) diff --git a/src/Storages/PostgreSQL/PostgreSQLReplicaConsumer.cpp b/src/Storages/PostgreSQL/PostgreSQLReplicaConsumer.cpp index aecbb7b3bec..9c76db7c0e6 100644 --- a/src/Storages/PostgreSQL/PostgreSQLReplicaConsumer.cpp +++ b/src/Storages/PostgreSQL/PostgreSQLReplicaConsumer.cpp @@ -43,11 +43,36 @@ PostgreSQLReplicaConsumer::PostgreSQLReplicaConsumer( { for (const auto & [table_name, storage] : storages) { - buffers.emplace(table_name, BufferData(storage)); + buffers.emplace(table_name, Buffer(storage)); } } +void PostgreSQLReplicaConsumer::Buffer::fillBuffer(StoragePtr storage) +{ + const auto storage_metadata = storage->getInMemoryMetadataPtr(); + description.init(storage_metadata->getSampleBlock()); + + columns = description.sample_block.cloneEmptyColumns(); + const auto & storage_columns = storage_metadata->getColumns().getAllPhysical(); + auto insert_columns = std::make_shared(); + + assert(description.sample_block.columns() == storage_columns.size()); + size_t idx = 0; + + for (const auto & column : storage_columns) + { + if (description.types[idx].first == ExternalResultDescription::ValueType::vtArray) + preparePostgreSQLArrayInfo(array_info, idx, description.sample_block.getByPosition(idx).type); + idx++; + + insert_columns->children.emplace_back(std::make_shared(column.name)); + } + + columnsAST = std::move(insert_columns); +} + + void PostgreSQLReplicaConsumer::readMetadata() { try @@ -69,7 +94,7 @@ void PostgreSQLReplicaConsumer::readMetadata() } -void PostgreSQLReplicaConsumer::insertValue(BufferData & buffer, const std::string & value, size_t column_idx) +void PostgreSQLReplicaConsumer::insertValue(Buffer & buffer, const std::string & value, size_t column_idx) { const auto & sample = buffer.description.sample_block.getByPosition(column_idx); bool is_nullable = buffer.description.types[column_idx].second; @@ -95,7 +120,7 @@ void PostgreSQLReplicaConsumer::insertValue(BufferData & buffer, const std::stri } -void PostgreSQLReplicaConsumer::insertDefaultValue(BufferData & buffer, size_t column_idx) +void PostgreSQLReplicaConsumer::insertDefaultValue(Buffer & buffer, size_t column_idx) { const auto & sample = buffer.description.sample_block.getByPosition(column_idx); insertDefaultPostgreSQLValue(*buffer.columns[column_idx], *sample.column); @@ -160,7 +185,7 @@ Int64 PostgreSQLReplicaConsumer::readInt64(const char * message, size_t & pos, [ void PostgreSQLReplicaConsumer::readTupleData( - BufferData & buffer, const char * message, size_t & pos, [[maybe_unused]] size_t size, PostgreSQLQuery type, bool old_value) + Buffer & buffer, const char * message, size_t & pos, [[maybe_unused]] size_t size, PostgreSQLQuery type, bool old_value) { Int16 num_columns = readInt16(message, pos, size); LOG_DEBUG(log, "number of columns {}", num_columns); @@ -251,6 +276,7 @@ void PostgreSQLReplicaConsumer::processReplicationMessage(const char * replicati readInt64(replication_message, pos, size); /// Int64 transaction commit timestamp final_lsn = current_lsn; + LOG_DEBUG(log, "Commit lsn: {}", getLSNValue(current_lsn)); break; } case 'O': // Origin @@ -275,18 +301,17 @@ void PostgreSQLReplicaConsumer::processReplicationMessage(const char * replicati "INFO: relation id: {}, namespace: {}, relation name: 
{}, replica identity: {}, columns number: {}", relation_id, relation_namespace, relation_name, replica_identity, num_columns); - /// Cache table schema data to be able to detect schema changes, because ddl is not - /// replicated with postgresql logical replication protocol, but some table schema info - /// is received if it is the first time we received dml message for given relation in current session or - /// if relation definition has changed since the last relation definition message. + if (!isSyncAllowed(relation_id)) + return; + Int8 key; /// Flags. 0 or 1 (if part of the key). Not needed for now. Int32 data_type_id; Int32 type_modifier; /// For example, n in varchar(n) bool new_relation_definition = false; - if (relation_id_to_name.find(relation_id) == relation_id_to_name.end()) + if (schema_data.find(relation_id) == schema_data.end()) { - relation_id_to_name.emplace(relation_id, relation_name); + relation_id_to_name[relation_id] = relation_name; schema_data.emplace(relation_id, SchemaData(num_columns)); new_relation_definition = true; } @@ -344,8 +369,8 @@ void PostgreSQLReplicaConsumer::processReplicationMessage(const char * replicati { Int32 relation_id = readInt32(replication_message, pos, size); - if (skip_list.find(relation_id) != skip_list.end()) - break; + if (!isSyncAllowed(relation_id)) + return; Int8 new_tuple = readInt8(replication_message, pos, size); @@ -365,11 +390,11 @@ void PostgreSQLReplicaConsumer::processReplicationMessage(const char * replicati { Int32 relation_id = readInt32(replication_message, pos, size); - if (skip_list.find(relation_id) != skip_list.end()) - break; - LOG_DEBUG(log, "relationID {}, current insert table {}", relation_id, table_to_insert); + if (!isSyncAllowed(relation_id)) + return; + auto buffer = buffers.find(table_to_insert); auto proccess_identifier = [&](Int8 identifier) -> bool { @@ -415,8 +440,8 @@ void PostgreSQLReplicaConsumer::processReplicationMessage(const char * replicati { Int32 relation_id = readInt32(replication_message, pos, size); - if (skip_list.find(relation_id) != skip_list.end()) - break; + if (!isSyncAllowed(relation_id)) + return; Int8 full_replica_identity = readInt8(replication_message, pos, size); @@ -435,15 +460,6 @@ void PostgreSQLReplicaConsumer::processReplicationMessage(const char * replicati } -void PostgreSQLReplicaConsumer::markTableAsSkipped(Int32 relation_id, const String & relation_name) -{ - skip_list.insert(relation_id); - auto & buffer = buffers.find(relation_name)->second; - buffer.columns = buffer.description.sample_block.cloneEmptyColumns(); - LOG_DEBUG(log, "Table {} is skipped temporarily", relation_name); -} - - void PostgreSQLReplicaConsumer::syncTables(std::shared_ptr tx) { for (const auto & table_name : tables_to_sync) @@ -505,7 +521,39 @@ String PostgreSQLReplicaConsumer::advanceLSN(std::shared_ptrsecond; + if (table_start_lsn.empty()) + return false; + + if (getLSNValue(current_lsn) >= getLSNValue(table_start_lsn)) + { + skip_list.erase(table_with_lsn); + LOG_DEBUG(log, "Sync is allowed for relation id: {}", relation_id); + + return true; + } + + return false; +} + + +void PostgreSQLReplicaConsumer::markTableAsSkipped(Int32 relation_id, const String & relation_name) +{ + skip_list.insert({relation_id, ""}); + schema_data.erase(relation_id); + auto & buffer = buffers.find(relation_name)->second; + buffer.columns = buffer.description.sample_block.cloneEmptyColumns(); + LOG_DEBUG(log, "Table {} is skipped temporarily", relation_name); +} + + +/// Read binary changes from replication slot via 
COPY command (starting from current lsn in a slot). bool PostgreSQLReplicaConsumer::readFromReplicationSlot() { std::shared_ptr tx; @@ -545,6 +593,7 @@ bool PostgreSQLReplicaConsumer::readFromReplicationSlot() slot_empty = false; current_lsn = (*row)[0]; + LOG_DEBUG(log, "Current lsn: {}", getLSNValue(current_lsn)); processReplicationMessage((*row)[1].c_str(), (*row)[1].size()); } @@ -575,12 +624,15 @@ bool PostgreSQLReplicaConsumer::readFromReplicationSlot() } -bool PostgreSQLReplicaConsumer::consume(NameSet & skipped_tables) +bool PostgreSQLReplicaConsumer::consume(std::vector> & skipped_tables) { if (!readFromReplicationSlot() || !skip_list.empty()) { - for (const auto & relation_id : skip_list) - skipped_tables.insert(relation_id_to_name[relation_id]); + for (const auto & [relation_id, lsn] : skip_list) + { + if (lsn.empty()) + skipped_tables.emplace_back(std::make_pair(relation_id, relation_id_to_name[relation_id])); + } return false; } @@ -588,6 +640,24 @@ bool PostgreSQLReplicaConsumer::consume(NameSet & skipped_tables) return true; } + +void PostgreSQLReplicaConsumer::updateNested(const String & table_name, StoragePtr nested_storage) +{ + storages[table_name] = nested_storage; + auto & buffer = buffers.find(table_name)->second; + buffer.fillBuffer(nested_storage); +} + + +void PostgreSQLReplicaConsumer::updateSkipList(const std::unordered_map & tables_with_lsn) +{ + for (const auto & [relation_id, lsn] : tables_with_lsn) + { + if (!lsn.empty()) + skip_list[relation_id] = lsn; /// start_lsn + } +} + } #endif diff --git a/src/Storages/PostgreSQL/PostgreSQLReplicaConsumer.h b/src/Storages/PostgreSQL/PostgreSQLReplicaConsumer.h index 0f2062214c1..f8b214db4b7 100644 --- a/src/Storages/PostgreSQL/PostgreSQLReplicaConsumer.h +++ b/src/Storages/PostgreSQL/PostgreSQLReplicaConsumer.h @@ -18,9 +18,13 @@ #include "pqxx/pqxx" // Y_IGNORE +/// TODO: There is ALTER PUBLICATION command to dynamically add and remove tables for replicating (the command is transactional). +/// This can also be supported. (Probably, if in a replication stream comes a relation name, which does not currenly +/// exist in CH, it can be loaded from snapshot and handled the same way as some ddl by comparing lsn positions of wal, +/// but there is the case that a known table has been just renamed, then the previous version might be just dropped by user). 
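/// For reference, publication membership is adjusted on the PostgreSQL side with plain SQL;
/// this is standard PostgreSQL syntax, shown only to illustrate the TODO above
/// (publication and table names are placeholders):
///
///     ALTER PUBLICATION my_publication ADD TABLE new_table;
///     ALTER PUBLICATION my_publication DROP TABLE old_table;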
+ namespace DB { -using NestedReloadFunc = std::function; class PostgreSQLReplicaConsumer { @@ -39,7 +43,11 @@ public: void readMetadata(); - bool consume(NameSet & skipped_tables); + bool consume(std::vector> & skipped_tables); + + void updateNested(const String & table_name, StoragePtr nested_table); + + void updateSkipList(const std::unordered_map & tables_with_lsn); private: void synchronizationStream(); @@ -52,7 +60,9 @@ private: void processReplicationMessage(const char * replication_message, size_t size); - struct BufferData + bool isSyncAllowed(Int32 relation_id); + + struct Buffer { ExternalResultDescription description; MutableColumns columns; @@ -60,33 +70,14 @@ private: /// Needed for insertPostgreSQLValue() method to parse array std::unordered_map array_info; - BufferData(StoragePtr storage) - { - const auto storage_metadata = storage->getInMemoryMetadataPtr(); - description.init(storage_metadata->getSampleBlock()); - columns = description.sample_block.cloneEmptyColumns(); - const auto & storage_columns = storage_metadata->getColumns().getAllPhysical(); - auto insert_columns = std::make_shared(); - size_t idx = 0; - assert(description.sample_block.columns() == storage_columns.size()); - - for (const auto & column : storage_columns) - { - if (description.types[idx].first == ExternalResultDescription::ValueType::vtArray) - preparePostgreSQLArrayInfo(array_info, idx, description.sample_block.getByPosition(idx).type); - idx++; - - insert_columns->children.emplace_back(std::make_shared(column.name)); - } - - columnsAST = std::move(insert_columns); - } + Buffer(StoragePtr storage) { fillBuffer(storage); } + void fillBuffer(StoragePtr storage); }; - using Buffers = std::unordered_map; + using Buffers = std::unordered_map; - static void insertDefaultValue(BufferData & buffer, size_t column_idx); - static void insertValue(BufferData & buffer, const std::string & value, size_t column_idx); + static void insertDefaultValue(Buffer & buffer, size_t column_idx); + static void insertValue(Buffer & buffer, const std::string & value, size_t column_idx); enum class PostgreSQLQuery { @@ -95,7 +86,7 @@ private: DELETE }; - void readTupleData(BufferData & buffer, const char * message, size_t & pos, size_t size, PostgreSQLQuery type, bool old_value = false); + void readTupleData(Buffer & buffer, const char * message, size_t & pos, size_t size, PostgreSQLQuery type, bool old_value = false); static void readString(const char * message, size_t & pos, size_t size, String & result); static Int64 readInt64(const char * message, size_t & pos, size_t size); @@ -105,6 +96,14 @@ private: void markTableAsSkipped(Int32 relation_id, const String & relation_name); + /// lsn - log sequnce nuumber, like wal offset (64 bit). + Int64 getLSNValue(const std::string & lsn) + { + Int64 upper_half, lower_half; + std::sscanf(lsn.data(), "%lX/%lX", &upper_half, &lower_half); + return (upper_half << 32) + lower_half; + } + Poco::Logger * log; std::shared_ptr context; const std::string replication_slot_name, publication_name; @@ -116,6 +115,8 @@ private: const size_t max_block_size; std::string table_to_insert; + + /// List of tables which need to be synced after last replication stream. 
std::unordered_set tables_to_sync; Storages storages; @@ -132,8 +133,22 @@ private: SchemaData(Int16 number_of_columns_) : number_of_columns(number_of_columns_) {} }; + /// Cache for table schema data to be able to detect schema changes, because ddl is not + /// replicated with postgresql logical replication protocol, but some table schema info + /// is received if it is the first time we received dml message for given relation in current session or + /// if relation definition has changed since the last relation definition message. std::unordered_map schema_data; - std::unordered_set skip_list; + + /// skip_list contains relation ids for tables on which ddl was perfomed, which can break synchronization. + /// This breaking changes are detected in replication stream in according replication message and table is added to skip list. + /// After it is finished, a temporary replication slot is created with 'export snapshot' option, and start_lsn is returned. + /// Skipped tables are reloaded from snapshot (nested tables are also updated). Afterwards, if a replication message is + /// related to a table in a skip_list, we compare current lsn with start_lsn, which was returned with according snapshot. + /// If current_lsn >= table_start_lsn, we can safely remove table from skip list and continue its synchronization. + std::unordered_map skip_list; + + /// Mapping from table name which is currently in a skip_list to a table_start_lsn for future comparison with current_lsn. + //NameToNameMap start_lsn_for_skipped; }; } diff --git a/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp b/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp index c95b4bacad9..83afe658661 100644 --- a/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp +++ b/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp @@ -120,8 +120,15 @@ void PostgreSQLReplicationHandler::startSynchronization() { for (const auto & [table_name, storage] : storages) { - nested_storages[table_name] = storage->getNested(); - storage->setNestedLoaded(); + try + { + nested_storages[table_name] = storage->getNested(); + storage->setNestedLoaded(); + } + catch (...) + { + tryLogCurrentException(__PRETTY_FUNCTION__); + } } } @@ -143,13 +150,15 @@ void PostgreSQLReplicationHandler::startSynchronization() } -void PostgreSQLReplicationHandler::loadFromSnapshot(std::string & snapshot_name, Storages & sync_storages) +NameSet PostgreSQLReplicationHandler::loadFromSnapshot(std::string & snapshot_name, Storages & sync_storages) { + NameSet success_tables; for (const auto & storage_data : sync_storages) { try { auto tx = std::make_shared(*connection->conn()); + const auto & table_name = storage_data.first; /// Specific isolation level is required to read from snapshot. tx->set_variable("transaction_isolation", "'repeatable read'"); @@ -157,7 +166,7 @@ void PostgreSQLReplicationHandler::loadFromSnapshot(std::string & snapshot_name, std::string query_str = fmt::format("SET TRANSACTION SNAPSHOT '{}'", snapshot_name); tx->exec(query_str); - storage_data.second->createNestedIfNeeded([&]() { return fetchTableStructure(tx, storage_data.first); }); + storage_data.second->createNestedIfNeeded([&]() { return fetchTableStructure(tx, table_name); }); auto nested_storage = storage_data.second->getNested(); /// Load from snapshot, which will show table state before creation of replication slot. 
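/// The consistency guarantee relied on here: CREATE_REPLICATION_SLOT ... EXPORT_SNAPSHOT
/// returns a snapshot name that another session can adopt, so the initial dump and the
/// slot's start position line up exactly. A condensed sketch of the consuming side, as in
/// loadFromSnapshot() above ('repeatable read' isolation is required to import a snapshot):
///
///     tx->set_variable("transaction_isolation", "'repeatable read'");
///     tx->exec(fmt::format("SET TRANSACTION SNAPSHOT '{}'", snapshot_name));
///     /// from this point, SELECT * FROM table sees exactly the pre-slot state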
@@ -180,7 +189,12 @@ void PostgreSQLReplicationHandler::loadFromSnapshot(std::string & snapshot_name,
             copyData(input, *block_io.out);

             storage_data.second->setNestedLoaded();
-            nested_storages[storage_data.first] = nested_storage;
+            nested_storages[table_name] = nested_storage;
+
+            /// This is needed if this method is called from reloadFromSnapshot() method below.
+            success_tables.insert(table_name);
+            if (consumer)
+                consumer->updateNested(table_name, nested_storage);
         }
         catch (Exception & e)
         {
@@ -191,23 +205,21 @@ void PostgreSQLReplicationHandler::loadFromSnapshot(std::string & snapshot_name,
     }

     LOG_DEBUG(log, "Table dump end");
+    return success_tables;
 }


 void PostgreSQLReplicationHandler::consumerFunc()
 {
     auto start_time = std::chrono::steady_clock::now();
-    NameSet skipped_tables;
+    std::vector<std::pair<Int32, String>> skipped_tables;

     while (!stop_synchronization)
     {
         bool reschedule = !consumer->consume(skipped_tables);

         if (!skipped_tables.empty())
-        {
-            reloadFromSnapshot(skipped_tables);
-            skipped_tables.clear();
-        }
+            consumer->updateSkipList(reloadFromSnapshot(skipped_tables));

         if (reschedule)
             break;
@@ -350,7 +362,7 @@ void PostgreSQLReplicationHandler::shutdownFinal()
 }

-std::unordered_set<std::string> PostgreSQLReplicationHandler::fetchRequiredTables(PostgreSQLConnection::ConnectionPtr connection_)
+NameSet PostgreSQLReplicationHandler::fetchRequiredTables(PostgreSQLConnection::ConnectionPtr connection_)
 {
     if (tables_list.empty())
     {
@@ -364,7 +376,7 @@ std::unordered_set PostgreSQLReplicationHandler::fetchRequiredTable
 }

-std::unordered_set<std::string> PostgreSQLReplicationHandler::fetchTablesFromPublication(PostgreSQLConnection::ConnectionPtr connection_)
+NameSet PostgreSQLReplicationHandler::fetchTablesFromPublication(PostgreSQLConnection::ConnectionPtr connection_)
 {
     std::string query = fmt::format("SELECT tablename FROM pg_publication_tables WHERE pubname = '{}'", publication_name);
     std::unordered_set<std::string> tables;
@@ -385,19 +397,17 @@ PostgreSQLTableStructure PostgreSQLReplicationHandler::fetchTableStructure(
 }

-/// TODO: After a temporary replication slot is created, we have a start lsn. In the replication stream,
-/// when we get a message for a table and this table turns out to be in the skip list, check
-/// whether the current lsn position is >= the start lsn position for the skipped table. If so, we can
-/// safely remove this table from the skip list and consume changes without any loss.
-std::string PostgreSQLReplicationHandler::reloadFromSnapshot(NameSet & table_names) +std::unordered_map PostgreSQLReplicationHandler::reloadFromSnapshot( + const std::vector> & relation_data) { - String start_lsn; + std::unordered_map tables_start_lsn; try { auto tx = std::make_shared(*connection->conn()); Storages sync_storages; - for (const auto & table_name : table_names) + for (const auto & relation : relation_data) { + const auto & table_name = relation.second; auto * storage = storages[table_name]; sync_storages[table_name] = storage; storage->dropNested(); @@ -408,19 +418,24 @@ std::string PostgreSQLReplicationHandler::reloadFromSnapshot(NameSet & table_nam replication_connection->conn()->set_variable("default_transaction_isolation", "'repeatable read'"); auto ntx = std::make_shared(*replication_connection->conn()); - std::string snapshot_name; + std::string snapshot_name, start_lsn; createReplicationSlot(ntx, start_lsn, snapshot_name, true); ntx->commit(); - loadFromSnapshot(snapshot_name, sync_storages); + auto success_tables = loadFromSnapshot(snapshot_name, sync_storages); + for (const auto & relation : relation_data) + { + if (success_tables.find(relation.second) != success_tables.end()) + tables_start_lsn[relation.first] = start_lsn; + } } catch (...) { tryLogCurrentException(__PRETTY_FUNCTION__); } - return start_lsn; + return tables_start_lsn; } } diff --git a/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.h b/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.h index bf7b80bbe9b..f428ed9720a 100644 --- a/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.h +++ b/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.h @@ -36,7 +36,7 @@ public: void addStorage(const std::string & table_name, StoragePostgreSQLReplica * storage); - std::unordered_set fetchRequiredTables(PostgreSQLConnection::ConnectionPtr connection_); + NameSet fetchRequiredTables(PostgreSQLConnection::ConnectionPtr connection_); PostgreSQLTableStructure fetchTableStructure(std::shared_ptr tx, const std::string & table_name); @@ -62,11 +62,11 @@ private: void consumerFunc(); - void loadFromSnapshot(std::string & snapshot_name, Storages & sync_storages); + NameSet loadFromSnapshot(std::string & snapshot_name, Storages & sync_storages); - std::unordered_set fetchTablesFromPublication(PostgreSQLConnection::ConnectionPtr connection_); + NameSet fetchTablesFromPublication(PostgreSQLConnection::ConnectionPtr connection_); - std::string reloadFromSnapshot(NameSet & table_names); + std::unordered_map reloadFromSnapshot(const std::vector> & relation_data); Poco::Logger * log; std::shared_ptr context; diff --git a/tests/integration/test_postgresql_replica_database_engine/test.py b/tests/integration/test_postgresql_replica_database_engine/test.py index 1b6887ace51..ec82a1050c2 100644 --- a/tests/integration/test_postgresql_replica_database_engine/test.py +++ b/tests/integration/test_postgresql_replica_database_engine/test.py @@ -316,7 +316,9 @@ def test_table_schema_changes(started_cluster): check_tables_are_synchronized('postgresql_replica_{}'.format(i)); expected = instance.query("SELECT key, value1, value3 FROM test_database.postgresql_replica_3 ORDER BY key"); - cursor.execute("ALTER TABLE postgresql_replica_{} DROP COLUMN value2".format(random.randint(0, 4))) + + altered_table = random.randint(0, 4) + cursor.execute("ALTER TABLE postgresql_replica_{} DROP COLUMN value2".format(altered_table)) for i in range(NUM_TABLES): cursor.execute("INSERT INTO postgresql_replica_{} VALUES (50, {}, {})".format(i, i, i)) 
@@ -325,6 +327,15 @@ def test_table_schema_changes(started_cluster): for i in range(NUM_TABLES): check_tables_are_synchronized('postgresql_replica_{}'.format(i)); + for i in range(NUM_TABLES): + if i != altered_table: + instance.query("INSERT INTO postgres_database.postgresql_replica_{} SELECT 51 + number, {}, {}, {} from numbers(49)".format(i, i, i, i)) + else: + instance.query("INSERT INTO postgres_database.postgresql_replica_{} SELECT 51 + number, {}, {} from numbers(49)".format(i, i, i)) + + for i in range(NUM_TABLES): + check_tables_are_synchronized('postgresql_replica_{}'.format(i)); + if __name__ == '__main__': cluster.start() From 8a48bb24ce683c843f9fe85b2c1f98f698bac0c0 Mon Sep 17 00:00:00 2001 From: kssenii Date: Sun, 21 Feb 2021 23:13:58 +0000 Subject: [PATCH 045/931] Fix typos, comments --- src/Databases/PostgreSQL/DatabasePostgreSQLReplica.cpp | 3 --- src/Storages/PostgreSQL/PostgreSQLReplicaConsumer.h | 4 ++-- src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp | 4 ++-- src/Storages/PostgreSQL/StoragePostgreSQLReplica.cpp | 1 - 4 files changed, 4 insertions(+), 8 deletions(-) diff --git a/src/Databases/PostgreSQL/DatabasePostgreSQLReplica.cpp b/src/Databases/PostgreSQL/DatabasePostgreSQLReplica.cpp index 35d808c0dec..9105dc9ba25 100644 --- a/src/Databases/PostgreSQL/DatabasePostgreSQLReplica.cpp +++ b/src/Databases/PostgreSQL/DatabasePostgreSQLReplica.cpp @@ -88,7 +88,6 @@ void DatabasePostgreSQLReplica::startSynchronization() : (global_context.getSettingsRef().max_insert_block_size.value), global_context.getMacros()->expand(settings->postgresql_tables_list.value)); - /// TODO: may be no need to always fetch std::unordered_set tables_to_replicate = replication_handler->fetchRequiredTables(connection->conn()); for (const auto & table_name : tables_to_replicate) @@ -179,7 +178,6 @@ void DatabasePostgreSQLReplica::createTable(const Context & context, const if (storage_set.find("ReplacingMergeTree") != storage_set.end()) { Base::createTable(context, name, table, query); - /// TODO: Update table cached tables list or not return; } } @@ -191,7 +189,6 @@ void DatabasePostgreSQLReplica::createTable(const Context & context, const template void DatabasePostgreSQLReplica::dropTable(const Context & context, const String & name, bool no_delay) { - /// TODO: If called from non sync thread, add dropped storage to skip list Base::dropTable(context, name, no_delay); } diff --git a/src/Storages/PostgreSQL/PostgreSQLReplicaConsumer.h b/src/Storages/PostgreSQL/PostgreSQLReplicaConsumer.h index f8b214db4b7..55f8a949cd1 100644 --- a/src/Storages/PostgreSQL/PostgreSQLReplicaConsumer.h +++ b/src/Storages/PostgreSQL/PostgreSQLReplicaConsumer.h @@ -19,7 +19,7 @@ /// TODO: There is ALTER PUBLICATION command to dynamically add and remove tables for replicating (the command is transactional). -/// This can also be supported. (Probably, if in a replication stream comes a relation name, which does not currenly +/// This can also be supported. (Probably, if in a replication stream comes a relation name, which does not currently /// exist in CH, it can be loaded from snapshot and handled the same way as some ddl by comparing lsn positions of wal, /// but there is the case that a known table has been just renamed, then the previous version might be just dropped by user). @@ -139,7 +139,7 @@ private: /// if relation definition has changed since the last relation definition message. 
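Two bits of hedged background on the message handling referenced in the comment above (all helper names below are invented for illustration). First, a relation definition change is detected by comparing a per-relation fingerprint of (data type id, type modifier) pairs against the previous Relation message:

#include <cstdint>
#include <utility>
#include <vector>

/// A Relation message carries, per column, the type OID and a type modifier (e.g. the n in
/// varchar(n)); if either differs from what was recorded for this relation id, or the column
/// count changed, the table is treated as altered and goes to the skip list.
struct SchemaData
{
    int16_t number_of_columns = 0;
    std::vector<std::pair<int32_t, int32_t>> column_identifiers; /// (data type id, type modifier)
};

bool relationDefinitionChanged(const SchemaData & known, const SchemaData & incoming)
{
    return known.number_of_columns != incoming.number_of_columns
        || known.column_identifiers != incoming.column_identifiers;
}

Second, the per-column values themselves arrive as pgoutput TupleData: an Int16 column count, then one identifier byte per column - 'n' for NULL, 'u' for an unchanged TOAST value (no payload is sent), 't' for a text-format value prefixed by an Int32 length - with integers in network byte order. A sketch over a raw, already hex-decoded payload:

#include <cstddef>
#include <cstdint>
#include <optional>
#include <string>
#include <vector>

/// Big-endian read of 'bytes' bytes (pgoutput integers are in network byte order).
static int32_t readBE(const unsigned char * p, size_t bytes)
{
    int32_t result = 0;
    for (size_t i = 0; i < bytes; ++i)
        result = (result << 8) | p[i];
    return result;
}

/// One entry per column; std::nullopt covers both 'n' (NULL) and 'u' (unchanged TOAST
/// value), neither of which carries a payload in the stream.
std::vector<std::optional<std::string>> readTupleColumns(const unsigned char * data, size_t & pos)
{
    auto num_columns = static_cast<int16_t>(readBE(data + pos, 2));
    pos += 2;

    std::vector<std::optional<std::string>> values;
    for (int16_t i = 0; i < num_columns; ++i)
    {
        char identifier = static_cast<char>(data[pos++]);
        if (identifier == 'n' || identifier == 'u')
        {
            values.emplace_back(std::nullopt);
            continue;
        }
        /// 't': Int32 length, then the value itself in text format.
        auto len = static_cast<size_t>(readBE(data + pos, 4));
        pos += 4;
        values.emplace_back(std::string(reinterpret_cast<const char *>(data + pos), len));
        pos += len;
    }
    return values;
}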
 std::unordered_map<Int32, SchemaData> schema_data;
 
-    /// skip_list contains relation ids for tables on which ddl was perfomed, which can break synchronization.
+    /// skip_list contains relation ids for tables on which ddl was performed, which can break synchronization.
     /// These breaking changes are detected in the replication stream via the corresponding replication message, and the table is added to the skip list.
     /// After it is finished, a temporary replication slot is created with 'export snapshot' option, and start_lsn is returned.
     /// Skipped tables are reloaded from snapshot (nested tables are also updated). Afterwards, if a replication message is
diff --git a/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp b/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp
index 83afe658661..eccaa3c7acf 100644
--- a/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp
+++ b/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp
@@ -420,9 +420,9 @@ std::unordered_map PostgreSQLReplicationHandler::reloadFromSnapsh
         auto ntx = std::make_shared<pqxx::nontransaction>(*replication_connection->conn());
         std::string snapshot_name, start_lsn;
         createReplicationSlot(ntx, start_lsn, snapshot_name, true);
-        ntx->commit();
-
+        /// This snapshot is valid up to the end of the transaction which exported it.
         auto success_tables = loadFromSnapshot(snapshot_name, sync_storages);
+        ntx->commit();
 
         for (const auto & relation : relation_data)
         {
diff --git a/src/Storages/PostgreSQL/StoragePostgreSQLReplica.cpp b/src/Storages/PostgreSQL/StoragePostgreSQLReplica.cpp
index 0f895c21ae4..4b2e746a557 100644
--- a/src/Storages/PostgreSQL/StoragePostgreSQLReplica.cpp
+++ b/src/Storages/PostgreSQL/StoragePostgreSQLReplica.cpp
@@ -374,7 +374,6 @@ Pipe StoragePostgreSQLReplica::read(
     size_t max_block_size,
     unsigned num_streams)
 {
-    /// TODO: are there other places where this lock is needed
     std::unique_lock lock(nested_mutex, std::defer_lock);
 
     if (nested_loaded && lock.try_lock())

From ace76bb7fa32133c26264ac693581b98d41f6305 Mon Sep 17 00:00:00 2001
From: kssenii
Date: Mon, 22 Feb 2021 12:35:53 +0000
Subject: [PATCH 046/931] Better, remove debug logs

---
 src/Core/Settings.h                           |   1 +
 .../PostgreSQL/DatabasePostgreSQLReplica.cpp  |   7 +-
 src/Interpreters/InterpreterCreateQuery.cpp   |   6 +
 .../PostgreSQL/PostgreSQLReplicaConsumer.cpp  | 287 ++++++++++--------
 .../PostgreSQL/PostgreSQLReplicaConsumer.h    |  18 +-
 .../PostgreSQL/PostgreSQLReplicaSettings.h    |   5 +-
 .../PostgreSQLReplicationHandler.cpp          |  35 ++-
 .../PostgreSQL/PostgreSQLReplicationHandler.h |   5 +-
 .../PostgreSQL/StoragePostgreSQLReplica.cpp   |   9 +-
 .../configs/users.xml                         |   8 +
 .../test.py                                   |  41 ++-
 .../test_storage_postgresql_replica/test.py   |  25 +-
 12 files changed, 264 insertions(+), 183 deletions(-)
 create mode 100644 tests/integration/test_postgresql_replica_database_engine/configs/users.xml

diff --git a/src/Core/Settings.h b/src/Core/Settings.h
index 2d7b7811390..aaf5da3bab7 100644
--- a/src/Core/Settings.h
+++ b/src/Core/Settings.h
@@ -402,6 +402,7 @@ class IColumn;
     M(Bool, cast_keep_nullable, false, "CAST operator keep Nullable for result data type", 0) \
     M(Bool, alter_partition_verbose_result, false, "Output information about affected parts.
Currently works only for FREEZE and ATTACH commands.", 0) \ M(Bool, allow_experimental_database_materialize_mysql, false, "Allow to create database with Engine=MaterializeMySQL(...).", 0) \ + M(Bool, allow_experimental_database_postgresql_replica, false, "Allow to create database with Engine=PostgreSQLReplica(...).", 0) \ M(Bool, system_events_show_zero_values, false, "Include all metrics, even with zero values", 0) \ M(MySQLDataTypesSupport, mysql_datatypes_support_level, 0, "Which MySQL types should be converted to corresponding ClickHouse types (rather than being represented as String). Can be empty or any combination of 'decimal' or 'datetime64'. When empty MySQL's DECIMAL and DATETIME/TIMESTAMP with non-zero precision are seen as String on ClickHouse's side.", 0) \ M(Bool, optimize_trivial_insert_select, true, "Optimize trivial 'INSERT INTO table SELECT ... FROM TABLES' query", 0) \ diff --git a/src/Databases/PostgreSQL/DatabasePostgreSQLReplica.cpp b/src/Databases/PostgreSQL/DatabasePostgreSQLReplica.cpp index 9105dc9ba25..2b491e62fab 100644 --- a/src/Databases/PostgreSQL/DatabasePostgreSQLReplica.cpp +++ b/src/Databases/PostgreSQL/DatabasePostgreSQLReplica.cpp @@ -83,10 +83,11 @@ void DatabasePostgreSQLReplica::startSynchronization() connection->conn_str(), metadata_path + METADATA_SUFFIX, std::make_shared(global_context), - settings->postgresql_max_block_size.changed - ? settings->postgresql_max_block_size.value + settings->postgresql_replica_max_block_size.changed + ? settings->postgresql_replica_max_block_size.value : (global_context.getSettingsRef().max_insert_block_size.value), - global_context.getMacros()->expand(settings->postgresql_tables_list.value)); + settings->postgresql_replica_allow_minimal_ddl, true, + settings->postgresql_replica_tables_list.value); std::unordered_set tables_to_replicate = replication_handler->fetchRequiredTables(connection->conn()); diff --git a/src/Interpreters/InterpreterCreateQuery.cpp b/src/Interpreters/InterpreterCreateQuery.cpp index 2b1dddde78c..46af167eaa4 100644 --- a/src/Interpreters/InterpreterCreateQuery.cpp +++ b/src/Interpreters/InterpreterCreateQuery.cpp @@ -214,6 +214,12 @@ BlockIO InterpreterCreateQuery::createDatabase(ASTCreateQuery & create) "Enable allow_experimental_database_replicated to use it.", ErrorCodes::UNKNOWN_DATABASE_ENGINE); } + if (create.storage->engine->name == "PostgreSQLReplica" && !context.getSettingsRef().allow_experimental_database_postgresql_replica && !internal) + { + throw Exception("PostgreSQLReplica is an experimental database engine. 
" + "Enable allow_experimental_database_postgresql_replica to use it.", ErrorCodes::UNKNOWN_DATABASE_ENGINE); + } + DatabasePtr database = DatabaseFactory::get(create, metadata_path / "", context); if (create.uuid != UUIDHelpers::Nil) diff --git a/src/Storages/PostgreSQL/PostgreSQLReplicaConsumer.cpp b/src/Storages/PostgreSQL/PostgreSQLReplicaConsumer.cpp index 9c76db7c0e6..91e48e9c358 100644 --- a/src/Storages/PostgreSQL/PostgreSQLReplicaConsumer.cpp +++ b/src/Storages/PostgreSQL/PostgreSQLReplicaConsumer.cpp @@ -30,6 +30,8 @@ PostgreSQLReplicaConsumer::PostgreSQLReplicaConsumer( const std::string & metadata_path, const std::string & start_lsn, const size_t max_block_size_, + bool allow_minimal_ddl_, + bool is_postgresql_replica_database_engine_, Storages storages_) : log(&Poco::Logger::get("PostgreSQLReaplicaConsumer")) , context(context_) @@ -39,6 +41,8 @@ PostgreSQLReplicaConsumer::PostgreSQLReplicaConsumer( , connection(std::move(connection_)) , current_lsn(start_lsn) , max_block_size(max_block_size_) + , allow_minimal_ddl(allow_minimal_ddl_) + , is_postgresql_replica_database_engine(is_postgresql_replica_database_engine_) , storages(storages_) { for (const auto & [table_name, storage] : storages) @@ -188,12 +192,9 @@ void PostgreSQLReplicaConsumer::readTupleData( Buffer & buffer, const char * message, size_t & pos, [[maybe_unused]] size_t size, PostgreSQLQuery type, bool old_value) { Int16 num_columns = readInt16(message, pos, size); - LOG_DEBUG(log, "number of columns {}", num_columns); auto proccess_column_value = [&](Int8 identifier, Int16 column_idx) { - char id = identifier; - LOG_DEBUG(log, "Identifier: {}", id); switch (identifier) { case 'n': /// NULL @@ -212,7 +213,10 @@ void PostgreSQLReplicaConsumer::readTupleData( insertValue(buffer, value, column_idx); break; } - case 'u': /// Toasted (unchanged) value TODO:! + case 'u': /// TOAST value && unchanged at the same time. Actual value is not sent. + /// TOAST values are not supported. (TOAST values are values that are considered in postgres + /// to be too large to be stored directly) + insertDefaultValue(buffer, column_idx); break; } }; @@ -238,7 +242,8 @@ void PostgreSQLReplicaConsumer::readTupleData( } case PostgreSQLQuery::UPDATE: { - if (old_value) /// Only if replica identity is set to full + /// Process old value in case changed value is a primary key. 
+ if (old_value) buffer.columns[num_columns]->insert(Int8(-1)); else buffer.columns[num_columns]->insert(Int8(1)); @@ -258,8 +263,6 @@ void PostgreSQLReplicaConsumer::processReplicationMessage(const char * replicati size_t pos = 2; char type = readInt8(replication_message, pos, size); - LOG_DEBUG(log, "Type of replication message: {}", type); - switch (type) { case 'B': // Begin @@ -268,103 +271,6 @@ void PostgreSQLReplicaConsumer::processReplicationMessage(const char * replicati readInt64(replication_message, pos, size); /// Int64 transaction commit timestamp break; } - case 'C': // Commit - { - readInt8(replication_message, pos, size); /// unused flags - readInt64(replication_message, pos, size); /// Int64 commit lsn - readInt64(replication_message, pos, size); /// Int64 transaction end lsn - readInt64(replication_message, pos, size); /// Int64 transaction commit timestamp - - final_lsn = current_lsn; - LOG_DEBUG(log, "Commit lsn: {}", getLSNValue(current_lsn)); - break; - } - case 'O': // Origin - break; - case 'R': // Relation - { - Int32 relation_id = readInt32(replication_message, pos, size); - - String relation_namespace, relation_name; - - readString(replication_message, pos, size, relation_namespace); - readString(replication_message, pos, size, relation_name); - - table_to_insert = relation_name; - tables_to_sync.insert(table_to_insert); - - /// TODO: Add replica identity settings to metadata (needed for update) - Int8 replica_identity = readInt8(replication_message, pos, size); - Int16 num_columns = readInt16(replication_message, pos, size); - - LOG_DEBUG(log, - "INFO: relation id: {}, namespace: {}, relation name: {}, replica identity: {}, columns number: {}", - relation_id, relation_namespace, relation_name, replica_identity, num_columns); - - if (!isSyncAllowed(relation_id)) - return; - - Int8 key; /// Flags. 0 or 1 (if part of the key). Not needed for now. 
-            Int32 data_type_id;
-            Int32 type_modifier; /// For example, n in varchar(n)
-
-            bool new_relation_definition = false;
-            if (schema_data.find(relation_id) == schema_data.end())
-            {
-                relation_id_to_name[relation_id] = relation_name;
-                schema_data.emplace(relation_id, SchemaData(num_columns));
-                new_relation_definition = true;
-            }
-
-            auto & current_schema_data = schema_data.find(relation_id)->second;
-
-            if (current_schema_data.number_of_columns != num_columns)
-            {
-                markTableAsSkipped(relation_id, relation_name);
-                return;
-            }
-
-            for (uint16_t i = 0; i < num_columns; ++i)
-            {
-                String column_name;
-                key = readInt8(replication_message, pos, size);
-                readString(replication_message, pos, size, column_name);
-
-                data_type_id = readInt32(replication_message, pos, size);
-                type_modifier = readInt32(replication_message, pos, size);
-
-                LOG_DEBUG(log,
-                        "Key: {}, column name: {}, data type id: {}, type modifier: {}",
-                        key, column_name, data_type_id, type_modifier);
-
-                if (new_relation_definition)
-                {
-                    current_schema_data.column_identifiers.emplace_back(std::make_tuple(data_type_id, type_modifier));
-                }
-                else
-                {
-                    if (current_schema_data.column_identifiers[i].first != data_type_id
-                        || current_schema_data.column_identifiers[i].second != type_modifier)
-                    {
-                        markTableAsSkipped(relation_id, relation_name);
-                        return;
-                    }
-                }
-            }
-
-            if (storages.find(table_to_insert) == storages.end())
-            {
-                throw Exception(ErrorCodes::LOGICAL_ERROR,
-                        "Storage for table {} does not exist, but is included in replication stream", table_to_insert);
-            }
-
-            [[maybe_unused]] auto buffer_iter = buffers.find(table_to_insert);
-            assert(buffer_iter != buffers.end());
-
-            break;
-        }
-        case 'Y': // Type
-            break;
         case 'I': // Insert
         {
             Int32 relation_id = readInt32(replication_message, pos, size);
@@ -373,46 +279,44 @@ void PostgreSQLReplicaConsumer::processReplicationMessage(const char * replicati
                 return;
 
             Int8 new_tuple = readInt8(replication_message, pos, size);
+            const auto & table_name = relation_id_to_name[relation_id];
+            auto buffer = buffers.find(table_name);
 
-            LOG_DEBUG(log, "relationID: {}, newTuple: {}, current insert table: {}", relation_id, new_tuple, table_to_insert);
-
-            auto buffer = buffers.find(table_to_insert);
             if (buffer == buffers.end())
-            {
-                throw Exception(ErrorCodes::UNKNOWN_TABLE,
-                        "Buffer for table {} does not exist", table_to_insert);
-            }
+                throw Exception(ErrorCodes::UNKNOWN_TABLE, "Buffer for table {} does not exist", table_name);
+
+            if (new_tuple)
+                readTupleData(buffer->second, replication_message, pos, size, PostgreSQLQuery::INSERT);
 
-            readTupleData(buffer->second, replication_message, pos, size, PostgreSQLQuery::INSERT);
             break;
         }
         case 'U': // Update
         {
             Int32 relation_id = readInt32(replication_message, pos, size);
 
-            LOG_DEBUG(log, "relationID {}, current insert table {}", relation_id, table_to_insert);
-
             if (!isSyncAllowed(relation_id))
                 return;
 
-            auto buffer = buffers.find(table_to_insert);
+            const auto & table_name = relation_id_to_name[relation_id];
+            auto buffer = buffers.find(table_name);
+
             auto proccess_identifier = [&](Int8 identifier) -> bool
             {
-                char id = identifier;
-                LOG_DEBUG(log, "Identifier: {}", id);
                 bool read_next = true;
                 switch (identifier)
                 {
-                    case 'K': /// TODO:!
+                    case 'K':
                    {
-                        /// Only if changed column(s) are part of replica identity index
+                        /// Only if changed column(s) are part of the replica identity index (for now it can only
+                        /// be the primary key - the default for a replica identity index). In this case, first comes a tuple
+                        /// with the old replica identity key values and all other values as nulls. Then comes the full new row.
+                        readTupleData(buffer->second, replication_message, pos, size, PostgreSQLQuery::UPDATE, true);
                         break;
                     }
                     case 'O':
                     {
-                        /// Old row. Only of replica identity is set to full.
-                        /// (For the case when a table does not have any primary key.)
-                        /// TODO: Need to find suitable order_by for nested table (Now it throws if no primary key)
+                        /// Old row. Only if replica identity is set to full. (For the case when a table does not have any
+                        /// primary key, for now not supported, requires finding suitable order by key(s) for the nested table.)
                         readTupleData(buffer->second, replication_message, pos, size, PostgreSQLQuery::UPDATE, true);
                         break;
                     }
@@ -443,14 +347,118 @@ void PostgreSQLReplicaConsumer::processReplicationMessage(const char * replicati
             if (!isSyncAllowed(relation_id))
                 return;
 
-            Int8 full_replica_identity = readInt8(replication_message, pos, size);
+            /// 0 or 1 if replica identity is set to full. For now only default replica identity is supported (with primary keys).
+            readInt8(replication_message, pos, size);
 
-            LOG_DEBUG(log, "relationID: {}, full replica identity: {}", relation_id, full_replica_identity);
-
-            auto buffer = buffers.find(table_to_insert);
+            const auto & table_name = relation_id_to_name[relation_id];
+            auto buffer = buffers.find(table_name);
             readTupleData(buffer->second, replication_message, pos, size, PostgreSQLQuery::DELETE);
+
             break;
         }
+        case 'C': // Commit
+        {
+            readInt8(replication_message, pos, size);  /// unused flags
+            readInt64(replication_message, pos, size); /// Int64 commit lsn
+            readInt64(replication_message, pos, size); /// Int64 transaction end lsn
+            readInt64(replication_message, pos, size); /// Int64 transaction commit timestamp
+
+            final_lsn = current_lsn;
+            LOG_DEBUG(log, "Commit lsn: {}", getLSNValue(current_lsn)); /// Will be removed
+
+            break;
+        }
+        case 'R': // Relation
+        {
+            Int32 relation_id = readInt32(replication_message, pos, size);
+
+            String relation_namespace, relation_name;
+
+            readString(replication_message, pos, size, relation_namespace);
+            readString(replication_message, pos, size, relation_name);
+
+            if (!isSyncAllowed(relation_id))
+                return;
+
+            /// 'd' - default (primary key if any)
+            /// 'n' - nothing
+            /// 'f' - all columns (set replica identity full)
+            /// 'i' - user defined index with indisreplident set
+            /// For database engine now supported only 'd', for table engine 'f' is also allowed.
+            char replica_identity = readInt8(replication_message, pos, size);
+
+            if (replica_identity != 'd' && (replica_identity != 'f' || is_postgresql_replica_database_engine))
+            {
+                LOG_WARNING(log,
+                    "Table has replica identity {} - not supported. "
+                    "For database engine only default (with primary keys) replica identity is supported. "
+                    "For table engine full replica identity is also supported. Table will be skipped.", replica_identity);
+                markTableAsSkipped(relation_id, relation_name);
+                return;
+            }
+
+            Int16 num_columns = readInt16(replication_message, pos, size);
+
+            Int32 data_type_id;
+            Int32 type_modifier; /// For example, n in varchar(n)
+
+            bool new_relation_definition = false;
+            if (schema_data.find(relation_id) == schema_data.end())
+            {
+                relation_id_to_name[relation_id] = relation_name;
+                schema_data.emplace(relation_id, SchemaData(num_columns));
+                new_relation_definition = true;
+            }
+
+            auto & current_schema_data = schema_data.find(relation_id)->second;
+
+            if (current_schema_data.number_of_columns != num_columns)
+            {
+                markTableAsSkipped(relation_id, relation_name);
+                return;
+            }
+
+            for (uint16_t i = 0; i < num_columns; ++i)
+            {
+                String column_name;
+                readInt8(replication_message, pos, size); /// Marks column as part of replica identity index
+                readString(replication_message, pos, size, column_name);
+
+                data_type_id = readInt32(replication_message, pos, size);
+                type_modifier = readInt32(replication_message, pos, size);
+
+                if (new_relation_definition)
+                {
+                    current_schema_data.column_identifiers.emplace_back(std::make_tuple(data_type_id, type_modifier));
+                }
+                else
+                {
+                    if (current_schema_data.column_identifiers[i].first != data_type_id
+                        || current_schema_data.column_identifiers[i].second != type_modifier)
+                    {
+                        markTableAsSkipped(relation_id, relation_name);
+                        return;
+                    }
+                }
+            }
+
+            if (storages.find(relation_name) == storages.end())
+            {
+                throw Exception(ErrorCodes::LOGICAL_ERROR,
+                    "Storage for table {} does not exist, but is included in replication stream", relation_name);
+            }
+
+            [[maybe_unused]] auto buffer_iter = buffers.find(relation_name);
+            assert(buffer_iter != buffers.end());
+
+            tables_to_sync.insert(relation_name);
+
+            break;
+        }
+        case 'O': // Origin
+            break;
+        case 'Y': // Type
+            break;
         case 'T': // Truncate
             break;
         default:
@@ -533,8 +541,10 @@ bool PostgreSQLReplicaConsumer::isSyncAllowed(Int32 relation_id)
 
     if (getLSNValue(current_lsn) >= getLSNValue(table_start_lsn))
     {
+        LOG_TRACE(log, "Synchronization is resumed for table: {} (start_lsn: {})",
+                relation_id_to_name[relation_id], table_start_lsn);
+
         skip_list.erase(table_with_lsn);
-        LOG_DEBUG(log, "Sync is allowed for relation id: {}", relation_id);
 
         return true;
     }
@@ -549,7 +559,10 @@ void PostgreSQLReplicaConsumer::markTableAsSkipped(Int32 relation_id, const Stri
     schema_data.erase(relation_id);
     auto & buffer = buffers.find(relation_name)->second;
     buffer.columns = buffer.description.sample_block.cloneEmptyColumns();
-    LOG_DEBUG(log, "Table {} is skipped temporarily", relation_name);
+    if (!allow_minimal_ddl)
+        LOG_WARNING(log, "Table {} is skipped because the table schema has changed", relation_name);
+    else
+        LOG_TRACE(log, "Table {} is skipped temporarily. ID: {}", relation_name, relation_id);
 }
 
@@ -593,7 +606,6 @@ bool PostgreSQLReplicaConsumer::readFromReplicationSlot()
             slot_empty = false;
 
             current_lsn = (*row)[0];
-            LOG_DEBUG(log, "Current lsn: {}", getLSNValue(current_lsn));
 
             processReplicationMessage((*row)[1].c_str(), (*row)[1].size());
         }
@@ -619,19 +631,26 @@ bool PostgreSQLReplicaConsumer::readFromReplicationSlot()
         return false;
     }
 
-    syncTables(tx);
+    if (!tables_to_sync.empty())
+    {
+        syncTables(tx);
+    }
+
     return true;
 }
 
 
 bool PostgreSQLReplicaConsumer::consume(std::vector<std::pair<Int32, String>> & skipped_tables)
 {
-    if (!readFromReplicationSlot() || !skip_list.empty())
+    if (!readFromReplicationSlot())
     {
-        for (const auto & [relation_id, lsn] : skip_list)
+        if (allow_minimal_ddl && !skip_list.empty())
         {
-            if (lsn.empty())
-                skipped_tables.emplace_back(std::make_pair(relation_id, relation_id_to_name[relation_id]));
+            for (const auto & [relation_id, lsn] : skip_list)
+            {
+                if (lsn.empty())
+                    skipped_tables.emplace_back(std::make_pair(relation_id, relation_id_to_name[relation_id]));
+            }
         }
 
         return false;
diff --git a/src/Storages/PostgreSQL/PostgreSQLReplicaConsumer.h b/src/Storages/PostgreSQL/PostgreSQLReplicaConsumer.h
index 55f8a949cd1..720e2cf72d5 100644
--- a/src/Storages/PostgreSQL/PostgreSQLReplicaConsumer.h
+++ b/src/Storages/PostgreSQL/PostgreSQLReplicaConsumer.h
@@ -39,19 +39,19 @@ public:
         const std::string & metadata_path,
         const std::string & start_lsn,
         const size_t max_block_size_,
+        bool allow_minimal_ddl_,
+        bool is_postgresql_replica_database_engine_,
         Storages storages_);
 
     void readMetadata();
 
     bool consume(std::vector<std::pair<Int32, String>> & skipped_tables);
 
-    void updateNested(const String & table_name, StoragePtr nested_table);
+    void updateNested(const String & table_name, StoragePtr nested_storage);
 
     void updateSkipList(const std::unordered_map<Int32, String> & tables_with_lsn);
 
 private:
-    void synchronizationStream();
-
     bool readFromReplicationSlot();
 
     void syncTables(std::shared_ptr<pqxx::nontransaction> tx);
 
@@ -99,9 +99,9 @@ private:
     /// lsn - log sequence number, like wal offset (64 bit).
     Int64 getLSNValue(const std::string & lsn)
    {
-        Int64 upper_half, lower_half;
-        std::sscanf(lsn.data(), "%lX/%lX", &upper_half, &lower_half);
-        return (upper_half << 32) + lower_half;
+        UInt32 upper_half, lower_half;
+        std::sscanf(lsn.data(), "%X/%X", &upper_half, &lower_half);
+        return (static_cast<Int64>(upper_half) << 32) + lower_half;
    }
 
     Poco::Logger * log;
@@ -113,6 +113,7 @@ private:
     std::string current_lsn, final_lsn;
     const size_t max_block_size;
+    bool allow_minimal_ddl, is_postgresql_replica_database_engine;
 
     std::string table_to_insert;
 
@@ -145,10 +146,9 @@ private:
     /// Skipped tables are reloaded from snapshot (nested tables are also updated). Afterwards, if a replication message is
     /// related to a table in a skip_list, we compare current lsn with start_lsn, which was returned with the corresponding snapshot.
     /// If current_lsn >= table_start_lsn, we can safely remove table from skip list and continue its synchronization.
+    /// No message related to a reloaded table will be missed, because messages are not consumed in the meantime,
+    /// i.e. we will not miss the first start_lsn position for a reloaded table.
     std::unordered_map<Int32, String> skip_list;
-
-    /// Mapping from table name which is currently in a skip_list to a table_start_lsn for future comparison with current_lsn.
-    //NameToNameMap start_lsn_for_skipped;
 };
 
 }
diff --git a/src/Storages/PostgreSQL/PostgreSQLReplicaSettings.h b/src/Storages/PostgreSQL/PostgreSQLReplicaSettings.h
index 5ea2a5cd1f6..0f084ac6108 100644
--- a/src/Storages/PostgreSQL/PostgreSQLReplicaSettings.h
+++ b/src/Storages/PostgreSQL/PostgreSQLReplicaSettings.h
@@ -14,8 +14,9 @@ namespace DB
 
 #define LIST_OF_POSTGRESQL_REPLICA_SETTINGS(M) \
-    M(UInt64, postgresql_max_block_size, 0, "Number of row collected before flushing data into table.", 0) \
-    M(String, postgresql_tables_list, "", "List of tables for PostgreSQLReplica database engine", 0) \
+    M(UInt64, postgresql_replica_max_block_size, 0, "Number of rows collected before flushing data into table.", 0) \
+    M(String, postgresql_replica_tables_list, "", "List of tables for PostgreSQLReplica database engine", 0) \
+    M(Bool, postgresql_replica_allow_minimal_ddl, 0, "Allow to track minimal possible ddl. By default, a table is added to a skip list after ddl", 0) \
 
 DECLARE_SETTINGS_TRAITS(PostgreSQLReplicaSettingsTraits, LIST_OF_POSTGRESQL_REPLICA_SETTINGS)
 
diff --git a/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp b/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp
index eccaa3c7acf..ee15c1ec13d 100644
--- a/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp
+++ b/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp
@@ -24,6 +24,8 @@ PostgreSQLReplicationHandler::PostgreSQLReplicationHandler(
     const std::string & metadata_path_,
     std::shared_ptr<Context> context_,
     const size_t max_block_size_,
+    bool allow_minimal_ddl_,
+    bool is_postgresql_replica_database_engine_,
     const String tables_list_)
     : log(&Poco::Logger::get("PostgreSQLReplicaHandler"))
     , context(context_)
@@ -31,6 +33,8 @@ PostgreSQLReplicationHandler::PostgreSQLReplicationHandler(
     , connection_str(conn_str)
     , metadata_path(metadata_path_)
     , max_block_size(max_block_size_)
+    , allow_minimal_ddl(allow_minimal_ddl_)
+    , is_postgresql_replica_database_engine(is_postgresql_replica_database_engine_)
     , tables_list(tables_list_)
     , connection(std::make_shared<PostgreSQLConnection>(conn_str))
 {
@@ -113,7 +117,7 @@ void PostgreSQLReplicationHandler::startSynchronization()
         /// In case of some failure, the following cases are possible (since publication and replication slot are reused):
         /// 1. If replication slot exists and metadata file (where last synced version is written) does not exist, it is not ok.
         /// 2. If created a new publication and replication slot existed before it was created, it is not ok.
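To make the slot lifecycle above concrete, a hedged standalone sketch (connection parameters and the slot name are illustrative; the TEMPORARY variant shown here is the protocol's own auto-drop flavor, whereas the handler names and drops its temporary slot explicitly): it creates a logical slot that exports a snapshot, then converts the returned consistent-point LSN into a comparable 64-bit value, mirroring getLSNValue:

#include <cstdint>
#include <cstdio>
#include <iostream>
#include <string>
#include <pqxx/pqxx>

/// Same conversion as getLSNValue: an LSN prints as two 32-bit hex halves "X/X".
static int64_t lsnToInt64(const std::string & lsn)
{
    uint32_t upper_half = 0, lower_half = 0;
    std::sscanf(lsn.c_str(), "%X/%X", &upper_half, &lower_half);
    return (static_cast<int64_t>(upper_half) << 32) + lower_half;
}

int main()
{
    /// "replication=database" puts the session into walsender mode.
    pqxx::connection conn("dbname=postgres user=postgres replication=database");
    pqxx::nontransaction ntx(conn);

    /// Result row: slot_name, consistent_point (LSN), snapshot_name, output_plugin.
    auto result = ntx.exec("CREATE_REPLICATION_SLOT my_slot_tmp TEMPORARY LOGICAL pgoutput EXPORT_SNAPSHOT");
    auto start_lsn = result[0][1].as<std::string>();
    auto snapshot_name = result[0][2].as<std::string>();

    /// The snapshot is only usable while the exporting transaction/session is open; another
    /// connection can run SET TRANSACTION SNAPSHOT '<name>' to read tables frozen at start_lsn.
    std::cout << snapshot_name << " @ " << lsnToInt64(start_lsn) << "\n";
}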
- dropReplicationSlot(ntx, replication_slot); + dropReplicationSlot(ntx); initial_sync(); } else @@ -142,6 +146,8 @@ void PostgreSQLReplicationHandler::startSynchronization() metadata_path, start_lsn, max_block_size, + allow_minimal_ddl, + is_postgresql_replica_database_engine, nested_storages); consumer_task->activateAndSchedule(); @@ -310,32 +316,41 @@ void PostgreSQLReplicationHandler::createReplicationSlot( { std::string query_str; - if (!temporary) - query_str = fmt::format("CREATE_REPLICATION_SLOT {} LOGICAL pgoutput EXPORT_SNAPSHOT", replication_slot); + std::string slot_name; + if (temporary) + slot_name = replication_slot + "_tmp"; else - query_str = fmt::format("CREATE_REPLICATION_SLOT {} TEMPORARY LOGICAL pgoutput EXPORT_SNAPSHOT", replication_slot + "_tmp"); + slot_name = replication_slot; + + query_str = fmt::format("CREATE_REPLICATION_SLOT {} LOGICAL pgoutput EXPORT_SNAPSHOT", slot_name); try { pqxx::result result{ntx->exec(query_str)}; start_lsn = result[0][1].as(); snapshot_name = result[0][2].as(); - LOG_TRACE(log, "Created replication slot: {}, start lsn: {}, snapshot: {}", - replication_slot, start_lsn, snapshot_name); + LOG_TRACE(log, "Created replication slot: {}, start lsn: {}", replication_slot, start_lsn); } catch (Exception & e) { - e.addMessage("while creating PostgreSQL replication slot {}", replication_slot); + e.addMessage("while creating PostgreSQL replication slot {}", slot_name); throw; } } -void PostgreSQLReplicationHandler::dropReplicationSlot(NontransactionPtr ntx, std::string & slot_name) +void PostgreSQLReplicationHandler::dropReplicationSlot(NontransactionPtr ntx, bool temporary) { + std::string slot_name; + if (temporary) + slot_name = replication_slot + "_tmp"; + else + slot_name = replication_slot; + std::string query_str = fmt::format("SELECT pg_drop_replication_slot('{}')", slot_name); + ntx->exec(query_str); - LOG_TRACE(log, "Dropped replication slot {}", slot_name); + LOG_TRACE(log, "Dropped replication slot: {}", slot_name); } @@ -356,7 +371,7 @@ void PostgreSQLReplicationHandler::shutdownFinal() dropPublication(tx); if (isReplicationSlotExist(tx, replication_slot)) - dropReplicationSlot(tx, replication_slot); + dropReplicationSlot(tx); tx->commit(); } diff --git a/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.h b/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.h index f428ed9720a..506335d8eb2 100644 --- a/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.h +++ b/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.h @@ -26,6 +26,8 @@ public: const std::string & metadata_path_, std::shared_ptr context_, const size_t max_block_size_, + bool allow_minimal_ddl_, + bool is_postgresql_replica_database_engine_, const String tables_list = ""); void startup(); @@ -52,7 +54,7 @@ private: void createReplicationSlot(NontransactionPtr ntx, std::string & start_lsn, std::string & snapshot_name, bool temporary = false); - void dropReplicationSlot(NontransactionPtr tx, std::string & slot_name); + void dropReplicationSlot(NontransactionPtr tx, bool temporary = false); void dropPublication(NontransactionPtr ntx); @@ -72,6 +74,7 @@ private: std::shared_ptr context; const std::string database_name, connection_str, metadata_path; const size_t max_block_size; + bool allow_minimal_ddl, is_postgresql_replica_database_engine; std::string tables_list, replication_slot, publication_name; PostgreSQLConnectionPtr connection; diff --git a/src/Storages/PostgreSQL/StoragePostgreSQLReplica.cpp b/src/Storages/PostgreSQL/StoragePostgreSQLReplica.cpp index 
4b2e746a557..dbacc995c67 100644 --- a/src/Storages/PostgreSQL/StoragePostgreSQLReplica.cpp +++ b/src/Storages/PostgreSQL/StoragePostgreSQLReplica.cpp @@ -61,11 +61,10 @@ StoragePostgreSQLReplica::StoragePostgreSQLReplica( connection_str, metadata_path, global_context, - replication_settings->postgresql_max_block_size.changed - ? replication_settings->postgresql_max_block_size.value - : (global_context->getSettingsRef().max_insert_block_size.value) - - ); + replication_settings->postgresql_replica_max_block_size.changed + ? replication_settings->postgresql_replica_max_block_size.value + : global_context->getSettingsRef().max_insert_block_size.value, + replication_settings->postgresql_replica_allow_minimal_ddl.value, false); } diff --git a/tests/integration/test_postgresql_replica_database_engine/configs/users.xml b/tests/integration/test_postgresql_replica_database_engine/configs/users.xml new file mode 100644 index 00000000000..948093dbf4c --- /dev/null +++ b/tests/integration/test_postgresql_replica_database_engine/configs/users.xml @@ -0,0 +1,8 @@ + + + + + 1 + + + diff --git a/tests/integration/test_postgresql_replica_database_engine/test.py b/tests/integration/test_postgresql_replica_database_engine/test.py index ec82a1050c2..9d5686175ab 100644 --- a/tests/integration/test_postgresql_replica_database_engine/test.py +++ b/tests/integration/test_postgresql_replica_database_engine/test.py @@ -10,7 +10,7 @@ from psycopg2.extensions import ISOLATION_LEVEL_AUTOCOMMIT from helpers.test_tools import TSV cluster = ClickHouseCluster(__file__) -instance = cluster.add_instance('instance', main_configs=['configs/log_conf.xml'], with_postgres=True) +instance = cluster.add_instance('instance', main_configs=['configs/log_conf.xml'], user_configs = ['configs/users.xml'], with_postgres=True) postgres_table_template = """ CREATE TABLE IF NOT EXISTS {} ( @@ -88,7 +88,7 @@ def postgresql_setup_teardown(): instance.query('DROP TABLE IF EXISTS test.postgresql_replica') -@pytest.mark.timeout(120) +@pytest.mark.timeout(320) def test_load_and_sync_all_database_tables(started_cluster): instance.query("DROP DATABASE IF EXISTS test_database") conn = get_postgres_conn(True) @@ -115,7 +115,7 @@ def test_load_and_sync_all_database_tables(started_cluster): assert 'test_database' not in instance.query('SHOW DATABASES') -@pytest.mark.timeout(120) +@pytest.mark.timeout(320) def test_replicating_dml(started_cluster): instance.query("DROP DATABASE IF EXISTS test_database") conn = get_postgres_conn(True) @@ -158,7 +158,7 @@ def test_replicating_dml(started_cluster): assert 'test_database' not in instance.query('SHOW DATABASES') -@pytest.mark.timeout(120) +@pytest.mark.timeout(320) def test_different_data_types(started_cluster): instance.query("DROP DATABASE IF EXISTS test_database") conn = get_postgres_conn(True) @@ -242,7 +242,7 @@ def test_different_data_types(started_cluster): assert(result == expected) -@pytest.mark.timeout(120) +@pytest.mark.timeout(320) def test_load_and_sync_subset_of_database_tables(started_cluster): instance.query("DROP DATABASE IF EXISTS test_database") conn = get_postgres_conn(True) @@ -263,7 +263,7 @@ def test_load_and_sync_subset_of_database_tables(started_cluster): instance.query(''' CREATE DATABASE test_database ENGINE = PostgreSQLReplica('postgres1:5432', 'postgres_database', 'postgres', 'mysecretpassword') - SETTINGS postgresql_tables_list = '{}'; + SETTINGS postgresql_replica_tables_list = '{}'; '''.format(publication_tables)) assert 'test_database' in instance.query('SHOW DATABASES') @@ 
-295,7 +295,7 @@ def test_load_and_sync_subset_of_database_tables(started_cluster): assert 'test_database' not in instance.query('SHOW DATABASES') -@pytest.mark.timeout(120) +@pytest.mark.timeout(320) def test_table_schema_changes(started_cluster): instance.query("DROP DATABASE IF EXISTS test_database") conn = get_postgres_conn(True) @@ -307,7 +307,10 @@ def test_table_schema_changes(started_cluster): instance.query("INSERT INTO postgres_database.postgresql_replica_{} SELECT number, {}, {}, {} from numbers(25)".format(i, i, i, i)) instance.query( - "CREATE DATABASE test_database ENGINE = PostgreSQLReplica('postgres1:5432', 'postgres_database', 'postgres', 'mysecretpassword')") + """CREATE DATABASE test_database + ENGINE = PostgreSQLReplica('postgres1:5432', 'postgres_database', 'postgres', 'mysecretpassword') + SETTINGS postgresql_replica_allow_minimal_ddl = 1; + """) for i in range(NUM_TABLES): instance.query("INSERT INTO postgres_database.postgresql_replica_{} SELECT 25 + number, {}, {}, {} from numbers(25)".format(i, i, i, i)) @@ -336,6 +339,28 @@ def test_table_schema_changes(started_cluster): for i in range(NUM_TABLES): check_tables_are_synchronized('postgresql_replica_{}'.format(i)); + for i in range(NUM_TABLES): + cursor.execute('drop table postgresql_replica_{};'.format(i)) + + instance.query("DROP DATABASE test_database") + + +@pytest.mark.timeout(120) +def test_changing_replica_identity_value(started_cluster): + instance.query("DROP DATABASE IF EXISTS test_database") + conn = get_postgres_conn(True) + cursor = conn.cursor() + create_postgres_table(cursor, 'postgresql_replica'); + instance.query("INSERT INTO postgres_database.postgresql_replica SELECT 50 + number, number from numbers(50)") + + instance.query( + "CREATE DATABASE test_database ENGINE = PostgreSQLReplica('postgres1:5432', 'postgres_database', 'postgres', 'mysecretpassword')") + + instance.query("INSERT INTO postgres_database.postgresql_replica SELECT 100 + number, number from numbers(50)") + check_tables_are_synchronized('postgresql_replica'); + cursor.execute("UPDATE postgresql_replica SET key=key-25 WHERE key<100 ") + check_tables_are_synchronized('postgresql_replica'); + if __name__ == '__main__': cluster.start() diff --git a/tests/integration/test_storage_postgresql_replica/test.py b/tests/integration/test_storage_postgresql_replica/test.py index bb7ff709b6d..2ca07092607 100644 --- a/tests/integration/test_storage_postgresql_replica/test.py +++ b/tests/integration/test_storage_postgresql_replica/test.py @@ -70,7 +70,7 @@ def rabbitmq_setup_teardown(): instance.query('DROP TABLE IF EXISTS test.postgresql_replica') -@pytest.mark.timeout(120) +@pytest.mark.timeout(320) def test_initial_load_from_snapshot(started_cluster): conn = get_postgres_conn(True) cursor = conn.cursor() @@ -93,7 +93,7 @@ def test_initial_load_from_snapshot(started_cluster): postgresql_replica_check_result(result, True) -@pytest.mark.timeout(120) +@pytest.mark.timeout(320) def test_no_connection_at_startup(started_cluster): conn = get_postgres_conn(True) cursor = conn.cursor() @@ -120,7 +120,7 @@ def test_no_connection_at_startup(started_cluster): postgresql_replica_check_result(result, True) -@pytest.mark.timeout(120) +@pytest.mark.timeout(320) def test_detach_attach_is_ok(started_cluster): conn = get_postgres_conn(True) cursor = conn.cursor() @@ -145,13 +145,16 @@ def test_detach_attach_is_ok(started_cluster): instance.query('DETACH TABLE test.postgresql_replica') instance.query('ATTACH TABLE test.postgresql_replica') - time.sleep(0.5) result 
= instance.query('SELECT * FROM test.postgresql_replica ORDER BY key;') + while postgresql_replica_check_result(result) == False: + time.sleep(0.5) + result = instance.query('SELECT * FROM test.postgresql_replica ORDER BY key;') + cursor.execute('DROP TABLE postgresql_replica;') postgresql_replica_check_result(result, True) -@pytest.mark.timeout(120) +@pytest.mark.timeout(320) def test_replicating_insert_queries(started_cluster): conn = get_postgres_conn(True) cursor = conn.cursor() @@ -192,7 +195,7 @@ def test_replicating_insert_queries(started_cluster): postgresql_replica_check_result(result, True) -@pytest.mark.timeout(120) +@pytest.mark.timeout(320) def test_replicating_delete_queries(started_cluster): conn = get_postgres_conn(True) cursor = conn.cursor() @@ -230,7 +233,7 @@ def test_replicating_delete_queries(started_cluster): postgresql_replica_check_result(result, True) -@pytest.mark.timeout(120) +@pytest.mark.timeout(320) def test_replicating_update_queries(started_cluster): conn = get_postgres_conn(True) cursor = conn.cursor() @@ -261,7 +264,7 @@ def test_replicating_update_queries(started_cluster): postgresql_replica_check_result(result, True) -@pytest.mark.timeout(120) +@pytest.mark.timeout(320) def test_resume_from_written_version(started_cluster): conn = get_postgres_conn(True) cursor = conn.cursor() @@ -303,7 +306,7 @@ def test_resume_from_written_version(started_cluster): postgresql_replica_check_result(result, True) -@pytest.mark.timeout(120) +@pytest.mark.timeout(320) def test_many_replication_messages(started_cluster): conn = get_postgres_conn(True) cursor = conn.cursor() @@ -318,7 +321,7 @@ def test_many_replication_messages(started_cluster): PRIMARY KEY(key)) ENGINE = PostgreSQLReplica( 'postgres1:5432', 'postgres_database', 'postgresql_replica', 'postgres', 'mysecretpassword') - SETTINGS postgresql_max_block_size = 50000; + SETTINGS postgresql_replica_max_block_size = 50000; ''') result = instance.query('SELECT count() FROM test.postgresql_replica;') @@ -361,7 +364,7 @@ def test_many_replication_messages(started_cluster): cursor.execute('DROP TABLE postgresql_replica;') -@pytest.mark.timeout(180) +@pytest.mark.timeout(320) def test_connection_loss(started_cluster): conn = get_postgres_conn(True) cursor = conn.cursor() From 28c0a64c07b0bab34d8464a632e8740a0388213e Mon Sep 17 00:00:00 2001 From: kssenii Date: Mon, 22 Feb 2021 13:05:29 +0000 Subject: [PATCH 047/931] Comment --- src/Storages/PostgreSQL/PostgreSQLReplicaConsumer.h | 5 ----- src/Storages/PostgreSQL/PostgreSQLReplicationHandler.h | 5 +++++ 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/src/Storages/PostgreSQL/PostgreSQLReplicaConsumer.h b/src/Storages/PostgreSQL/PostgreSQLReplicaConsumer.h index 720e2cf72d5..37b36d4bfc5 100644 --- a/src/Storages/PostgreSQL/PostgreSQLReplicaConsumer.h +++ b/src/Storages/PostgreSQL/PostgreSQLReplicaConsumer.h @@ -18,11 +18,6 @@ #include "pqxx/pqxx" // Y_IGNORE -/// TODO: There is ALTER PUBLICATION command to dynamically add and remove tables for replicating (the command is transactional). -/// This can also be supported. (Probably, if in a replication stream comes a relation name, which does not currently -/// exist in CH, it can be loaded from snapshot and handled the same way as some ddl by comparing lsn positions of wal, -/// but there is the case that a known table has been just renamed, then the previous version might be just dropped by user). 
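The TODO removed above (it survives as an IDEA comment in the handler header, see the following hunk) refers to a real PostgreSQL capability; a minimal illustrative sketch:

#include <string>
#include <pqxx/pqxx>

/// ALTER PUBLICATION is transactional, so a table can be added to (or removed from) an
/// existing publication while the replication slot keeps its position in the WAL.
void addTableToPublication(pqxx::connection & conn, const std::string & publication, const std::string & table)
{
    pqxx::work tx(conn);
    tx.exec("ALTER PUBLICATION " + publication + " ADD TABLE " + table); /// identifier quoting omitted for brevity
    tx.commit();
}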
- namespace DB { diff --git a/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.h b/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.h index 506335d8eb2..7b9605be5dc 100644 --- a/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.h +++ b/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.h @@ -14,6 +14,11 @@ namespace DB { +/// IDEA: There is ALTER PUBLICATION command to dynamically add and remove tables for replicating (the command is transactional). +/// (Probably, if in a replication stream comes a relation name, which does not currently +/// exist in CH, it can be loaded via snapshot while stream is stopped and then comparing wal positions with +/// current lsn and table start lsn. + class StoragePostgreSQLReplica; class PostgreSQLReplicationHandler From 0760759dc6b1f14fbc3fe5b18ee9b9967bdc15a7 Mon Sep 17 00:00:00 2001 From: hexiaoting Date: Tue, 9 Mar 2021 16:44:56 +0800 Subject: [PATCH 048/931] Add DataTypeMap support LowCardinality and FixedString type --- .../DataTypeLowCardinalityHelpers.cpp | 17 +++++++++ src/Functions/array/arrayElement.cpp | 37 ++++++++++++++++++- 2 files changed, 53 insertions(+), 1 deletion(-) diff --git a/src/DataTypes/DataTypeLowCardinalityHelpers.cpp b/src/DataTypes/DataTypeLowCardinalityHelpers.cpp index a68dc30d5c2..80d154b6234 100644 --- a/src/DataTypes/DataTypeLowCardinalityHelpers.cpp +++ b/src/DataTypes/DataTypeLowCardinalityHelpers.cpp @@ -1,11 +1,13 @@ #include #include #include +#include #include #include #include #include +#include #include @@ -39,6 +41,11 @@ DataTypePtr recursiveRemoveLowCardinality(const DataTypePtr & type) return std::make_shared(elements); } + if (const auto * map_type = typeid_cast(type.get())) + { + return std::make_shared(recursiveRemoveLowCardinality(map_type->getKeyType()), recursiveRemoveLowCardinality(map_type->getValueType())); + } + if (const auto * low_cardinality_type = typeid_cast(type.get())) return low_cardinality_type->getDictionaryType(); @@ -78,6 +85,16 @@ ColumnPtr recursiveRemoveLowCardinality(const ColumnPtr & column) return ColumnTuple::create(columns); } + if (const auto * column_map = typeid_cast(column.get())) + { + auto nested = column_map->getNestedColumnPtr(); + auto nested_no_lc = recursiveRemoveLowCardinality(nested); + if (nested.get() == nested_no_lc.get()) + return column; + + return ColumnMap::create(nested_no_lc); + } + if (const auto * column_low_cardinality = typeid_cast(column.get())) return column_low_cardinality->convertToFullColumn(); diff --git a/src/Functions/array/arrayElement.cpp b/src/Functions/array/arrayElement.cpp index 7d053988cae..cfe8e7839b1 100644 --- a/src/Functions/array/arrayElement.cpp +++ b/src/Functions/array/arrayElement.cpp @@ -12,6 +12,7 @@ #include #include #include +#include #include #include #include @@ -110,6 +111,9 @@ private: static bool matchKeyToIndexString(const IColumn & data, const Offsets & offsets, const ColumnsWithTypeAndName & arguments, PaddedPODArray & matched_idxs); + static bool matchKeyToIndexFixedString(const IColumn & data, const Offsets & offsets, + const ColumnsWithTypeAndName & arguments, PaddedPODArray & matched_idxs); + static bool matchKeyToIndexStringConst(const IColumn & data, const Offsets & offsets, const Field & index, PaddedPODArray & matched_idxs); @@ -767,6 +771,19 @@ struct MatcherString } }; +struct MatcherFixedString +{ + const ColumnFixedString & data; + const ColumnFixedString & index; + + bool match(size_t row_data, size_t row_index) const + { + auto data_ref = data.getDataAt(row_data); + auto index_ref = 
index.getDataAt(row_index); + return memequalSmallAllowOverflow15(index_ref.data, index_ref.size, data_ref.data, data_ref.size); + } +}; + struct MatcherStringConst { const ColumnString & data; @@ -863,6 +880,23 @@ bool FunctionArrayElement::matchKeyToIndexString( return true; } +bool FunctionArrayElement::matchKeyToIndexFixedString( + const IColumn & data, const Offsets & offsets, + const ColumnsWithTypeAndName & arguments, PaddedPODArray & matched_idxs) +{ + const auto * index_string = checkAndGetColumn(arguments[1].column.get()); + if (!index_string) + return false; + + const auto * data_string = checkAndGetColumn(&data); + if (!data_string) + return false; + + MatcherFixedString matcher{*data_string, *index_string}; + executeMatchKeyToIndex(offsets, matched_idxs, matcher); + return true; +} + template bool FunctionArrayElement::matchKeyToIndexNumberConst( const IColumn & data, const Offsets & offsets, @@ -910,7 +944,8 @@ bool FunctionArrayElement::matchKeyToIndex( || matchKeyToIndexNumber(data, offsets, arguments, matched_idxs) || matchKeyToIndexNumber(data, offsets, arguments, matched_idxs) || matchKeyToIndexNumber(data, offsets, arguments, matched_idxs) - || matchKeyToIndexString(data, offsets, arguments, matched_idxs); + || matchKeyToIndexString(data, offsets, arguments, matched_idxs) + || matchKeyToIndexFixedString(data, offsets, arguments, matched_idxs); } bool FunctionArrayElement::matchKeyToIndexConst( From 37749eecde99021924b0eb5459462a58a77165a8 Mon Sep 17 00:00:00 2001 From: hexiaoting Date: Tue, 9 Mar 2021 16:50:13 +0800 Subject: [PATCH 049/931] Add test cases --- .../01763_support_map_lowcardinality_type.reference | 2 ++ .../01763_support_map_lowcardinality_type.sql | 12 ++++++++++++ 2 files changed, 14 insertions(+) create mode 100644 tests/queries/0_stateless/01763_support_map_lowcardinality_type.reference create mode 100644 tests/queries/0_stateless/01763_support_map_lowcardinality_type.sql diff --git a/tests/queries/0_stateless/01763_support_map_lowcardinality_type.reference b/tests/queries/0_stateless/01763_support_map_lowcardinality_type.reference new file mode 100644 index 00000000000..8fdcdf3d8d5 --- /dev/null +++ b/tests/queries/0_stateless/01763_support_map_lowcardinality_type.reference @@ -0,0 +1,2 @@ +b +{'1':1} 1 0 diff --git a/tests/queries/0_stateless/01763_support_map_lowcardinality_type.sql b/tests/queries/0_stateless/01763_support_map_lowcardinality_type.sql new file mode 100644 index 00000000000..ccade153ca1 --- /dev/null +++ b/tests/queries/0_stateless/01763_support_map_lowcardinality_type.sql @@ -0,0 +1,12 @@ +DROP TABLE IF EXISTS map_lc; +SET allow_experimental_map_type = 1; +CREATE TABLE map_lc +( + `kv` Map(LowCardinality(String), LowCardinality(String)) +) +ENGINE = Memory; + +INSERT INTO map_lc select map('a', 'b'); +SELECT kv['a'] FROM map_lc; +DROP TABLE map_lc; +SELECT map(toFixedString('1',1),1) AS m, m[toFixedString('1',1)],m[toFixedString('1',2)]; From 072a68f8ab9cd5c043cc147eef945791d686bf45 Mon Sep 17 00:00:00 2001 From: hexiaoting Date: Fri, 12 Mar 2021 16:08:16 +0800 Subject: [PATCH 050/931] Fix build error --- src/DataTypes/DataTypeLowCardinalityHelpers.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/DataTypes/DataTypeLowCardinalityHelpers.cpp b/src/DataTypes/DataTypeLowCardinalityHelpers.cpp index 80d154b6234..41ba81814d0 100644 --- a/src/DataTypes/DataTypeLowCardinalityHelpers.cpp +++ b/src/DataTypes/DataTypeLowCardinalityHelpers.cpp @@ -87,7 +87,7 @@ ColumnPtr recursiveRemoveLowCardinality(const ColumnPtr & 
column) if (const auto * column_map = typeid_cast(column.get())) { - auto nested = column_map->getNestedColumnPtr(); + const auto & nested = column_map->getNestedColumnPtr(); auto nested_no_lc = recursiveRemoveLowCardinality(nested); if (nested.get() == nested_no_lc.get()) return column; From 87c740730bcbcd1a093f4b3899ebc7bca713c81b Mon Sep 17 00:00:00 2001 From: kssenii Date: Wed, 17 Mar 2021 09:58:10 +0000 Subject: [PATCH 051/931] Rename to MaterializePostgreSQL --- src/Databases/DatabaseFactory.cpp | 16 ++--- ....cpp => DatabaseMaterializePostgreSQL.cpp} | 42 ++++++------- ...lica.h => DatabaseMaterializePostgreSQL.h} | 12 ++-- src/Interpreters/InterpreterDropQuery.cpp | 4 +- ....cpp => MaterializePostgreSQLConsumer.cpp} | 44 +++++++------- ...umer.h => MaterializePostgreSQLConsumer.h} | 8 +-- ....cpp => MaterializePostgreSQLMetadata.cpp} | 14 ++--- ...data.h => MaterializePostgreSQLMetadata.h} | 4 +- ....cpp => MaterializePostgreSQLSettings.cpp} | 6 +- ...ings.h => MaterializePostgreSQLSettings.h} | 6 +- .../PostgreSQLReplicationHandler.cpp | 8 +-- .../PostgreSQL/PostgreSQLReplicationHandler.h | 13 ++-- ...a.cpp => StorageMaterializePostgreSQL.cpp} | 60 +++++++++---------- ...plica.h => StorageMaterializePostgreSQL.h} | 16 ++--- src/Storages/registerStorages.cpp | 4 +- 15 files changed, 128 insertions(+), 129 deletions(-) rename src/Databases/PostgreSQL/{DatabasePostgreSQLReplica.cpp => DatabaseMaterializePostgreSQL.cpp} (77%) rename src/Databases/PostgreSQL/{DatabasePostgreSQLReplica.h => DatabaseMaterializePostgreSQL.h} (84%) rename src/Storages/PostgreSQL/{PostgreSQLReplicaConsumer.cpp => MaterializePostgreSQLConsumer.cpp} (91%) rename src/Storages/PostgreSQL/{PostgreSQLReplicaConsumer.h => MaterializePostgreSQLConsumer.h} (97%) rename src/Storages/PostgreSQL/{PostgreSQLReplicaMetadata.cpp => MaterializePostgreSQLMetadata.cpp} (79%) rename src/Storages/PostgreSQL/{PostgreSQLReplicaMetadata.h => MaterializePostgreSQLMetadata.h} (81%) rename src/Storages/PostgreSQL/{PostgreSQLReplicaSettings.cpp => MaterializePostgreSQLSettings.cpp} (77%) rename src/Storages/PostgreSQL/{PostgreSQLReplicaSettings.h => MaterializePostgreSQLSettings.h} (71%) rename src/Storages/PostgreSQL/{StoragePostgreSQLReplica.cpp => StorageMaterializePostgreSQL.cpp} (87%) rename src/Storages/PostgreSQL/{StoragePostgreSQLReplica.h => StorageMaterializePostgreSQL.h} (83%) diff --git a/src/Databases/DatabaseFactory.cpp b/src/Databases/DatabaseFactory.cpp index f153b792caf..d5147b5539b 100644 --- a/src/Databases/DatabaseFactory.cpp +++ b/src/Databases/DatabaseFactory.cpp @@ -36,9 +36,9 @@ #if USE_LIBPQXX #include // Y_IGNORE -#include +#include #include -#include +#include #endif namespace DB @@ -101,14 +101,14 @@ DatabasePtr DatabaseFactory::getImpl(const ASTCreateQuery & create, const String const UUID & uuid = create.uuid; bool engine_may_have_arguments = engine_name == "MySQL" || engine_name == "MaterializeMySQL" || engine_name == "Lazy" || - engine_name == "Replicated" || engine_name == "PostgreSQL" || engine_name == "PostgreSQLReplica"; + engine_name == "Replicated" || engine_name == "PostgreSQL" || engine_name == "MaterializePostgreSQL"; if (engine_define->engine->arguments && !engine_may_have_arguments) throw Exception("Database engine " + engine_name + " cannot have arguments", ErrorCodes::BAD_ARGUMENTS); bool has_unexpected_element = engine_define->engine->parameters || engine_define->partition_by || engine_define->primary_key || engine_define->order_by || engine_define->sample_by; - bool may_have_settings = 
endsWith(engine_name, "MySQL") || engine_name == "Replicated" || engine_name == "PostgreSQLReplica"; + bool may_have_settings = endsWith(engine_name, "MySQL") || engine_name == "Replicated" || engine_name == "MaterializePostgreSQL"; if (has_unexpected_element || (!may_have_settings && engine_define->settings)) throw Exception("Database engine " + engine_name + " cannot have parameters, primary_key, order_by, sample_by, settings", ErrorCodes::UNKNOWN_ELEMENT_IN_AST); @@ -254,7 +254,7 @@ DatabasePtr DatabaseFactory::getImpl(const ASTCreateQuery & create, const String return std::make_shared( context, metadata_path, engine_define, database_name, postgres_database_name, connection, use_table_cache); } - else if (engine_name == "PostgreSQLReplica") + else if (engine_name == "MaterializePostgreSQL") { const ASTFunction * engine = engine_define->engine; @@ -279,21 +279,21 @@ DatabasePtr DatabaseFactory::getImpl(const ASTCreateQuery & create, const String auto connection = std::make_shared( postgres_database_name, parsed_host_port.first, parsed_host_port.second, username, password); - auto postgresql_replica_settings = std::make_unique(); + auto postgresql_replica_settings = std::make_unique(); if (engine_define->settings) postgresql_replica_settings->loadFromQuery(*engine_define); if (create.uuid == UUIDHelpers::Nil) { - return std::make_shared>( + return std::make_shared>( context, metadata_path, uuid, engine_define, database_name, postgres_database_name, connection, std::move(postgresql_replica_settings)); } else { - return std::make_shared>( + return std::make_shared>( context, metadata_path, uuid, engine_define, database_name, postgres_database_name, connection, std::move(postgresql_replica_settings)); diff --git a/src/Databases/PostgreSQL/DatabasePostgreSQLReplica.cpp b/src/Databases/PostgreSQL/DatabaseMaterializePostgreSQL.cpp similarity index 77% rename from src/Databases/PostgreSQL/DatabasePostgreSQLReplica.cpp rename to src/Databases/PostgreSQL/DatabaseMaterializePostgreSQL.cpp index 2b491e62fab..62c00dfd4a2 100644 --- a/src/Databases/PostgreSQL/DatabasePostgreSQLReplica.cpp +++ b/src/Databases/PostgreSQL/DatabaseMaterializePostgreSQL.cpp @@ -1,9 +1,9 @@ -#include +#include #if USE_LIBPQXX #include -#include +#include #include #include @@ -30,7 +30,7 @@ namespace DB static const auto METADATA_SUFFIX = ".postgresql_replica_metadata"; template<> -DatabasePostgreSQLReplica::DatabasePostgreSQLReplica( +DatabaseMaterializePostgreSQL::DatabaseMaterializePostgreSQL( const Context & context, const String & metadata_path_, UUID /* uuid */, @@ -38,11 +38,11 @@ DatabasePostgreSQLReplica::DatabasePostgreSQLReplica( const String & database_name_, const String & postgres_database_name, PostgreSQLConnectionPtr connection_, - std::unique_ptr settings_) + std::unique_ptr settings_) : DatabaseOrdinary( database_name_, metadata_path_, "data/" + escapeForFileName(database_name_) + "/", - "DatabasePostgreSQLReplica (" + database_name_ + ")", context) - , log(&Poco::Logger::get("PostgreSQLReplicaDatabaseEngine")) + "DatabaseMaterializePostgreSQL (" + database_name_ + ")", context) + , log(&Poco::Logger::get("MaterializePostgreSQLDatabaseEngine")) , global_context(context.getGlobalContext()) , metadata_path(metadata_path_) , database_engine_define(database_engine_define_->clone()) @@ -55,7 +55,7 @@ DatabasePostgreSQLReplica::DatabasePostgreSQLReplica( template<> -DatabasePostgreSQLReplica::DatabasePostgreSQLReplica( +DatabaseMaterializePostgreSQL::DatabaseMaterializePostgreSQL( const Context & context, const 
String & metadata_path_, UUID uuid, @@ -63,8 +63,8 @@ DatabasePostgreSQLReplica::DatabasePostgreSQLReplica( const String & database_name_, const String & postgres_database_name, PostgreSQLConnectionPtr connection_, - std::unique_ptr settings_) - : DatabaseAtomic(database_name_, metadata_path_, uuid, "DatabasePostgreSQLReplica (" + database_name_ + ")", context) + std::unique_ptr settings_) + : DatabaseAtomic(database_name_, metadata_path_, uuid, "DatabaseMaterializePostgreSQL (" + database_name_ + ")", context) , global_context(context.getGlobalContext()) , metadata_path(metadata_path_) , database_engine_define(database_engine_define_->clone()) @@ -76,7 +76,7 @@ DatabasePostgreSQLReplica::DatabasePostgreSQLReplica( template -void DatabasePostgreSQLReplica::startSynchronization() +void DatabaseMaterializePostgreSQL::startSynchronization() { replication_handler = std::make_unique( remote_database_name, @@ -97,7 +97,7 @@ void DatabasePostgreSQLReplica::startSynchronization() if (storage) { - replication_handler->addStorage(table_name, storage->template as()); + replication_handler->addStorage(table_name, storage->template as()); tables[table_name] = storage; } } @@ -108,19 +108,19 @@ void DatabasePostgreSQLReplica::startSynchronization() template -StoragePtr DatabasePostgreSQLReplica::getStorage(const String & name) +StoragePtr DatabaseMaterializePostgreSQL::getStorage(const String & name) { auto storage = tryGetTable(name, global_context); if (storage) return storage; - return StoragePostgreSQLReplica::create(StorageID(database_name, name), StoragePtr{}, global_context); + return StorageMaterializePostgreSQL::create(StorageID(database_name, name), StoragePtr{}, global_context); } template -void DatabasePostgreSQLReplica::shutdown() +void DatabaseMaterializePostgreSQL::shutdown() { if (replication_handler) replication_handler->shutdown(); @@ -128,7 +128,7 @@ void DatabasePostgreSQLReplica::shutdown() template -void DatabasePostgreSQLReplica::loadStoredObjects( +void DatabaseMaterializePostgreSQL::loadStoredObjects( Context & context, bool has_force_restore_data_flag, bool force_attach) { Base::loadStoredObjects(context, has_force_restore_data_flag, force_attach); @@ -149,7 +149,7 @@ void DatabasePostgreSQLReplica::loadStoredObjects( template -StoragePtr DatabasePostgreSQLReplica::tryGetTable(const String & name, const Context & context) const +StoragePtr DatabaseMaterializePostgreSQL::tryGetTable(const String & name, const Context & context) const { if (context.hasQueryContext()) { @@ -171,7 +171,7 @@ StoragePtr DatabasePostgreSQLReplica::tryGetTable(const String & name, con template -void DatabasePostgreSQLReplica::createTable(const Context & context, const String & name, const StoragePtr & table, const ASTPtr & query) +void DatabaseMaterializePostgreSQL::createTable(const Context & context, const String & name, const StoragePtr & table, const ASTPtr & query) { if (context.hasQueryContext()) { @@ -188,14 +188,14 @@ void DatabasePostgreSQLReplica::createTable(const Context & context, const template -void DatabasePostgreSQLReplica::dropTable(const Context & context, const String & name, bool no_delay) +void DatabaseMaterializePostgreSQL::dropTable(const Context & context, const String & name, bool no_delay) { Base::dropTable(context, name, no_delay); } template -void DatabasePostgreSQLReplica::drop(const Context & context) +void DatabaseMaterializePostgreSQL::drop(const Context & context) { if (replication_handler) { @@ -214,13 +214,13 @@ void DatabasePostgreSQLReplica::drop(const Context & 
context) template -DatabaseTablesIteratorPtr DatabasePostgreSQLReplica::getTablesIterator( +DatabaseTablesIteratorPtr DatabaseMaterializePostgreSQL::getTablesIterator( const Context & /* context */, const DatabaseOnDisk::FilterByNameFunction & /* filter_by_table_name */) { Tables nested_tables; for (const auto & [table_name, storage] : tables) { - auto nested_storage = storage->template as()->tryGetNested(); + auto nested_storage = storage->template as()->tryGetNested(); if (nested_storage) nested_tables[table_name] = nested_storage; diff --git a/src/Databases/PostgreSQL/DatabasePostgreSQLReplica.h b/src/Databases/PostgreSQL/DatabaseMaterializePostgreSQL.h similarity index 84% rename from src/Databases/PostgreSQL/DatabasePostgreSQLReplica.h rename to src/Databases/PostgreSQL/DatabaseMaterializePostgreSQL.h index d8cb2ff5a6d..fd6d5982fdf 100644 --- a/src/Databases/PostgreSQL/DatabasePostgreSQLReplica.h +++ b/src/Databases/PostgreSQL/DatabaseMaterializePostgreSQL.h @@ -7,7 +7,7 @@ #if USE_LIBPQXX #include -#include +#include #include #include @@ -25,11 +25,11 @@ using PostgreSQLConnectionPtr = std::shared_ptr; template -class DatabasePostgreSQLReplica : public Base +class DatabaseMaterializePostgreSQL : public Base { public: - DatabasePostgreSQLReplica( + DatabaseMaterializePostgreSQL( const Context & context, const String & metadata_path_, UUID uuid, @@ -37,9 +37,9 @@ public: const String & dbname_, const String & postgres_dbname, PostgreSQLConnectionPtr connection_, - std::unique_ptr settings_); + std::unique_ptr settings_); - String getEngineName() const override { return "PostgreSQLReplica"; } + String getEngineName() const override { return "MaterializePostgreSQL"; } String getMetadataPath() const override { return metadata_path; } @@ -69,7 +69,7 @@ private: ASTPtr database_engine_define; String database_name, remote_database_name; PostgreSQLConnectionPtr connection; - std::unique_ptr settings; + std::unique_ptr settings; std::shared_ptr replication_handler; std::map tables; diff --git a/src/Interpreters/InterpreterDropQuery.cpp b/src/Interpreters/InterpreterDropQuery.cpp index 575f5b43a32..1a282b6ef5d 100644 --- a/src/Interpreters/InterpreterDropQuery.cpp +++ b/src/Interpreters/InterpreterDropQuery.cpp @@ -23,7 +23,7 @@ #endif #if USE_LIBPQXX -# include +# include #endif namespace DB @@ -192,7 +192,7 @@ BlockIO InterpreterDropQuery::executeToTableImpl(const ASTDropQuery & query, Dat #if USE_LIBPQXX if (table->getName() == "PostgreSQLReplica") - table->as()->shutdownFinal(); + table->as()->shutdownFinal(); #endif TableExclusiveLockHolder table_lock; diff --git a/src/Storages/PostgreSQL/PostgreSQLReplicaConsumer.cpp b/src/Storages/PostgreSQL/MaterializePostgreSQLConsumer.cpp similarity index 91% rename from src/Storages/PostgreSQL/PostgreSQLReplicaConsumer.cpp rename to src/Storages/PostgreSQL/MaterializePostgreSQLConsumer.cpp index 91e48e9c358..8e7dc81647e 100644 --- a/src/Storages/PostgreSQL/PostgreSQLReplicaConsumer.cpp +++ b/src/Storages/PostgreSQL/MaterializePostgreSQLConsumer.cpp @@ -1,7 +1,7 @@ -#include "PostgreSQLReplicaConsumer.h" +#include "MaterializePostgreSQLConsumer.h" #if USE_LIBPQXX -#include "StoragePostgreSQLReplica.h" +#include "StorageMaterializePostgreSQL.h" #include #include @@ -22,7 +22,7 @@ namespace ErrorCodes extern const int UNKNOWN_TABLE; } -PostgreSQLReplicaConsumer::PostgreSQLReplicaConsumer( +MaterializePostgreSQLConsumer::MaterializePostgreSQLConsumer( std::shared_ptr context_, PostgreSQLConnectionPtr connection_, const std::string & 
replication_slot_name_, @@ -52,7 +52,7 @@ PostgreSQLReplicaConsumer::PostgreSQLReplicaConsumer( } -void PostgreSQLReplicaConsumer::Buffer::fillBuffer(StoragePtr storage) +void MaterializePostgreSQLConsumer::Buffer::fillBuffer(StoragePtr storage) { const auto storage_metadata = storage->getInMemoryMetadataPtr(); description.init(storage_metadata->getSampleBlock()); @@ -77,7 +77,7 @@ void PostgreSQLReplicaConsumer::Buffer::fillBuffer(StoragePtr storage) } -void PostgreSQLReplicaConsumer::readMetadata() +void MaterializePostgreSQLConsumer::readMetadata() { try { @@ -98,7 +98,7 @@ void PostgreSQLReplicaConsumer::readMetadata() } -void PostgreSQLReplicaConsumer::insertValue(Buffer & buffer, const std::string & value, size_t column_idx) +void MaterializePostgreSQLConsumer::insertValue(Buffer & buffer, const std::string & value, size_t column_idx) { const auto & sample = buffer.description.sample_block.getByPosition(column_idx); bool is_nullable = buffer.description.types[column_idx].second; @@ -124,14 +124,14 @@ void PostgreSQLReplicaConsumer::insertValue(Buffer & buffer, const std::string & } -void PostgreSQLReplicaConsumer::insertDefaultValue(Buffer & buffer, size_t column_idx) +void MaterializePostgreSQLConsumer::insertDefaultValue(Buffer & buffer, size_t column_idx) { const auto & sample = buffer.description.sample_block.getByPosition(column_idx); insertDefaultPostgreSQLValue(*buffer.columns[column_idx], *sample.column); } -void PostgreSQLReplicaConsumer::readString(const char * message, size_t & pos, size_t size, String & result) +void MaterializePostgreSQLConsumer::readString(const char * message, size_t & pos, size_t size, String & result) { assert(size > pos + 2); char current = unhex2(message + pos); @@ -145,7 +145,7 @@ void PostgreSQLReplicaConsumer::readString(const char * message, size_t & pos, s } -Int32 PostgreSQLReplicaConsumer::readInt32(const char * message, size_t & pos, [[maybe_unused]] size_t size) +Int32 MaterializePostgreSQLConsumer::readInt32(const char * message, size_t & pos, [[maybe_unused]] size_t size) { assert(size > pos + 8); Int32 result = (UInt32(unhex2(message + pos)) << 24) @@ -157,7 +157,7 @@ Int32 PostgreSQLReplicaConsumer::readInt32(const char * message, size_t & pos, [ } -Int16 PostgreSQLReplicaConsumer::readInt16(const char * message, size_t & pos, [[maybe_unused]] size_t size) +Int16 MaterializePostgreSQLConsumer::readInt16(const char * message, size_t & pos, [[maybe_unused]] size_t size) { assert(size > pos + 4); Int16 result = (UInt32(unhex2(message + pos)) << 8) @@ -167,7 +167,7 @@ Int16 PostgreSQLReplicaConsumer::readInt16(const char * message, size_t & pos, [ } -Int8 PostgreSQLReplicaConsumer::readInt8(const char * message, size_t & pos, [[maybe_unused]] size_t size) +Int8 MaterializePostgreSQLConsumer::readInt8(const char * message, size_t & pos, [[maybe_unused]] size_t size) { assert(size > pos + 2); Int8 result = unhex2(message + pos); @@ -176,7 +176,7 @@ Int8 PostgreSQLReplicaConsumer::readInt8(const char * message, size_t & pos, [[m } -Int64 PostgreSQLReplicaConsumer::readInt64(const char * message, size_t & pos, [[maybe_unused]] size_t size) +Int64 MaterializePostgreSQLConsumer::readInt64(const char * message, size_t & pos, [[maybe_unused]] size_t size) { assert(size > pos + 16); Int64 result = (UInt64(unhex4(message + pos)) << 48) @@ -188,7 +188,7 @@ Int64 PostgreSQLReplicaConsumer::readInt64(const char * message, size_t & pos, [ } -void PostgreSQLReplicaConsumer::readTupleData( +void MaterializePostgreSQLConsumer::readTupleData( Buffer & 
buffer, const char * message, size_t & pos, [[maybe_unused]] size_t size, PostgreSQLQuery type, bool old_value) { Int16 num_columns = readInt16(message, pos, size); @@ -257,7 +257,7 @@ void PostgreSQLReplicaConsumer::readTupleData( /// https://www.postgresql.org/docs/13/protocol-logicalrep-message-formats.html -void PostgreSQLReplicaConsumer::processReplicationMessage(const char * replication_message, size_t size) +void MaterializePostgreSQLConsumer::processReplicationMessage(const char * replication_message, size_t size) { /// Skip '\x' size_t pos = 2; @@ -468,7 +468,7 @@ void PostgreSQLReplicaConsumer::processReplicationMessage(const char * replicati } -void PostgreSQLReplicaConsumer::syncTables(std::shared_ptr tx) +void MaterializePostgreSQLConsumer::syncTables(std::shared_ptr tx) { for (const auto & table_name : tables_to_sync) { @@ -517,7 +517,7 @@ void PostgreSQLReplicaConsumer::syncTables(std::shared_ptr } -String PostgreSQLReplicaConsumer::advanceLSN(std::shared_ptr tx) +String MaterializePostgreSQLConsumer::advanceLSN(std::shared_ptr tx) { std::string query_str = fmt::format("SELECT end_lsn FROM pg_replication_slot_advance('{}', '{}')", replication_slot_name, final_lsn); pqxx::result result{tx->exec(query_str)}; @@ -529,7 +529,7 @@ String PostgreSQLReplicaConsumer::advanceLSN(std::shared_ptr tx; bool slot_empty = true; @@ -640,7 +640,7 @@ bool PostgreSQLReplicaConsumer::readFromReplicationSlot() } -bool PostgreSQLReplicaConsumer::consume(std::vector> & skipped_tables) +bool MaterializePostgreSQLConsumer::consume(std::vector> & skipped_tables) { if (!readFromReplicationSlot()) { @@ -660,7 +660,7 @@ bool PostgreSQLReplicaConsumer::consume(std::vector> & } -void PostgreSQLReplicaConsumer::updateNested(const String & table_name, StoragePtr nested_storage) +void MaterializePostgreSQLConsumer::updateNested(const String & table_name, StoragePtr nested_storage) { storages[table_name] = nested_storage; auto & buffer = buffers.find(table_name)->second; @@ -668,7 +668,7 @@ void PostgreSQLReplicaConsumer::updateNested(const String & table_name, StorageP } -void PostgreSQLReplicaConsumer::updateSkipList(const std::unordered_map & tables_with_lsn) +void MaterializePostgreSQLConsumer::updateSkipList(const std::unordered_map & tables_with_lsn) { for (const auto & [relation_id, lsn] : tables_with_lsn) { diff --git a/src/Storages/PostgreSQL/PostgreSQLReplicaConsumer.h b/src/Storages/PostgreSQL/MaterializePostgreSQLConsumer.h similarity index 97% rename from src/Storages/PostgreSQL/PostgreSQLReplicaConsumer.h rename to src/Storages/PostgreSQL/MaterializePostgreSQLConsumer.h index 37b36d4bfc5..74dd5d67ccf 100644 --- a/src/Storages/PostgreSQL/PostgreSQLReplicaConsumer.h +++ b/src/Storages/PostgreSQL/MaterializePostgreSQLConsumer.h @@ -6,7 +6,7 @@ #if USE_LIBPQXX #include "PostgreSQLConnection.h" -#include "PostgreSQLReplicaMetadata.h" +#include "MaterializePostgreSQLMetadata.h" #include "insertPostgreSQLValue.h" #include @@ -21,12 +21,12 @@ namespace DB { -class PostgreSQLReplicaConsumer +class MaterializePostgreSQLConsumer { public: using Storages = std::unordered_map; - PostgreSQLReplicaConsumer( + MaterializePostgreSQLConsumer( std::shared_ptr context_, PostgreSQLConnectionPtr connection_, const std::string & replication_slot_name_, @@ -103,7 +103,7 @@ private: std::shared_ptr context; const std::string replication_slot_name, publication_name; - PostgreSQLReplicaMetadata metadata; + MaterializePostgreSQLMetadata metadata; PostgreSQLConnectionPtr connection; std::string current_lsn, final_lsn; diff 
--git a/src/Storages/PostgreSQL/PostgreSQLReplicaMetadata.cpp b/src/Storages/PostgreSQL/MaterializePostgreSQLMetadata.cpp similarity index 79% rename from src/Storages/PostgreSQL/PostgreSQLReplicaMetadata.cpp rename to src/Storages/PostgreSQL/MaterializePostgreSQLMetadata.cpp index ad9ef4b22d3..5cc68b429c0 100644 --- a/src/Storages/PostgreSQL/PostgreSQLReplicaMetadata.cpp +++ b/src/Storages/PostgreSQL/MaterializePostgreSQLMetadata.cpp @@ -1,4 +1,4 @@ -#include "PostgreSQLReplicaMetadata.h" +#include "MaterializePostgreSQLMetadata.h" #if USE_LIBPQXX #include @@ -12,7 +12,7 @@ namespace DB { -PostgreSQLReplicaMetadata::PostgreSQLReplicaMetadata(const std::string & metadata_file_path) +MaterializePostgreSQLMetadata::MaterializePostgreSQLMetadata(const std::string & metadata_file_path) : metadata_file(metadata_file_path) , tmp_metadata_file(metadata_file_path + ".tmp") , last_version(1) @@ -20,7 +20,7 @@ PostgreSQLReplicaMetadata::PostgreSQLReplicaMetadata(const std::string & metadat } -void PostgreSQLReplicaMetadata::readMetadata() +void MaterializePostgreSQLMetadata::readMetadata() { if (Poco::File(metadata_file).exists()) { @@ -41,13 +41,13 @@ void PostgreSQLReplicaMetadata::readMetadata() last_lsn = actual_lsn; } - LOG_DEBUG(&Poco::Logger::get("PostgreSQLReplicaMetadata"), + LOG_DEBUG(&Poco::Logger::get("MaterializePostgreSQLMetadata"), "Last written version is {}. (From metadata file {})", last_version, metadata_file); } } -void PostgreSQLReplicaMetadata::writeMetadata(bool append_metadata) +void MaterializePostgreSQLMetadata::writeMetadata(bool append_metadata) { WriteBufferFromFile out(tmp_metadata_file, DBMS_DEFAULT_BUFFER_SIZE, O_WRONLY | O_TRUNC | O_CREAT); @@ -69,7 +69,7 @@ void PostgreSQLReplicaMetadata::writeMetadata(bool append_metadata) /// While data is received, version is updated. Before table sync, write last version to tmp file. /// Then sync data to table and rename tmp to non-tmp. 
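The comment above describes the crash-safe commit scheme used for the metadata file: the full new state is always written to a temporary file first and only then renamed over the old one. As a standalone illustration of that pattern (a minimal sketch under assumed names — not the implementation in this patch, which uses WriteBufferFromFile and Poco file primitives):

#include <cstdint>
#include <filesystem>
#include <fstream>
#include <string>

/// Write the complete metadata state to "<path>.tmp", then atomically replace
/// the old file. POSIX rename() guarantees that a reader (or a restart after
/// a crash) sees either the previous state or the new one, never a torn file.
void commitMetadataSketch(const std::string & path, uint64_t version, const std::string & lsn)
{
    const std::string tmp_path = path + ".tmp";
    {
        std::ofstream out(tmp_path, std::ios::trunc);
        out << version << '\n' << lsn << '\n';
        out.flush(); /// A production version would also fsync the file and its directory here.
    }
    std::filesystem::rename(tmp_path, path);
}
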
-void PostgreSQLReplicaMetadata::commitMetadata(std::string & lsn, const std::function & finalizeStreamFunc) +void MaterializePostgreSQLMetadata::commitMetadata(std::string & lsn, const std::function & finalizeStreamFunc) { std::string actual_lsn; last_lsn = lsn; @@ -90,7 +90,7 @@ void PostgreSQLReplicaMetadata::commitMetadata(std::string & lsn, const std::fun if (actual_lsn != last_lsn) { writeMetadata(true); - LOG_WARNING(&Poco::Logger::get("PostgreSQLReplicaMetadata"), + LOG_WARNING(&Poco::Logger::get("MaterializePostgreSQLMetadata"), "Last written LSN {} is not equal to actual LSN {}", last_lsn, actual_lsn); } } diff --git a/src/Storages/PostgreSQL/PostgreSQLReplicaMetadata.h b/src/Storages/PostgreSQL/MaterializePostgreSQLMetadata.h similarity index 81% rename from src/Storages/PostgreSQL/PostgreSQLReplicaMetadata.h rename to src/Storages/PostgreSQL/MaterializePostgreSQLMetadata.h index f7e566cce90..d09adb61363 100644 --- a/src/Storages/PostgreSQL/PostgreSQLReplicaMetadata.h +++ b/src/Storages/PostgreSQL/MaterializePostgreSQLMetadata.h @@ -5,10 +5,10 @@ namespace DB { -class PostgreSQLReplicaMetadata +class MaterializePostgreSQLMetadata { public: - PostgreSQLReplicaMetadata(const std::string & metadata_file_path); + MaterializePostgreSQLMetadata(const std::string & metadata_file_path); void commitMetadata(std::string & lsn, const std::function & finalizeStreamFunc); diff --git a/src/Storages/PostgreSQL/PostgreSQLReplicaSettings.cpp b/src/Storages/PostgreSQL/MaterializePostgreSQLSettings.cpp similarity index 77% rename from src/Storages/PostgreSQL/PostgreSQLReplicaSettings.cpp rename to src/Storages/PostgreSQL/MaterializePostgreSQLSettings.cpp index dc714cb5488..48fe61b4182 100644 --- a/src/Storages/PostgreSQL/PostgreSQLReplicaSettings.cpp +++ b/src/Storages/PostgreSQL/MaterializePostgreSQLSettings.cpp @@ -1,4 +1,4 @@ -#include "PostgreSQLReplicaSettings.h" +#include "MaterializePostgreSQLSettings.h" #if USE_LIBPQXX #include @@ -15,9 +15,9 @@ namespace ErrorCodes extern const int UNKNOWN_SETTING; } -IMPLEMENT_SETTINGS_TRAITS(PostgreSQLReplicaSettingsTraits, LIST_OF_POSTGRESQL_REPLICA_SETTINGS) +IMPLEMENT_SETTINGS_TRAITS(MaterializePostgreSQLSettingsTraits, LIST_OF_POSTGRESQL_REPLICA_SETTINGS) -void PostgreSQLReplicaSettings::loadFromQuery(ASTStorage & storage_def) +void MaterializePostgreSQLSettings::loadFromQuery(ASTStorage & storage_def) { if (storage_def.settings) { diff --git a/src/Storages/PostgreSQL/PostgreSQLReplicaSettings.h b/src/Storages/PostgreSQL/MaterializePostgreSQLSettings.h similarity index 71% rename from src/Storages/PostgreSQL/PostgreSQLReplicaSettings.h rename to src/Storages/PostgreSQL/MaterializePostgreSQLSettings.h index 0f084ac6108..0df618f513e 100644 --- a/src/Storages/PostgreSQL/PostgreSQLReplicaSettings.h +++ b/src/Storages/PostgreSQL/MaterializePostgreSQLSettings.h @@ -15,12 +15,12 @@ namespace DB #define LIST_OF_POSTGRESQL_REPLICA_SETTINGS(M) \ M(UInt64, postgresql_replica_max_block_size, 0, "Number of row collected before flushing data into table.", 0) \ - M(String, postgresql_replica_tables_list, "", "List of tables for PostgreSQLReplica database engine", 0) \ + M(String, postgresql_replica_tables_list, "", "List of tables for MaterializePostgreSQL database engine", 0) \ M(Bool, postgresql_replica_allow_minimal_ddl, 0, "Allow to track minimal possible ddl. 
By default, table after ddl will get into a skip list", 0) \ -DECLARE_SETTINGS_TRAITS(PostgreSQLReplicaSettingsTraits, LIST_OF_POSTGRESQL_REPLICA_SETTINGS) +DECLARE_SETTINGS_TRAITS(MaterializePostgreSQLSettingsTraits, LIST_OF_POSTGRESQL_REPLICA_SETTINGS) -struct PostgreSQLReplicaSettings : public BaseSettings +struct MaterializePostgreSQLSettings : public BaseSettings { void loadFromQuery(ASTStorage & storage_def); }; diff --git a/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp b/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp index ee15c1ec13d..9e7364c9bb6 100644 --- a/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp +++ b/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp @@ -3,7 +3,7 @@ #if USE_LIBPQXX #include #include -#include +#include #include #include @@ -27,7 +27,7 @@ PostgreSQLReplicationHandler::PostgreSQLReplicationHandler( bool allow_minimal_ddl_, bool is_postgresql_replica_database_engine_, const String tables_list_) - : log(&Poco::Logger::get("PostgreSQLReplicaHandler")) + : log(&Poco::Logger::get("PostgreSQLReplicationHandler")) , context(context_) , database_name(database_name_) , connection_str(conn_str) @@ -49,7 +49,7 @@ PostgreSQLReplicationHandler::PostgreSQLReplicationHandler( } -void PostgreSQLReplicationHandler::addStorage(const std::string & table_name, StoragePostgreSQLReplica * storage) +void PostgreSQLReplicationHandler::addStorage(const std::string & table_name, StorageMaterializePostgreSQL * storage) { storages[table_name] = storage; } @@ -138,7 +138,7 @@ void PostgreSQLReplicationHandler::startSynchronization() ntx->commit(); - consumer = std::make_shared( + consumer = std::make_shared( context, connection, replication_slot, diff --git a/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.h b/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.h index 7b9605be5dc..12fbd782887 100644 --- a/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.h +++ b/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.h @@ -6,8 +6,8 @@ #if USE_LIBPQXX #include "PostgreSQLConnection.h" -#include "PostgreSQLReplicaConsumer.h" -#include "PostgreSQLReplicaMetadata.h" +#include "MaterializePostgreSQLConsumer.h" +#include "MaterializePostgreSQLMetadata.h" #include @@ -19,7 +19,7 @@ namespace DB /// exist in CH, it can be loaded via snapshot while stream is stopped and then comparing wal positions with /// current lsn and table start lsn. 
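The comment above hinges on comparing WAL positions, and PostgreSQL reports an LSN as text of the form "X/Y" — two hexadecimal halves of a 64-bit position. A plausible sketch of the conversion behind that comparison (the helper name and exact layout here are assumptions, not code lifted from this patch):

#include <cstdint>
#include <cstdio>
#include <string>

/// "16/B374D848" -> (0x16 << 32) + 0xB374D848. Once both positions are plain
/// integers, "has the stream passed the table's snapshot point?" is an
/// ordinary comparison.
uint64_t lsnValueSketch(const std::string & lsn)
{
    unsigned int high = 0;
    unsigned int low = 0;
    std::sscanf(lsn.c_str(), "%X/%X", &high, &low);
    return (static_cast<uint64_t>(high) << 32) + low;
}

bool tableCaughtUp(const std::string & current_lsn, const std::string & table_start_lsn)
{
    return lsnValueSketch(current_lsn) >= lsnValueSketch(table_start_lsn);
}
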
-class StoragePostgreSQLReplica; +class StorageMaterializePostgreSQL; class PostgreSQLReplicationHandler { @@ -41,7 +41,7 @@ public: void shutdownFinal(); - void addStorage(const std::string & table_name, StoragePostgreSQLReplica * storage); + void addStorage(const std::string & table_name, StorageMaterializePostgreSQL * storage); NameSet fetchRequiredTables(PostgreSQLConnection::ConnectionPtr connection_); @@ -49,7 +49,7 @@ public: private: using NontransactionPtr = std::shared_ptr; - using Storages = std::unordered_map; + using Storages = std::unordered_map; bool isPublicationExist(std::shared_ptr tx); @@ -83,7 +83,7 @@ private: std::string tables_list, replication_slot, publication_name; PostgreSQLConnectionPtr connection; - std::shared_ptr consumer; + std::shared_ptr consumer; BackgroundSchedulePool::TaskHolder startup_task, consumer_task; std::atomic tables_loaded = false, stop_synchronization = false; @@ -96,4 +96,3 @@ private: } #endif - diff --git a/src/Storages/PostgreSQL/StoragePostgreSQLReplica.cpp b/src/Storages/PostgreSQL/StorageMaterializePostgreSQL.cpp similarity index 87% rename from src/Storages/PostgreSQL/StoragePostgreSQLReplica.cpp rename to src/Storages/PostgreSQL/StorageMaterializePostgreSQL.cpp index dbacc995c67..f239b9e78d4 100644 --- a/src/Storages/PostgreSQL/StoragePostgreSQLReplica.cpp +++ b/src/Storages/PostgreSQL/StorageMaterializePostgreSQL.cpp @@ -1,4 +1,4 @@ -#include "StoragePostgreSQLReplica.h" +#include "StorageMaterializePostgreSQL.h" #if USE_LIBPQXX #include @@ -36,20 +36,20 @@ namespace ErrorCodes static const auto NESTED_STORAGE_SUFFIX = "_ReplacingMergeTree"; -StoragePostgreSQLReplica::StoragePostgreSQLReplica( +StorageMaterializePostgreSQL::StorageMaterializePostgreSQL( const StorageID & table_id_, const String & remote_database_name, const String & remote_table_name_, const String & connection_str, const StorageInMemoryMetadata & storage_metadata, const Context & context_, - std::unique_ptr replication_settings_) + std::unique_ptr replication_settings_) : IStorage(table_id_) , remote_table_name(remote_table_name_) , global_context(std::make_shared(context_.getGlobalContext())) , replication_settings(std::move(replication_settings_)) , is_postgresql_replica_database( - DatabaseCatalog::instance().getDatabase(getStorageID().database_name)->getEngineName() == "PostgreSQLReplica") + DatabaseCatalog::instance().getDatabase(getStorageID().database_name)->getEngineName() == "MaterializePostgreSQL") { setInMemoryMetadata(storage_metadata); @@ -68,7 +68,7 @@ StoragePostgreSQLReplica::StoragePostgreSQLReplica( } -StoragePostgreSQLReplica::StoragePostgreSQLReplica( +StorageMaterializePostgreSQL::StorageMaterializePostgreSQL( const StorageID & table_id_, StoragePtr nested_storage_, const Context & context_) @@ -76,13 +76,13 @@ StoragePostgreSQLReplica::StoragePostgreSQLReplica( , global_context(std::make_shared(context_)) , nested_storage(nested_storage_) , is_postgresql_replica_database( - DatabaseCatalog::instance().getDatabase(getStorageID().database_name)->getEngineName() == "PostgreSQLReplica") + DatabaseCatalog::instance().getDatabase(getStorageID().database_name)->getEngineName() == "MaterializePostgreSQL") { } -std::string StoragePostgreSQLReplica::getNestedTableName() const +std::string StorageMaterializePostgreSQL::getNestedTableName() const { auto table_name = getStorageID().table_name; @@ -93,7 +93,7 @@ std::string StoragePostgreSQLReplica::getNestedTableName() const } -std::shared_ptr StoragePostgreSQLReplica::getMaterializedColumnsDeclaration( 
+std::shared_ptr StorageMaterializePostgreSQL::getMaterializedColumnsDeclaration( const String name, const String type, UInt64 default_value) { auto column_declaration = std::make_shared(); @@ -111,7 +111,7 @@ std::shared_ptr StoragePostgreSQLReplica::getMaterializedC } -ASTPtr StoragePostgreSQLReplica::getColumnDeclaration(const DataTypePtr & data_type) const +ASTPtr StorageMaterializePostgreSQL::getColumnDeclaration(const DataTypePtr & data_type) const { WhichDataType which(data_type); @@ -152,10 +152,10 @@ ASTPtr StoragePostgreSQLReplica::getColumnDeclaration(const DataTypePtr & data_t } -/// For single storage PostgreSQLReplica get columns and primary key columns from storage definition. -/// For database engine PostgreSQLReplica get columns and primary key columns by fetching from PostgreSQL, also using the same +/// For single storage MaterializePostgreSQL get columns and primary key columns from storage definition. +/// For database engine MaterializePostgreSQL get columns and primary key columns by fetching from PostgreSQL, also using the same /// transaction with snapshot, which is used for initial tables dump. -ASTPtr StoragePostgreSQLReplica::getCreateNestedTableQuery(const std::function & fetch_table_structure) +ASTPtr StorageMaterializePostgreSQL::getCreateNestedTableQuery(const std::function & fetch_table_structure) { auto create_table_query = std::make_shared(); @@ -240,7 +240,7 @@ ASTPtr StoragePostgreSQLReplica::getCreateNestedTableQuery(const std::function

& fetch_table_structure) +void StorageMaterializePostgreSQL::createNestedIfNeeded(const std::function & fetch_table_structure) { if (nested_loaded) { @@ -267,7 +267,7 @@ void StoragePostgreSQLReplica::createNestedIfNeeded(const std::functionshutdown(); } -void StoragePostgreSQLReplica::shutdownFinal() +void StorageMaterializePostgreSQL::shutdownFinal() { if (is_postgresql_replica_database) return; @@ -333,7 +333,7 @@ void StoragePostgreSQLReplica::shutdownFinal() } -void StoragePostgreSQLReplica::dropNested() +void StorageMaterializePostgreSQL::dropNested() { std::lock_guard lock(nested_mutex); nested_loaded = false; @@ -351,11 +351,11 @@ void StoragePostgreSQLReplica::dropNested() interpreter.execute(); nested_storage = nullptr; - LOG_WARNING(&Poco::Logger::get("StoragePostgreSQLReplica"), "Dropped (or temporarily) nested table {}", getNestedTableName()); + LOG_WARNING(&Poco::Logger::get("StorageMaterializePostgreSQL"), "Dropped (or temporarily) nested table {}", getNestedTableName()); } -NamesAndTypesList StoragePostgreSQLReplica::getVirtuals() const +NamesAndTypesList StorageMaterializePostgreSQL::getVirtuals() const { if (nested_storage) return nested_storage->getVirtuals(); @@ -364,7 +364,7 @@ NamesAndTypesList StoragePostgreSQLReplica::getVirtuals() const } -Pipe StoragePostgreSQLReplica::read( +Pipe StorageMaterializePostgreSQL::read( const Names & column_names, const StorageMetadataPtr & /* metadata_snapshot */, SelectQueryInfo & query_info, @@ -442,24 +442,24 @@ Pipe StoragePostgreSQLReplica::read( return pipe; } - LOG_WARNING(&Poco::Logger::get("StoragePostgreSQLReplica"), "Nested table {} is unavailable or is not loaded yet", getNestedTableName()); + LOG_WARNING(&Poco::Logger::get("StorageMaterializePostgreSQL"), "Nested table {} is unavailable or is not loaded yet", getNestedTableName()); return Pipe(); } -void registerStoragePostgreSQLReplica(StorageFactory & factory) +void registerStorageMaterializePostgreSQL(StorageFactory & factory) { auto creator_fn = [](const StorageFactory::Arguments & args) { ASTs & engine_args = args.engine_args; bool has_settings = args.storage_def->settings; - auto postgresql_replication_settings = std::make_unique(); + auto postgresql_replication_settings = std::make_unique(); if (has_settings) postgresql_replication_settings->loadFromQuery(*args.storage_def); if (engine_args.size() != 5) - throw Exception("Storage PostgreSQLReplica requires 5 parameters: " + throw Exception("Storage MaterializePostgreSQL requires 5 parameters: " "PostgreSQL('host:port', 'database', 'table', 'username', 'password'", ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH); @@ -474,7 +474,7 @@ void registerStoragePostgreSQLReplica(StorageFactory & factory) args.storage_def->set(args.storage_def->order_by, args.storage_def->primary_key->clone()); if (!args.storage_def->order_by) - throw Exception("Storage PostgreSQLReplica needs order by key or primary key", ErrorCodes::BAD_ARGUMENTS); + throw Exception("Storage MaterializePostgreSQL needs order by key or primary key", ErrorCodes::BAD_ARGUMENTS); if (args.storage_def->primary_key) metadata.primary_key = KeyDescription::getKeyFromAST(args.storage_def->primary_key->ptr(), metadata.columns, args.context); @@ -493,14 +493,14 @@ void registerStoragePostgreSQLReplica(StorageFactory & factory) engine_args[3]->as().value.safeGet(), engine_args[4]->as().value.safeGet()); - return StoragePostgreSQLReplica::create( + return StorageMaterializePostgreSQL::create( args.table_id, remote_database, remote_table, connection.conn_str(), metadata, 
args.context, std::move(postgresql_replication_settings)); }; factory.registerStorage( - "PostgreSQLReplica", + "MaterializePostgreSQL", creator_fn, StorageFactory::StorageFeatures{ .supports_settings = true, diff --git a/src/Storages/PostgreSQL/StoragePostgreSQLReplica.h b/src/Storages/PostgreSQL/StorageMaterializePostgreSQL.h similarity index 83% rename from src/Storages/PostgreSQL/StoragePostgreSQLReplica.h rename to src/Storages/PostgreSQL/StorageMaterializePostgreSQL.h index 4d407f337ad..544bae1bf85 100644 --- a/src/Storages/PostgreSQL/StoragePostgreSQLReplica.h +++ b/src/Storages/PostgreSQL/StorageMaterializePostgreSQL.h @@ -6,7 +6,7 @@ #if USE_LIBPQXX #include "PostgreSQLReplicationHandler.h" -#include "PostgreSQLReplicaSettings.h" +#include "MaterializePostgreSQLSettings.h" #include #include @@ -24,17 +24,17 @@ namespace DB { -class StoragePostgreSQLReplica final : public ext::shared_ptr_helper, public IStorage +class StorageMaterializePostgreSQL final : public ext::shared_ptr_helper, public IStorage { - friend struct ext::shared_ptr_helper; + friend struct ext::shared_ptr_helper; public: - StoragePostgreSQLReplica( + StorageMaterializePostgreSQL( const StorageID & table_id_, StoragePtr nested_storage_, const Context & context_); - String getName() const override { return "PostgreSQLReplica"; } + String getName() const override { return "MaterializePostgreSQL"; } void startup() override; void shutdown() override; @@ -70,14 +70,14 @@ public: void dropNested(); protected: - StoragePostgreSQLReplica( + StorageMaterializePostgreSQL( const StorageID & table_id_, const String & remote_database_name, const String & remote_table_name, const String & connection_str, const StorageInMemoryMetadata & storage_metadata, const Context & context_, - std::unique_ptr replication_settings_); + std::unique_ptr replication_settings_); private: static std::shared_ptr getMaterializedColumnsDeclaration( @@ -92,7 +92,7 @@ private: std::string remote_table_name; std::shared_ptr global_context; - std::unique_ptr replication_settings; + std::unique_ptr replication_settings; std::unique_ptr replication_handler; std::atomic nested_loaded = false; diff --git a/src/Storages/registerStorages.cpp b/src/Storages/registerStorages.cpp index 0f5a3acaa86..bd32de1c315 100644 --- a/src/Storages/registerStorages.cpp +++ b/src/Storages/registerStorages.cpp @@ -60,7 +60,7 @@ void registerStorageEmbeddedRocksDB(StorageFactory & factory); #if USE_LIBPQXX void registerStoragePostgreSQL(StorageFactory & factory); -void registerStoragePostgreSQLReplica(StorageFactory & factory); +void registerStorageMaterializePostgreSQL(StorageFactory & factory); #endif void registerStorages() @@ -118,7 +118,7 @@ void registerStorages() #if USE_LIBPQXX registerStoragePostgreSQL(factory); - registerStoragePostgreSQLReplica(factory); + registerStorageMaterializePostgreSQL(factory); #endif } From e722cee3750a77b73519e2614b722ee2c4492aa8 Mon Sep 17 00:00:00 2001 From: kssenii Date: Wed, 17 Mar 2021 10:52:56 +0000 Subject: [PATCH 052/931] Better templates --- .../PostgreSQLBlockInputStream.cpp | 28 ++++++------------- .../PostgreSQLDictionarySource.cpp | 15 ++++++---- src/Dictionaries/PostgreSQLDictionarySource.h | 1 + 3 files changed, 19 insertions(+), 25 deletions(-) diff --git a/src/DataStreams/PostgreSQLBlockInputStream.cpp b/src/DataStreams/PostgreSQLBlockInputStream.cpp index 7e9aa40e904..495d08fc831 100644 --- a/src/DataStreams/PostgreSQLBlockInputStream.cpp +++ b/src/DataStreams/PostgreSQLBlockInputStream.cpp @@ -23,25 +23,9 @@ namespace DB { 
-template<> -PostgreSQLBlockInputStream::PostgreSQLBlockInputStream( - std::shared_ptr tx_, - const std::string & query_str_, - const Block & sample_block, - const UInt64 max_block_size_, - bool auto_commit_) - : query_str(query_str_) - , max_block_size(max_block_size_) - , auto_commit(auto_commit_) - , tx(tx_) -{ - description.init(sample_block); -} - - -template<> -PostgreSQLBlockInputStream::PostgreSQLBlockInputStream( - std::shared_ptr tx_, +template +PostgreSQLBlockInputStream::PostgreSQLBlockInputStream( + std::shared_ptr tx_, const std::string & query_str_, const Block & sample_block, const UInt64 max_block_size_, @@ -138,6 +122,12 @@ void PostgreSQLBlockInputStream::readSuffix() } } +template +class PostgreSQLBlockInputStream; + +template +class PostgreSQLBlockInputStream; + } #endif diff --git a/src/Dictionaries/PostgreSQLDictionarySource.cpp b/src/Dictionaries/PostgreSQLDictionarySource.cpp index 954e5f4e187..5c660f1a5f5 100644 --- a/src/Dictionaries/PostgreSQLDictionarySource.cpp +++ b/src/Dictionaries/PostgreSQLDictionarySource.cpp @@ -68,8 +68,7 @@ PostgreSQLDictionarySource::PostgreSQLDictionarySource(const PostgreSQLDictionar BlockInputStreamPtr PostgreSQLDictionarySource::loadAll() { LOG_TRACE(log, load_all_query); - auto tx = std::make_shared(*connection->conn()); - return std::make_shared>(tx, load_all_query, sample_block, max_block_size); + return loadBase(load_all_query); } @@ -77,21 +76,25 @@ BlockInputStreamPtr PostgreSQLDictionarySource::loadUpdatedAll() { auto load_update_query = getUpdateFieldAndDate(); LOG_TRACE(log, load_update_query); - auto tx = std::make_shared(*connection->conn()); - return std::make_shared>(tx, load_update_query, sample_block, max_block_size); + return loadBase(load_update_query); } BlockInputStreamPtr PostgreSQLDictionarySource::loadIds(const std::vector & ids) { const auto query = query_builder.composeLoadIdsQuery(ids); - auto tx = std::make_shared(*connection->conn()); - return std::make_shared>(tx, query, sample_block, max_block_size); + return loadBase(query); } BlockInputStreamPtr PostgreSQLDictionarySource::loadKeys(const Columns & key_columns, const std::vector & requested_rows) { const auto query = query_builder.composeLoadKeysQuery(key_columns, requested_rows, ExternalQueryBuilder::AND_OR_CHAIN); + return loadBase(query); +} + + +BlockInputStreamPtr PostgreSQLDictionarySource::loadBase(const String & query) +{ auto tx = std::make_shared(*connection->conn()); return std::make_shared>(tx, query, sample_block, max_block_size); } diff --git a/src/Dictionaries/PostgreSQLDictionarySource.h b/src/Dictionaries/PostgreSQLDictionarySource.h index a826ff15f4f..f3d6fc15e05 100644 --- a/src/Dictionaries/PostgreSQLDictionarySource.h +++ b/src/Dictionaries/PostgreSQLDictionarySource.h @@ -37,6 +37,7 @@ public: BlockInputStreamPtr loadUpdatedAll() override; BlockInputStreamPtr loadIds(const std::vector & ids) override; BlockInputStreamPtr loadKeys(const Columns & key_columns, const std::vector & requested_rows) override; + BlockInputStreamPtr loadBase(const String & query); bool isModified() const override; bool supportsSelectiveLoad() const override; From 2dc006acf11eb1335122a537c201e562393e24b2 Mon Sep 17 00:00:00 2001 From: kssenii Date: Wed, 17 Mar 2021 12:35:02 +0000 Subject: [PATCH 053/931] Rename part 2 --- src/Interpreters/InterpreterCreateQuery.cpp | 4 ++-- src/Interpreters/InterpreterDropQuery.cpp | 2 +- .../test.py | 12 ++++++------ .../test_storage_postgresql_replica/test.py | 18 +++++++++--------- 4 files changed, 18 insertions(+), 18 
deletions(-) diff --git a/src/Interpreters/InterpreterCreateQuery.cpp b/src/Interpreters/InterpreterCreateQuery.cpp index f3e1dc23447..ee918d29281 100644 --- a/src/Interpreters/InterpreterCreateQuery.cpp +++ b/src/Interpreters/InterpreterCreateQuery.cpp @@ -215,9 +215,9 @@ BlockIO InterpreterCreateQuery::createDatabase(ASTCreateQuery & create) "Enable allow_experimental_database_replicated to use it.", ErrorCodes::UNKNOWN_DATABASE_ENGINE); } - if (create.storage->engine->name == "PostgreSQLReplica" && !context.getSettingsRef().allow_experimental_database_postgresql_replica && !internal) + if (create.storage->engine->name == "MaterializePostgreSQL" && !context.getSettingsRef().allow_experimental_database_postgresql_replica && !internal) { - throw Exception("PostgreSQLReplica is an experimental database engine. " + throw Exception("MaterializePostgreSQL is an experimental database engine. " "Enable allow_experimental_database_postgresql_replica to use it.", ErrorCodes::UNKNOWN_DATABASE_ENGINE); } diff --git a/src/Interpreters/InterpreterDropQuery.cpp b/src/Interpreters/InterpreterDropQuery.cpp index 1a282b6ef5d..d2a43629ce4 100644 --- a/src/Interpreters/InterpreterDropQuery.cpp +++ b/src/Interpreters/InterpreterDropQuery.cpp @@ -191,7 +191,7 @@ BlockIO InterpreterDropQuery::executeToTableImpl(const ASTDropQuery & query, Dat table->shutdown(); #if USE_LIBPQXX - if (table->getName() == "PostgreSQLReplica") + if (table->getName() == "MaterializePostgreSQL") table->as()->shutdownFinal(); #endif diff --git a/tests/integration/test_postgresql_replica_database_engine/test.py b/tests/integration/test_postgresql_replica_database_engine/test.py index 9d5686175ab..8611b228392 100644 --- a/tests/integration/test_postgresql_replica_database_engine/test.py +++ b/tests/integration/test_postgresql_replica_database_engine/test.py @@ -100,7 +100,7 @@ def test_load_and_sync_all_database_tables(started_cluster): create_postgres_table(cursor, table_name); instance.query("INSERT INTO postgres_database.{} SELECT number, number from numbers(50)".format(table_name)) - instance.query("CREATE DATABASE test_database ENGINE = PostgreSQLReplica('postgres1:5432', 'postgres_database', 'postgres', 'mysecretpassword')") + instance.query("CREATE DATABASE test_database ENGINE = MaterializePostgreSQL('postgres1:5432', 'postgres_database', 'postgres', 'mysecretpassword')") assert 'test_database' in instance.query('SHOW DATABASES') for i in range(NUM_TABLES): @@ -127,7 +127,7 @@ def test_replicating_dml(started_cluster): instance.query("INSERT INTO postgres_database.postgresql_replica_{} SELECT number, {} from numbers(50)".format(i, i)) instance.query( - "CREATE DATABASE test_database ENGINE = PostgreSQLReplica('postgres1:5432', 'postgres_database', 'postgres', 'mysecretpassword')") + "CREATE DATABASE test_database ENGINE = MaterializePostgreSQL('postgres1:5432', 'postgres_database', 'postgres', 'mysecretpassword')") for i in range(NUM_TABLES): instance.query("INSERT INTO postgres_database.postgresql_replica_{} SELECT 50 + number, {} from numbers(1000)".format(i, i)) @@ -188,7 +188,7 @@ def test_different_data_types(started_cluster): )''') instance.query( - "CREATE DATABASE test_database ENGINE = PostgreSQLReplica('postgres1:5432', 'postgres_database', 'postgres', 'mysecretpassword')") + "CREATE DATABASE test_database ENGINE = MaterializePostgreSQL('postgres1:5432', 'postgres_database', 'postgres', 'mysecretpassword')") for i in range(10): instance.query(''' @@ -262,7 +262,7 @@ def 
test_load_and_sync_subset_of_database_tables(started_cluster): instance.query(''' CREATE DATABASE test_database - ENGINE = PostgreSQLReplica('postgres1:5432', 'postgres_database', 'postgres', 'mysecretpassword') + ENGINE = MaterializePostgreSQL('postgres1:5432', 'postgres_database', 'postgres', 'mysecretpassword') SETTINGS postgresql_replica_tables_list = '{}'; '''.format(publication_tables)) assert 'test_database' in instance.query('SHOW DATABASES') @@ -308,7 +308,7 @@ def test_table_schema_changes(started_cluster): instance.query( """CREATE DATABASE test_database - ENGINE = PostgreSQLReplica('postgres1:5432', 'postgres_database', 'postgres', 'mysecretpassword') + ENGINE = MaterializePostgreSQL('postgres1:5432', 'postgres_database', 'postgres', 'mysecretpassword') SETTINGS postgresql_replica_allow_minimal_ddl = 1; """) @@ -354,7 +354,7 @@ def test_changing_replica_identity_value(started_cluster): instance.query("INSERT INTO postgres_database.postgresql_replica SELECT 50 + number, number from numbers(50)") instance.query( - "CREATE DATABASE test_database ENGINE = PostgreSQLReplica('postgres1:5432', 'postgres_database', 'postgres', 'mysecretpassword')") + "CREATE DATABASE test_database ENGINE = MaterializePostgreSQL('postgres1:5432', 'postgres_database', 'postgres', 'mysecretpassword')") instance.query("INSERT INTO postgres_database.postgresql_replica SELECT 100 + number, number from numbers(50)") check_tables_are_synchronized('postgresql_replica'); diff --git a/tests/integration/test_storage_postgresql_replica/test.py b/tests/integration/test_storage_postgresql_replica/test.py index 2ca07092607..4a7a6592873 100644 --- a/tests/integration/test_storage_postgresql_replica/test.py +++ b/tests/integration/test_storage_postgresql_replica/test.py @@ -79,7 +79,7 @@ def test_initial_load_from_snapshot(started_cluster): instance.query(''' CREATE TABLE test.postgresql_replica (key UInt64, value UInt64, _sign Int8 MATERIALIZED 1, _version UInt64 MATERIALIZED 1) - ENGINE = PostgreSQLReplica( + ENGINE = MaterializePostgreSQL( 'postgres1:5432', 'postgres_database', 'postgresql_replica', 'postgres', 'mysecretpassword') PRIMARY KEY key; ''') @@ -103,7 +103,7 @@ def test_no_connection_at_startup(started_cluster): started_cluster.pause_container('postgres1') instance.query(''' CREATE TABLE test.postgresql_replica (key UInt64, value UInt64, _sign Int8 MATERIALIZED 1, _version UInt64 MATERIALIZED 1) - ENGINE = PostgreSQLReplica( + ENGINE = MaterializePostgreSQL( 'postgres1:5432', 'postgres_database', 'postgresql_replica', 'postgres', 'mysecretpassword') PRIMARY KEY key; ''') @@ -129,7 +129,7 @@ def test_detach_attach_is_ok(started_cluster): instance.query(''' CREATE TABLE test.postgresql_replica (key UInt64, value UInt64, _sign Int8 MATERIALIZED 1, _version UInt64 MATERIALIZED 1) - ENGINE = PostgreSQLReplica( + ENGINE = MaterializePostgreSQL( 'postgres1:5432', 'postgres_database', 'postgresql_replica', 'postgres', 'mysecretpassword') PRIMARY KEY key; ''') @@ -164,7 +164,7 @@ def test_replicating_insert_queries(started_cluster): instance.query(''' CREATE TABLE test.postgresql_replica (key UInt64, value UInt64, _sign Int8 MATERIALIZED 1, _version UInt64 MATERIALIZED 1) - ENGINE = PostgreSQLReplica( + ENGINE = MaterializePostgreSQL( 'postgres1:5432', 'postgres_database', 'postgresql_replica', 'postgres', 'mysecretpassword') PRIMARY KEY key; ''') @@ -205,7 +205,7 @@ def test_replicating_delete_queries(started_cluster): instance.query(''' CREATE TABLE test.postgresql_replica (key UInt64, value UInt64, _sign Int8 
MATERIALIZED 1, _version UInt64 MATERIALIZED 1) - ENGINE = PostgreSQLReplica( + ENGINE = MaterializePostgreSQL( 'postgres1:5432', 'postgres_database', 'postgresql_replica', 'postgres', 'mysecretpassword') PRIMARY KEY key; ''') @@ -243,7 +243,7 @@ def test_replicating_update_queries(started_cluster): instance.query(''' CREATE TABLE test.postgresql_replica (key UInt64, value UInt64, _sign Int8 MATERIALIZED 1, _version UInt64 MATERIALIZED 1) - ENGINE = PostgreSQLReplica( + ENGINE = MaterializePostgreSQL( 'postgres1:5432', 'postgres_database', 'postgresql_replica', 'postgres', 'mysecretpassword') PRIMARY KEY key; ''') @@ -273,7 +273,7 @@ def test_resume_from_written_version(started_cluster): instance.query(''' CREATE TABLE test.postgresql_replica (key UInt64, value UInt64, _sign Int8 MATERIALIZED 1, _version UInt64 MATERIALIZED 1) - ENGINE = PostgreSQLReplica( + ENGINE = MaterializePostgreSQL( 'postgres1:5432', 'postgres_database', 'postgresql_replica', 'postgres', 'mysecretpassword') PRIMARY KEY key; ''') @@ -319,7 +319,7 @@ def test_many_replication_messages(started_cluster): _sign Int8 MATERIALIZED 1, _version UInt64 MATERIALIZED 1, PRIMARY KEY(key)) - ENGINE = PostgreSQLReplica( + ENGINE = MaterializePostgreSQL( 'postgres1:5432', 'postgres_database', 'postgresql_replica', 'postgres', 'mysecretpassword') SETTINGS postgresql_replica_max_block_size = 50000; ''') @@ -373,7 +373,7 @@ def test_connection_loss(started_cluster): instance.query(''' CREATE TABLE test.postgresql_replica (key UInt64, value UInt64, _sign Int8 MATERIALIZED 1, _version UInt64 MATERIALIZED 1) - ENGINE = PostgreSQLReplica( + ENGINE = MaterializePostgreSQL( 'postgres1:5432', 'postgres_database', 'postgresql_replica', 'postgres', 'mysecretpassword') PRIMARY KEY key; ''') From 6bb81630e5a9c37a31f9b791bec859a4b776454d Mon Sep 17 00:00:00 2001 From: kssenii Date: Sat, 20 Mar 2021 10:32:11 +0000 Subject: [PATCH 054/931] Small fixes --- .../DatabaseMaterializePostgreSQL.cpp | 43 +++++++------------ .../DatabaseMaterializePostgreSQL.h | 7 +-- .../fetchPostgreSQLTableStructure.cpp | 3 +- .../MaterializePostgreSQLSettings.h | 2 +- .../StorageMaterializePostgreSQL.cpp | 4 +- .../TableFunctionPostgreSQL.cpp | 1 - 6 files changed, 21 insertions(+), 39 deletions(-) diff --git a/src/Databases/PostgreSQL/DatabaseMaterializePostgreSQL.cpp b/src/Databases/PostgreSQL/DatabaseMaterializePostgreSQL.cpp index 62c00dfd4a2..98ddb856a31 100644 --- a/src/Databases/PostgreSQL/DatabaseMaterializePostgreSQL.cpp +++ b/src/Databases/PostgreSQL/DatabaseMaterializePostgreSQL.cpp @@ -27,6 +27,11 @@ namespace DB { +namespace ErrorCodes +{ + extern const int NOT_IMPLEMENTED; +} + static const auto METADATA_SUFFIX = ".postgresql_replica_metadata"; template<> @@ -83,9 +88,7 @@ void DatabaseMaterializePostgreSQL::startSynchronization() connection->conn_str(), metadata_path + METADATA_SUFFIX, std::make_shared(global_context), - settings->postgresql_replica_max_block_size.changed - ? 
settings->postgresql_replica_max_block_size.value - : (global_context.getSettingsRef().max_insert_block_size.value), + settings->postgresql_replica_max_block_size.value, settings->postgresql_replica_allow_minimal_ddl, true, settings->postgresql_replica_tables_list.value); @@ -93,13 +96,15 @@ void DatabaseMaterializePostgreSQL::startSynchronization() for (const auto & table_name : tables_to_replicate) { - auto storage = getStorage(table_name); + auto storage = tryGetTable(table_name, global_context); - if (storage) + if (!storage) { - replication_handler->addStorage(table_name, storage->template as()); - tables[table_name] = storage; + storage = StorageMaterializePostgreSQL::create(StorageID(database_name, table_name), StoragePtr{}, global_context); } + + replication_handler->addStorage(table_name, storage->template as()); + tables[table_name] = storage; } LOG_TRACE(log, "Loaded {} tables. Starting synchronization", tables.size()); @@ -107,18 +112,6 @@ void DatabaseMaterializePostgreSQL::startSynchronization() } -template -StoragePtr DatabaseMaterializePostgreSQL::getStorage(const String & name) -{ - auto storage = tryGetTable(name, global_context); - - if (storage) - return storage; - - return StorageMaterializePostgreSQL::create(StorageID(database_name, name), StoragePtr{}, global_context); -} - - template void DatabaseMaterializePostgreSQL::shutdown() { @@ -151,6 +144,8 @@ void DatabaseMaterializePostgreSQL::loadStoredObjects( template StoragePtr DatabaseMaterializePostgreSQL::tryGetTable(const String & name, const Context & context) const { + /// When a nested ReplacingMergeTree table is managed from PostgreSQLReplicationHandler, its context is modified + /// to show the type of managed table. if (context.hasQueryContext()) { auto storage_set = context.getQueryContext().getQueryFactoriesInfo().storages; @@ -183,14 +178,8 @@ void DatabaseMaterializePostgreSQL::createTable(const Context & context, c } } - LOG_WARNING(log, "Create table query allowed only for ReplacingMergeTree engine and from synchronization thread"); -} - - -template -void DatabaseMaterializePostgreSQL::dropTable(const Context & context, const String & name, bool no_delay) -{ - Base::dropTable(context, name, no_delay); + throw Exception(ErrorCodes::NOT_IMPLEMENTED, + "Create table query allowed only for ReplacingMergeTree engine and from synchronization thread"); } diff --git a/src/Databases/PostgreSQL/DatabaseMaterializePostgreSQL.h b/src/Databases/PostgreSQL/DatabaseMaterializePostgreSQL.h index fd6d5982fdf..b6a1e70169b 100644 --- a/src/Databases/PostgreSQL/DatabaseMaterializePostgreSQL.h +++ b/src/Databases/PostgreSQL/DatabaseMaterializePostgreSQL.h @@ -52,8 +52,6 @@ public: void createTable(const Context & context, const String & name, const StoragePtr & table, const ASTPtr & query) override; - void dropTable(const Context & context, const String & name, bool no_delay) override; - void drop(const Context & context) override; void shutdown() override; @@ -61,12 +59,11 @@ public: private: void startSynchronization(); - StoragePtr getStorage(const String & name); - Poco::Logger * log; - const Context global_context; + const Context & global_context; String metadata_path; ASTPtr database_engine_define; + String database_name, remote_database_name; PostgreSQLConnectionPtr connection; std::unique_ptr settings; diff --git a/src/Databases/PostgreSQL/fetchPostgreSQLTableStructure.cpp b/src/Databases/PostgreSQL/fetchPostgreSQLTableStructure.cpp index 59f0ca62d75..e139914fcc8 100644 --- 
a/src/Databases/PostgreSQL/fetchPostgreSQLTableStructure.cpp +++ b/src/Databases/PostgreSQL/fetchPostgreSQLTableStructure.cpp @@ -141,8 +141,7 @@ std::shared_ptr readNamesAndTypesList( } catch (const pqxx::undefined_table &) { - throw Exception(fmt::format( - "PostgreSQL table {} does not exist", postgres_table_name), ErrorCodes::UNKNOWN_TABLE); + throw Exception(ErrorCodes::UNKNOWN_TABLE, "PostgreSQL table {} does not exist", postgres_table_name); } catch (Exception & e) { diff --git a/src/Storages/PostgreSQL/MaterializePostgreSQLSettings.h b/src/Storages/PostgreSQL/MaterializePostgreSQLSettings.h index 0df618f513e..a2ad76ceaeb 100644 --- a/src/Storages/PostgreSQL/MaterializePostgreSQLSettings.h +++ b/src/Storages/PostgreSQL/MaterializePostgreSQLSettings.h @@ -14,7 +14,7 @@ namespace DB #define LIST_OF_POSTGRESQL_REPLICA_SETTINGS(M) \ - M(UInt64, postgresql_replica_max_block_size, 0, "Number of row collected before flushing data into table.", 0) \ + M(UInt64, postgresql_replica_max_block_size, 65536, "Number of row collected before flushing data into table.", 0) \ M(String, postgresql_replica_tables_list, "", "List of tables for MaterializePostgreSQL database engine", 0) \ M(Bool, postgresql_replica_allow_minimal_ddl, 0, "Allow to track minimal possible ddl. By default, table after ddl will get into a skip list", 0) \ diff --git a/src/Storages/PostgreSQL/StorageMaterializePostgreSQL.cpp b/src/Storages/PostgreSQL/StorageMaterializePostgreSQL.cpp index f239b9e78d4..c1ba5b66679 100644 --- a/src/Storages/PostgreSQL/StorageMaterializePostgreSQL.cpp +++ b/src/Storages/PostgreSQL/StorageMaterializePostgreSQL.cpp @@ -61,9 +61,7 @@ StorageMaterializePostgreSQL::StorageMaterializePostgreSQL( connection_str, metadata_path, global_context, - replication_settings->postgresql_replica_max_block_size.changed - ? 
replication_settings->postgresql_replica_max_block_size.value - : global_context->getSettingsRef().max_insert_block_size.value, + replication_settings->postgresql_replica_max_block_size.value, replication_settings->postgresql_replica_allow_minimal_ddl.value, false); } diff --git a/src/TableFunctions/TableFunctionPostgreSQL.cpp b/src/TableFunctions/TableFunctionPostgreSQL.cpp index d243594b726..ba9a3778742 100644 --- a/src/TableFunctions/TableFunctionPostgreSQL.cpp +++ b/src/TableFunctions/TableFunctionPostgreSQL.cpp @@ -12,7 +12,6 @@ #include #include #include "registerTableFunctions.h" -#include #include #include From 4e63b8e5dd3c738a6a748287e300f058a71d676a Mon Sep 17 00:00:00 2001 From: Kseniia Sumarokova <54203879+kssenii@users.noreply.github.com> Date: Sat, 20 Mar 2021 13:50:16 +0300 Subject: [PATCH 055/931] Update src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp Co-authored-by: tavplubix --- .../PostgreSQLReplicationHandler.cpp | 23 +++++++------------ 1 file changed, 8 insertions(+), 15 deletions(-) diff --git a/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp b/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp index 9e7364c9bb6..8033fbf3e45 100644 --- a/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp +++ b/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp @@ -217,26 +217,19 @@ NameSet PostgreSQLReplicationHandler::loadFromSnapshot(std::string & snapshot_na void PostgreSQLReplicationHandler::consumerFunc() { - auto start_time = std::chrono::steady_clock::now(); std::vector> skipped_tables; - while (!stop_synchronization) - { - bool reschedule = !consumer->consume(skipped_tables); + bool schedule_now = consumer->consume(skipped_tables); - if (!skipped_tables.empty()) - consumer->updateSkipList(reloadFromSnapshot(skipped_tables)); + if (!skipped_tables.empty()) + consumer->updateSkipList(reloadFromSnapshot(skipped_tables)); - if (reschedule) - break; + if (stop_synchronization) + return; - auto end_time = std::chrono::steady_clock::now(); - auto duration = std::chrono::duration_cast(end_time - start_time); - if (duration.count() > max_thread_work_duration_ms) - break; - } - - if (!stop_synchronization) + if (schedule_now) + consumer_task->schedule(); + else consumer_task->scheduleAfter(reschedule_ms); } From caffccd73e068ae2aea43a447426aee217694d39 Mon Sep 17 00:00:00 2001 From: kssenii Date: Sat, 20 Mar 2021 11:53:41 +0000 Subject: [PATCH 056/931] Fixes --- src/DataStreams/PostgreSQLBlockInputStream.h | 1 + .../DatabaseMaterializePostgreSQL.cpp | 2 +- .../MaterializePostgreSQLConsumer.cpp | 13 ++--- .../MaterializePostgreSQLConsumer.h | 4 +- .../PostgreSQLReplicationHandler.cpp | 47 +++++++++---------- .../PostgreSQL/PostgreSQLReplicationHandler.h | 5 +- .../StorageMaterializePostgreSQL.cpp | 6 +-- .../PostgreSQL/StorageMaterializePostgreSQL.h | 2 +- 8 files changed, 38 insertions(+), 42 deletions(-) diff --git a/src/DataStreams/PostgreSQLBlockInputStream.h b/src/DataStreams/PostgreSQLBlockInputStream.h index a558a46d153..32eaf25fae5 100644 --- a/src/DataStreams/PostgreSQLBlockInputStream.h +++ b/src/DataStreams/PostgreSQLBlockInputStream.h @@ -20,6 +20,7 @@ namespace DB template class PostgreSQLBlockInputStream : public IBlockInputStream { + public: PostgreSQLBlockInputStream( std::shared_ptr tx_, diff --git a/src/Databases/PostgreSQL/DatabaseMaterializePostgreSQL.cpp b/src/Databases/PostgreSQL/DatabaseMaterializePostgreSQL.cpp index 98ddb856a31..0da021fd3c8 100644 --- a/src/Databases/PostgreSQL/DatabaseMaterializePostgreSQL.cpp +++ 
b/src/Databases/PostgreSQL/DatabaseMaterializePostgreSQL.cpp @@ -87,7 +87,7 @@ void DatabaseMaterializePostgreSQL::startSynchronization() remote_database_name, connection->conn_str(), metadata_path + METADATA_SUFFIX, - std::make_shared(global_context), + global_context, settings->postgresql_replica_max_block_size.value, settings->postgresql_replica_allow_minimal_ddl, true, settings->postgresql_replica_tables_list.value); diff --git a/src/Storages/PostgreSQL/MaterializePostgreSQLConsumer.cpp b/src/Storages/PostgreSQL/MaterializePostgreSQLConsumer.cpp index 8e7dc81647e..6448d71adf2 100644 --- a/src/Storages/PostgreSQL/MaterializePostgreSQLConsumer.cpp +++ b/src/Storages/PostgreSQL/MaterializePostgreSQLConsumer.cpp @@ -23,7 +23,7 @@ namespace ErrorCodes } MaterializePostgreSQLConsumer::MaterializePostgreSQLConsumer( - std::shared_ptr context_, + const Context & context_, PostgreSQLConnectionPtr connection_, const std::string & replication_slot_name_, const std::string & publication_name_, @@ -358,10 +358,11 @@ void MaterializePostgreSQLConsumer::processReplicationMessage(const char * repli } case 'C': // Commit { - readInt8(replication_message, pos, size); /// unused flags - readInt64(replication_message, pos, size); /// Int64 commit lsn - readInt64(replication_message, pos, size); /// Int64 transaction end lsn - readInt64(replication_message, pos, size); /// Int64 transaction commit timestamp + constexpr size_t unused_flags_len = 1; + constexpr size_t commit_lsn_len = 8; + constexpr size_t transaction_end_lsn_len = 8; + constexpr size_t transaction_commit_timestamp_len = 8; + pos += unused_flags_len + commit_lsn_len + transaction_end_lsn_len + transaction_commit_timestamp_len; final_lsn = current_lsn; LOG_DEBUG(log, "Commit lsn: {}", getLSNValue(current_lsn)); /// Will be removed @@ -487,7 +488,7 @@ void MaterializePostgreSQLConsumer::syncTables(std::shared_ptrtable_id = storage->getStorageID(); insert->columns = buffer.columnsAST; - auto insert_context(*context); + auto insert_context(context); insert_context.makeQueryContext(); insert_context.addQueryFactoriesInfo(Context::QueryLogFactories::Storage, "ReplacingMergeTree"); diff --git a/src/Storages/PostgreSQL/MaterializePostgreSQLConsumer.h b/src/Storages/PostgreSQL/MaterializePostgreSQLConsumer.h index 74dd5d67ccf..97bf926d267 100644 --- a/src/Storages/PostgreSQL/MaterializePostgreSQLConsumer.h +++ b/src/Storages/PostgreSQL/MaterializePostgreSQLConsumer.h @@ -27,7 +27,7 @@ public: using Storages = std::unordered_map; MaterializePostgreSQLConsumer( - std::shared_ptr context_, + const Context & context_, PostgreSQLConnectionPtr connection_, const std::string & replication_slot_name_, const std::string & publication_name_, @@ -100,7 +100,7 @@ private: } Poco::Logger * log; - std::shared_ptr context; + const Context & context; const std::string replication_slot_name, publication_name; MaterializePostgreSQLMetadata metadata; diff --git a/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp b/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp index 8033fbf3e45..2752587f9bd 100644 --- a/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp +++ b/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp @@ -15,14 +15,13 @@ namespace DB { static const auto reschedule_ms = 500; -static const auto max_thread_work_duration_ms = 60000; PostgreSQLReplicationHandler::PostgreSQLReplicationHandler( const std::string & database_name_, const std::string & conn_str, const std::string & metadata_path_, - std::shared_ptr context_, + const Context & 
context_, const size_t max_block_size_, bool allow_minimal_ddl_, bool is_postgresql_replica_database_engine_, @@ -41,11 +40,8 @@ PostgreSQLReplicationHandler::PostgreSQLReplicationHandler( replication_slot = fmt::format("{}_ch_replication_slot", database_name); publication_name = fmt::format("{}_ch_publication", database_name); - startup_task = context->getSchedulePool().createTask("PostgreSQLReplicaStartup", [this]{ waitConnectionAndStart(); }); - startup_task->deactivate(); - - consumer_task = context->getSchedulePool().createTask("PostgreSQLReplicaStartup", [this]{ consumerFunc(); }); - consumer_task->deactivate(); + startup_task = context.getSchedulePool().createTask("PostgreSQLReplicaStartup", [this]{ waitConnectionAndStart(); }); + consumer_task = context.getSchedulePool().createTask("PostgreSQLReplicaStartup", [this]{ consumerFunc(); }); } @@ -87,6 +83,7 @@ void PostgreSQLReplicationHandler::waitConnectionAndStart() void PostgreSQLReplicationHandler::shutdown() { stop_synchronization.store(true); + startup_task->deactivate(); consumer_task->deactivate(); } @@ -97,18 +94,18 @@ void PostgreSQLReplicationHandler::startSynchronization() auto replication_connection = std::make_shared(fmt::format("{} replication=database", connection->conn_str())); replication_connection->conn()->set_variable("default_transaction_isolation", "'repeatable read'"); - auto ntx = std::make_shared(*replication_connection->conn()); + auto tx = std::make_shared(*replication_connection->conn()); std::string snapshot_name, start_lsn; auto initial_sync = [&]() { - createReplicationSlot(ntx, start_lsn, snapshot_name); + createReplicationSlot(tx, start_lsn, snapshot_name); loadFromSnapshot(snapshot_name, storages); }; /// Replication slot should be deleted with drop table only and created only once, reused after detach. - if (!isReplicationSlotExist(ntx, replication_slot)) + if (!isReplicationSlotExist(tx, replication_slot)) { initial_sync(); } @@ -117,7 +114,7 @@ void PostgreSQLReplicationHandler::startSynchronization() /// In case of some failure, the following cases are possible (since publication and replication slot are reused): /// 1. If replication slot exists and metadata file (where last synced version is written) does not exist, it is not ok. /// 2. If created a new publication and replication slot existed before it was created, it is not ok. 
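The two failure cases listed above are why the handler probes pg_replication_slots before deciding whether to reuse or recreate the slot. A minimal standalone sketch of that probe-and-create logic (the connection string and slot name are placeholders, and it uses the SQL-level pg_create_logical_replication_slot() rather than the replication-protocol CREATE_REPLICATION_SLOT ... EXPORT_SNAPSHOT command the handler needs to obtain a snapshot name for the initial dump):

#include <iostream>
#include <pqxx/pqxx>

int main()
{
    pqxx::connection conn("dbname=postgres user=postgres");
    pqxx::nontransaction tx(conn);

    /// Probe the catalog: an empty result means the slot has to be created.
    pqxx::result slot = tx.exec("SELECT 1 FROM pg_replication_slots WHERE slot_name = 'ch_slot'");

    if (slot.empty())
    {
        /// 'pgoutput' is the built-in logical decoding plugin that the
        /// publication/subscription machinery in this engine relies on.
        tx.exec("SELECT pg_create_logical_replication_slot('ch_slot', 'pgoutput')");
        std::cout << "created replication slot\n";
    }

    tx.commit();
    return 0;
}
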
- dropReplicationSlot(ntx); + dropReplicationSlot(tx); initial_sync(); } else @@ -136,7 +133,7 @@ void PostgreSQLReplicationHandler::startSynchronization() } } - ntx->commit(); + tx->commit(); consumer = std::make_shared( context, @@ -151,8 +148,6 @@ void PostgreSQLReplicationHandler::startSynchronization() nested_storages); consumer_task->activateAndSchedule(); - - replication_connection->conn()->close(); } @@ -287,10 +282,10 @@ void PostgreSQLReplicationHandler::createPublicationIfNeeded( } -bool PostgreSQLReplicationHandler::isReplicationSlotExist(NontransactionPtr ntx, std::string & slot_name) +bool PostgreSQLReplicationHandler::isReplicationSlotExist(NontransactionPtr tx, std::string & slot_name) { std::string query_str = fmt::format("SELECT active, restart_lsn FROM pg_replication_slots WHERE slot_name = '{}'", slot_name); - pqxx::result result{ntx->exec(query_str)}; + pqxx::result result{tx->exec(query_str)}; /// Replication slot does not exist if (result.empty()) @@ -305,7 +300,7 @@ bool PostgreSQLReplicationHandler::isReplicationSlotExist(NontransactionPtr ntx, void PostgreSQLReplicationHandler::createReplicationSlot( - NontransactionPtr ntx, std::string & start_lsn, std::string & snapshot_name, bool temporary) + NontransactionPtr tx, std::string & start_lsn, std::string & snapshot_name, bool temporary) { std::string query_str; @@ -319,7 +314,7 @@ void PostgreSQLReplicationHandler::createReplicationSlot( try { - pqxx::result result{ntx->exec(query_str)}; + pqxx::result result{tx->exec(query_str)}; start_lsn = result[0][1].as(); snapshot_name = result[0][2].as(); LOG_TRACE(log, "Created replication slot: {}, start lsn: {}", replication_slot, start_lsn); @@ -332,7 +327,7 @@ void PostgreSQLReplicationHandler::createReplicationSlot( } -void PostgreSQLReplicationHandler::dropReplicationSlot(NontransactionPtr ntx, bool temporary) +void PostgreSQLReplicationHandler::dropReplicationSlot(NontransactionPtr tx, bool temporary) { std::string slot_name; if (temporary) @@ -342,15 +337,15 @@ void PostgreSQLReplicationHandler::dropReplicationSlot(NontransactionPtr ntx, bo std::string query_str = fmt::format("SELECT pg_drop_replication_slot('{}')", slot_name); - ntx->exec(query_str); + tx->exec(query_str); LOG_TRACE(log, "Dropped replication slot: {}", slot_name); } -void PostgreSQLReplicationHandler::dropPublication(NontransactionPtr ntx) +void PostgreSQLReplicationHandler::dropPublication(NontransactionPtr tx) { std::string query_str = fmt::format("DROP PUBLICATION IF EXISTS {}", publication_name); - ntx->exec(query_str); + tx->exec(query_str); } @@ -400,7 +395,7 @@ NameSet PostgreSQLReplicationHandler::fetchTablesFromPublication(PostgreSQLConne PostgreSQLTableStructure PostgreSQLReplicationHandler::fetchTableStructure( std::shared_ptr tx, const std::string & table_name) { - auto use_nulls = context->getSettingsRef().external_table_functions_use_nulls; + auto use_nulls = context.getSettingsRef().external_table_functions_use_nulls; return fetchPostgreSQLTableStructure(tx, table_name, use_nulls, true); } @@ -425,12 +420,12 @@ std::unordered_map PostgreSQLReplicationHandler::reloadFromSnapsh auto replication_connection = std::make_shared(fmt::format("{} replication=database", connection_str)); replication_connection->conn()->set_variable("default_transaction_isolation", "'repeatable read'"); - auto ntx = std::make_shared(*replication_connection->conn()); + auto r_tx = std::make_shared(*replication_connection->conn()); std::string snapshot_name, start_lsn; - createReplicationSlot(ntx, start_lsn, 
snapshot_name, true); + createReplicationSlot(r_tx, start_lsn, snapshot_name, true); /// This snapshot is valid up to the end of the transaction, which exported it. auto success_tables = loadFromSnapshot(snapshot_name, sync_storages); - ntx->commit(); + r_tx->commit(); for (const auto & relation : relation_data) { diff --git a/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.h b/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.h index 12fbd782887..00ff0c3c488 100644 --- a/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.h +++ b/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.h @@ -24,12 +24,11 @@ class StorageMaterializePostgreSQL; class PostgreSQLReplicationHandler { public: - friend class PGReplicaLSN; PostgreSQLReplicationHandler( const std::string & database_name_, const std::string & conn_str_, const std::string & metadata_path_, - std::shared_ptr context_, + const Context & context_, const size_t max_block_size_, bool allow_minimal_ddl_, bool is_postgresql_replica_database_engine_, @@ -76,7 +75,7 @@ private: std::unordered_map reloadFromSnapshot(const std::vector> & relation_data); Poco::Logger * log; - std::shared_ptr context; + const Context & context; const std::string database_name, connection_str, metadata_path; const size_t max_block_size; bool allow_minimal_ddl, is_postgresql_replica_database_engine; diff --git a/src/Storages/PostgreSQL/StorageMaterializePostgreSQL.cpp b/src/Storages/PostgreSQL/StorageMaterializePostgreSQL.cpp index c1ba5b66679..14af357a4ab 100644 --- a/src/Storages/PostgreSQL/StorageMaterializePostgreSQL.cpp +++ b/src/Storages/PostgreSQL/StorageMaterializePostgreSQL.cpp @@ -46,7 +46,7 @@ StorageMaterializePostgreSQL::StorageMaterializePostgreSQL( std::unique_ptr replication_settings_) : IStorage(table_id_) , remote_table_name(remote_table_name_) - , global_context(std::make_shared(context_.getGlobalContext())) + , global_context(context_.getGlobalContext()) , replication_settings(std::move(replication_settings_)) , is_postgresql_replica_database( DatabaseCatalog::instance().getDatabase(getStorageID().database_name)->getEngineName() == "MaterializePostgreSQL") @@ -71,7 +71,7 @@ StorageMaterializePostgreSQL::StorageMaterializePostgreSQL( StoragePtr nested_storage_, const Context & context_) : IStorage(table_id_) - , global_context(std::make_shared(context_)) + , global_context(context_) , nested_storage(nested_storage_) , is_postgresql_replica_database( DatabaseCatalog::instance().getDatabase(getStorageID().database_name)->getEngineName() == "MaterializePostgreSQL") @@ -267,7 +267,7 @@ void StorageMaterializePostgreSQL::createNestedIfNeeded(const std::function global_context; + const Context global_context; std::unique_ptr replication_settings; std::unique_ptr replication_handler; From 8091c8724dca603f7b40f19688775f94f6163741 Mon Sep 17 00:00:00 2001 From: kssenii Date: Sat, 20 Mar 2021 12:15:32 +0000 Subject: [PATCH 057/931] Fix metadata file --- .../MaterializePostgreSQLMetadata.cpp | 25 ++++++++++--------- 1 file changed, 13 insertions(+), 12 deletions(-) diff --git a/src/Storages/PostgreSQL/MaterializePostgreSQLMetadata.cpp b/src/Storages/PostgreSQL/MaterializePostgreSQLMetadata.cpp index 5cc68b429c0..ee945c67c1a 100644 --- a/src/Storages/PostgreSQL/MaterializePostgreSQLMetadata.cpp +++ b/src/Storages/PostgreSQL/MaterializePostgreSQLMetadata.cpp @@ -51,14 +51,14 @@ void MaterializePostgreSQLMetadata::writeMetadata(bool append_metadata) { WriteBufferFromFile out(tmp_metadata_file, DBMS_DEFAULT_BUFFER_SIZE, O_WRONLY | O_TRUNC | O_CREAT); - if 
(!append_metadata) + if (append_metadata) { - writeString("\nLast version:\t" + toString(last_version), out); - writeString("\nLast LSN:\t" + toString(last_lsn), out); + writeString("\nActual LSN:\t" + toString(last_lsn), out); } else { - writeString("\nActual LSN:\t" + toString(last_lsn), out); + writeString("\nLast version:\t" + toString(last_version), out); + writeString("\nLast LSN:\t" + toString(last_lsn), out); } out.next(); @@ -78,6 +78,15 @@ void MaterializePostgreSQLMetadata::commitMetadata(std::string & lsn, const std: try { actual_lsn = finalizeStreamFunc(); + + /// This is not supposed to happen + if (actual_lsn != last_lsn) + { + writeMetadata(true); + LOG_WARNING(&Poco::Logger::get("MaterializePostgreSQLMetadata"), + "Last written LSN {} is not equal to actual LSN {}", last_lsn, actual_lsn); + } + Poco::File(tmp_metadata_file).renameTo(metadata_file); } catch (...) @@ -85,14 +94,6 @@ void MaterializePostgreSQLMetadata::commitMetadata(std::string & lsn, const std: Poco::File(tmp_metadata_file).remove(); throw; } - - /// This is not supposed to happen - if (actual_lsn != last_lsn) - { - writeMetadata(true); - LOG_WARNING(&Poco::Logger::get("MaterializePostgreSQLMetadata"), - "Last written LSN {} is not equal to actual LSN {}", last_lsn, actual_lsn); - } } } From 91b79f95bc5cad93b3bbe9edf45034de53a53a38 Mon Sep 17 00:00:00 2001 From: kssenii Date: Sat, 20 Mar 2021 12:24:35 +0000 Subject: [PATCH 058/931] Better templates --- .../PostgreSQL/fetchPostgreSQLTableStructure.cpp | 15 +++++---------- .../PostgreSQL/fetchPostgreSQLTableStructure.h | 3 ++- 2 files changed, 7 insertions(+), 11 deletions(-) diff --git a/src/Databases/PostgreSQL/fetchPostgreSQLTableStructure.cpp b/src/Databases/PostgreSQL/fetchPostgreSQLTableStructure.cpp index e139914fcc8..14004021401 100644 --- a/src/Databases/PostgreSQL/fetchPostgreSQLTableStructure.cpp +++ b/src/Databases/PostgreSQL/fetchPostgreSQLTableStructure.cpp @@ -154,7 +154,7 @@ std::shared_ptr readNamesAndTypesList( template -PostgreSQLTableStructure fetchPostgreSQLTableStructureImpl( +PostgreSQLTableStructure fetchPostgreSQLTableStructure( std::shared_ptr tx, const String & postgres_table_name, bool use_nulls, bool with_primary_key) { PostgreSQLTableStructure table; @@ -194,19 +194,14 @@ PostgreSQLTableStructure fetchPostgreSQLTableStructureImpl( } +template PostgreSQLTableStructure fetchPostgreSQLTableStructure( - std::shared_ptr tx, const String & postgres_table_name, bool use_nulls, bool with_primary_key) -{ - return fetchPostgreSQLTableStructureImpl(tx, postgres_table_name, use_nulls, with_primary_key); -} + std::shared_ptr tx, const String & postgres_table_name, bool use_nulls, bool with_primary_key); -/// For the case when several operations are made on the transaction object before it can be used (like export snapshot and isolation level) +template PostgreSQLTableStructure fetchPostgreSQLTableStructure( - std::shared_ptr tx, const String & postgres_table_name, bool use_nulls, bool with_primary_key) -{ - return fetchPostgreSQLTableStructureImpl(tx, postgres_table_name, use_nulls, with_primary_key); -} + std::shared_ptr tx, const String & postgres_table_name, bool use_nulls, bool with_primary_key); PostgreSQLTableStructure fetchPostgreSQLTableStructure( diff --git a/src/Databases/PostgreSQL/fetchPostgreSQLTableStructure.h b/src/Databases/PostgreSQL/fetchPostgreSQLTableStructure.h index bf3c8ead422..485f1f1742e 100644 --- a/src/Databases/PostgreSQL/fetchPostgreSQLTableStructure.h +++ b/src/Databases/PostgreSQL/fetchPostgreSQLTableStructure.h 
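The commitMetadata flow above writes the new state to tmp_metadata_file and then renames it over metadata_file, so a crash mid-write never corrupts the previously committed metadata. A reduced sketch of the same write-then-rename pattern in portable C++, assuming POSIX rename semantics (file names and payload are illustrative):

#include <cstdio>
#include <fstream>
#include <stdexcept>
#include <string>

void commitMetadataSketch(const std::string & metadata_file, const std::string & payload)
{
    const std::string tmp_file = metadata_file + ".tmp";

    {
        std::ofstream out(tmp_file, std::ios::trunc);
        out << payload;
        if (!out)
            throw std::runtime_error("cannot write " + tmp_file);
    } // stream flushed and closed here, like WriteBufferFromFile above

    // rename(2) is atomic when both paths are on one filesystem,
    // which is what Poco::File::renameTo relies on in the patch.
    if (std::rename(tmp_file.c_str(), metadata_file.c_str()) != 0)
    {
        std::remove(tmp_file.c_str()); // mirrors the cleanup in the catch block
        throw std::runtime_error("cannot rename " + tmp_file);
    }
}

int main()
{
    commitMetadataSketch("metadata.txt", "Last version:\t1\nLast LSN:\t0/16B6C50\n");
}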
@@ -23,8 +23,9 @@ struct PostgreSQLTableStructure PostgreSQLTableStructure fetchPostgreSQLTableStructure( PostgreSQLConnection::ConnectionPtr connection, const String & postgres_table_name, bool use_nulls, bool with_primary_key = false); +template PostgreSQLTableStructure fetchPostgreSQLTableStructure( - std::shared_ptr tx, const String & postgres_table_name, bool use_nulls, bool with_primary_key = false); + std::shared_ptr tx, const String & postgres_table_name, bool use_nulls, bool with_primary_key = false); } From 60fbeb83546bd7ea91641da245144fe24a5d5f6d Mon Sep 17 00:00:00 2001 From: kssenii Date: Sat, 20 Mar 2021 12:53:12 +0000 Subject: [PATCH 059/931] Better replica consumer code --- .../MaterializePostgreSQLConsumer.cpp | 32 +++++++++++-------- .../MaterializePostgreSQLConsumer.h | 4 +-- 2 files changed, 20 insertions(+), 16 deletions(-) diff --git a/src/Storages/PostgreSQL/MaterializePostgreSQLConsumer.cpp b/src/Storages/PostgreSQL/MaterializePostgreSQLConsumer.cpp index 6448d71adf2..50278fe70aa 100644 --- a/src/Storages/PostgreSQL/MaterializePostgreSQLConsumer.cpp +++ b/src/Storages/PostgreSQL/MaterializePostgreSQLConsumer.cpp @@ -52,7 +52,7 @@ MaterializePostgreSQLConsumer::MaterializePostgreSQLConsumer( } -void MaterializePostgreSQLConsumer::Buffer::fillBuffer(StoragePtr storage) +void MaterializePostgreSQLConsumer::Buffer::createEmptyBuffer(StoragePtr storage) { const auto storage_metadata = storage->getInMemoryMetadataPtr(); description.init(storage_metadata->getSampleBlock()); @@ -281,9 +281,7 @@ void MaterializePostgreSQLConsumer::processReplicationMessage(const char * repli Int8 new_tuple = readInt8(replication_message, pos, size); const auto & table_name = relation_id_to_name[relation_id]; auto buffer = buffers.find(table_name); - - if (buffer == buffers.end()) - throw Exception(ErrorCodes::UNKNOWN_TABLE, "Buffer for table {} does not exist", table_name); + assert(buffer != buffers.end()); if (new_tuple) readTupleData(buffer->second, replication_message, pos, size, PostgreSQLQuery::INSERT); @@ -299,6 +297,7 @@ void MaterializePostgreSQLConsumer::processReplicationMessage(const char * repli const auto & table_name = relation_id_to_name[relation_id]; auto buffer = buffers.find(table_name); + assert(buffer != buffers.end()); auto proccess_identifier = [&](Int8 identifier) -> bool { @@ -352,6 +351,7 @@ void MaterializePostgreSQLConsumer::processReplicationMessage(const char * repli const auto & table_name = relation_id_to_name[relation_id]; auto buffer = buffers.find(table_name); + assert(buffer != buffers.end()); readTupleData(buffer->second, replication_message, pos, size, PostgreSQLQuery::DELETE); break; @@ -381,6 +381,16 @@ void MaterializePostgreSQLConsumer::processReplicationMessage(const char * repli if (!isSyncAllowed(relation_id)) return; + if (storages.find(relation_name) == storages.end()) + { + markTableAsSkipped(relation_id, relation_name); + LOG_ERROR(log, "Storage for table {} does not exist, but is included in replication stream", relation_name); + return; + } + + assert(buffers.count(relation_name)); + + /// 'd' - default (primary key if any) /// 'n' - nothing /// 'f' - all columns (set replica identity full) @@ -443,15 +453,6 @@ void MaterializePostgreSQLConsumer::processReplicationMessage(const char * repli } } - if (storages.find(relation_name) == storages.end()) - { - throw Exception(ErrorCodes::LOGICAL_ERROR, - "Storage for table {} does not exist, but is included in replication stream", relation_name); - } - - [[maybe_unused]] auto buffer_iter = 
buffers.find(relation_name); - assert(buffer_iter != buffers.end()); - tables_to_sync.insert(relation_name); break; @@ -530,6 +531,9 @@ String MaterializePostgreSQLConsumer::advanceLSN(std::shared_ptrsecond; - buffer.fillBuffer(nested_storage); + buffer.createEmptyBuffer(nested_storage); } diff --git a/src/Storages/PostgreSQL/MaterializePostgreSQLConsumer.h b/src/Storages/PostgreSQL/MaterializePostgreSQLConsumer.h index 97bf926d267..7cbfb8dd963 100644 --- a/src/Storages/PostgreSQL/MaterializePostgreSQLConsumer.h +++ b/src/Storages/PostgreSQL/MaterializePostgreSQLConsumer.h @@ -65,8 +65,8 @@ private: /// Needed for insertPostgreSQLValue() method to parse array std::unordered_map array_info; - Buffer(StoragePtr storage) { fillBuffer(storage); } - void fillBuffer(StoragePtr storage); + Buffer(StoragePtr storage) { createEmptyBuffer(storage); } + void createEmptyBuffer(StoragePtr storage); }; using Buffers = std::unordered_map; From 4c37ff3fbcf0aa53b099029bb9ec637bcfffeecc Mon Sep 17 00:00:00 2001 From: kssenii Date: Sat, 20 Mar 2021 13:13:30 +0000 Subject: [PATCH 060/931] Less lambdas --- .../fetchPostgreSQLTableStructure.h | 2 ++ .../PostgreSQLReplicationHandler.cpp | 9 ++++++--- .../PostgreSQL/PostgreSQLReplicationHandler.h | 4 ++-- .../StorageMaterializePostgreSQL.cpp | 20 +++++++++++-------- .../PostgreSQL/StorageMaterializePostgreSQL.h | 4 ++-- 5 files changed, 24 insertions(+), 15 deletions(-) diff --git a/src/Databases/PostgreSQL/fetchPostgreSQLTableStructure.h b/src/Databases/PostgreSQL/fetchPostgreSQLTableStructure.h index 485f1f1742e..265f1f33234 100644 --- a/src/Databases/PostgreSQL/fetchPostgreSQLTableStructure.h +++ b/src/Databases/PostgreSQL/fetchPostgreSQLTableStructure.h @@ -20,6 +20,8 @@ struct PostgreSQLTableStructure std::shared_ptr primary_key_columns; }; +using PostgreSQLTableStructurePtr = std::unique_ptr; + PostgreSQLTableStructure fetchPostgreSQLTableStructure( PostgreSQLConnection::ConnectionPtr connection, const String & postgres_table_name, bool use_nulls, bool with_primary_key = false); diff --git a/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp b/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp index 2752587f9bd..62c334e6dda 100644 --- a/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp +++ b/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp @@ -167,7 +167,7 @@ NameSet PostgreSQLReplicationHandler::loadFromSnapshot(std::string & snapshot_na std::string query_str = fmt::format("SET TRANSACTION SNAPSHOT '{}'", snapshot_name); tx->exec(query_str); - storage_data.second->createNestedIfNeeded([&]() { return fetchTableStructure(tx, table_name); }); + storage_data.second->createNestedIfNeeded(fetchTableStructure(tx, table_name)); auto nested_storage = storage_data.second->getNested(); /// Load from snapshot, which will show table state before creation of replication slot. 
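The createNestedIfNeeded change just above replaces a deferred std::function callback with a PostgreSQLTableStructurePtr that the caller materializes up front. A toy before/after with hypothetical simplified types, showing why the pointer form is easier to reason about: the caller controls exactly when the transaction-bound fetch runs, and nullptr cleanly encodes "no fetched structure, fall back to the storage definition":

#include <functional>
#include <iostream>
#include <memory>

struct TableStructure { int column_count = 0; }; // hypothetical stand-in

// Before: the callee decides when (and whether) to invoke the fetch, so the
// callback has to keep the snapshot transaction alive behind the scenes.
void createNestedOld(const std::function<TableStructure()> & fetch_table_structure)
{
    TableStructure structure = fetch_table_structure();
    std::cout << "old path: " << structure.column_count << " columns\n";
}

// After: the caller runs the fetch exactly once inside its own transaction;
// nullptr means "use the columns already declared in the CREATE query".
void createNestedNew(std::unique_ptr<TableStructure> table_structure)
{
    if (!table_structure)
    {
        std::cout << "new path: columns come from the storage definition\n";
        return;
    }
    std::cout << "new path: " << table_structure->column_count << " columns\n";
}

int main()
{
    createNestedOld([] { return TableStructure{3}; });
    createNestedNew(std::make_unique<TableStructure>(TableStructure{3}));
    createNestedNew(nullptr);
}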
@@ -392,11 +392,14 @@ NameSet PostgreSQLReplicationHandler::fetchTablesFromPublication(PostgreSQLConne } -PostgreSQLTableStructure PostgreSQLReplicationHandler::fetchTableStructure( +PostgreSQLTableStructurePtr PostgreSQLReplicationHandler::fetchTableStructure( std::shared_ptr tx, const std::string & table_name) { + if (!is_postgresql_replica_database_engine) + return nullptr; + auto use_nulls = context.getSettingsRef().external_table_functions_use_nulls; - return fetchPostgreSQLTableStructure(tx, table_name, use_nulls, true); + return std::make_unique(fetchPostgreSQLTableStructure(tx, table_name, use_nulls, true)); } diff --git a/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.h b/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.h index 00ff0c3c488..2b2245082f5 100644 --- a/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.h +++ b/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.h @@ -44,8 +44,6 @@ public: NameSet fetchRequiredTables(PostgreSQLConnection::ConnectionPtr connection_); - PostgreSQLTableStructure fetchTableStructure(std::shared_ptr tx, const std::string & table_name); - private: using NontransactionPtr = std::shared_ptr; using Storages = std::unordered_map; @@ -74,6 +72,8 @@ private: std::unordered_map reloadFromSnapshot(const std::vector> & relation_data); + PostgreSQLTableStructurePtr fetchTableStructure(std::shared_ptr tx, const std::string & table_name); + Poco::Logger * log; const Context & context; const std::string database_name, connection_str, metadata_path; diff --git a/src/Storages/PostgreSQL/StorageMaterializePostgreSQL.cpp b/src/Storages/PostgreSQL/StorageMaterializePostgreSQL.cpp index 14af357a4ab..92ca0b6c2bf 100644 --- a/src/Storages/PostgreSQL/StorageMaterializePostgreSQL.cpp +++ b/src/Storages/PostgreSQL/StorageMaterializePostgreSQL.cpp @@ -153,7 +153,7 @@ ASTPtr StorageMaterializePostgreSQL::getColumnDeclaration(const DataTypePtr & da /// For single storage MaterializePostgreSQL get columns and primary key columns from storage definition. /// For database engine MaterializePostgreSQL get columns and primary key columns by fetching from PostgreSQL, also using the same /// transaction with snapshot, which is used for initial tables dump. 
-ASTPtr StorageMaterializePostgreSQL::getCreateNestedTableQuery(const std::function & fetch_table_structure) +ASTPtr StorageMaterializePostgreSQL::getCreateNestedTableQuery(PostgreSQLTableStructurePtr table_structure) { auto create_table_query = std::make_shared(); @@ -175,9 +175,13 @@ ASTPtr StorageMaterializePostgreSQL::getCreateNestedTableQuery(const std::functi } else { - auto table_structure = fetch_table_structure(); + if (!table_structure) + { + throw Exception(ErrorCodes::LOGICAL_ERROR, + "No table structure returned for table {}.{}", table_id.database_name, table_id.table_name); + } - if (!table_structure.columns) + if (!table_structure->columns) { throw Exception(ErrorCodes::LOGICAL_ERROR, "No columns returned for table {}.{}", table_id.database_name, table_id.table_name); @@ -185,17 +189,17 @@ ASTPtr StorageMaterializePostgreSQL::getCreateNestedTableQuery(const std::functi StorageInMemoryMetadata storage_metadata; - ordinary_columns_and_types = *table_structure.columns; + ordinary_columns_and_types = *table_structure->columns; storage_metadata.setColumns(ColumnsDescription(ordinary_columns_and_types)); setInMemoryMetadata(storage_metadata); - if (!table_structure.primary_key_columns) + if (!table_structure->primary_key_columns) { throw Exception(ErrorCodes::LOGICAL_ERROR, "No primary key columns returned for table {}.{}", table_id.database_name, table_id.table_name); } - auto primary_key_columns = *table_structure.primary_key_columns; + auto primary_key_columns = *table_structure->primary_key_columns; order_by_expression->name = "tuple"; order_by_expression->arguments = std::make_shared(); @@ -238,7 +242,7 @@ ASTPtr StorageMaterializePostgreSQL::getCreateNestedTableQuery(const std::functi } -void StorageMaterializePostgreSQL::createNestedIfNeeded(const std::function & fetch_table_structure) +void StorageMaterializePostgreSQL::createNestedIfNeeded(PostgreSQLTableStructurePtr table_structure) { if (nested_loaded) { @@ -249,7 +253,7 @@ void StorageMaterializePostgreSQL::createNestedIfNeeded(const std::function & fetch_table_structure); + void createNestedIfNeeded(PostgreSQLTableStructurePtr table_structure); /// Can be nullptr StoragePtr tryGetNested(); @@ -85,7 +85,7 @@ private: ASTPtr getColumnDeclaration(const DataTypePtr & data_type) const; - ASTPtr getCreateNestedTableQuery(const std::function & fetch_table_structure); + ASTPtr getCreateNestedTableQuery(PostgreSQLTableStructurePtr table_structure); std::string getNestedTableName() const; From 8062a81ddd111386795b585919b7337f58cd9664 Mon Sep 17 00:00:00 2001 From: kssenii Date: Sat, 20 Mar 2021 14:13:57 +0000 Subject: [PATCH 061/931] Separate common for materialize* storages --- .../StorageMaterializePostgreSQL.cpp | 72 +++------------- .../ReadFinalForExternalReplicaStorage.cpp | 86 +++++++++++++++++++ .../ReadFinalForExternalReplicaStorage.h | 28 ++++++ src/Storages/StorageMaterializeMySQL.cpp | 66 +++----------- 4 files changed, 136 insertions(+), 116 deletions(-) create mode 100644 src/Storages/ReadFinalForExternalReplicaStorage.cpp create mode 100644 src/Storages/ReadFinalForExternalReplicaStorage.h diff --git a/src/Storages/PostgreSQL/StorageMaterializePostgreSQL.cpp b/src/Storages/PostgreSQL/StorageMaterializePostgreSQL.cpp index 92ca0b6c2bf..2f950f9823e 100644 --- a/src/Storages/PostgreSQL/StorageMaterializePostgreSQL.cpp +++ b/src/Storages/PostgreSQL/StorageMaterializePostgreSQL.cpp @@ -21,6 +21,7 @@ #include #include #include +#include namespace DB @@ -368,7 +369,7 @@ NamesAndTypesList 
StorageMaterializePostgreSQL::getVirtuals() const Pipe StorageMaterializePostgreSQL::read( const Names & column_names, - const StorageMetadataPtr & /* metadata_snapshot */, + const StorageMetadataPtr & metadata_snapshot, SelectQueryInfo & query_info, const Context & context, QueryProcessingStage::Enum processed_stage, @@ -382,66 +383,15 @@ Pipe StorageMaterializePostgreSQL::read( if (!nested_storage) getNested(); - auto storage_lock = nested_storage->lockForShare(context.getCurrentQueryId(), context.getSettingsRef().lock_acquire_timeout); - - const StorageMetadataPtr & nested_metadata = nested_storage->getInMemoryMetadataPtr(); - Block nested_header = nested_metadata->getSampleBlock(); - - ColumnWithTypeAndName & sign_column = nested_header.getByPosition(nested_header.columns() - 2); - ColumnWithTypeAndName & version_column = nested_header.getByPosition(nested_header.columns() - 1); - - NameSet column_names_set = NameSet(column_names.begin(), column_names.end()); - - if (ASTSelectQuery * select_query = query_info.query->as(); select_query && !column_names_set.count(version_column.name)) - { - auto & tables_in_select_query = select_query->tables()->as(); - - if (!tables_in_select_query.children.empty()) - { - auto & tables_element = tables_in_select_query.children[0]->as(); - - if (tables_element.table_expression) - tables_element.table_expression->as().final = true; - } - } - - String filter_column_name; - Names require_columns_name = column_names; - ASTPtr expressions = std::make_shared(); - if (column_names_set.empty() || !column_names_set.count(sign_column.name)) - { - require_columns_name.emplace_back(sign_column.name); - - const auto & sign_column_name = std::make_shared(sign_column.name); - const auto & fetch_sign_value = std::make_shared(Field(Int8(1))); - - expressions->children.emplace_back(makeASTFunction("equals", sign_column_name, fetch_sign_value)); - filter_column_name = expressions->children.back()->getColumnName(); - - for (const auto & column_name : column_names) - expressions->children.emplace_back(std::make_shared(column_name)); - } - - Pipe pipe = nested_storage->read( - require_columns_name, - nested_metadata, query_info, context, - processed_stage, max_block_size, num_streams); - - pipe.addTableLock(storage_lock); - - if (!expressions->children.empty() && !pipe.empty()) - { - Block pipe_header = pipe.getHeader(); - auto syntax = TreeRewriter(context).analyze(expressions, pipe_header.getNamesAndTypesList()); - ExpressionActionsPtr expression_actions = ExpressionAnalyzer(expressions, syntax, context).getActions(true); - - pipe.addSimpleTransform([&](const Block & header) - { - return std::make_shared(header, expression_actions, filter_column_name, false); - }); - } - - return pipe; + return readFinalFromNestedStorage( + nested_storage, + column_names, + metadata_snapshot, + query_info, + context, + processed_stage, + max_block_size, + num_streams); } LOG_WARNING(&Poco::Logger::get("StorageMaterializePostgreSQL"), "Nested table {} is unavailable or is not loaded yet", getNestedTableName()); diff --git a/src/Storages/ReadFinalForExternalReplicaStorage.cpp b/src/Storages/ReadFinalForExternalReplicaStorage.cpp new file mode 100644 index 00000000000..37b95eb5d6a --- /dev/null +++ b/src/Storages/ReadFinalForExternalReplicaStorage.cpp @@ -0,0 +1,86 @@ +#include + +#if USE_MYSQL || USE_LIBPQXX + +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace DB +{ + +Pipe readFinalFromNestedStorage( + StoragePtr nested_storage, + const 
Names & column_names, + const StorageMetadataPtr & /*metadata_snapshot*/, + SelectQueryInfo & query_info, + const Context & context, + QueryProcessingStage::Enum processed_stage, + size_t max_block_size, + unsigned int num_streams) +{ + NameSet column_names_set = NameSet(column_names.begin(), column_names.end()); + auto lock = nested_storage->lockForShare(context.getCurrentQueryId(), context.getSettingsRef().lock_acquire_timeout); + const StorageMetadataPtr & nested_metadata = nested_storage->getInMemoryMetadataPtr(); + + Block nested_header = nested_metadata->getSampleBlock(); + ColumnWithTypeAndName & sign_column = nested_header.getByPosition(nested_header.columns() - 2); + ColumnWithTypeAndName & version_column = nested_header.getByPosition(nested_header.columns() - 1); + + if (ASTSelectQuery * select_query = query_info.query->as(); select_query && !column_names_set.count(version_column.name)) + { + auto & tables_in_select_query = select_query->tables()->as(); + + if (!tables_in_select_query.children.empty()) + { + auto & tables_element = tables_in_select_query.children[0]->as(); + + if (tables_element.table_expression) + tables_element.table_expression->as().final = true; + } + } + + String filter_column_name; + Names require_columns_name = column_names; + ASTPtr expressions = std::make_shared(); + if (column_names_set.empty() || !column_names_set.count(sign_column.name)) + { + require_columns_name.emplace_back(sign_column.name); + + const auto & sign_column_name = std::make_shared(sign_column.name); + const auto & fetch_sign_value = std::make_shared(Field(Int8(1))); + + expressions->children.emplace_back(makeASTFunction("equals", sign_column_name, fetch_sign_value)); + filter_column_name = expressions->children.back()->getColumnName(); + + for (const auto & column_name : column_names) + expressions->children.emplace_back(std::make_shared(column_name)); + } + + Pipe pipe = nested_storage->read(require_columns_name, nested_metadata, query_info, context, processed_stage, max_block_size, num_streams); + pipe.addTableLock(lock); + + if (!expressions->children.empty() && !pipe.empty()) + { + Block pipe_header = pipe.getHeader(); + auto syntax = TreeRewriter(context).analyze(expressions, pipe_header.getNamesAndTypesList()); + ExpressionActionsPtr expression_actions = ExpressionAnalyzer(expressions, syntax, context).getActions(true); + + pipe.addSimpleTransform([&](const Block & header) + { + return std::make_shared(header, expression_actions, filter_column_name, false); + }); + } + + return pipe; +} +} + +#endif diff --git a/src/Storages/ReadFinalForExternalReplicaStorage.h b/src/Storages/ReadFinalForExternalReplicaStorage.h new file mode 100644 index 00000000000..2062392b22f --- /dev/null +++ b/src/Storages/ReadFinalForExternalReplicaStorage.h @@ -0,0 +1,28 @@ +#pragma once + +#if !defined(ARCADIA_BUILD) +# include "config_core.h" +#endif + +#if USE_MYSQL || USE_LIBPQXX + +#include +#include + + +namespace DB +{ + +Pipe readFinalFromNestedStorage( + StoragePtr nested_storage, + const Names & column_names, + const StorageMetadataPtr & /*metadata_snapshot*/, + SelectQueryInfo & query_info, + const Context & context, + QueryProcessingStage::Enum processed_stage, + size_t max_block_size, + unsigned int num_streams); + +} + +#endif diff --git a/src/Storages/StorageMaterializeMySQL.cpp b/src/Storages/StorageMaterializeMySQL.cpp index e59f1e22958..876c9c41e20 100644 --- a/src/Storages/StorageMaterializeMySQL.cpp +++ b/src/Storages/StorageMaterializeMySQL.cpp @@ -23,6 +23,7 @@ #include #include 
+#include namespace DB { @@ -37,7 +38,7 @@ StorageMaterializeMySQL::StorageMaterializeMySQL(const StoragePtr & nested_stora Pipe StorageMaterializeMySQL::read( const Names & column_names, - const StorageMetadataPtr & /*metadata_snapshot*/, + const StorageMetadataPtr & metadata_snapshot, SelectQueryInfo & query_info, const Context & context, QueryProcessingStage::Enum processed_stage, @@ -47,60 +48,15 @@ Pipe StorageMaterializeMySQL::read( /// If the background synchronization thread has exception. rethrowSyncExceptionIfNeed(database); - NameSet column_names_set = NameSet(column_names.begin(), column_names.end()); - auto lock = nested_storage->lockForShare(context.getCurrentQueryId(), context.getSettingsRef().lock_acquire_timeout); - const StorageMetadataPtr & nested_metadata = nested_storage->getInMemoryMetadataPtr(); - - Block nested_header = nested_metadata->getSampleBlock(); - ColumnWithTypeAndName & sign_column = nested_header.getByPosition(nested_header.columns() - 2); - ColumnWithTypeAndName & version_column = nested_header.getByPosition(nested_header.columns() - 1); - - if (ASTSelectQuery * select_query = query_info.query->as(); select_query && !column_names_set.count(version_column.name)) - { - auto & tables_in_select_query = select_query->tables()->as(); - - if (!tables_in_select_query.children.empty()) - { - auto & tables_element = tables_in_select_query.children[0]->as(); - - if (tables_element.table_expression) - tables_element.table_expression->as().final = true; - } - } - - String filter_column_name; - Names require_columns_name = column_names; - ASTPtr expressions = std::make_shared(); - if (column_names_set.empty() || !column_names_set.count(sign_column.name)) - { - require_columns_name.emplace_back(sign_column.name); - - const auto & sign_column_name = std::make_shared(sign_column.name); - const auto & fetch_sign_value = std::make_shared(Field(Int8(1))); - - expressions->children.emplace_back(makeASTFunction("equals", sign_column_name, fetch_sign_value)); - filter_column_name = expressions->children.back()->getColumnName(); - - for (const auto & column_name : column_names) - expressions->children.emplace_back(std::make_shared(column_name)); - } - - Pipe pipe = nested_storage->read(require_columns_name, nested_metadata, query_info, context, processed_stage, max_block_size, num_streams); - pipe.addTableLock(lock); - - if (!expressions->children.empty() && !pipe.empty()) - { - Block pipe_header = pipe.getHeader(); - auto syntax = TreeRewriter(context).analyze(expressions, pipe_header.getNamesAndTypesList()); - ExpressionActionsPtr expression_actions = ExpressionAnalyzer(expressions, syntax, context).getActions(true); - - pipe.addSimpleTransform([&](const Block & header) - { - return std::make_shared(header, expression_actions, filter_column_name, false); - }); - } - - return pipe; + return readFinalFromNestedStorage( + nested_storage, + column_names, + metadata_snapshot, + query_info, + context, + processed_stage, + max_block_size, + num_streams); } NamesAndTypesList StorageMaterializeMySQL::getVirtuals() const From 878292d341694241d3524a60dc24154b392bc44e Mon Sep 17 00:00:00 2001 From: kssenii Date: Sat, 20 Mar 2021 14:26:36 +0000 Subject: [PATCH 062/931] Remove redundant rethrow --- src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp | 2 -- 1 file changed, 2 deletions(-) diff --git a/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp b/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp index 62c334e6dda..e81ad1e0aff 100644 --- 
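The shared readFinalFromNestedStorage helper forces FINAL on the nested ReplacingMergeTree-style table and filters on the sign column (the last two columns of the nested header, conventionally _sign and _version in these engines). A toy model of what that pair of steps yields, assuming the usual convention of a monotonically increasing version per key and sign = -1 marking a delete:

#include <cstdint>
#include <iostream>
#include <map>
#include <vector>

struct Row { int64_t key; uint64_t version; int8_t sign; };

int main()
{
    std::vector<Row> rows = {
        {1, 1, 1}, {1, 2, 1},  // key 1: inserted, then updated
        {2, 1, 1}, {2, 2, -1}, // key 2: inserted, then deleted
    };

    // FINAL: the latest version per key wins.
    std::map<int64_t, Row> final_state;
    for (const auto & row : rows)
        if (!final_state.count(row.key) || row.version > final_state[row.key].version)
            final_state[row.key] = row;

    // WHERE sign = 1: rows whose final state is a delete are filtered out.
    for (const auto & [key, row] : final_state)
        if (row.sign == 1)
            std::cout << "key " << key << " visible at version " << row.version << "\n";
}

Running this prints only key 1, which is exactly the snapshot a SELECT over the materialized table should see.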
a/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp +++ b/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp @@ -200,8 +200,6 @@ NameSet PostgreSQLReplicationHandler::loadFromSnapshot(std::string & snapshot_na catch (Exception & e) { tryLogCurrentException(__PRETTY_FUNCTION__); - e.addMessage("while initial data synchronization for table {}", storage_data.first); - throw; } } From ed158e2b7790c2199957c3571a404bef36beac74 Mon Sep 17 00:00:00 2001 From: kssenii Date: Sat, 20 Mar 2021 14:42:03 +0000 Subject: [PATCH 063/931] Proper setting --- src/Core/Settings.h | 1 + src/Databases/PostgreSQL/DatabasePostgreSQL.cpp | 2 +- src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp | 4 ++-- 3 files changed, 4 insertions(+), 3 deletions(-) diff --git a/src/Core/Settings.h b/src/Core/Settings.h index 2af246b50a7..db87d7334b1 100644 --- a/src/Core/Settings.h +++ b/src/Core/Settings.h @@ -412,6 +412,7 @@ class IColumn; M(Bool, alter_partition_verbose_result, false, "Output information about affected parts. Currently works only for FREEZE and ATTACH commands.", 0) \ M(Bool, allow_experimental_database_materialize_mysql, false, "Allow to create database with Engine=MaterializeMySQL(...).", 0) \ M(Bool, allow_experimental_database_postgresql_replica, false, "Allow to create database with Engine=PostgreSQLReplica(...).", 0) \ + M(Bool, external_databases_use_nulls, true, "If set to false, external databases will use default values instead of NULLs. (Supported for PostgreSQL/MaterializePostgreSQL database engine)", 0) \ M(Bool, system_events_show_zero_values, false, "Include all metrics, even with zero values", 0) \ M(MySQLDataTypesSupport, mysql_datatypes_support_level, 0, "Which MySQL types should be converted to corresponding ClickHouse types (rather than being represented as String). Can be empty or any combination of 'decimal' or 'datetime64'. When empty MySQL's DECIMAL and DATETIME/TIMESTAMP with non-zero precision are seen as String on ClickHouse's side.", 0) \ M(Bool, optimize_trivial_insert_select, true, "Optimize trivial 'INSERT INTO table SELECT ... FROM TABLES' query", 0) \ diff --git a/src/Databases/PostgreSQL/DatabasePostgreSQL.cpp b/src/Databases/PostgreSQL/DatabasePostgreSQL.cpp index 81394eb6fb3..320f39376e7 100644 --- a/src/Databases/PostgreSQL/DatabasePostgreSQL.cpp +++ b/src/Databases/PostgreSQL/DatabasePostgreSQL.cpp @@ -148,7 +148,7 @@ StoragePtr DatabasePostgreSQL::fetchTable(const String & table_name, const Conte if (!table_checked && !checkPostgresTable(table_name)) return StoragePtr{}; - auto use_nulls = context.getSettingsRef().external_table_functions_use_nulls; + auto use_nulls = context.getSettingsRef().external_databases_use_nulls; auto columns = fetchPostgreSQLTableStructure(connection->conn(), doubleQuoteString(table_name), use_nulls).columns; if (!columns) diff --git a/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp b/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp index e81ad1e0aff..fa48641424b 100644 --- a/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp +++ b/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp @@ -197,7 +197,7 @@ NameSet PostgreSQLReplicationHandler::loadFromSnapshot(std::string & snapshot_na if (consumer) consumer->updateNested(table_name, nested_storage); } - catch (Exception & e) + catch (...)
{ tryLogCurrentException(__PRETTY_FUNCTION__); } @@ -396,7 +396,7 @@ PostgreSQLTableStructurePtr PostgreSQLReplicationHandler::fetchTableStructure( if (!is_postgresql_replica_database_engine) return nullptr; - auto use_nulls = context.getSettingsRef().external_table_functions_use_nulls; + auto use_nulls = context.getSettingsRef().external_databases_use_nulls; return std::make_unique(fetchPostgreSQLTableStructure(tx, table_name, use_nulls, true)); } From 40e50e6b027e91e765aeac9e285f80f6b8f9da1a Mon Sep 17 00:00:00 2001 From: kssenii Date: Sat, 20 Mar 2021 15:12:46 +0000 Subject: [PATCH 064/931] Add unhexN method --- .../MaterializePostgreSQLConsumer.cpp | 42 +++++++++++-------- .../MaterializePostgreSQLConsumer.h | 2 + 2 files changed, 26 insertions(+), 18 deletions(-) diff --git a/src/Storages/PostgreSQL/MaterializePostgreSQLConsumer.cpp b/src/Storages/PostgreSQL/MaterializePostgreSQLConsumer.cpp index 50278fe70aa..3f562687148 100644 --- a/src/Storages/PostgreSQL/MaterializePostgreSQLConsumer.cpp +++ b/src/Storages/PostgreSQL/MaterializePostgreSQLConsumer.cpp @@ -145,13 +145,32 @@ void MaterializePostgreSQLConsumer::readString(const char * message, size_t & po } +template +T MaterializePostgreSQLConsumer::unhexN(const char * message, size_t pos, size_t n) +{ + T result = 0; + for (size_t i = 0; i < n; ++i) + { + if (i) result <<= 8; + result |= UInt32(unhex2(message + pos + 2 * i)); + } + return result; +} + + +Int64 MaterializePostgreSQLConsumer::readInt64(const char * message, size_t & pos, [[maybe_unused]] size_t size) +{ + assert(size > pos + 16); + Int64 result = unhexN(message, pos, 8); + pos += 16; + return result; +} + + Int32 MaterializePostgreSQLConsumer::readInt32(const char * message, size_t & pos, [[maybe_unused]] size_t size) { assert(size > pos + 8); - Int32 result = (UInt32(unhex2(message + pos)) << 24) - | (UInt32(unhex2(message + pos + 2)) << 16) - | (UInt32(unhex2(message + pos + 4)) << 8) - | (UInt32(unhex2(message + pos + 6))); + Int32 result = unhexN(message, pos, 4); pos += 8; return result; } @@ -160,8 +179,7 @@ Int32 MaterializePostgreSQLConsumer::readInt32(const char * message, size_t & po Int16 MaterializePostgreSQLConsumer::readInt16(const char * message, size_t & pos, [[maybe_unused]] size_t size) { assert(size > pos + 4); - Int16 result = (UInt32(unhex2(message + pos)) << 8) - | (UInt32(unhex2(message + pos + 2))); + Int16 result = unhexN(message, pos, 2); pos += 4; return result; } @@ -176,18 +194,6 @@ Int8 MaterializePostgreSQLConsumer::readInt8(const char * message, size_t & pos, } -Int64 MaterializePostgreSQLConsumer::readInt64(const char * message, size_t & pos, [[maybe_unused]] size_t size) -{ - assert(size > pos + 16); - Int64 result = (UInt64(unhex4(message + pos)) << 48) - | (UInt64(unhex4(message + pos + 4)) << 32) - | (UInt64(unhex4(message + pos + 8)) << 16) - | (UInt64(unhex4(message + pos + 12))); - pos += 16; - return result; -} - - void MaterializePostgreSQLConsumer::readTupleData( Buffer & buffer, const char * message, size_t & pos, [[maybe_unused]] size_t size, PostgreSQLQuery type, bool old_value) { diff --git a/src/Storages/PostgreSQL/MaterializePostgreSQLConsumer.h b/src/Storages/PostgreSQL/MaterializePostgreSQLConsumer.h index 7cbfb8dd963..1cc0a1fcb20 100644 --- a/src/Storages/PostgreSQL/MaterializePostgreSQLConsumer.h +++ b/src/Storages/PostgreSQL/MaterializePostgreSQLConsumer.h @@ -83,6 +83,8 @@ private: void readTupleData(Buffer & buffer, const char * message, size_t & pos, size_t size, PostgreSQLQuery type, bool old_value = false); + 
template + static T unhexN(const char * message, size_t pos, size_t n); static void readString(const char * message, size_t & pos, size_t size, String & result); static Int64 readInt64(const char * message, size_t & pos, size_t size); static Int32 readInt32(const char * message, size_t & pos, size_t size); From 2100ca6c55439cfc12074ccb4f19fc23166b39e4 Mon Sep 17 00:00:00 2001 From: kssenii Date: Sat, 20 Mar 2021 15:27:13 +0000 Subject: [PATCH 065/931] Try smaller timeouts in tests --- .../test_postgresql_replica_database_engine/test.py | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/tests/integration/test_postgresql_replica_database_engine/test.py b/tests/integration/test_postgresql_replica_database_engine/test.py index 8611b228392..b03fb307247 100644 --- a/tests/integration/test_postgresql_replica_database_engine/test.py +++ b/tests/integration/test_postgresql_replica_database_engine/test.py @@ -43,6 +43,7 @@ def create_postgres_table(cursor, table_name, replica_identity_full=False, templ cursor.execute('ALTER TABLE {} REPLICA IDENTITY FULL;'.format(table_name)) +@pytest.mark.timeout(30) def assert_nested_table_is_created(table_name): database_tables = instance.query('SHOW TABLES FROM test_database') while table_name not in database_tables: @@ -51,6 +52,7 @@ def assert_nested_table_is_created(table_name): assert(table_name in database_tables) +@pytest.mark.timeout(30) def check_tables_are_synchronized(table_name, order_by='key'): assert_nested_table_is_created(table_name) @@ -88,7 +90,7 @@ def postgresql_setup_teardown(): instance.query('DROP TABLE IF EXISTS test.postgresql_replica') -@pytest.mark.timeout(320) +@pytest.mark.timeout(120) def test_load_and_sync_all_database_tables(started_cluster): instance.query("DROP DATABASE IF EXISTS test_database") conn = get_postgres_conn(True) @@ -115,7 +117,7 @@ def test_load_and_sync_all_database_tables(started_cluster): assert 'test_database' not in instance.query('SHOW DATABASES') -@pytest.mark.timeout(320) +@pytest.mark.timeout(120) def test_replicating_dml(started_cluster): instance.query("DROP DATABASE IF EXISTS test_database") conn = get_postgres_conn(True) @@ -158,7 +160,7 @@ def test_replicating_dml(started_cluster): assert 'test_database' not in instance.query('SHOW DATABASES') -@pytest.mark.timeout(320) +@pytest.mark.timeout(120) def test_different_data_types(started_cluster): instance.query("DROP DATABASE IF EXISTS test_database") conn = get_postgres_conn(True) @@ -242,7 +244,7 @@ def test_different_data_types(started_cluster): assert(result == expected) -@pytest.mark.timeout(320) +@pytest.mark.timeout(120) def test_load_and_sync_subset_of_database_tables(started_cluster): instance.query("DROP DATABASE IF EXISTS test_database") conn = get_postgres_conn(True) @@ -295,7 +297,7 @@ def test_load_and_sync_subset_of_database_tables(started_cluster): assert 'test_database' not in instance.query('SHOW DATABASES') -@pytest.mark.timeout(320) +@pytest.mark.timeout(120) def test_table_schema_changes(started_cluster): instance.query("DROP DATABASE IF EXISTS test_database") conn = get_postgres_conn(True) From 99295c050afb40eacb65f95fa31d0982a3f6668a Mon Sep 17 00:00:00 2001 From: kssenii Date: Sat, 20 Mar 2021 15:41:15 +0000 Subject: [PATCH 066/931] Better logging --- src/Storages/PostgreSQL/StorageMaterializePostgreSQL.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Storages/PostgreSQL/StorageMaterializePostgreSQL.cpp b/src/Storages/PostgreSQL/StorageMaterializePostgreSQL.cpp index 
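The unhexN helper introduced above folds 2*n hex characters of a replication message into one big-endian integer, which is why readInt64/readInt32/readInt16 collapse to one-liners. A self-contained check of the same logic; the local unhex1/unhex2 stand in for the helpers ClickHouse takes from common/hex.h (assumed to behave identically):

#include <cassert>
#include <cstddef>
#include <cstdint>

// Stand-ins for the unhex helpers from common/hex.h.
static uint8_t unhex1(char c) { return c <= '9' ? c - '0' : (c | 0x20) - 'a' + 10; }
static uint8_t unhex2(const char * s) { return unhex1(s[0]) * 16 + unhex1(s[1]); }

template <typename T>
T unhexN(const char * message, size_t pos, size_t n)
{
    T result = 0;
    for (size_t i = 0; i < n; ++i)
    {
        if (i)
            result <<= 8;
        result |= uint32_t(unhex2(message + pos + 2 * i));
    }
    return result;
}

int main()
{
    // pgoutput fields arrive hex-encoded; "0102" is the big-endian Int16 258.
    assert(unhexN<int16_t>("0102", 0, 2) == 258);
    assert(unhexN<int64_t>("00000000016b6c50", 0, 8) == 0x016B6C50);
}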
2f950f9823e..b01defde74d 100644 --- a/src/Storages/PostgreSQL/StorageMaterializePostgreSQL.cpp +++ b/src/Storages/PostgreSQL/StorageMaterializePostgreSQL.cpp @@ -354,7 +354,7 @@ void StorageMaterializePostgreSQL::dropNested() interpreter.execute(); nested_storage = nullptr; - LOG_WARNING(&Poco::Logger::get("StorageMaterializePostgreSQL"), "Dropped (or temporarily) nested table {}", getNestedTableName()); + LOG_TRACE(&Poco::Logger::get("StorageMaterializePostgreSQL"), "Dropped (possibly temporarily) nested table {}", getNestedTableName()); } From fc1c16c4cdab8109901168967a373515efcc8b5d Mon Sep 17 00:00:00 2001 From: kssenii Date: Sat, 20 Mar 2021 17:47:32 +0000 Subject: [PATCH 067/931] Increase timeout for 1 test --- .../integration/test_postgresql_replica_database_engine/test.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/integration/test_postgresql_replica_database_engine/test.py b/tests/integration/test_postgresql_replica_database_engine/test.py index b03fb307247..e1c7459de91 100644 --- a/tests/integration/test_postgresql_replica_database_engine/test.py +++ b/tests/integration/test_postgresql_replica_database_engine/test.py @@ -297,7 +297,7 @@ def test_load_and_sync_subset_of_database_tables(started_cluster): assert 'test_database' not in instance.query('SHOW DATABASES') -@pytest.mark.timeout(120) +@pytest.mark.timeout(240) def test_table_schema_changes(started_cluster): instance.query("DROP DATABASE IF EXISTS test_database") conn = get_postgres_conn(True) From 8586565ce9931d94398c6ffbf0f7105cebf1a615 Mon Sep 17 00:00:00 2001 From: kssenii Date: Thu, 8 Apr 2021 12:43:02 +0000 Subject: [PATCH 068/931] Move connection-related files to Core/PostgreSQL --- src/CMakeLists.txt | 1 + .../PostgreSQL/PostgreSQLConnection.cpp | 4 +- .../PostgreSQL/PostgreSQLConnection.h | 0 .../PostgreSQL/PostgreSQLConnectionPool.cpp | 0 .../PostgreSQL/PostgreSQLConnectionPool.h | 0 .../PostgreSQL/PostgreSQLPoolWithFailover.cpp | 0 .../PostgreSQL/PostgreSQLPoolWithFailover.h | 0 .../PostgreSQL/insertPostgreSQLValue.cpp | 0 .../PostgreSQL/insertPostgreSQLValue.h | 0 src/Core/ya.make | 4 ++ src/DataStreams/PostgreSQLBlockInputStream.h | 4 +- src/Databases/DatabaseFactory.cpp | 2 +- .../DatabaseMaterializePostgreSQL.cpp | 2 +- .../PostgreSQL/DatabasePostgreSQL.cpp | 1 - src/Databases/PostgreSQL/DatabasePostgreSQL.h | 2 +- .../fetchPostgreSQLTableStructure.h | 2 +- src/Dictionaries/PostgreSQLDictionarySource.h | 2 +- .../MaterializePostgreSQLConsumer.h | 4 +- .../PostgreSQL/PostgreSQLReplicaConnection.h | 39 ------------------- .../PostgreSQL/PostgreSQLReplicationHandler.h | 4 +- .../StorageMaterializePostgreSQL.cpp | 2 +- src/Storages/StoragePostgreSQL.cpp | 2 +- src/Storages/StoragePostgreSQL.h | 2 +- .../TableFunctionPostgreSQL.cpp | 1 - src/TableFunctions/TableFunctionPostgreSQL.h | 2 +- 25 files changed, 23 insertions(+), 57 deletions(-) rename src/{Storages => Core}/PostgreSQL/PostgreSQLConnection.cpp (96%) rename src/{Storages => Core}/PostgreSQL/PostgreSQLConnection.h (100%) rename src/{Storages => Core}/PostgreSQL/PostgreSQLConnectionPool.cpp (100%) rename src/{Storages => Core}/PostgreSQL/PostgreSQLConnectionPool.h (100%) rename src/{Storages => Core}/PostgreSQL/PostgreSQLPoolWithFailover.cpp (100%) rename src/{Storages => Core}/PostgreSQL/PostgreSQLPoolWithFailover.h (100%) rename src/{Storages => Core}/PostgreSQL/insertPostgreSQLValue.cpp (100%) rename src/{Storages => Core}/PostgreSQL/insertPostgreSQLValue.h (100%) delete mode 100644
src/Storages/PostgreSQL/PostgreSQLReplicaConnection.h diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index 295cf4904c3..b76132e9cb4 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -86,6 +86,7 @@ if (USE_AMQPCPP) endif() if (USE_LIBPQXX) + add_headers_and_sources(dbms Core/PostgreSQL) add_headers_and_sources(dbms Databases/PostgreSQL) add_headers_and_sources(dbms Storages/PostgreSQL) endif() diff --git a/src/Storages/PostgreSQL/PostgreSQLConnection.cpp b/src/Core/PostgreSQL/PostgreSQLConnection.cpp similarity index 96% rename from src/Storages/PostgreSQL/PostgreSQLConnection.cpp rename to src/Core/PostgreSQL/PostgreSQLConnection.cpp index 5353ce2bb4f..0a0dbe19e45 100644 --- a/src/Storages/PostgreSQL/PostgreSQLConnection.cpp +++ b/src/Core/PostgreSQL/PostgreSQLConnection.cpp @@ -1,4 +1,4 @@ -#include +#include "PostgreSQLConnection.h" #if USE_LIBPQXX #include @@ -6,6 +6,7 @@ #include #include + namespace DB { namespace ErrorCodes @@ -14,7 +15,6 @@ namespace ErrorCodes } } - namespace postgres { diff --git a/src/Storages/PostgreSQL/PostgreSQLConnection.h b/src/Core/PostgreSQL/PostgreSQLConnection.h similarity index 100% rename from src/Storages/PostgreSQL/PostgreSQLConnection.h rename to src/Core/PostgreSQL/PostgreSQLConnection.h diff --git a/src/Storages/PostgreSQL/PostgreSQLConnectionPool.cpp b/src/Core/PostgreSQL/PostgreSQLConnectionPool.cpp similarity index 100% rename from src/Storages/PostgreSQL/PostgreSQLConnectionPool.cpp rename to src/Core/PostgreSQL/PostgreSQLConnectionPool.cpp diff --git a/src/Storages/PostgreSQL/PostgreSQLConnectionPool.h b/src/Core/PostgreSQL/PostgreSQLConnectionPool.h similarity index 100% rename from src/Storages/PostgreSQL/PostgreSQLConnectionPool.h rename to src/Core/PostgreSQL/PostgreSQLConnectionPool.h diff --git a/src/Storages/PostgreSQL/PostgreSQLPoolWithFailover.cpp b/src/Core/PostgreSQL/PostgreSQLPoolWithFailover.cpp similarity index 100% rename from src/Storages/PostgreSQL/PostgreSQLPoolWithFailover.cpp rename to src/Core/PostgreSQL/PostgreSQLPoolWithFailover.cpp diff --git a/src/Storages/PostgreSQL/PostgreSQLPoolWithFailover.h b/src/Core/PostgreSQL/PostgreSQLPoolWithFailover.h similarity index 100% rename from src/Storages/PostgreSQL/PostgreSQLPoolWithFailover.h rename to src/Core/PostgreSQL/PostgreSQLPoolWithFailover.h diff --git a/src/Storages/PostgreSQL/insertPostgreSQLValue.cpp b/src/Core/PostgreSQL/insertPostgreSQLValue.cpp similarity index 100% rename from src/Storages/PostgreSQL/insertPostgreSQLValue.cpp rename to src/Core/PostgreSQL/insertPostgreSQLValue.cpp diff --git a/src/Storages/PostgreSQL/insertPostgreSQLValue.h b/src/Core/PostgreSQL/insertPostgreSQLValue.h similarity index 100% rename from src/Storages/PostgreSQL/insertPostgreSQLValue.h rename to src/Core/PostgreSQL/insertPostgreSQLValue.h diff --git a/src/Core/ya.make b/src/Core/ya.make index 004653d060e..890ce20e7b3 100644 --- a/src/Core/ya.make +++ b/src/Core/ya.make @@ -31,6 +31,10 @@ SRCS( MySQL/PacketsProtocolText.cpp MySQL/PacketsReplication.cpp NamesAndTypes.cpp + PostgreSQL/PostgreSQLConnection.cpp + PostgreSQL/PostgreSQLConnectionPool.cpp + PostgreSQL/PostgreSQLPoolWithFailover.cpp + PostgreSQL/insertPostgreSQLValue.cpp PostgreSQLProtocol.cpp QueryProcessingStage.cpp Settings.cpp diff --git a/src/DataStreams/PostgreSQLBlockInputStream.h b/src/DataStreams/PostgreSQLBlockInputStream.h index 248ba5c3318..bd22c2ea028 100644 --- a/src/DataStreams/PostgreSQLBlockInputStream.h +++ b/src/DataStreams/PostgreSQLBlockInputStream.h @@ -10,8 +10,8 @@ #include #include 
#include -#include -#include +#include +#include namespace DB diff --git a/src/Databases/DatabaseFactory.cpp b/src/Databases/DatabaseFactory.cpp index 2bec6663a54..60af31a46cc 100644 --- a/src/Databases/DatabaseFactory.cpp +++ b/src/Databases/DatabaseFactory.cpp @@ -39,7 +39,7 @@ #include // Y_IGNORE #include #include -#include +#include #endif namespace DB diff --git a/src/Databases/PostgreSQL/DatabaseMaterializePostgreSQL.cpp b/src/Databases/PostgreSQL/DatabaseMaterializePostgreSQL.cpp index b39382f4cef..49aeff0c5db 100644 --- a/src/Databases/PostgreSQL/DatabaseMaterializePostgreSQL.cpp +++ b/src/Databases/PostgreSQL/DatabaseMaterializePostgreSQL.cpp @@ -2,7 +2,7 @@ #if USE_LIBPQXX -#include +#include #include #include diff --git a/src/Databases/PostgreSQL/DatabasePostgreSQL.cpp b/src/Databases/PostgreSQL/DatabasePostgreSQL.cpp index 887c200d705..eaa1a931fb3 100644 --- a/src/Databases/PostgreSQL/DatabasePostgreSQL.cpp +++ b/src/Databases/PostgreSQL/DatabasePostgreSQL.cpp @@ -16,7 +16,6 @@ #include #include #include -#include namespace DB diff --git a/src/Databases/PostgreSQL/DatabasePostgreSQL.h b/src/Databases/PostgreSQL/DatabasePostgreSQL.h index 966d41a35ce..7224490fa04 100644 --- a/src/Databases/PostgreSQL/DatabasePostgreSQL.h +++ b/src/Databases/PostgreSQL/DatabasePostgreSQL.h @@ -9,7 +9,7 @@ #include #include #include -#include +#include namespace DB diff --git a/src/Databases/PostgreSQL/fetchPostgreSQLTableStructure.h b/src/Databases/PostgreSQL/fetchPostgreSQLTableStructure.h index d69ad55db81..bdfa9e0a01b 100644 --- a/src/Databases/PostgreSQL/fetchPostgreSQLTableStructure.h +++ b/src/Databases/PostgreSQL/fetchPostgreSQLTableStructure.h @@ -5,7 +5,7 @@ #endif #if USE_LIBPQXX -#include +#include #include diff --git a/src/Dictionaries/PostgreSQLDictionarySource.h b/src/Dictionaries/PostgreSQLDictionarySource.h index f1520a37a79..59c38eb04ed 100644 --- a/src/Dictionaries/PostgreSQLDictionarySource.h +++ b/src/Dictionaries/PostgreSQLDictionarySource.h @@ -11,7 +11,7 @@ #include #include #include -#include +#include #include diff --git a/src/Storages/PostgreSQL/MaterializePostgreSQLConsumer.h b/src/Storages/PostgreSQL/MaterializePostgreSQLConsumer.h index 970402b7ba5..e7bb6d22f7e 100644 --- a/src/Storages/PostgreSQL/MaterializePostgreSQLConsumer.h +++ b/src/Storages/PostgreSQL/MaterializePostgreSQLConsumer.h @@ -5,9 +5,9 @@ #endif #if USE_LIBPQXX -#include "PostgreSQLConnection.h" #include "MaterializePostgreSQLMetadata.h" -#include "insertPostgreSQLValue.h" +#include +#include #include #include diff --git a/src/Storages/PostgreSQL/PostgreSQLReplicaConnection.h b/src/Storages/PostgreSQL/PostgreSQLReplicaConnection.h deleted file mode 100644 index 1ed442873a2..00000000000 --- a/src/Storages/PostgreSQL/PostgreSQLReplicaConnection.h +++ /dev/null @@ -1,39 +0,0 @@ -#pragma once - -#include -#include -#include "PostgreSQLConnectionPool.h" -#include - - -namespace DB -{ - -class PostgreSQLReplicaConnection -{ - -public: - static constexpr inline auto POSTGRESQL_CONNECTION_DEFAULT_RETRIES_NUM = 5; - - PostgreSQLReplicaConnection( - const Poco::Util::AbstractConfiguration & config, - const String & config_prefix, - const size_t num_retries_ = POSTGRESQL_CONNECTION_DEFAULT_RETRIES_NUM); - - PostgreSQLReplicaConnection(const PostgreSQLReplicaConnection & other); - - PostgreSQLConnectionHolderPtr get(); - - -private: - /// Highest priority is 0, the bigger the number in map, the less the priority - using ReplicasByPriority = std::map; - - ReplicasByPriority replicas; - size_t num_retries; - 
std::mutex mutex; -}; - -using PostgreSQLReplicaConnectionPtr = std::shared_ptr; - -} diff --git a/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.h b/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.h index aee1d314035..0aa165bd183 100644 --- a/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.h +++ b/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.h @@ -5,9 +5,11 @@ #endif #if USE_LIBPQXX -#include "PostgreSQLConnection.h" + #include "MaterializePostgreSQLConsumer.h" #include "MaterializePostgreSQLMetadata.h" + +#include #include diff --git a/src/Storages/PostgreSQL/StorageMaterializePostgreSQL.cpp b/src/Storages/PostgreSQL/StorageMaterializePostgreSQL.cpp index 6d35ced5e93..e90ada126c0 100644 --- a/src/Storages/PostgreSQL/StorageMaterializePostgreSQL.cpp +++ b/src/Storages/PostgreSQL/StorageMaterializePostgreSQL.cpp @@ -22,7 +22,7 @@ #include #include #include -#include "PostgreSQLConnectionPool.h" +#include namespace DB diff --git a/src/Storages/StoragePostgreSQL.cpp b/src/Storages/StoragePostgreSQL.cpp index 40347d888e5..c764063552b 100644 --- a/src/Storages/StoragePostgreSQL.cpp +++ b/src/Storages/StoragePostgreSQL.cpp @@ -1,7 +1,7 @@ #include "StoragePostgreSQL.h" #if USE_LIBPQXX -#include +#include #include #include diff --git a/src/Storages/StoragePostgreSQL.h b/src/Storages/StoragePostgreSQL.h index ec06a698c1f..8a2bbe7d6f0 100644 --- a/src/Storages/StoragePostgreSQL.h +++ b/src/Storages/StoragePostgreSQL.h @@ -9,7 +9,7 @@ #include #include #include -#include +#include #include diff --git a/src/TableFunctions/TableFunctionPostgreSQL.cpp b/src/TableFunctions/TableFunctionPostgreSQL.cpp index 9c202618b63..a88e0557509 100644 --- a/src/TableFunctions/TableFunctionPostgreSQL.cpp +++ b/src/TableFunctions/TableFunctionPostgreSQL.cpp @@ -12,7 +12,6 @@ #include #include #include "registerTableFunctions.h" -#include #include #include diff --git a/src/TableFunctions/TableFunctionPostgreSQL.h b/src/TableFunctions/TableFunctionPostgreSQL.h index 96d46b4fe5a..18f5d5c0d5d 100644 --- a/src/TableFunctions/TableFunctionPostgreSQL.h +++ b/src/TableFunctions/TableFunctionPostgreSQL.h @@ -5,7 +5,7 @@ #if USE_LIBPQXX #include -#include +#include namespace DB From b544c9fe9ae50393af25e9b7fee1a548c7f98a3e Mon Sep 17 00:00:00 2001 From: kssenii Date: Thu, 8 Apr 2021 13:08:51 +0000 Subject: [PATCH 069/931] Add default template --- src/DataStreams/PostgreSQLBlockInputStream.h | 2 +- src/Dictionaries/PostgreSQLDictionarySource.cpp | 4 ++-- src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp | 3 --- src/Storages/StoragePostgreSQL.cpp | 2 +- 4 files changed, 4 insertions(+), 7 deletions(-) diff --git a/src/DataStreams/PostgreSQLBlockInputStream.h b/src/DataStreams/PostgreSQLBlockInputStream.h index bd22c2ea028..52c7aa53679 100644 --- a/src/DataStreams/PostgreSQLBlockInputStream.h +++ b/src/DataStreams/PostgreSQLBlockInputStream.h @@ -17,7 +17,7 @@ namespace DB { -template +template class PostgreSQLBlockInputStream : public IBlockInputStream { diff --git a/src/Dictionaries/PostgreSQLDictionarySource.cpp b/src/Dictionaries/PostgreSQLDictionarySource.cpp index 2b300c28d9f..9bef164389b 100644 --- a/src/Dictionaries/PostgreSQLDictionarySource.cpp +++ b/src/Dictionaries/PostgreSQLDictionarySource.cpp @@ -93,7 +93,7 @@ BlockInputStreamPtr PostgreSQLDictionarySource::loadKeys(const Columns & key_col BlockInputStreamPtr PostgreSQLDictionarySource::loadBase(const String & query) { - return std::make_shared>(connection->get(), query, sample_block, max_block_size); + return 
std::make_shared>(connection->get(), query, sample_block, max_block_size); } @@ -115,7 +115,7 @@ std::string PostgreSQLDictionarySource::doInvalidateQuery(const std::string & re Block invalidate_sample_block; ColumnPtr column(ColumnString::create()); invalidate_sample_block.insert(ColumnWithTypeAndName(column, std::make_shared(), "Sample Block")); - PostgreSQLBlockInputStream block_input_stream(connection->get(), request, invalidate_sample_block, 1); + PostgreSQLBlockInputStream<> block_input_stream(connection->get(), request, invalidate_sample_block, 1); return readInvalidateQuery(block_input_stream); } diff --git a/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp b/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp index ac0e026a167..a1851cc3248 100644 --- a/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp +++ b/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp @@ -161,9 +161,6 @@ NameSet PostgreSQLReplicationHandler::loadFromSnapshot(std::string & snapshot_na auto tx = std::make_shared(connection->getRef()); const auto & table_name = storage_data.first; - /// Specific isolation level is required to read from snapshot. - ///tx->set_variable("transaction_isolation", "'repeatable read'"); - std::string query_str = fmt::format("SET TRANSACTION SNAPSHOT '{}'", snapshot_name); tx->exec(query_str); diff --git a/src/Storages/StoragePostgreSQL.cpp b/src/Storages/StoragePostgreSQL.cpp index c764063552b..7bc02421f6b 100644 --- a/src/Storages/StoragePostgreSQL.cpp +++ b/src/Storages/StoragePostgreSQL.cpp @@ -89,7 +89,7 @@ Pipe StoragePostgreSQL::read( } return Pipe(std::make_shared( - std::make_shared>(pool->get(), query, sample_block, max_block_size_))); + std::make_shared>(pool->get(), query, sample_block, max_block_size_))); } From 4482a35a3a2dbb903ffa33887a99ea0e7cd24c5d Mon Sep 17 00:00:00 2001 From: kssenii Date: Thu, 8 Apr 2021 14:02:15 +0000 Subject: [PATCH 070/931] Check table in a suggested way --- src/DataStreams/PostgreSQLBlockInputStream.h | 4 ++-- .../PostgreSQL/fetchPostgreSQLTableStructure.cpp | 14 +++----------- 2 files changed, 5 insertions(+), 13 deletions(-) diff --git a/src/DataStreams/PostgreSQLBlockInputStream.h b/src/DataStreams/PostgreSQLBlockInputStream.h index 52c7aa53679..d62c11ccef7 100644 --- a/src/DataStreams/PostgreSQLBlockInputStream.h +++ b/src/DataStreams/PostgreSQLBlockInputStream.h @@ -67,14 +67,14 @@ class PostgreSQLTransactionBlockInputStream : public PostgreSQLBlockInputStream< { public: - using Base = PostgreSQLBlockInputStream; + using Base = PostgreSQLBlockInputStream; PostgreSQLTransactionBlockInputStream( std::shared_ptr tx_, const std::string & query_str_, const Block & sample_block_, const UInt64 max_block_size_) - : PostgreSQLBlockInputStream(tx_, query_str_, sample_block_, max_block_size_, false) {} + : PostgreSQLBlockInputStream(tx_, query_str_, sample_block_, max_block_size_, false) {} void readPrefix() override { diff --git a/src/Databases/PostgreSQL/fetchPostgreSQLTableStructure.cpp b/src/Databases/PostgreSQL/fetchPostgreSQLTableStructure.cpp index 4c7047ebe84..4f669a60529 100644 --- a/src/Databases/PostgreSQL/fetchPostgreSQLTableStructure.cpp +++ b/src/Databases/PostgreSQL/fetchPostgreSQLTableStructure.cpp @@ -12,6 +12,7 @@ #include #include #include +#include #include @@ -161,21 +162,12 @@ PostgreSQLTableStructure fetchPostgreSQLTableStructure( { PostgreSQLTableStructure table; - if (postgres_table_name.find('\'') != std::string::npos - || postgres_table_name.find('\\') != std::string::npos) - { - throw Exception( - 
ErrorCodes::BAD_ARGUMENTS, - "PostgreSQL table name cannot contain single quote or backslash characters, passed {}", - postgres_table_name); - } - std::string query = fmt::format( "SELECT attname AS name, format_type(atttypid, atttypmod) AS type, " "attnotnull AS not_null, attndims AS dims " "FROM pg_attribute " - "WHERE attrelid = '{}'::regclass " - "AND NOT attisdropped AND attnum > 0", postgres_table_name); + "WHERE attrelid = {}::regclass " + "AND NOT attisdropped AND attnum > 0", quoteString(postgres_table_name)); table.columns = readNamesAndTypesList(tx, postgres_table_name, query, use_nulls, false); From 093e53e65a9e0760d3ab9643e2e1bf8af3368fec Mon Sep 17 00:00:00 2001 From: kssenii Date: Thu, 8 Apr 2021 20:39:56 +0000 Subject: [PATCH 071/931] Fix and test server restart --- .../PostgreSQLReplicationHandler.cpp | 2 ++ .../StorageMaterializePostgreSQL.cpp | 7 ++++ .../PostgreSQL/StorageMaterializePostgreSQL.h | 2 ++ .../test.py | 32 ++++++++++++++++++- .../test_storage_postgresql_replica/test.py | 32 ++++++++++++++++++- 5 files changed, 73 insertions(+), 2 deletions(-) diff --git a/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp b/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp index a1851cc3248..1cca362ca35 100644 --- a/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp +++ b/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp @@ -119,11 +119,13 @@ void PostgreSQLReplicationHandler::startSynchronization() } else { + LOG_TRACE(log, "Restoring tables..."); for (const auto & [table_name, storage] : storages) { try { nested_storages[table_name] = storage->getNested(); + storage->setStorageMetadata(); storage->setNestedLoaded(); } catch (...) diff --git a/src/Storages/PostgreSQL/StorageMaterializePostgreSQL.cpp b/src/Storages/PostgreSQL/StorageMaterializePostgreSQL.cpp index e90ada126c0..8b48622da5f 100644 --- a/src/Storages/PostgreSQL/StorageMaterializePostgreSQL.cpp +++ b/src/Storages/PostgreSQL/StorageMaterializePostgreSQL.cpp @@ -152,6 +152,13 @@ ASTPtr StorageMaterializePostgreSQL::getColumnDeclaration(const DataTypePtr & da } +void StorageMaterializePostgreSQL::setStorageMetadata() +{ + auto storage_metadata = getNested()->getInMemoryMetadataPtr(); + setInMemoryMetadata(*storage_metadata); +} + + /// For single storage MaterializePostgreSQL get columns and primary key columns from storage definition. /// For database engine MaterializePostgreSQL get columns and primary key columns by fetching from PostgreSQL, also using the same /// transaction with snapshot, which is used for initial tables dump. 
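[Editor's note: illustrative sketch, not part of the patch series. It consolidates how the pieces added in this commit are expected to fit together on server restart; the names come from the hunks above, error handling is elided, and the assumption that getNested() throws when the nested table is missing is inferred from the try/catch in the restore hunk.]

    /// On server restart the nested ReplacingMergeTree table already exists on disk,
    /// so the handler re-attaches to it instead of re-creating it:
    nested_storages[table_name] = storage->getNested();   /// assumed to throw if the nested table is gone
    storage->setStorageMetadata();                        /// mirror the nested table's in-memory metadata into the wrapper
    storage->setNestedLoaded();                           /// only now are reads through the wrapper allowed
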
diff --git a/src/Storages/PostgreSQL/StorageMaterializePostgreSQL.h b/src/Storages/PostgreSQL/StorageMaterializePostgreSQL.h index 9d933e84050..feba216b4c4 100644 --- a/src/Storages/PostgreSQL/StorageMaterializePostgreSQL.h +++ b/src/Storages/PostgreSQL/StorageMaterializePostgreSQL.h @@ -69,6 +69,8 @@ public: void dropNested(); + void setStorageMetadata(); + protected: StorageMaterializePostgreSQL( const StorageID & table_id_, diff --git a/tests/integration/test_postgresql_replica_database_engine/test.py b/tests/integration/test_postgresql_replica_database_engine/test.py index e1c7459de91..535cb0f6a7d 100644 --- a/tests/integration/test_postgresql_replica_database_engine/test.py +++ b/tests/integration/test_postgresql_replica_database_engine/test.py @@ -10,7 +10,10 @@ from psycopg2.extensions import ISOLATION_LEVEL_AUTOCOMMIT from helpers.test_tools import TSV cluster = ClickHouseCluster(__file__) -instance = cluster.add_instance('instance', main_configs=['configs/log_conf.xml'], user_configs = ['configs/users.xml'], with_postgres=True) +instance = cluster.add_instance('instance', + main_configs=['configs/log_conf.xml'], + user_configs = ['configs/users.xml'], + with_postgres=True, stay_alive=True) postgres_table_template = """ CREATE TABLE IF NOT EXISTS {} ( @@ -55,6 +58,7 @@ def assert_nested_table_is_created(table_name): @pytest.mark.timeout(30) def check_tables_are_synchronized(table_name, order_by='key'): assert_nested_table_is_created(table_name) + print("nested ok") expected = instance.query('select * from postgres_database.{} order by {};'.format(table_name, order_by)) result = instance.query('select * from test_database.{} order by {};'.format(table_name, order_by)) @@ -364,6 +368,32 @@ def test_changing_replica_identity_value(started_cluster): check_tables_are_synchronized('postgresql_replica'); +@pytest.mark.timeout(320) +def test_clickhouse_restart(started_cluster): + instance.query("DROP DATABASE IF EXISTS test_database") + conn = get_postgres_conn(True) + cursor = conn.cursor() + NUM_TABLES = 5 + + for i in range(NUM_TABLES): + create_postgres_table(cursor, 'postgresql_replica_{}'.format(i)); + instance.query("INSERT INTO postgres_database.postgresql_replica_{} SELECT number, {} from numbers(50)".format(i, i)) + + instance.query("CREATE DATABASE test_database ENGINE = MaterializePostgreSQL('postgres1:5432', 'postgres_database', 'postgres', 'mysecretpassword')") + + for i in range(NUM_TABLES): + table_name = 'postgresql_replica_{}'.format(i) + check_tables_are_synchronized(table_name); + + for i in range(NUM_TABLES): + instance.query("INSERT INTO postgres_database.postgresql_replica_{} SELECT 50 + number, {} from numbers(50000)".format(i, i)) + + instance.restart_clickhouse() + + for i in range(NUM_TABLES): + check_tables_are_synchronized('postgresql_replica_{}'.format(i)); + + if __name__ == '__main__': cluster.start() input("Cluster created, press any key to destroy...") diff --git a/tests/integration/test_storage_postgresql_replica/test.py b/tests/integration/test_storage_postgresql_replica/test.py index 4a7a6592873..bca4f159cf6 100644 --- a/tests/integration/test_storage_postgresql_replica/test.py +++ b/tests/integration/test_storage_postgresql_replica/test.py @@ -9,7 +9,7 @@ from psycopg2.extensions import ISOLATION_LEVEL_AUTOCOMMIT from helpers.test_tools import TSV cluster = ClickHouseCluster(__file__) -instance = cluster.add_instance('instance', main_configs=['configs/log_conf.xml'], with_postgres=True) +instance = cluster.add_instance('instance', 
main_configs=['configs/log_conf.xml'], with_postgres=True, stay_alive=True) postgres_table_template = """ CREATE TABLE IF NOT EXISTS {} ( @@ -400,6 +400,36 @@ def test_connection_loss(started_cluster): assert(int(result) == 100050) +@pytest.mark.timeout(320) +def test_clickhouse_restart(started_cluster): + conn = get_postgres_conn(True) + cursor = conn.cursor() + create_postgres_table(cursor, 'postgresql_replica'); + instance.query("INSERT INTO postgres_database.postgresql_replica SELECT number, number from numbers(50)") + + instance.query(''' + CREATE TABLE test.postgresql_replica (key UInt64, value UInt64, _sign Int8 MATERIALIZED 1, _version UInt64 MATERIALIZED 1) + ENGINE = MaterializePostgreSQL( + 'postgres1:5432', 'postgres_database', 'postgresql_replica', 'postgres', 'mysecretpassword') + PRIMARY KEY key; ''') + + i = 50 + while i < 100000: + instance.query("INSERT INTO postgres_database.postgresql_replica SELECT {} + number, number from numbers(10000)".format(i)) + i += 10000 + + instance.restart_clickhouse() + + result = instance.query('SELECT count() FROM test.postgresql_replica;') + while int(result) < 100050: + time.sleep(1) + result = instance.query('SELECT count() FROM test.postgresql_replica;') + + cursor.execute('DROP TABLE postgresql_replica;') + print(result) + assert(int(result) == 100050) + + if __name__ == '__main__': cluster.start() input("Cluster created, press any key to destroy...") From bd207daa746a5acbd3ebf31a83c792f3a3c35ffe Mon Sep 17 00:00:00 2001 From: kssenii Date: Thu, 8 Apr 2021 22:38:17 +0000 Subject: [PATCH 072/931] Use only atomic database --- src/Databases/DatabaseFactory.cpp | 18 +---- .../DatabaseMaterializePostgreSQL.cpp | 74 +++++-------------- .../DatabaseMaterializePostgreSQL.h | 12 +-- src/Interpreters/InterpreterCreateQuery.cpp | 2 +- .../StorageMaterializePostgreSQL.cpp | 3 +- .../test.py | 2 - 6 files changed, 29 insertions(+), 82 deletions(-) diff --git a/src/Databases/DatabaseFactory.cpp b/src/Databases/DatabaseFactory.cpp index 60af31a46cc..6b9f90c5500 100644 --- a/src/Databases/DatabaseFactory.cpp +++ b/src/Databases/DatabaseFactory.cpp @@ -294,20 +294,10 @@ DatabasePtr DatabaseFactory::getImpl(const ASTCreateQuery & create, const String if (engine_define->settings) postgresql_replica_settings->loadFromQuery(*engine_define); - if (create.uuid == UUIDHelpers::Nil) - { - return std::make_shared>( - context, metadata_path, uuid, engine_define, - database_name, postgres_database_name, connection_string, - std::move(postgresql_replica_settings)); - } - else - { - return std::make_shared>( - context, metadata_path, uuid, engine_define, - database_name, postgres_database_name, connection_string, - std::move(postgresql_replica_settings)); - } + return std::make_shared( + context, metadata_path, uuid, engine_define, + database_name, postgres_database_name, connection_string, + std::move(postgresql_replica_settings)); } diff --git a/src/Databases/PostgreSQL/DatabaseMaterializePostgreSQL.cpp b/src/Databases/PostgreSQL/DatabaseMaterializePostgreSQL.cpp index 49aeff0c5db..dac33e00b3c 100644 --- a/src/Databases/PostgreSQL/DatabaseMaterializePostgreSQL.cpp +++ b/src/Databases/PostgreSQL/DatabaseMaterializePostgreSQL.cpp @@ -34,33 +34,7 @@ namespace ErrorCodes static const auto METADATA_SUFFIX = ".postgresql_replica_metadata"; -template<> -DatabaseMaterializePostgreSQL::DatabaseMaterializePostgreSQL( - const Context & context, - const String & metadata_path_, - UUID /* uuid */, - const ASTStorage * database_engine_define_, - const String & database_name_, 
- const String & postgres_database_name, - const String & connection_string, - std::unique_ptr settings_) - : DatabaseOrdinary( - database_name_, metadata_path_, "data/" + escapeForFileName(database_name_) + "/", - "DatabaseMaterializePostgreSQL (" + database_name_ + ")", context) - , log(&Poco::Logger::get("MaterializePostgreSQLDatabaseEngine")) - , global_context(context.getGlobalContext()) - , metadata_path(metadata_path_) - , database_engine_define(database_engine_define_->clone()) - , database_name(database_name_) - , remote_database_name(postgres_database_name) - , connection(std::make_shared(connection_string, "")) - , settings(std::move(settings_)) -{ -} - - -template<> -DatabaseMaterializePostgreSQL::DatabaseMaterializePostgreSQL( +DatabaseMaterializePostgreSQL::DatabaseMaterializePostgreSQL( const Context & context, const String & metadata_path_, UUID uuid, @@ -70,8 +44,6 @@ DatabaseMaterializePostgreSQL::DatabaseMaterializePostgreSQL( const String & connection_string, std::unique_ptr settings_) : DatabaseAtomic(database_name_, metadata_path_, uuid, "DatabaseMaterializePostgreSQL (" + database_name_ + ")", context) - , global_context(context.getGlobalContext()) - , metadata_path(metadata_path_) , database_engine_define(database_engine_define_->clone()) , remote_database_name(postgres_database_name) , connection(std::make_shared(connection_string, "")) @@ -80,8 +52,7 @@ DatabaseMaterializePostgreSQL::DatabaseMaterializePostgreSQL( } -template -void DatabaseMaterializePostgreSQL::startSynchronization() +void DatabaseMaterializePostgreSQL::startSynchronization() { replication_handler = std::make_unique( remote_database_name, @@ -104,27 +75,24 @@ void DatabaseMaterializePostgreSQL::startSynchronization() } replication_handler->addStorage(table_name, storage->template as()); - tables[table_name] = storage; + materialized_tables[table_name] = storage; } - LOG_TRACE(log, "Loaded {} tables. Starting synchronization", tables.size()); + LOG_TRACE(log, "Loaded {} tables. Starting synchronization", materialized_tables.size()); replication_handler->startup(); } -template -void DatabaseMaterializePostgreSQL::shutdown() +void DatabaseMaterializePostgreSQL::shutdown() { if (replication_handler) replication_handler->shutdown(); } -template -void DatabaseMaterializePostgreSQL::loadStoredObjects( - Context & context, bool has_force_restore_data_flag, bool force_attach) +void DatabaseMaterializePostgreSQL::loadStoredObjects(Context & context, bool has_force_restore_data_flag, bool force_attach) { - Base::loadStoredObjects(context, has_force_restore_data_flag, force_attach); + DatabaseAtomic::loadStoredObjects(context, has_force_restore_data_flag, force_attach); try { @@ -132,7 +100,7 @@ void DatabaseMaterializePostgreSQL::loadStoredObjects( } catch (...) { - tryLogCurrentException(Base::log, "Cannot load nested database objects for PostgreSQL database engine."); + tryLogCurrentException(log, "Cannot load nested database objects for PostgreSQL database engine."); if (!force_attach) throw; @@ -141,8 +109,7 @@ void DatabaseMaterializePostgreSQL::loadStoredObjects( } -template -StoragePtr DatabaseMaterializePostgreSQL::tryGetTable(const String & name, const Context & context) const +StoragePtr DatabaseMaterializePostgreSQL::tryGetTable(const String & name, const Context & context) const { /// When a nested ReplacingMergeTree table is managed from PostgreSQLReplicationHandler, its context is modified /// to show the type of managed table. 
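[Editor's note: illustrative sketch, not part of the patch. The comment above relies on a marker convention: internal replication queries instantiate ReplacingMergeTree, so that name appears in the query context's factories info. Assuming getQueryFactoriesInfo() behaves as used in the hunks of this commit, the dispatch reduces to:]

    if (context.hasQueryContext())
    {
        const auto & storage_set = context.getQueryContext().getQueryFactoriesInfo().storages;
        if (storage_set.find("ReplacingMergeTree") != storage_set.end())
            return DatabaseAtomic::tryGetTable(name, context);   /// internal access: serve the real nested table
    }
    /// Otherwise this is user access: serve the MaterializePostgreSQL wrapper from materialized_tables.
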
@@ -151,29 +118,28 @@ StoragePtr DatabaseMaterializePostgreSQL::tryGetTable(const String & name, auto storage_set = context.getQueryContext().getQueryFactoriesInfo().storages; if (storage_set.find("ReplacingMergeTree") != storage_set.end()) { - return Base::tryGetTable(name, context); + return DatabaseAtomic::tryGetTable(name, context); } } - auto table = tables.find(name); + auto table = materialized_tables.find(name); /// Here it is possible that nested table is temporarily out of reach, but return storage anyway, /// it will not allow to read if nested is unavailable at the moment - if (table != tables.end()) + if (table != materialized_tables.end()) return table->second; return StoragePtr{}; } -template -void DatabaseMaterializePostgreSQL::createTable(const Context & context, const String & name, const StoragePtr & table, const ASTPtr & query) +void DatabaseMaterializePostgreSQL::createTable(const Context & context, const String & name, const StoragePtr & table, const ASTPtr & query) { if (context.hasQueryContext()) { auto storage_set = context.getQueryContext().getQueryFactoriesInfo().storages; if (storage_set.find("ReplacingMergeTree") != storage_set.end()) { - Base::createTable(context, name, table, query); + DatabaseAtomic::createTable(context, name, table, query); return; } } @@ -183,8 +149,7 @@ void DatabaseMaterializePostgreSQL::createTable(const Context & context, c } -template -void DatabaseMaterializePostgreSQL::drop(const Context & context) +void DatabaseMaterializePostgreSQL::drop(const Context & context) { if (replication_handler) { @@ -193,21 +158,20 @@ void DatabaseMaterializePostgreSQL::drop(const Context & context) } /// Remove metadata - Poco::File metadata(Base::getMetadataPath() + METADATA_SUFFIX); + Poco::File metadata(getMetadataPath() + METADATA_SUFFIX); if (metadata.exists()) metadata.remove(false); - Base::drop(context); + DatabaseAtomic::drop(context); } -template -DatabaseTablesIteratorPtr DatabaseMaterializePostgreSQL::getTablesIterator( +DatabaseTablesIteratorPtr DatabaseMaterializePostgreSQL::getTablesIterator( const Context & /* context */, const DatabaseOnDisk::FilterByNameFunction & /* filter_by_table_name */) { Tables nested_tables; - for (const auto & [table_name, storage] : tables) + for (const auto & [table_name, storage] : materialized_tables) { auto nested_storage = storage->template as()->tryGetNested(); diff --git a/src/Databases/PostgreSQL/DatabaseMaterializePostgreSQL.h b/src/Databases/PostgreSQL/DatabaseMaterializePostgreSQL.h index 67c3d5af012..cf9024494f9 100644 --- a/src/Databases/PostgreSQL/DatabaseMaterializePostgreSQL.h +++ b/src/Databases/PostgreSQL/DatabaseMaterializePostgreSQL.h @@ -14,6 +14,7 @@ #include #include #include +#include namespace DB @@ -24,8 +25,7 @@ class PostgreSQLConnection; using PostgreSQLConnectionPtr = std::shared_ptr; -template -class DatabaseMaterializePostgreSQL : public Base +class DatabaseMaterializePostgreSQL : public DatabaseAtomic { public: @@ -59,17 +59,13 @@ public: private: void startSynchronization(); - Poco::Logger * log; - const Context & global_context; - String metadata_path; ASTPtr database_engine_define; - - String database_name, remote_database_name; + String remote_database_name; postgres::ConnectionPtr connection; std::unique_ptr settings; std::shared_ptr replication_handler; - std::map tables; + std::map materialized_tables; }; } diff --git a/src/Interpreters/InterpreterCreateQuery.cpp b/src/Interpreters/InterpreterCreateQuery.cpp index e56cdf13b12..b20a1992a46 100644 --- 
a/src/Interpreters/InterpreterCreateQuery.cpp +++ b/src/Interpreters/InterpreterCreateQuery.cpp @@ -150,7 +150,7 @@ BlockIO InterpreterCreateQuery::createDatabase(ASTCreateQuery & create) throw Exception(ErrorCodes::UNKNOWN_DATABASE_ENGINE, "Unknown database engine: {}", serializeAST(*create.storage)); } - if (create.storage->engine->name == "Atomic" || create.storage->engine->name == "Replicated") + if (create.storage->engine->name == "Atomic" || create.storage->engine->name == "Replicated" || create.storage->engine->name == "MaterializePostgreSQL") { if (create.attach && create.uuid == UUIDHelpers::Nil) throw Exception(ErrorCodes::INCORRECT_QUERY, "UUID must be specified for ATTACH. " diff --git a/src/Storages/PostgreSQL/StorageMaterializePostgreSQL.cpp b/src/Storages/PostgreSQL/StorageMaterializePostgreSQL.cpp index 8b48622da5f..7002d7d8c99 100644 --- a/src/Storages/PostgreSQL/StorageMaterializePostgreSQL.cpp +++ b/src/Storages/PostgreSQL/StorageMaterializePostgreSQL.cpp @@ -75,8 +75,7 @@ StorageMaterializePostgreSQL::StorageMaterializePostgreSQL( : IStorage(table_id_) , global_context(context_) , nested_storage(nested_storage_) - , is_postgresql_replica_database( - DatabaseCatalog::instance().getDatabase(getStorageID().database_name)->getEngineName() == "MaterializePostgreSQL") + , is_postgresql_replica_database(true) { } diff --git a/tests/integration/test_postgresql_replica_database_engine/test.py b/tests/integration/test_postgresql_replica_database_engine/test.py index 535cb0f6a7d..7d23458dda1 100644 --- a/tests/integration/test_postgresql_replica_database_engine/test.py +++ b/tests/integration/test_postgresql_replica_database_engine/test.py @@ -58,7 +58,6 @@ def assert_nested_table_is_created(table_name): @pytest.mark.timeout(30) def check_tables_are_synchronized(table_name, order_by='key'): assert_nested_table_is_created(table_name) - print("nested ok") expected = instance.query('select * from postgres_database.{} order by {};'.format(table_name, order_by)) result = instance.query('select * from test_database.{} order by {};'.format(table_name, order_by)) @@ -81,7 +80,6 @@ def started_cluster(): instance.query(''' CREATE DATABASE postgres_database ENGINE = PostgreSQL('postgres1:5432', 'postgres_database', 'postgres', 'mysecretpassword')''') - yield cluster finally: From 3e41c82df157e65ebe9edbc1b761b79fd6127f8e Mon Sep 17 00:00:00 2001 From: kssenii Date: Thu, 8 Apr 2021 23:38:27 +0000 Subject: [PATCH 073/931] Add mutex to tryGetTable only for materialized table (not nested) --- src/Databases/PostgreSQL/DatabaseMaterializePostgreSQL.cpp | 2 ++ src/Databases/PostgreSQL/DatabaseMaterializePostgreSQL.h | 1 + 2 files changed, 3 insertions(+) diff --git a/src/Databases/PostgreSQL/DatabaseMaterializePostgreSQL.cpp b/src/Databases/PostgreSQL/DatabaseMaterializePostgreSQL.cpp index dac33e00b3c..6c746855b89 100644 --- a/src/Databases/PostgreSQL/DatabaseMaterializePostgreSQL.cpp +++ b/src/Databases/PostgreSQL/DatabaseMaterializePostgreSQL.cpp @@ -122,6 +122,8 @@ StoragePtr DatabaseMaterializePostgreSQL::tryGetTable(const String & name, const } } + /// Note: In select query we call MaterializePostgreSQL table and it calls tryGetTable from its nested. 
+ std::lock_guard lock(tables_mutex); auto table = materialized_tables.find(name); /// Here it is possible that nested table is temporarily out of reach, but return storage anyway, /// it will not allow to read if nested is unavailable at the moment diff --git a/src/Databases/PostgreSQL/DatabaseMaterializePostgreSQL.h b/src/Databases/PostgreSQL/DatabaseMaterializePostgreSQL.h index cf9024494f9..f3d4535866a 100644 --- a/src/Databases/PostgreSQL/DatabaseMaterializePostgreSQL.h +++ b/src/Databases/PostgreSQL/DatabaseMaterializePostgreSQL.h @@ -66,6 +66,7 @@ private: std::shared_ptr replication_handler; std::map materialized_tables; + mutable std::mutex tables_mutex; }; } From f0be5c6938d9194807899ba642043ed62561b0a5 Mon Sep 17 00:00:00 2001 From: kssenii Date: Fri, 9 Apr 2021 08:07:56 +0000 Subject: [PATCH 074/931] Fix style, build, ya check --- src/Core/PostgreSQL/PostgreSQLConnection.cpp | 1 - src/Core/ya.make.in | 2 +- .../PostgreSQL/DatabaseMaterializePostgreSQL.cpp | 6 +++--- .../PostgreSQL/DatabaseMaterializePostgreSQL.h | 10 +++++----- 4 files changed, 9 insertions(+), 10 deletions(-) diff --git a/src/Core/PostgreSQL/PostgreSQLConnection.cpp b/src/Core/PostgreSQL/PostgreSQLConnection.cpp index 0a0dbe19e45..12f0232f326 100644 --- a/src/Core/PostgreSQL/PostgreSQLConnection.cpp +++ b/src/Core/PostgreSQL/PostgreSQLConnection.cpp @@ -2,7 +2,6 @@ #if USE_LIBPQXX #include -#include #include #include diff --git a/src/Core/ya.make.in b/src/Core/ya.make.in index e1c679ac809..53cdafbf32a 100644 --- a/src/Core/ya.make.in +++ b/src/Core/ya.make.in @@ -10,7 +10,7 @@ PEERDIR( SRCS( - + ) END() diff --git a/src/Databases/PostgreSQL/DatabaseMaterializePostgreSQL.cpp b/src/Databases/PostgreSQL/DatabaseMaterializePostgreSQL.cpp index 6c746855b89..d6a02ca2cc9 100644 --- a/src/Databases/PostgreSQL/DatabaseMaterializePostgreSQL.cpp +++ b/src/Databases/PostgreSQL/DatabaseMaterializePostgreSQL.cpp @@ -35,15 +35,15 @@ namespace ErrorCodes static const auto METADATA_SUFFIX = ".postgresql_replica_metadata"; DatabaseMaterializePostgreSQL::DatabaseMaterializePostgreSQL( - const Context & context, + const Context & context_, const String & metadata_path_, - UUID uuid, + UUID uuid_, const ASTStorage * database_engine_define_, const String & database_name_, const String & postgres_database_name, const String & connection_string, std::unique_ptr settings_) - : DatabaseAtomic(database_name_, metadata_path_, uuid, "DatabaseMaterializePostgreSQL (" + database_name_ + ")", context) + : DatabaseAtomic(database_name_, metadata_path_, uuid_, "DatabaseMaterializePostgreSQL (" + database_name_ + ")", context_) , database_engine_define(database_engine_define_->clone()) , remote_database_name(postgres_database_name) , connection(std::make_shared(connection_string, "")) diff --git a/src/Databases/PostgreSQL/DatabaseMaterializePostgreSQL.h b/src/Databases/PostgreSQL/DatabaseMaterializePostgreSQL.h index f3d4535866a..b80ff4c5974 100644 --- a/src/Databases/PostgreSQL/DatabaseMaterializePostgreSQL.h +++ b/src/Databases/PostgreSQL/DatabaseMaterializePostgreSQL.h @@ -30,12 +30,12 @@ class DatabaseMaterializePostgreSQL : public DatabaseAtomic public: DatabaseMaterializePostgreSQL( - const Context & context, + const Context & context_, const String & metadata_path_, - UUID uuid, - const ASTStorage * database_engine_define, - const String & dbname_, - const String & postgres_dbname, + UUID uuid_, + const ASTStorage * database_engine_define_, + const String & database_name_, + const String & postgres_database_name, const String & 
connection_string, std::unique_ptr settings_); From 12f98e8b1185847c5124127f20fd97d094b67324 Mon Sep 17 00:00:00 2001 From: kssenii Date: Fri, 9 Apr 2021 10:22:06 +0000 Subject: [PATCH 075/931] Much better --- src/Core/PostgreSQL/PostgreSQLConnection.cpp | 50 +++++++++--- src/Core/PostgreSQL/PostgreSQLConnection.h | 43 +++++++--- .../PostgreSQL/PostgreSQLConnectionPool.cpp | 25 +----- .../PostgreSQL/PostgreSQLConnectionPool.h | 5 +- src/Databases/DatabaseFactory.cpp | 4 +- .../DatabaseMaterializePostgreSQL.cpp | 6 +- .../DatabaseMaterializePostgreSQL.h | 2 +- .../PostgreSQLReplicationHandler.cpp | 80 ++++++++----------- .../PostgreSQL/PostgreSQLReplicationHandler.h | 57 ++++++++++--- .../StorageMaterializePostgreSQL.cpp | 8 +- .../PostgreSQL/StorageMaterializePostgreSQL.h | 2 +- 11 files changed, 166 insertions(+), 116 deletions(-) diff --git a/src/Core/PostgreSQL/PostgreSQLConnection.cpp b/src/Core/PostgreSQL/PostgreSQLConnection.cpp index 12f0232f326..b6128e909ef 100644 --- a/src/Core/PostgreSQL/PostgreSQLConnection.cpp +++ b/src/Core/PostgreSQL/PostgreSQLConnection.cpp @@ -1,9 +1,8 @@ #include "PostgreSQLConnection.h" #if USE_LIBPQXX -#include -#include #include +#include namespace DB @@ -17,11 +16,41 @@ namespace ErrorCodes namespace postgres { -Connection::Connection( - const String & connection_str_, - const String & address_) - : connection_str(connection_str_) - , address(address_) +ConnectionInfo formatConnectionString( + std::string dbname, std::string host, UInt16 port, std::string user, std::string password) +{ + DB::WriteBufferFromOwnString out; + out << "dbname=" << DB::quote << dbname + << " host=" << DB::quote << host + << " port=" << port + << " user=" << DB::quote << user + << " password=" << DB::quote << password; + return std::make_pair(out.str(), host + ':' + DB::toString(port)); +} + + +ConnectionPtr createReplicationConnection(const ConnectionInfo & connection_info) +{ + auto new_connection_info = std::make_pair( + fmt::format("{} replication=database", connection_info.first), + connection_info.second); + + auto connection = std::make_shared(new_connection_info); + connection->get()->set_variable("default_transaction_isolation", "'repeatable read'"); + + return connection; +} + + +template +std::shared_ptr createTransaction(pqxx::connection & connection) +{ + return std::make_shared(connection); +} + + +Connection::Connection(const ConnectionInfo & connection_info_) + : connection_info(connection_info_) { } @@ -54,8 +83,8 @@ void Connection::connectIfNeeded() { if (!connection || !connection->is_open()) { - LOG_DEBUG(&Poco::Logger::get("PostgreSQLConnection"), "New connection to {}", getAddress()); - connection = std::make_shared(connection_str); + connection = std::make_shared(connection_info.first); + LOG_DEBUG(&Poco::Logger::get("PostgreSQLConnection"), "New connection to {}", connection_info.second); } } @@ -70,8 +99,7 @@ bool Connection::tryConnectIfNeeded() { LOG_ERROR( &Poco::Logger::get("PostgreSQLConnection"), - "Unable to setup connection to {}, reason: {}", - getAddress(), pqxx_error.what()); + "Unable to setup connection to {}, reason: {}", connection_info.second, pqxx_error.what()); return false; } catch (...) 
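[Editor's note: illustrative usage of the helpers introduced above, not part of the patch; database name, host and credentials are placeholders. formatConnectionString returns a pair so that the second, password-free member can be used in log messages:]

    auto connection_info = postgres::formatConnectionString("db", "127.0.0.1", 5432, "user", "password");
    /// connection_info.first  == "dbname='db' host='127.0.0.1' port=5432 user='user' password='password'"
    /// connection_info.second == "127.0.0.1:5432"  (no credentials, safe to print in logs)
    auto replication_connection = postgres::createReplicationConnection(connection_info);
    /// createReplicationConnection appends "replication=database" and sets repeatable read isolation.
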
diff --git a/src/Core/PostgreSQL/PostgreSQLConnection.h b/src/Core/PostgreSQL/PostgreSQLConnection.h index 94bb0635914..dfed426b462 100644 --- a/src/Core/PostgreSQL/PostgreSQLConnection.h +++ b/src/Core/PostgreSQL/PostgreSQLConnection.h @@ -8,6 +8,7 @@ #include // Y_IGNORE #include #include +#include namespace pqxx @@ -20,13 +21,40 @@ namespace pqxx namespace postgres { +class Connection; +using ConnectionPtr = std::shared_ptr; + + +/// Connection string and address without login/password (for error logs) +using ConnectionInfo = std::pair; + +ConnectionInfo formatConnectionString( + std::string dbname, std::string host, UInt16 port, std::string user, std::string password); + +ConnectionPtr createReplicationConnection(const ConnectionInfo & connection_info); + + +template +class Transaction +{ +public: + Transaction(pqxx::connection & connection) : transaction(connection) {} + + ~Transaction() { transaction.commit(); } + + T & getRef() { return transaction; } + + void exec(const String & query) { transaction.exec(query); } + +private: + T transaction; +}; + + class Connection { - public: - Connection( - const String & connection_str_, - const String & address_); + Connection(const ConnectionInfo & connection_info_); Connection(const Connection & other) = delete; @@ -38,20 +66,17 @@ public: bool isConnected() { return tryConnectIfNeeded(); } - const String & getConnectionString() { return connection_str; } + const ConnectionInfo & getConnectionInfo() { return connection_info; } private: void connectIfNeeded(); bool tryConnectIfNeeded(); - const std::string & getAddress() { return address; } - pqxx::ConnectionPtr connection; - std::string connection_str, address; + ConnectionInfo connection_info; }; -using ConnectionPtr = std::shared_ptr; class ConnectionHolder { diff --git a/src/Core/PostgreSQL/PostgreSQLConnectionPool.cpp b/src/Core/PostgreSQL/PostgreSQLConnectionPool.cpp index 42c716dcf14..f4a1c7f08f2 100644 --- a/src/Core/PostgreSQL/PostgreSQLConnectionPool.cpp +++ b/src/Core/PostgreSQL/PostgreSQLConnectionPool.cpp @@ -3,8 +3,6 @@ #endif #if USE_LIBPQXX -#include -#include #include "PostgreSQLConnectionPool.h" #include "PostgreSQLConnection.h" #include @@ -31,16 +29,14 @@ ConnectionPool::ConnectionPool( "New connection pool. Size: {}, blocks on empty pool: {}", pool_size, block_on_empty_pool); - address = host + ':' + std::to_string(port); - connection_str = formatConnectionString(std::move(dbname), std::move(host), port, std::move(user), std::move(password)); + connection_info = formatConnectionString(std::move(dbname), std::move(host), port, std::move(user), std::move(password)); initialize(); } ConnectionPool::ConnectionPool(const ConnectionPool & other) : pool(std::make_shared(other.pool_size)) - , connection_str(other.connection_str) - , address(other.address) + , connection_info(other.connection_info) , pool_size(other.pool_size) , pool_wait_timeout(other.pool_wait_timeout) , block_on_empty_pool(other.block_on_empty_pool) @@ -53,20 +49,7 @@ void ConnectionPool::initialize() { /// No connection is made, just fill pool with non-connected connection objects. 
for (size_t i = 0; i < pool_size; ++i) - pool->push(std::make_shared(connection_str, address)); -} - - -std::string ConnectionPool::formatConnectionString( - std::string dbname, std::string host, UInt16 port, std::string user, std::string password) -{ - DB::WriteBufferFromOwnString out; - out << "dbname=" << DB::quote << dbname - << " host=" << DB::quote << host - << " port=" << port - << " user=" << DB::quote << user - << " password=" << DB::quote << password; - return out.str(); + pool->push(std::make_shared(connection_info)); } @@ -87,7 +70,7 @@ ConnectionHolderPtr ConnectionPool::get() return std::make_shared(connection, *pool); } - connection = std::make_shared(connection_str, address); + connection = std::make_shared(connection_info); return std::make_shared(connection, *pool); } diff --git a/src/Core/PostgreSQL/PostgreSQLConnectionPool.h b/src/Core/PostgreSQL/PostgreSQLConnectionPool.h index b9b2a50aa48..01ae21703d9 100644 --- a/src/Core/PostgreSQL/PostgreSQLConnectionPool.h +++ b/src/Core/PostgreSQL/PostgreSQLConnectionPool.h @@ -41,9 +41,6 @@ public: ConnectionHolderPtr get(); - static std::string formatConnectionString( - std::string dbname, std::string host, UInt16 port, std::string user, std::string password); - private: using Pool = ConcurrentBoundedQueue; using PoolPtr = std::shared_ptr; @@ -51,7 +48,7 @@ private: void initialize(); PoolPtr pool; - std::string connection_str, address; + ConnectionInfo connection_info; size_t pool_size; int64_t pool_wait_timeout; bool block_on_empty_pool; diff --git a/src/Databases/DatabaseFactory.cpp b/src/Databases/DatabaseFactory.cpp index 6b9f90c5500..06b04a22b9f 100644 --- a/src/Databases/DatabaseFactory.cpp +++ b/src/Databases/DatabaseFactory.cpp @@ -287,7 +287,7 @@ DatabasePtr DatabaseFactory::getImpl(const ASTCreateQuery & create, const String const auto & password = safeGetLiteralValue(engine_args[3], engine_name); auto parsed_host_port = parseAddress(host_port, 5432); - auto connection_string = postgres::ConnectionPool::formatConnectionString(postgres_database_name, parsed_host_port.first, parsed_host_port.second, username, password); + auto connection_info = postgres::formatConnectionString(postgres_database_name, parsed_host_port.first, parsed_host_port.second, username, password); auto postgresql_replica_settings = std::make_unique(); @@ -296,7 +296,7 @@ DatabasePtr DatabaseFactory::getImpl(const ASTCreateQuery & create, const String return std::make_shared( context, metadata_path, uuid, engine_define, - database_name, postgres_database_name, connection_string, + database_name, postgres_database_name, connection_info, std::move(postgresql_replica_settings)); } diff --git a/src/Databases/PostgreSQL/DatabaseMaterializePostgreSQL.cpp b/src/Databases/PostgreSQL/DatabaseMaterializePostgreSQL.cpp index d6a02ca2cc9..7f808007ebc 100644 --- a/src/Databases/PostgreSQL/DatabaseMaterializePostgreSQL.cpp +++ b/src/Databases/PostgreSQL/DatabaseMaterializePostgreSQL.cpp @@ -41,12 +41,12 @@ DatabaseMaterializePostgreSQL::DatabaseMaterializePostgreSQL( const ASTStorage * database_engine_define_, const String & database_name_, const String & postgres_database_name, - const String & connection_string, + const postgres::ConnectionInfo & connection_info, std::unique_ptr settings_) : DatabaseAtomic(database_name_, metadata_path_, uuid_, "DatabaseMaterializePostgreSQL (" + database_name_ + ")", context_) , database_engine_define(database_engine_define_->clone()) , remote_database_name(postgres_database_name) - , 
connection(std::make_shared(connection_string, "")) + , connection(std::make_shared(connection_info)) , settings(std::move(settings_)) { } @@ -56,7 +56,7 @@ void DatabaseMaterializePostgreSQL::startSynchronization() { replication_handler = std::make_unique( remote_database_name, - connection->getConnectionString(), + connection->getConnectionInfo(), metadata_path + METADATA_SUFFIX, global_context, settings->postgresql_replica_max_block_size.value, diff --git a/src/Databases/PostgreSQL/DatabaseMaterializePostgreSQL.h b/src/Databases/PostgreSQL/DatabaseMaterializePostgreSQL.h index b80ff4c5974..405bfd80283 100644 --- a/src/Databases/PostgreSQL/DatabaseMaterializePostgreSQL.h +++ b/src/Databases/PostgreSQL/DatabaseMaterializePostgreSQL.h @@ -36,7 +36,7 @@ public: const ASTStorage * database_engine_define_, const String & database_name_, const String & postgres_database_name, - const String & connection_string, + const postgres::ConnectionInfo & connection_info, std::unique_ptr settings_); String getEngineName() const override { return "MaterializePostgreSQL"; } diff --git a/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp b/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp index 1cca362ca35..cce84892ed6 100644 --- a/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp +++ b/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp @@ -19,7 +19,7 @@ static const auto reschedule_ms = 500; PostgreSQLReplicationHandler::PostgreSQLReplicationHandler( const std::string & database_name_, - const std::string & conn_str, + const postgres::ConnectionInfo & connection_info_, const std::string & metadata_path_, const Context & context_, const size_t max_block_size_, @@ -29,13 +29,13 @@ PostgreSQLReplicationHandler::PostgreSQLReplicationHandler( : log(&Poco::Logger::get("PostgreSQLReplicationHandler")) , context(context_) , database_name(database_name_) - , connection_str(conn_str) , metadata_path(metadata_path_) + , connection_info(connection_info_) , max_block_size(max_block_size_) , allow_minimal_ddl(allow_minimal_ddl_) , is_postgresql_replica_database_engine(is_postgresql_replica_database_engine_) , tables_list(tables_list_) - , connection(std::make_shared(conn_str, "")) + , connection(std::make_shared(connection_info_)) { replication_slot = fmt::format("{}_ch_replication_slot", database_name); publication_name = fmt::format("{}_ch_publication", database_name); @@ -63,14 +63,11 @@ void PostgreSQLReplicationHandler::waitConnectionAndStart() { /// Will throw pqxx::broken_connection if no connection at the moment connection->get(); - startSynchronization(); } catch (const pqxx::broken_connection & pqxx_error) { - LOG_ERROR(log, "Unable to set up connection. Reconnection attempt will continue. Error message: {}", - pqxx_error.what()); - + LOG_ERROR(log, "Unable to set up connection. Reconnection attempt will continue. Error message: {}", pqxx_error.what()); startup_task->scheduleAfter(reschedule_ms); } catch (...) 
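[Editor's note: illustrative sketch, not part of the patch. The hunks below switch to the RAII postgres::Transaction wrapper from PostgreSQLConnection.h, whose destructor commits; the template argument, stripped elsewhere in this document, is assumed to be a pqxx transaction type. A minimal usage outline:]

    {
        postgres::Transaction<pqxx::nontransaction> tx(connection->getRef());
        tx.exec("SELECT 1");   /// statements run inside the wrapped transaction
    }   /// ~Transaction() calls commit() here, which is why the explicit tx->commit() calls disappear below
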
@@ -92,20 +89,19 @@ void PostgreSQLReplicationHandler::startSynchronization() { createPublicationIfNeeded(connection->getRef()); - auto replication_connection = std::make_shared(fmt::format("{} replication=database", connection->getConnectionString()), ""); - replication_connection->get()->set_variable("default_transaction_isolation", "'repeatable read'"); - auto tx = std::make_shared(replication_connection->getRef()); + auto replication_connection = postgres::createReplicationConnection(connection_info); + postgres::Transaction tx(replication_connection->getRef()); std::string snapshot_name, start_lsn; auto initial_sync = [&]() { - createReplicationSlot(tx, start_lsn, snapshot_name); + createReplicationSlot(tx.getRef(), start_lsn, snapshot_name); loadFromSnapshot(snapshot_name, storages); }; /// Replication slot should be deleted with drop table only and created only once, reused after detach. - if (!isReplicationSlotExist(tx, replication_slot)) + if (!isReplicationSlotExist(tx.getRef(), replication_slot)) { initial_sync(); } @@ -114,12 +110,12 @@ void PostgreSQLReplicationHandler::startSynchronization() /// In case of some failure, the following cases are possible (since publication and replication slot are reused): /// 1. If replication slot exists and metadata file (where last synced version is written) does not exist, it is not ok. /// 2. If created a new publication and replication slot existed before it was created, it is not ok. - dropReplicationSlot(tx); + dropReplicationSlot(tx.getRef()); initial_sync(); } else { - LOG_TRACE(log, "Restoring tables..."); + LOG_TRACE(log, "Restoring {} tables...", storages.size()); for (const auto & [table_name, storage] : storages) { try @@ -135,8 +131,6 @@ void PostgreSQLReplicationHandler::startSynchronization() } } - tx->commit(); - consumer = std::make_shared( context, connection, @@ -226,10 +220,10 @@ void PostgreSQLReplicationHandler::consumerFunc() } -bool PostgreSQLReplicationHandler::isPublicationExist(std::shared_ptr tx) +bool PostgreSQLReplicationHandler::isPublicationExist(pqxx::work & tx) { std::string query_str = fmt::format("SELECT exists (SELECT 1 FROM pg_publication WHERE pubname = '{}')", publication_name); - pqxx::result result{tx->exec(query_str)}; + pqxx::result result{tx.exec(query_str)}; assert(!result.empty()); bool publication_exists = (result[0][0].as() == "t"); @@ -245,9 +239,9 @@ void PostgreSQLReplicationHandler::createPublicationIfNeeded(pqxx::connection & if (new_publication_created) return; - auto tx = std::make_shared(connection_); + postgres::Transaction tx(connection_); - if (!isPublicationExist(tx)) + if (!isPublicationExist(tx.getRef())) { if (tables_list.empty()) { @@ -263,7 +257,7 @@ void PostgreSQLReplicationHandler::createPublicationIfNeeded(pqxx::connection & std::string query_str = fmt::format("CREATE PUBLICATION {} FOR TABLE ONLY {}", publication_name, tables_list); try { - tx->exec(query_str); + tx.exec(query_str); new_publication_created = true; LOG_TRACE(log, "Created publication {} with tables list: {}", publication_name, tables_list); } @@ -273,15 +267,13 @@ void PostgreSQLReplicationHandler::createPublicationIfNeeded(pqxx::connection & throw; } } - - tx->commit(); } -bool PostgreSQLReplicationHandler::isReplicationSlotExist(NontransactionPtr tx, std::string & slot_name) +bool PostgreSQLReplicationHandler::isReplicationSlotExist(pqxx::nontransaction & tx, std::string & slot_name) { std::string query_str = fmt::format("SELECT active, restart_lsn FROM pg_replication_slots WHERE slot_name = '{}'", 
slot_name); - pqxx::result result{tx->exec(query_str)}; + pqxx::result result{tx.exec(query_str)}; /// Replication slot does not exist if (result.empty()) @@ -296,7 +288,7 @@ bool PostgreSQLReplicationHandler::isReplicationSlotExist(NontransactionPtr tx, void PostgreSQLReplicationHandler::createReplicationSlot( - NontransactionPtr tx, std::string & start_lsn, std::string & snapshot_name, bool temporary) + pqxx::nontransaction & tx, std::string & start_lsn, std::string & snapshot_name, bool temporary) { std::string query_str; @@ -310,7 +302,7 @@ void PostgreSQLReplicationHandler::createReplicationSlot( try { - pqxx::result result{tx->exec(query_str)}; + pqxx::result result{tx.exec(query_str)}; start_lsn = result[0][1].as(); snapshot_name = result[0][2].as(); LOG_TRACE(log, "Created replication slot: {}, start lsn: {}", replication_slot, start_lsn); @@ -323,7 +315,7 @@ void PostgreSQLReplicationHandler::createReplicationSlot( } -void PostgreSQLReplicationHandler::dropReplicationSlot(NontransactionPtr tx, bool temporary) +void PostgreSQLReplicationHandler::dropReplicationSlot(pqxx::nontransaction & tx, bool temporary) { std::string slot_name; if (temporary) @@ -333,15 +325,15 @@ void PostgreSQLReplicationHandler::dropReplicationSlot(NontransactionPtr tx, boo std::string query_str = fmt::format("SELECT pg_drop_replication_slot('{}')", slot_name); - tx->exec(query_str); + tx.exec(query_str); LOG_TRACE(log, "Dropped replication slot: {}", slot_name); } -void PostgreSQLReplicationHandler::dropPublication(NontransactionPtr tx) +void PostgreSQLReplicationHandler::dropPublication(pqxx::nontransaction & tx) { std::string query_str = fmt::format("DROP PUBLICATION IF EXISTS {}", publication_name); - tx->exec(query_str); + tx.exec(query_str); } @@ -350,14 +342,12 @@ void PostgreSQLReplicationHandler::shutdownFinal() if (Poco::File(metadata_path).exists()) Poco::File(metadata_path).remove(); - connection = std::make_shared(connection_str, ""); - auto tx = std::make_shared(connection->getRef()); + connection = std::make_shared(connection_info); + postgres::Transaction tx(connection->getRef()); - dropPublication(tx); - if (isReplicationSlotExist(tx, replication_slot)) - dropReplicationSlot(tx); - - tx->commit(); + dropPublication(tx.getRef()); + if (isReplicationSlotExist(tx.getRef(), replication_slot)) + dropReplicationSlot(tx.getRef()); } @@ -379,9 +369,9 @@ NameSet PostgreSQLReplicationHandler::fetchTablesFromPublication(pqxx::connectio { std::string query = fmt::format("SELECT tablename FROM pg_publication_tables WHERE pubname = '{}'", publication_name); std::unordered_set tables; - pqxx::read_transaction tx(connection_); + postgres::Transaction tx(connection_); - for (auto table_name : tx.stream(query)) + for (auto table_name : tx.getRef().stream(query)) tables.insert(std::get<0>(table_name)); return tables; @@ -405,7 +395,6 @@ std::unordered_map PostgreSQLReplicationHandler::reloadFromSnapsh std::unordered_map tables_start_lsn; try { - auto tx = std::make_shared(connection->getRef()); Storages sync_storages; for (const auto & relation : relation_data) { @@ -414,17 +403,14 @@ std::unordered_map PostgreSQLReplicationHandler::reloadFromSnapsh sync_storages[table_name] = storage; storage->dropNested(); } - tx->commit(); - auto replication_connection = std::make_shared(fmt::format("{} replication=database", connection_str), ""); - replication_connection->get()->set_variable("default_transaction_isolation", "'repeatable read'"); + auto replication_connection = 
postgres::createReplicationConnection(connection_info);
+        postgres::Transaction tx(replication_connection->getRef());
 
-        auto r_tx = std::make_shared(replication_connection->getRef());
         std::string snapshot_name, start_lsn;
-        createReplicationSlot(r_tx, start_lsn, snapshot_name, true);
+        createReplicationSlot(tx.getRef(), start_lsn, snapshot_name, true);
 
         /// This snapshot is valid up to the end of the transaction, which exported it.
         auto success_tables = loadFromSnapshot(snapshot_name, sync_storages);
-        r_tx->commit();
 
         for (const auto & relation : relation_data)
         {
diff --git a/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.h b/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.h
index 0aa165bd183..147c0c7b114 100644
--- a/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.h
+++ b/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.h
@@ -28,7 +28,7 @@ class PostgreSQLReplicationHandler
 public:
     PostgreSQLReplicationHandler(
         const std::string & database_name_,
-        const std::string & conn_str_,
+        const postgres::ConnectionInfo & connection_info_,
         const std::string & metadata_path_,
         const Context & context_,
         const size_t max_block_size_,
@@ -38,29 +38,31 @@ public:
 
     void startup();
 
+    /// Stop replication without cleanup.
     void shutdown();
 
+    /// Clean up replication: remove publication and replication slots.
     void shutdownFinal();
 
     void addStorage(const std::string & table_name, StorageMaterializePostgreSQL * storage);
 
+    /// Fetch list of tables which are going to be replicated. Used for database engine.
     NameSet fetchRequiredTables(pqxx::connection & connection_);
 
 private:
-    using NontransactionPtr = std::shared_ptr;
     using Storages = std::unordered_map;
 
-    bool isPublicationExist(std::shared_ptr tx);
-
-    bool isReplicationSlotExist(NontransactionPtr ntx, std::string & slot_name);
-
     void createPublicationIfNeeded(pqxx::connection & connection_);
 
-    void createReplicationSlot(NontransactionPtr ntx, std::string & start_lsn, std::string & snapshot_name, bool temporary = false);
+    bool isPublicationExist(pqxx::work & tx);
 
-    void dropReplicationSlot(NontransactionPtr tx, bool temporary = false);
+    bool isReplicationSlotExist(pqxx::nontransaction & tx, std::string & slot_name);
 
-    void dropPublication(NontransactionPtr ntx);
+    void createReplicationSlot(pqxx::nontransaction & tx, std::string & start_lsn, std::string & snapshot_name, bool temporary = false);
+
+    void dropReplicationSlot(pqxx::nontransaction & tx, bool temporary = false);
+
+    void dropPublication(pqxx::nontransaction & ntx);
 
     void waitConnectionAndStart();
 
@@ -78,19 +80,48 @@ private:
 
     Poco::Logger * log;
     const Context & context;
-    const std::string database_name, connection_str, metadata_path;
+
+    /// Remote database name.
+    const String database_name;
+
+    /// Path for replication metadata.
+    const String metadata_path;
+
+    /// Connection string and address for logs.
+    postgres::ConnectionInfo connection_info;
+
+    /// max_block_size for replication stream.
     const size_t max_block_size;
-    bool allow_minimal_ddl, is_postgresql_replica_database_engine;
-    std::string tables_list, replication_slot, publication_name;
+
+    /// Table structure changes are always tracked. By default, a table with a changed schema will get into a skip list.
+    bool allow_minimal_ddl = false;
+
+    /// To distinguish whether current replication handler belongs to a MaterializePostgreSQL database engine or a single storage.
+    bool is_postgresql_replica_database_engine;
+
+    /// A comma-separated list of tables, which are going to be replicated for database engine.
By default, a whole database is replicated. + String tables_list; + + String replication_slot, publication_name; postgres::ConnectionPtr connection; + + /// Replication consumer. Manages deconding of replication stream and syncing into tables. std::shared_ptr consumer; BackgroundSchedulePool::TaskHolder startup_task, consumer_task; - std::atomic tables_loaded = false, stop_synchronization = false; + + std::atomic stop_synchronization = false; + + /// For database engine there are 2 places where it is checked for publication: + /// 1. to fetch tables list from already created publication when database is loaded + /// 2. at replication startup bool new_publication_created = false; + /// MaterializePostgreSQL tables. Used for managing all operations with its internal nested tables. Storages storages; + + /// List of nested tables, which is passed to replication consumer. std::unordered_map nested_storages; }; diff --git a/src/Storages/PostgreSQL/StorageMaterializePostgreSQL.cpp b/src/Storages/PostgreSQL/StorageMaterializePostgreSQL.cpp index 7002d7d8c99..65b38086db9 100644 --- a/src/Storages/PostgreSQL/StorageMaterializePostgreSQL.cpp +++ b/src/Storages/PostgreSQL/StorageMaterializePostgreSQL.cpp @@ -42,7 +42,7 @@ StorageMaterializePostgreSQL::StorageMaterializePostgreSQL( const StorageID & table_id_, const String & remote_database_name, const String & remote_table_name_, - const String & connection_str, + const postgres::ConnectionInfo & connection_info, const StorageInMemoryMetadata & storage_metadata, const Context & context_, std::unique_ptr replication_settings_) @@ -60,7 +60,7 @@ StorageMaterializePostgreSQL::StorageMaterializePostgreSQL( replication_handler = std::make_unique( remote_database_name, - connection_str, + connection_info, metadata_path, global_context, replication_settings->postgresql_replica_max_block_size.value, @@ -445,7 +445,7 @@ void registerStorageMaterializePostgreSQL(StorageFactory & factory) const String & remote_database = engine_args[1]->as().value.safeGet(); /// No connection is made here, see Storages/PostgreSQL/PostgreSQLConnection.cpp - auto connection_string = postgres::ConnectionPool::formatConnectionString( + auto connection_info = postgres::formatConnectionString( remote_database, parsed_host_port.first, parsed_host_port.second, @@ -453,7 +453,7 @@ void registerStorageMaterializePostgreSQL(StorageFactory & factory) engine_args[4]->as().value.safeGet()); return StorageMaterializePostgreSQL::create( - args.table_id, remote_database, remote_table, connection_string, + args.table_id, remote_database, remote_table, connection_info, metadata, args.context, std::move(postgresql_replication_settings)); }; diff --git a/src/Storages/PostgreSQL/StorageMaterializePostgreSQL.h b/src/Storages/PostgreSQL/StorageMaterializePostgreSQL.h index feba216b4c4..5bbea64133a 100644 --- a/src/Storages/PostgreSQL/StorageMaterializePostgreSQL.h +++ b/src/Storages/PostgreSQL/StorageMaterializePostgreSQL.h @@ -76,7 +76,7 @@ protected: const StorageID & table_id_, const String & remote_database_name, const String & remote_table_name, - const String & connection_str, + const postgres::ConnectionInfo & connection_info, const StorageInMemoryMetadata & storage_metadata, const Context & context_, std::unique_ptr replication_settings_); From f7361250b2b19a4bfab880c4789457a166c3a64d Mon Sep 17 00:00:00 2001 From: kssenii Date: Fri, 9 Apr 2021 11:02:12 +0000 Subject: [PATCH 076/931] Fixes --- src/Core/PostgreSQL/PostgreSQLConnection.cpp | 7 ------- src/DataStreams/PostgreSQLBlockInputStream.h 
| 3 +-- src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp | 5 +++-- src/Storages/PostgreSQL/StorageMaterializePostgreSQL.cpp | 1 - 4 files changed, 4 insertions(+), 12 deletions(-) diff --git a/src/Core/PostgreSQL/PostgreSQLConnection.cpp b/src/Core/PostgreSQL/PostgreSQLConnection.cpp index b6128e909ef..a6edf3e150e 100644 --- a/src/Core/PostgreSQL/PostgreSQLConnection.cpp +++ b/src/Core/PostgreSQL/PostgreSQLConnection.cpp @@ -42,13 +42,6 @@ ConnectionPtr createReplicationConnection(const ConnectionInfo & connection_info } -template -std::shared_ptr createTransaction(pqxx::connection & connection) -{ - return std::make_shared(connection); -} - - Connection::Connection(const ConnectionInfo & connection_info_) : connection_info(connection_info_) { diff --git a/src/DataStreams/PostgreSQLBlockInputStream.h b/src/DataStreams/PostgreSQLBlockInputStream.h index d62c11ccef7..5c637015f18 100644 --- a/src/DataStreams/PostgreSQLBlockInputStream.h +++ b/src/DataStreams/PostgreSQLBlockInputStream.h @@ -61,11 +61,10 @@ private: }; -/// Passes transaction object into PostgreSQLBlockInputStream and does not close transaction after read if finished. +/// Passes transaction object into PostgreSQLBlockInputStream and does not close transaction after read is finished. template class PostgreSQLTransactionBlockInputStream : public PostgreSQLBlockInputStream { - public: using Base = PostgreSQLBlockInputStream; diff --git a/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp b/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp index cce84892ed6..a1825afe622 100644 --- a/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp +++ b/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp @@ -17,6 +17,8 @@ namespace DB static const auto reschedule_ms = 500; +/// TODO: fetch replica identity index + PostgreSQLReplicationHandler::PostgreSQLReplicationHandler( const std::string & database_name_, const postgres::ConnectionInfo & connection_info_, @@ -279,9 +281,8 @@ bool PostgreSQLReplicationHandler::isReplicationSlotExist(pqxx::nontransaction & if (result.empty()) return false; - bool is_active = result[0][0].as(); LOG_TRACE(log, "Replication slot {} already exists (active: {}). 
Restart lsn position is {}",
-            slot_name, is_active, result[0][0].as());
+            slot_name, result[0][0].as(), result[0][1].as());
 
     return true;
 }
 
diff --git a/src/Storages/PostgreSQL/StorageMaterializePostgreSQL.cpp b/src/Storages/PostgreSQL/StorageMaterializePostgreSQL.cpp
index 65b38086db9..a680a5bfd84 100644
--- a/src/Storages/PostgreSQL/StorageMaterializePostgreSQL.cpp
+++ b/src/Storages/PostgreSQL/StorageMaterializePostgreSQL.cpp
@@ -77,7 +77,6 @@ StorageMaterializePostgreSQL::StorageMaterializePostgreSQL(
     , nested_storage(nested_storage_)
     , is_postgresql_replica_database(true)
 {
-
 }
 

From dba1fe1989b0ee120485c13ba5769bfa1c9909c6 Mon Sep 17 00:00:00 2001
From: kssenii 
Date: Fri, 9 Apr 2021 14:07:18 +0000
Subject: [PATCH 077/931] Add doc draft

---
 .../materialize-postgresql.md                 | 41 +++++++++++++++++++
 src/Core/PostgreSQL/insertPostgreSQLValue.cpp |  1 +
 src/Core/PostgreSQL/insertPostgreSQLValue.h   |  1 -
 .../PostgreSQL/PostgreSQLReplicationHandler.h |  2 +-
 4 files changed, 43 insertions(+), 2 deletions(-)
 create mode 100644 docs/en/engines/database-engines/materialize-postgresql.md

diff --git a/docs/en/engines/database-engines/materialize-postgresql.md b/docs/en/engines/database-engines/materialize-postgresql.md
new file mode 100644
index 00000000000..c0572c85bfc
--- /dev/null
+++ b/docs/en/engines/database-engines/materialize-postgresql.md
@@ -0,0 +1,41 @@
+---
+toc_priority: 30
+toc_title: MaterializePostgreSQL
+---
+
+# MaterializePostgreSQL {#materialize-postgresql}
+
+## Creating a Database {#creating-a-database}
+
+## Requirements
+
+Each replicated table must have one of the following **replica identity** types:
+
+1. **default** (primary key)
+
+2. **index**
+
+```
+postgres# CREATE TABLE postgres_table (a Integer NOT NULL, b Integer, c Integer NOT NULL, d Integer, e Integer NOT NULL);
+postgres# CREATE unique INDEX postgres_table_index on postgres_table(a, c, e);
+postgres# ALTER TABLE postgres_table REPLICA IDENTITY USING INDEX postgres_table_index;
+
+```
+
+3. **full** (all columns, very inefficient)
+
+
+You can check what type is used for a specific table with the following command:
+
+``` sql
+postgres# SELECT CASE relreplident
+          WHEN 'd' THEN 'default'
+          WHEN 'n' THEN 'nothing'
+          WHEN 'f' THEN 'full'
+          WHEN 'i' THEN 'index'
+       END AS replica_identity
+FROM pg_class
+WHERE oid = 'postgres_table'::regclass;
+
+```
+
diff --git a/src/Core/PostgreSQL/insertPostgreSQLValue.cpp b/src/Core/PostgreSQL/insertPostgreSQLValue.cpp
index 76e770e8fd0..8d490f253db 100644
--- a/src/Core/PostgreSQL/insertPostgreSQLValue.cpp
+++ b/src/Core/PostgreSQL/insertPostgreSQLValue.cpp
@@ -13,6 +13,7 @@
 #include
 #include
 #include
+#include  // Y_IGNORE
 
 
 namespace DB
 
diff --git a/src/Core/PostgreSQL/insertPostgreSQLValue.h b/src/Core/PostgreSQL/insertPostgreSQLValue.h
index dd093cd4c5b..89d63e44ed3 100644
--- a/src/Core/PostgreSQL/insertPostgreSQLValue.h
+++ b/src/Core/PostgreSQL/insertPostgreSQLValue.h
@@ -4,7 +4,6 @@
 #include
 #include
 #include
-#include  // Y_IGNORE
 
 
 namespace DB
 
diff --git a/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.h b/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.h
index 147c0c7b114..5557ae63f96 100644
--- a/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.h
+++ b/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.h
@@ -106,7 +106,7 @@ private:
 
     postgres::ConnectionPtr connection;
 
-    /// Replication consumer. Manages deconding of replication stream and syncing into tables.
+    /// Replication consumer. Manages decoding of replication stream and syncing into tables.
std::shared_ptr consumer; BackgroundSchedulePool::TaskHolder startup_task, consumer_task; From 1c501e7d9738bd9f2f4048ea71e047a4a40d7603 Mon Sep 17 00:00:00 2001 From: kssenii Date: Sat, 10 Apr 2021 14:42:45 +0000 Subject: [PATCH 078/931] Support replica identity index --- .../materialize-postgresql.md | 9 ++- src/Core/Settings.h | 2 +- .../fetchPostgreSQLTableStructure.cpp | 57 ++++++++++++++----- .../fetchPostgreSQLTableStructure.h | 6 +- src/Interpreters/InterpreterCreateQuery.cpp | 2 +- .../MaterializePostgreSQLConsumer.cpp | 10 +--- .../MaterializePostgreSQLConsumer.h | 3 +- .../PostgreSQLReplicationHandler.cpp | 3 +- .../StorageMaterializePostgreSQL.cpp | 14 +++-- .../configs/users.xml | 2 +- .../test.py | 34 ++++++++++- 11 files changed, 99 insertions(+), 43 deletions(-) diff --git a/docs/en/engines/database-engines/materialize-postgresql.md b/docs/en/engines/database-engines/materialize-postgresql.md index c0572c85bfc..b3516001929 100644 --- a/docs/en/engines/database-engines/materialize-postgresql.md +++ b/docs/en/engines/database-engines/materialize-postgresql.md @@ -15,19 +15,18 @@ Each replicated table must have one of the following **replica identity** types: 2. **index** -``` +``` bash postgres# CREATE TABLE postgres_table (a Integer NOT NULL, b Integer, c Integer NOT NULL, d Integer, e Integer NOT NULL); postgres# CREATE unique INDEX postgres_table_index on postgres_table(a, c, e); postgres# ALTER TABLE postgres_table REPLICA IDENTITY USING INDEX postgres_table_index; - ``` -3. **full** (all columns, very inefficient) - +The primary key is always checked first. If it is absent, the index defined as the replica identity index is checked. +If an index is used as the replica identity, the table must have exactly one such index. You can check what type is used for a specific table with the following command: -``` sql +``` bash postgres# SELECT CASE relreplident WHEN 'd' THEN 'default' WHEN 'n' THEN 'nothing' WHEN 'f' THEN 'full' WHEN 'i' THEN 'index' END AS replica_identity FROM pg_class WHERE oid = 'postgres_table'::regclass; diff --git a/src/Core/Settings.h b/src/Core/Settings.h index 2a956adb288..9936d8414b8 100644 --- a/src/Core/Settings.h +++ b/src/Core/Settings.h @@ -418,7 +418,7 @@ class IColumn; M(Bool, cast_keep_nullable, false, "CAST operator keep Nullable for result data type", 0) \ M(Bool, alter_partition_verbose_result, false, "Output information about affected parts. Currently works only for FREEZE and ATTACH commands.", 0) \ M(Bool, allow_experimental_database_materialize_mysql, false, "Allow to create database with Engine=MaterializeMySQL(...).", 0) \ - M(Bool, allow_experimental_database_postgresql_replica, false, "Allow to create database with Engine=PostgreSQLReplica(...).", 0) \ + M(Bool, allow_experimental_database_materialize_postgresql, false, "Allow to create database with Engine=MaterializePostgreSQL(...).", 0) \ M(Bool, external_databases_use_nulls, true, "If set to false, external databases will use default values instead of NULLs. (Supported for PostgreSQL/MaterializePostgreSQL database engine)", 0) \ M(Bool, system_events_show_zero_values, false, "Include all metrics, even with zero values", 0) \ M(MySQLDataTypesSupport, mysql_datatypes_support_level, 0, "Which MySQL types should be converted to corresponding ClickHouse types (rather than being represented as String). Can be empty or any combination of 'decimal' or 'datetime64'. 
When empty MySQL's DECIMAL and DATETIME/TIMESTAMP with non-zero precision are seen as String on ClickHouse's side.", 0) \ diff --git a/src/Databases/PostgreSQL/fetchPostgreSQLTableStructure.cpp b/src/Databases/PostgreSQL/fetchPostgreSQLTableStructure.cpp index 4f669a60529..63124ba12d7 100644 --- a/src/Databases/PostgreSQL/fetchPostgreSQLTableStructure.cpp +++ b/src/Databases/PostgreSQL/fetchPostgreSQLTableStructure.cpp @@ -158,7 +158,7 @@ std::shared_ptr readNamesAndTypesList( template PostgreSQLTableStructure fetchPostgreSQLTableStructure( - std::shared_ptr tx, const String & postgres_table_name, bool use_nulls, bool with_primary_key) + std::shared_ptr tx, const String & postgres_table_name, bool use_nulls, bool with_primary_key, bool with_replica_identity_index) { PostgreSQLTableStructure table; @@ -171,18 +171,43 @@ PostgreSQLTableStructure fetchPostgreSQLTableStructure( table.columns = readNamesAndTypesList(tx, postgres_table_name, query, use_nulls, false); - if (!with_primary_key) - return table; + if (with_primary_key) + { + /// wiki.postgresql.org/wiki/Retrieve_primary_key_columns + query = fmt::format( + "SELECT a.attname, format_type(a.atttypid, a.atttypmod) AS data_type " + "FROM pg_index i " + "JOIN pg_attribute a ON a.attrelid = i.indrelid " + "AND a.attnum = ANY(i.indkey) " + "WHERE i.indrelid = '{}'::regclass AND i.indisprimary", postgres_table_name); - /// wiki.postgresql.org/wiki/Retrieve_primary_key_columns - query = fmt::format( - "SELECT a.attname, format_type(a.atttypid, a.atttypmod) AS data_type " - "FROM pg_index i " - "JOIN pg_attribute a ON a.attrelid = i.indrelid " - "AND a.attnum = ANY(i.indkey) " - "WHERE i.indrelid = '{}'::regclass AND i.indisprimary", postgres_table_name); + table.primary_key_columns = readNamesAndTypesList(tx, postgres_table_name, query, use_nulls, true); + } - table.primary_key_columns = readNamesAndTypesList(tx, postgres_table_name, query, use_nulls, true); + if (with_replica_identity_index) + { + query = fmt::format( + "SELECT " + "a.attname AS column_name, " /// column name + "format_type(a.atttypid, a.atttypmod) as type " /// column type + "FROM " + "pg_class t, " + "pg_class i, " + "pg_index ix, " + "pg_attribute a " + "WHERE " + "t.oid = ix.indrelid " + "and i.oid = ix.indexrelid " + "and a.attrelid = t.oid " + "and a.attnum = ANY(ix.indkey) " + "and t.relkind = 'r' " /// simple tables + "and t.relname = '{}' " + "and ix.indisreplident = 't' " /// index is the replica identity index + "ORDER BY a.attname", /// column names + postgres_table_name); + + table.replica_identity_columns = readNamesAndTypesList(tx, postgres_table_name, query, use_nulls, true); + } return table; } @@ -190,19 +215,21 @@ PostgreSQLTableStructure fetchPostgreSQLTableStructure( template PostgreSQLTableStructure fetchPostgreSQLTableStructure( - std::shared_ptr tx, const String & postgres_table_name, bool use_nulls, bool with_primary_key); + std::shared_ptr tx, const String & postgres_table_name, bool use_nulls, + bool with_primary_key, bool with_replica_identity_index); template PostgreSQLTableStructure fetchPostgreSQLTableStructure( - std::shared_ptr tx, const String & postgres_table_name, bool use_nulls, bool with_primary_key); + std::shared_ptr tx, const String & postgres_table_name, bool use_nulls, + bool with_primary_key, bool with_replica_identity_index); PostgreSQLTableStructure fetchPostgreSQLTableStructure( - pqxx::connection & connection, const String & postgres_table_name, bool use_nulls, bool with_primary_key) + pqxx::connection & connection, const String 
& postgres_table_name, bool use_nulls) { auto tx = std::make_shared(connection); - auto table = fetchPostgreSQLTableStructure(tx, postgres_table_name, use_nulls, with_primary_key); + auto table = fetchPostgreSQLTableStructure(tx, postgres_table_name, use_nulls, false, false); tx->commit(); return table; diff --git a/src/Databases/PostgreSQL/fetchPostgreSQLTableStructure.h b/src/Databases/PostgreSQL/fetchPostgreSQLTableStructure.h index bdfa9e0a01b..bbcb9cd192f 100644 --- a/src/Databases/PostgreSQL/fetchPostgreSQLTableStructure.h +++ b/src/Databases/PostgreSQL/fetchPostgreSQLTableStructure.h @@ -18,16 +18,18 @@ struct PostgreSQLTableStructure { std::shared_ptr columns; std::shared_ptr primary_key_columns; + std::shared_ptr replica_identity_columns; }; using PostgreSQLTableStructurePtr = std::unique_ptr; PostgreSQLTableStructure fetchPostgreSQLTableStructure( - pqxx::connection & connection, const String & postgres_table_name, bool use_nulls, bool with_primary_key = false); + pqxx::connection & connection, const String & postgres_table_name, bool use_nulls); template PostgreSQLTableStructure fetchPostgreSQLTableStructure( - std::shared_ptr tx, const String & postgres_table_name, bool use_nulls, bool with_primary_key = false); + std::shared_ptr tx, const String & postgres_table_name, bool use_nulls, + bool with_primary_key = false, bool with_replica_identity_index = false); } diff --git a/src/Interpreters/InterpreterCreateQuery.cpp b/src/Interpreters/InterpreterCreateQuery.cpp index b20a1992a46..d3bf61ee612 100644 --- a/src/Interpreters/InterpreterCreateQuery.cpp +++ b/src/Interpreters/InterpreterCreateQuery.cpp @@ -215,7 +215,7 @@ BlockIO InterpreterCreateQuery::createDatabase(ASTCreateQuery & create) "Enable allow_experimental_database_replicated to use it.", ErrorCodes::UNKNOWN_DATABASE_ENGINE); } - if (create.storage->engine->name == "MaterializePostgreSQL" && !context.getSettingsRef().allow_experimental_database_postgresql_replica && !internal) + if (create.storage->engine->name == "MaterializePostgreSQL" && !context.getSettingsRef().allow_experimental_database_materialize_postgresql && !internal) { throw Exception("MaterializePostgreSQL is an experimental database engine. 
" "Enable allow_experimental_database_postgresql_replica to use it.", ErrorCodes::UNKNOWN_DATABASE_ENGINE); diff --git a/src/Storages/PostgreSQL/MaterializePostgreSQLConsumer.cpp b/src/Storages/PostgreSQL/MaterializePostgreSQLConsumer.cpp index 76626c575b7..ed317f03e60 100644 --- a/src/Storages/PostgreSQL/MaterializePostgreSQLConsumer.cpp +++ b/src/Storages/PostgreSQL/MaterializePostgreSQLConsumer.cpp @@ -31,7 +31,6 @@ MaterializePostgreSQLConsumer::MaterializePostgreSQLConsumer( const std::string & start_lsn, const size_t max_block_size_, bool allow_minimal_ddl_, - bool is_postgresql_replica_database_engine_, Storages storages_) : log(&Poco::Logger::get("PostgreSQLReaplicaConsumer")) , context(context_) @@ -42,7 +41,6 @@ MaterializePostgreSQLConsumer::MaterializePostgreSQLConsumer( , current_lsn(start_lsn) , max_block_size(max_block_size_) , allow_minimal_ddl(allow_minimal_ddl_) - , is_postgresql_replica_database_engine(is_postgresql_replica_database_engine_) , storages(storages_) { for (const auto & [table_name, storage] : storages) @@ -401,15 +399,13 @@ void MaterializePostgreSQLConsumer::processReplicationMessage(const char * repli /// 'n' - nothing /// 'f' - all columns (set replica identity full) /// 'i' - user defined index with indisreplident set - /// For database engine now supported only 'd', for table engine 'f' is also allowed. + /// Only 'd' and 'i' - are supported. char replica_identity = readInt8(replication_message, pos, size); - if (replica_identity != 'd' && (replica_identity != 'f' || is_postgresql_replica_database_engine)) + if (replica_identity != 'd' && replica_identity != 'i') { LOG_WARNING(log, - "Table has replica identity {} - not supported. " - "For database engine only default (with primary keys) replica identity is supported." - "For table engine full replica identity is also supported. Table will be skipped."); + "Table has replica identity {} - not supported. 
A table must have a primary key or a replica identity index"); markTableAsSkipped(relation_id, relation_name); return; } diff --git a/src/Storages/PostgreSQL/MaterializePostgreSQLConsumer.h b/src/Storages/PostgreSQL/MaterializePostgreSQLConsumer.h index e7bb6d22f7e..a52ecd73e07 100644 --- a/src/Storages/PostgreSQL/MaterializePostgreSQLConsumer.h +++ b/src/Storages/PostgreSQL/MaterializePostgreSQLConsumer.h @@ -35,7 +35,6 @@ public: const std::string & start_lsn, const size_t max_block_size_, bool allow_minimal_ddl_, - bool is_postgresql_replica_database_engine_, Storages storages_); void readMetadata(); @@ -110,7 +109,7 @@ private: std::string current_lsn, final_lsn; const size_t max_block_size; - bool allow_minimal_ddl, is_postgresql_replica_database_engine; + bool allow_minimal_ddl; std::string table_to_insert; diff --git a/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp b/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp index a1825afe622..556ede4ce7b 100644 --- a/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp +++ b/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp @@ -142,7 +142,6 @@ void PostgreSQLReplicationHandler::startSynchronization() start_lsn, max_block_size, allow_minimal_ddl, - is_postgresql_replica_database_engine, nested_storages); consumer_task->activateAndSchedule(); @@ -386,7 +385,7 @@ PostgreSQLTableStructurePtr PostgreSQLReplicationHandler::fetchTableStructure( return nullptr; auto use_nulls = context.getSettingsRef().external_databases_use_nulls; - return std::make_unique(fetchPostgreSQLTableStructure(tx, table_name, use_nulls, true)); + return std::make_unique(fetchPostgreSQLTableStructure(tx, table_name, use_nulls, true, true)); } diff --git a/src/Storages/PostgreSQL/StorageMaterializePostgreSQL.cpp b/src/Storages/PostgreSQL/StorageMaterializePostgreSQL.cpp index a680a5bfd84..e4b87b8410b 100644 --- a/src/Storages/PostgreSQL/StorageMaterializePostgreSQL.cpp +++ b/src/Storages/PostgreSQL/StorageMaterializePostgreSQL.cpp @@ -200,18 +200,22 @@ ASTPtr StorageMaterializePostgreSQL::getCreateNestedTableQuery(PostgreSQLTableSt storage_metadata.setColumns(ColumnsDescription(ordinary_columns_and_types)); setInMemoryMetadata(storage_metadata); - if (!table_structure->primary_key_columns) + if (!table_structure->primary_key_columns && !table_structure->replica_identity_columns) { - throw Exception(ErrorCodes::LOGICAL_ERROR, - "No primary key columns returned for table {}.{}", table_id.database_name, table_id.table_name); + throw Exception(ErrorCodes::BAD_ARGUMENTS, + "Table {}.{} has no primary key and no replica identity index", table_id.database_name, table_id.table_name); } - auto primary_key_columns = *table_structure->primary_key_columns; + NamesAndTypesList merging_columns; + if (table_structure->primary_key_columns) + merging_columns = *table_structure->primary_key_columns; + else + merging_columns = *table_structure->replica_identity_columns; order_by_expression->name = "tuple"; order_by_expression->arguments = std::make_shared(); - for (const auto & column : primary_key_columns) + for (const auto & column : merging_columns) order_by_expression->arguments->children.emplace_back(std::make_shared(column.name)); } diff --git a/tests/integration/test_postgresql_replica_database_engine/configs/users.xml b/tests/integration/test_postgresql_replica_database_engine/configs/users.xml index 948093dbf4c..74d2737c821 100644 --- a/tests/integration/test_postgresql_replica_database_engine/configs/users.xml +++ 
b/tests/integration/test_postgresql_replica_database_engine/configs/users.xml @@ -2,7 +2,7 @@ - <allow_experimental_database_postgresql_replica>1</allow_experimental_database_postgresql_replica> + <allow_experimental_database_materialize_postgresql>1</allow_experimental_database_materialize_postgresql> diff --git a/tests/integration/test_postgresql_replica_database_engine/test.py b/tests/integration/test_postgresql_replica_database_engine/test.py index 7d23458dda1..cf93a3e1b1c 100644 --- a/tests/integration/test_postgresql_replica_database_engine/test.py +++ b/tests/integration/test_postgresql_replica_database_engine/test.py @@ -11,7 +11,7 @@ from helpers.test_tools import TSV cluster = ClickHouseCluster(__file__) instance = cluster.add_instance('instance', - main_configs=['configs/log_conf.xml'], + main_configs = ['configs/log_conf.xml'], user_configs = ['configs/users.xml'], with_postgres=True, stay_alive=True) @@ -23,6 +23,10 @@ postgres_table_template_2 = """ CREATE TABLE IF NOT EXISTS {} ( key Integer NOT NULL, value1 Integer, value2 Integer, value3 Integer, PRIMARY KEY(key)) """ +postgres_table_template_3 = """ + CREATE TABLE IF NOT EXISTS {} ( + key1 Integer NOT NULL, value1 Integer, key2 Integer NOT NULL, value2 Integer NOT NULL) + """ def get_postgres_conn(database=False): if database == True: @@ -299,7 +303,7 @@ def test_load_and_sync_subset_of_database_tables(started_cluster): assert 'test_database' not in instance.query('SHOW DATABASES') -@pytest.mark.timeout(240) +@pytest.mark.timeout(320) def test_table_schema_changes(started_cluster): instance.query("DROP DATABASE IF EXISTS test_database") conn = get_postgres_conn(True) @@ -392,6 +396,32 @@ def test_clickhouse_restart(started_cluster): check_tables_are_synchronized('postgresql_replica_{}'.format(i)); +@pytest.mark.timeout(120) +def test_replica_identity_index(started_cluster): + instance.query("DROP DATABASE IF EXISTS test_database") + conn = get_postgres_conn(True) + cursor = conn.cursor() + + create_postgres_table(cursor, 'postgresql_replica', template=postgres_table_template_3); + cursor.execute("CREATE unique INDEX idx on postgresql_replica(key1, key2);") + cursor.execute("ALTER TABLE postgresql_replica REPLICA IDENTITY USING INDEX idx") + instance.query("INSERT INTO postgres_database.postgresql_replica SELECT number, number, number, number from numbers(50, 10)") + + instance.query( + "CREATE DATABASE test_database ENGINE = MaterializePostgreSQL('postgres1:5432', 'postgres_database', 'postgres', 'mysecretpassword')") + instance.query("INSERT INTO postgres_database.postgresql_replica SELECT number, number, number, number from numbers(100, 10)") + check_tables_are_synchronized('postgresql_replica', order_by='key1'); + + cursor.execute("UPDATE postgresql_replica SET key1=key1-25 WHERE key1<100 ") + cursor.execute("UPDATE postgresql_replica SET key2=key2-25 WHERE key2>100 ") + cursor.execute("UPDATE postgresql_replica SET value1=value1+100 WHERE key1<100 ") + cursor.execute("UPDATE postgresql_replica SET value2=value2+200 WHERE key2>100 ") + check_tables_are_synchronized('postgresql_replica', order_by='key1'); + + cursor.execute('DELETE FROM postgresql_replica WHERE key2<75;') + check_tables_are_synchronized('postgresql_replica', order_by='key1'); + + if __name__ == '__main__': cluster.start() input("Cluster created, press any key to destroy...") From bc228f4010cf602df87bba5a34aed8f4bd3f2d00 Mon Sep 17 00:00:00 2001 From: kssenii Date: Sat, 10 Apr 2021 17:58:09 +0000 Subject: [PATCH 079/931] Better way to drop nested table for single storage --- src/Core/PostgreSQL/insertPostgreSQLValue.cpp | 3 + src/Core/PostgreSQL/insertPostgreSQLValue.h | 8 + src/Databases/DatabaseAtomic.cpp | 11 +- .../DatabaseMaterializePostgreSQL.cpp 
| 14 +- .../DatabaseMaterializePostgreSQL.h | 2 + .../fetchPostgreSQLTableStructure.cpp | 4 +- src/Interpreters/InterpreterDropQuery.cpp | 40 ++++- src/Interpreters/InterpreterDropQuery.h | 2 + src/Storages/IStorage.h | 2 + .../PostgreSQLReplicationHandler.cpp | 12 +- .../StorageMaterializePostgreSQL.cpp | 147 ++++++------------ .../PostgreSQL/StorageMaterializePostgreSQL.h | 22 +-- src/Storages/StorageMaterializedView.cpp | 39 +---- src/Storages/StorageMaterializedView.h | 2 +- 14 files changed, 135 insertions(+), 173 deletions(-) diff --git a/src/Core/PostgreSQL/insertPostgreSQLValue.cpp b/src/Core/PostgreSQL/insertPostgreSQLValue.cpp index 8d490f253db..70537767dc5 100644 --- a/src/Core/PostgreSQL/insertPostgreSQLValue.cpp +++ b/src/Core/PostgreSQL/insertPostgreSQLValue.cpp @@ -1,5 +1,6 @@ #include "insertPostgreSQLValue.h" +#if USE_LIBPQXX #include #include #include @@ -233,3 +234,5 @@ void preparePostgreSQLArrayInfo( array_info[column_idx] = {count_dimensions, default_value, parser}; } } + +#endif diff --git a/src/Core/PostgreSQL/insertPostgreSQLValue.h b/src/Core/PostgreSQL/insertPostgreSQLValue.h index 89d63e44ed3..7acba4f09bd 100644 --- a/src/Core/PostgreSQL/insertPostgreSQLValue.h +++ b/src/Core/PostgreSQL/insertPostgreSQLValue.h @@ -1,5 +1,11 @@ #pragma once +#if !defined(ARCADIA_BUILD) +#include "config_core.h" +#endif + +#if USE_LIBPQXX + #include #include #include @@ -28,3 +34,5 @@ void preparePostgreSQLArrayInfo( void insertDefaultPostgreSQLValue(IColumn & column, const IColumn & sample_column); } + +#endif diff --git a/src/Databases/DatabaseAtomic.cpp b/src/Databases/DatabaseAtomic.cpp index b4222a7e349..ef1917091e3 100644 --- a/src/Databases/DatabaseAtomic.cpp +++ b/src/Databases/DatabaseAtomic.cpp @@ -108,12 +108,10 @@ StoragePtr DatabaseAtomic::detachTable(const String & name) void DatabaseAtomic::dropTable(const Context & context, const String & table_name, bool no_delay) { - if (auto * mv = dynamic_cast(tryGetTable(table_name, context).get())) - { - /// Remove the inner table (if any) to avoid deadlock - /// (due to attempt to execute DROP from the worker thread) - mv->dropInnerTable(no_delay, context); - } + auto * storage = tryGetTable(table_name, context).get(); + /// Remove the inner table (if any) to avoid deadlock + /// (due to attempt to execute DROP from the worker thread) + storage->dropInnerTableIfAny(no_delay, context); String table_metadata_path = getObjectMetadataPath(table_name); String table_metadata_path_drop; @@ -594,4 +592,3 @@ void DatabaseAtomic::waitDetachedTableNotInUse(const UUID & uuid) } } - diff --git a/src/Databases/PostgreSQL/DatabaseMaterializePostgreSQL.cpp b/src/Databases/PostgreSQL/DatabaseMaterializePostgreSQL.cpp index 7f808007ebc..ad60b6242ff 100644 --- a/src/Databases/PostgreSQL/DatabaseMaterializePostgreSQL.cpp +++ b/src/Databases/PostgreSQL/DatabaseMaterializePostgreSQL.cpp @@ -70,9 +70,7 @@ void DatabaseMaterializePostgreSQL::startSynchronization() auto storage = tryGetTable(table_name, global_context); if (!storage) - { - storage = StorageMaterializePostgreSQL::create(StorageID(database_name, table_name), StoragePtr{}, global_context); - } + storage = StorageMaterializePostgreSQL::create(StorageID(database_name, table_name), global_context); replication_handler->addStorage(table_name, storage->template as()); materialized_tables[table_name] = storage; @@ -151,13 +149,17 @@ void DatabaseMaterializePostgreSQL::createTable(const Context & context, const S } +void DatabaseMaterializePostgreSQL::stopReplication() +{ + if 
(replication_handler) + replication_handler->shutdown(); +} + + void DatabaseMaterializePostgreSQL::drop(const Context & context) { if (replication_handler) - { - replication_handler->shutdown(); replication_handler->shutdownFinal(); - } /// Remove metadata Poco::File metadata(getMetadataPath() + METADATA_SUFFIX); diff --git a/src/Databases/PostgreSQL/DatabaseMaterializePostgreSQL.h b/src/Databases/PostgreSQL/DatabaseMaterializePostgreSQL.h index 405bfd80283..b87d281d7a5 100644 --- a/src/Databases/PostgreSQL/DatabaseMaterializePostgreSQL.h +++ b/src/Databases/PostgreSQL/DatabaseMaterializePostgreSQL.h @@ -56,6 +56,8 @@ public: void shutdown() override; + void stopReplication(); + private: void startSynchronization(); diff --git a/src/Databases/PostgreSQL/fetchPostgreSQLTableStructure.cpp b/src/Databases/PostgreSQL/fetchPostgreSQLTableStructure.cpp index 63124ba12d7..01ade1da180 100644 --- a/src/Databases/PostgreSQL/fetchPostgreSQLTableStructure.cpp +++ b/src/Databases/PostgreSQL/fetchPostgreSQLTableStructure.cpp @@ -184,7 +184,7 @@ PostgreSQLTableStructure fetchPostgreSQLTableStructure( table.primary_key_columns = readNamesAndTypesList(tx, postgres_table_name, query, use_nulls, true); } - if (with_replica_identity_index) + if (with_replica_identity_index && !table.primary_key_columns) { query = fmt::format( "SELECT " "a.attname AS column_name, " /// column name "format_type(a.atttypid, a.atttypmod) as type " /// column type "FROM " "pg_class t, " "pg_class i, " "pg_index ix, " "pg_attribute a " "WHERE " "t.oid = ix.indrelid " "and i.oid = ix.indexrelid " "and a.attrelid = t.oid " "and a.attnum = ANY(ix.indkey) " "and t.relkind = 'r' " /// simple tables - "and t.relname = '{}' " + "and t.relname = '{}' " /// Connection is already established to the needed database, only the table name is needed. "and ix.indisreplident = 't' " /// index is the replica identity index "ORDER BY a.attname", /// column names postgres_table_name); diff --git a/src/Interpreters/InterpreterDropQuery.cpp b/src/Interpreters/InterpreterDropQuery.cpp index 2c698b5e3c1..6417b32e389 100644 --- a/src/Interpreters/InterpreterDropQuery.cpp +++ b/src/Interpreters/InterpreterDropQuery.cpp @@ -23,7 +23,7 @@ #endif #if USE_LIBPQXX -# include +# include #endif namespace DB @@ -186,11 +186,6 @@ BlockIO InterpreterDropQuery::executeToTableImpl(ASTDropQuery & query, DatabaseP table->shutdown(); -#if USE_LIBPQXX - if (table->getName() == "MaterializePostgreSQL") - table->as()->shutdownFinal(); -#endif - TableExclusiveLockHolder table_lock; if (database->getUUID() == UUIDHelpers::Nil) table_lock = table->lockExclusively(context.getCurrentQueryId(), context.getSettingsRef().lock_acquire_timeout); @@ -353,6 +348,10 @@ BlockIO InterpreterDropQuery::executeToDatabaseImpl(const ASTDropQuery & query, #endif if (auto * replicated = typeid_cast(database.get())) replicated->stopReplication(); +#if USE_LIBPQXX + if (auto * materialize_postgresql = typeid_cast(database.get())) + materialize_postgresql->stopReplication(); +#endif if (database->shouldBeEmptyOnDetach()) { @@ -434,4 +433,33 @@ void InterpreterDropQuery::extendQueryLogElemImpl(QueryLogElement & elem, const elem.query_kind = "Drop"; } +void InterpreterDropQuery::executeDropQuery(ASTDropQuery::Kind kind, const Context & global_context, const Context & current_context, const StorageID & target_table_id, bool no_delay) +{ + if (DatabaseCatalog::instance().tryGetTable(target_table_id, current_context)) + { + /// We create and execute `drop` query for internal table. 
+ auto drop_query = std::make_shared(); + drop_query->database = target_table_id.database_name; + drop_query->table = target_table_id.table_name; + drop_query->kind = kind; + drop_query->no_delay = no_delay; + drop_query->if_exists = true; + ASTPtr ast_drop_query = drop_query; + /// FIXME We have to use global context to execute DROP query for inner table + /// to avoid "Not enough privileges" error if current user has only DROP VIEW ON mat_view_name privilege + /// and not allowed to drop inner table explicitly. Allowing to drop inner table without explicit grant + /// looks like expected behaviour and we have tests for it. + auto drop_context = Context(global_context); + drop_context.getClientInfo().query_kind = ClientInfo::QueryKind::SECONDARY_QUERY; + if (auto txn = current_context.getZooKeeperMetadataTransaction()) + { + /// For Replicated database + drop_context.setQueryContext(const_cast(current_context)); + drop_context.initZooKeeperMetadataTransaction(txn, true); + } + InterpreterDropQuery drop_interpreter(ast_drop_query, drop_context); + drop_interpreter.execute(); + } +} + } diff --git a/src/Interpreters/InterpreterDropQuery.h b/src/Interpreters/InterpreterDropQuery.h index 4a67857767f..3c05c2788f1 100644 --- a/src/Interpreters/InterpreterDropQuery.h +++ b/src/Interpreters/InterpreterDropQuery.h @@ -26,6 +26,8 @@ public: void extendQueryLogElemImpl(QueryLogElement & elem, const ASTPtr &, const Context &) const override; + static void executeDropQuery(ASTDropQuery::Kind kind, const Context & global_context, const Context & current_context, const StorageID & target_table_id, bool no_delay); + private: AccessRightsElements getRequiredAccessForDDLOnCluster() const; ASTPtr query_ptr; diff --git a/src/Storages/IStorage.h b/src/Storages/IStorage.h index b859e654967..2ecf1e33560 100644 --- a/src/Storages/IStorage.h +++ b/src/Storages/IStorage.h @@ -345,6 +345,8 @@ public: */ virtual void drop() {} + virtual void dropInnerTableIfAny(bool /* no_delay */, const Context & /* context */) {} + /** Clear the table data and leave it empty. * Must be called under exclusive lock (lockExclusively). */ diff --git a/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp b/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp index 556ede4ce7b..4b14035fad7 100644 --- a/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp +++ b/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp @@ -4,10 +4,10 @@ #include #include #include - +#include +#include #include #include -#include #include @@ -124,7 +124,7 @@ void PostgreSQLReplicationHandler::startSynchronization() { nested_storages[table_name] = storage->getNested(); storage->setStorageMetadata(); - storage->setNestedLoaded(); + storage->setNestedStatus(true); } catch (...) { @@ -183,7 +183,7 @@ NameSet PostgreSQLReplicationHandler::loadFromSnapshot(std::string & snapshot_na assertBlocksHaveEqualStructure(input.getHeader(), block_io.out->getHeader(), "postgresql replica load from snapshot"); copyData(input, *block_io.out); - storage_data.second->setNestedLoaded(); + storage_data.second->setNestedStatus(true); nested_storages[table_name] = nested_storage; /// This is needed if this method is called from reloadFromSnapshot() method below. 
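The snapshot handling in loadFromSnapshot()/reloadFromSnapshot() around these hunks rests on PostgreSQL exported snapshots: the replication slot exports a snapshot name, and a separate transaction imports it so that the initial dump lines up exactly with the LSN the slot starts streaming from. A rough standalone sketch of that pattern, with hypothetical names and assuming libpqxx (this is not the handler's actual code):

```cpp
#include <iostream>
#include <string>
#include <pqxx/pqxx>

/// Sketch with made-up names: read a table at the exact point in time of an
/// exported snapshot, e.g. the snapshot name returned by
/// CREATE_REPLICATION_SLOT ... EXPORT_SNAPSHOT on a replication connection.
void dumpTableAtSnapshot(pqxx::connection & connection,
                         const std::string & snapshot_name,
                         const std::string & table_name)
{
    pqxx::work tx(connection);
    /// SET TRANSACTION SNAPSHOT only works inside a REPEATABLE READ
    /// transaction and must run before any query in it.
    tx.exec("SET TRANSACTION ISOLATION LEVEL REPEATABLE READ");
    tx.exec("SET TRANSACTION SNAPSHOT " + tx.quote(snapshot_name));
    pqxx::result rows = tx.exec("SELECT * FROM " + tx.quote_name(table_name));
    std::cout << table_name << ": " << rows.size() << " rows at the snapshot\n";
    tx.commit();
}
```

In the handler itself the rows are of course not printed; as the surrounding hunks show, they are streamed through an INSERT into the nested ReplacingMergeTree table.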
@@ -401,7 +401,9 @@ std::unordered_map PostgreSQLReplicationHandler::reloadFromSnapsh const auto & table_name = relation.second; auto * storage = storages[table_name]; sync_storages[table_name] = storage; - storage->dropNested(); + auto nested_storage = storage->getNested(); + storage->setNestedStatus(false); + InterpreterDropQuery::executeDropQuery(ASTDropQuery::Kind::Drop, context, context, nested_storage->getStorageID(), true); } auto replication_connection = postgres::createReplicationConnection(connection_info); diff --git a/src/Storages/PostgreSQL/StorageMaterializePostgreSQL.cpp b/src/Storages/PostgreSQL/StorageMaterializePostgreSQL.cpp index e4b87b8410b..b26ecf805f6 100644 --- a/src/Storages/PostgreSQL/StorageMaterializePostgreSQL.cpp +++ b/src/Storages/PostgreSQL/StorageMaterializePostgreSQL.cpp @@ -23,6 +23,7 @@ #include #include #include +#include namespace DB @@ -52,6 +53,8 @@ StorageMaterializePostgreSQL::StorageMaterializePostgreSQL( , replication_settings(std::move(replication_settings_)) , is_postgresql_replica_database( DatabaseCatalog::instance().getDatabase(getStorageID().database_name)->getEngineName() == "MaterializePostgreSQL") + , nested_table_id(StorageID(table_id_.database_name, getNestedTableName())) + , nested_context(makeNestedTableContext()) { setInMemoryMetadata(storage_metadata); @@ -70,16 +73,28 @@ StorageMaterializePostgreSQL::StorageMaterializePostgreSQL( StorageMaterializePostgreSQL::StorageMaterializePostgreSQL( const StorageID & table_id_, - StoragePtr nested_storage_, const Context & context_) : IStorage(table_id_) , global_context(context_) - , nested_storage(nested_storage_) , is_postgresql_replica_database(true) + , nested_table_id(table_id_) + , nested_context(makeNestedTableContext()) { } +StoragePtr StorageMaterializePostgreSQL::getNested() const +{ + return DatabaseCatalog::instance().getTable(nested_table_id, nested_context); +} + + +StoragePtr StorageMaterializePostgreSQL::tryGetNested() const +{ + return DatabaseCatalog::instance().tryGetTable(nested_table_id, nested_context); +} + + std::string StorageMaterializePostgreSQL::getNestedTableName() const { auto table_name = getStorageID().table_name; @@ -91,6 +106,17 @@ std::string StorageMaterializePostgreSQL::getNestedTableName() const } +void StorageMaterializePostgreSQL::setStorageMetadata() +{ + /// If it is a MaterializePostgreSQL database engine, then storage with engine MaterializePostgreSQL + /// gets its metadata when it is fetched from postgres, but if inner tables exist (i.e. it is a server restart), + /// then the storage metadata needs to be set from the inner table metadata. + auto nested_table = getNested(); + auto storage_metadata = nested_table->getInMemoryMetadataPtr(); + setInMemoryMetadata(*storage_metadata); +} + + std::shared_ptr StorageMaterializePostgreSQL::getMaterializedColumnsDeclaration( const String name, const String type, UInt64 default_value) { @@ -150,13 +176,6 @@ ASTPtr StorageMaterializePostgreSQL::getColumnDeclaration(const DataTypePtr & da } -void StorageMaterializePostgreSQL::setStorageMetadata() -{ - auto storage_metadata = getNested()->getInMemoryMetadataPtr(); - setInMemoryMetadata(*storage_metadata); -} - - /// For single storage MaterializePostgreSQL get columns and primary key columns from storage definition. /// For database engine MaterializePostgreSQL get columns and primary key columns by fetching from PostgreSQL, also using the same /// transaction with snapshot, which is used for initial tables dump. 
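For orientation before the next hunk: getCreateNestedTableQuery() assembles the nested table's CREATE query as an AST rather than SQL text, but the resulting DDL has roughly the shape sketched below. This is illustrative only; the real column list and ORDER BY key come from the fetched PostgreSQL structure (primary key columns, or replica identity index columns as fallback), and the database/table names here are made up:

```cpp
#include <iostream>
#include <string>
#include <fmt/format.h>

int main()
{
    /// Illustrative sketch of the nested table DDL: a ReplacingMergeTree
    /// versioned by the materialized `_version` column, with a `_sign`
    /// marker column alongside it; both are the materialized columns the
    /// hunk below declares with default value 1.
    std::string ddl = fmt::format(
        "CREATE TABLE {}.{} (key Int32, value Int32, "
        "`_sign` Int8 MATERIALIZED 1, `_version` UInt64 MATERIALIZED 1) "
        "ENGINE = ReplacingMergeTree(_version) ORDER BY tuple(key)",
        "test_database", "postgresql_replica_nested");
    std::cout << ddl << '\n';
}
```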
@@ -231,8 +250,8 @@ ASTPtr StorageMaterializePostgreSQL::getCreateNestedTableQuery(PostgreSQLTableSt columns_declare_list->set(columns_declare_list->columns, columns_expression_list); - columns_declare_list->columns->children.emplace_back(getMaterializedColumnsDeclaration("_sign", "Int8", UInt64(1))); - columns_declare_list->columns->children.emplace_back(getMaterializedColumnsDeclaration("_version", "UInt64", UInt64(1))); + columns_declare_list->columns->children.emplace_back(getMaterializedColumnsDeclaration("_sign", "Int8", 1)); + columns_declare_list->columns->children.emplace_back(getMaterializedColumnsDeclaration("_version", "UInt64", 1)); create_table_query->set(create_table_query->columns_list, columns_declare_list); @@ -255,14 +274,6 @@ ASTPtr StorageMaterializePostgreSQL::getCreateNestedTableQuery(PostgreSQLTableSt void StorageMaterializePostgreSQL::createNestedIfNeeded(PostgreSQLTableStructurePtr table_structure) { - if (nested_loaded) - { - nested_storage = tryGetNested(); - - if (nested_storage) - return; - } - auto context = makeNestedTableContext(); const auto ast_create = getCreateNestedTableQuery(std::move(table_structure)); @@ -275,8 +286,6 @@ void StorageMaterializePostgreSQL::createNestedIfNeeded(PostgreSQLTableStructure { tryLogCurrentException(__PRETTY_FUNCTION__); } - - nested_storage = getNested(); } @@ -290,32 +299,6 @@ Context StorageMaterializePostgreSQL::makeNestedTableContext() const } -StoragePtr StorageMaterializePostgreSQL::getNested() -{ - if (nested_storage) - return nested_storage; - - auto context = makeNestedTableContext(); - nested_storage = DatabaseCatalog::instance().getTable( - StorageID(getStorageID().database_name, getNestedTableName()), context); - - return nested_storage; -} - - -StoragePtr StorageMaterializePostgreSQL::tryGetNested() -{ - if (nested_storage) - return nested_storage; - - auto context = makeNestedTableContext(); - nested_storage = DatabaseCatalog::instance().tryGetTable( - StorageID(getStorageID().database_name, getNestedTableName()), context); - - return nested_storage; -} - - void StorageMaterializePostgreSQL::startup() { if (!is_postgresql_replica_database) @@ -333,47 +316,23 @@ void StorageMaterializePostgreSQL::shutdown() } -void StorageMaterializePostgreSQL::shutdownFinal() +void StorageMaterializePostgreSQL::dropInnerTableIfAny(bool no_delay, const Context & context) { - if (is_postgresql_replica_database) - return; - if (replication_handler) replication_handler->shutdownFinal(); - if (nested_storage) - dropNested(); -} - - -void StorageMaterializePostgreSQL::dropNested() -{ - std::lock_guard lock(nested_mutex); - nested_loaded = false; - - auto table_id = nested_storage->getStorageID(); - auto ast_drop = std::make_shared(); - - ast_drop->kind = ASTDropQuery::Drop; - ast_drop->table = table_id.table_name; - ast_drop->database = table_id.database_name; - ast_drop->if_exists = true; - - auto context = makeNestedTableContext(); - auto interpreter = InterpreterDropQuery(ast_drop, context); - interpreter.execute(); - - nested_storage = nullptr; - LOG_TRACE(&Poco::Logger::get("StorageMaterializePostgreSQL"), "Dropped (possibly temporarily) nested table {}", getNestedTableName()); + auto nested_table = getNested(); + if (nested_table && !is_postgresql_replica_database) + InterpreterDropQuery::executeDropQuery(ASTDropQuery::Kind::Drop, global_context, context, nested_table_id, no_delay); } NamesAndTypesList StorageMaterializePostgreSQL::getVirtuals() const { - if (nested_storage) - return nested_storage->getVirtuals(); - - return 
{}; + return NamesAndTypesList{ + {"_sign", std::make_shared()}, + {"_version", std::make_shared()} + }; } @@ -386,26 +345,20 @@ Pipe StorageMaterializePostgreSQL::read( size_t max_block_size, unsigned num_streams) { - std::unique_lock lock(nested_mutex, std::defer_lock); + if (!nested_loaded) + return Pipe(); - if (nested_loaded && lock.try_lock()) - { - if (!nested_storage) - getNested(); + auto nested_table = getNested(); - return readFinalFromNestedStorage( - nested_storage, - column_names, - metadata_snapshot, - query_info, - context, - processed_stage, - max_block_size, - num_streams); - } - - LOG_WARNING(&Poco::Logger::get("StorageMaterializePostgreSQL"), "Nested table {} is unavailable or is not loaded yet", getNestedTableName()); - return Pipe(); + return readFinalFromNestedStorage( + nested_table, + column_names, + metadata_snapshot, + query_info, + context, + processed_stage, + max_block_size, + num_streams); } diff --git a/src/Storages/PostgreSQL/StorageMaterializePostgreSQL.h b/src/Storages/PostgreSQL/StorageMaterializePostgreSQL.h index 5bbea64133a..f311414c041 100644 --- a/src/Storages/PostgreSQL/StorageMaterializePostgreSQL.h +++ b/src/Storages/PostgreSQL/StorageMaterializePostgreSQL.h @@ -31,14 +31,16 @@ class StorageMaterializePostgreSQL final : public ext::shared_ptr_helper replication_handler; std::atomic nested_loaded = false; - StoragePtr nested_storage; - std::mutex nested_mutex; - bool is_postgresql_replica_database = false; + StorageID nested_table_id; + const Context nested_context; }; } diff --git a/src/Storages/StorageMaterializedView.cpp b/src/Storages/StorageMaterializedView.cpp index c89187a46e2..75e34f97532 100644 --- a/src/Storages/StorageMaterializedView.cpp +++ b/src/Storages/StorageMaterializedView.cpp @@ -3,7 +3,6 @@ #include #include #include -#include #include #include @@ -198,36 +197,6 @@ BlockOutputStreamPtr StorageMaterializedView::write(const ASTPtr & query, const } -static void executeDropQuery(ASTDropQuery::Kind kind, const Context & global_context, const Context & current_context, const StorageID & target_table_id, bool no_delay) -{ - if (DatabaseCatalog::instance().tryGetTable(target_table_id, current_context)) - { - /// We create and execute `drop` query for internal table. - auto drop_query = std::make_shared(); - drop_query->database = target_table_id.database_name; - drop_query->table = target_table_id.table_name; - drop_query->kind = kind; - drop_query->no_delay = no_delay; - drop_query->if_exists = true; - ASTPtr ast_drop_query = drop_query; - /// FIXME We have to use global context to execute DROP query for inner table - /// to avoid "Not enough privileges" error if current user has only DROP VIEW ON mat_view_name privilege - /// and not allowed to drop inner table explicitly. Allowing to drop inner table without explicit grant - /// looks like expected behaviour and we have tests for it. 
- auto drop_context = Context(global_context); - drop_context.getClientInfo().query_kind = ClientInfo::QueryKind::SECONDARY_QUERY; - if (auto txn = current_context.getZooKeeperMetadataTransaction()) - { - /// For Replicated database - drop_context.setQueryContext(const_cast(current_context)); - drop_context.initZooKeeperMetadataTransaction(txn, true); - } - InterpreterDropQuery drop_interpreter(ast_drop_query, drop_context); - drop_interpreter.execute(); - } -} - - void StorageMaterializedView::drop() { auto table_id = getStorageID(); @@ -235,19 +204,19 @@ void StorageMaterializedView::drop() if (!select_query.select_table_id.empty()) DatabaseCatalog::instance().removeDependency(select_query.select_table_id, table_id); - dropInnerTable(true, global_context); + dropInnerTableIfAny(true, global_context); } -void StorageMaterializedView::dropInnerTable(bool no_delay, const Context & context) +void StorageMaterializedView::dropInnerTableIfAny(bool no_delay, const Context & context) { if (has_inner_table && tryGetTargetTable()) - executeDropQuery(ASTDropQuery::Kind::Drop, global_context, context, target_table_id, no_delay); + InterpreterDropQuery::executeDropQuery(ASTDropQuery::Kind::Drop, global_context, context, target_table_id, no_delay); } void StorageMaterializedView::truncate(const ASTPtr &, const StorageMetadataPtr &, const Context & context, TableExclusiveLockHolder &) { if (has_inner_table) - executeDropQuery(ASTDropQuery::Kind::Truncate, global_context, context, target_table_id, true); + InterpreterDropQuery::executeDropQuery(ASTDropQuery::Kind::Truncate, global_context, context, target_table_id, true); } void StorageMaterializedView::checkStatementCanBeForwarded() const diff --git a/src/Storages/StorageMaterializedView.h b/src/Storages/StorageMaterializedView.h index a5dc089d68e..cd89af154a9 100644 --- a/src/Storages/StorageMaterializedView.h +++ b/src/Storages/StorageMaterializedView.h @@ -37,7 +37,7 @@ public: BlockOutputStreamPtr write(const ASTPtr & query, const StorageMetadataPtr & /*metadata_snapshot*/, const Context & context) override; void drop() override; - void dropInnerTable(bool no_delay, const Context & context); + void dropInnerTableIfAny(bool no_delay, const Context & context) override; void truncate(const ASTPtr &, const StorageMetadataPtr &, const Context &, TableExclusiveLockHolder &) override; From 820a32d9396643f0e277bd2cfc37c10a33ec0d8f Mon Sep 17 00:00:00 2001 From: kssenii Date: Sun, 11 Apr 2021 06:00:47 +0000 Subject: [PATCH 080/931] Allow rename in case of single storage --- .../StorageMaterializePostgreSQL.cpp | 35 +++++++++++++++++++ 1 file changed, 35 insertions(+) diff --git a/src/Storages/PostgreSQL/StorageMaterializePostgreSQL.cpp b/src/Storages/PostgreSQL/StorageMaterializePostgreSQL.cpp index b26ecf805f6..c5d45a335a2 100644 --- a/src/Storages/PostgreSQL/StorageMaterializePostgreSQL.cpp +++ b/src/Storages/PostgreSQL/StorageMaterializePostgreSQL.cpp @@ -362,6 +362,41 @@ Pipe StorageMaterializePostgreSQL::read( } +void StorageMaterializePostgreSQL::renameInMemory(const StorageID & new_table_id) +{ + auto old_table_id = getStorageID(); + auto metadata_snapshot = getInMemoryMetadataPtr(); + bool from_atomic_to_atomic_database = old_table_id.hasUUID() && new_table_id.hasUUID(); + + if (has_inner_table && tryGetTargetTable() && !from_atomic_to_atomic_database) + { + auto new_target_table_name = generateInnerTableName(new_table_id); + auto rename = std::make_shared(); + + ASTRenameQuery::Table from; + from.database = target_table_id.database_name; + 
from.table = target_table_id.table_name; + + ASTRenameQuery::Table to; + to.database = target_table_id.database_name; + to.table = new_target_table_name; + + ASTRenameQuery::Element elem; + elem.from = from; + elem.to = to; + rename->elements.emplace_back(elem); + + InterpreterRenameQuery(rename, global_context).execute(); + target_table_id.table_name = new_target_table_name; + } + + IStorage::renameInMemory(new_table_id); + const auto & select_query = metadata_snapshot->getSelectQuery(); + // TODO Actually we don't need to update dependency if MV has UUID, but then db and table name will be outdated + DatabaseCatalog::instance().updateDependency(select_query.select_table_id, old_table_id, select_query.select_table_id, getStorageID()); +} + + void registerStorageMaterializePostgreSQL(StorageFactory & factory) { auto creator_fn = [](const StorageFactory::Arguments & args) From beae1c5fa472bb663adcd5bdc182ae268cd088cb Mon Sep 17 00:00:00 2001 From: kssenii Date: Sun, 11 Apr 2021 07:44:40 +0000 Subject: [PATCH 081/931] Use ContextPtr --- src/Core/ya.make | 4 - .../DatabaseMaterializePostgreSQL.cpp | 36 +++--- .../DatabaseMaterializePostgreSQL.h | 13 +-- .../PostgreSQL/DatabasePostgreSQL.cpp | 2 +- src/Interpreters/InterpreterCreateQuery.cpp | 4 +- src/Interpreters/InterpreterDropQuery.cpp | 12 +- src/Interpreters/InterpreterDropQuery.h | 2 +- src/Storages/IStorage.h | 2 +- .../MaterializePostgreSQLConsumer.cpp | 8 +- .../MaterializePostgreSQLConsumer.h | 4 +- .../PostgreSQLReplicationHandler.cpp | 12 +- .../PostgreSQL/PostgreSQLReplicationHandler.h | 4 +- .../StorageMaterializePostgreSQL.cpp | 103 +++++++++--------- .../PostgreSQL/StorageMaterializePostgreSQL.h | 18 +-- .../ReadFinalForExternalReplicaStorage.cpp | 4 +- .../ReadFinalForExternalReplicaStorage.h | 2 +- src/Storages/StorageMaterializedView.cpp | 4 +- src/Storages/StorageMaterializedView.h | 2 +- 18 files changed, 114 insertions(+), 122 deletions(-) diff --git a/src/Core/ya.make b/src/Core/ya.make index 890ce20e7b3..004653d060e 100644 --- a/src/Core/ya.make +++ b/src/Core/ya.make @@ -31,10 +31,6 @@ SRCS( MySQL/PacketsProtocolText.cpp MySQL/PacketsReplication.cpp NamesAndTypes.cpp - PostgreSQL/PostgreSQLConnection.cpp - PostgreSQL/PostgreSQLConnectionPool.cpp - PostgreSQL/PostgreSQLPoolWithFailover.cpp - PostgreSQL/insertPostgreSQLValue.cpp PostgreSQLProtocol.cpp QueryProcessingStage.cpp Settings.cpp diff --git a/src/Databases/PostgreSQL/DatabaseMaterializePostgreSQL.cpp b/src/Databases/PostgreSQL/DatabaseMaterializePostgreSQL.cpp index ad60b6242ff..e5d61709387 100644 --- a/src/Databases/PostgreSQL/DatabaseMaterializePostgreSQL.cpp +++ b/src/Databases/PostgreSQL/DatabaseMaterializePostgreSQL.cpp @@ -35,7 +35,7 @@ namespace ErrorCodes static const auto METADATA_SUFFIX = ".postgresql_replica_metadata"; DatabaseMaterializePostgreSQL::DatabaseMaterializePostgreSQL( - const Context & context_, + ContextPtr context_, const String & metadata_path_, UUID uuid_, const ASTStorage * database_engine_define_, @@ -58,7 +58,7 @@ void DatabaseMaterializePostgreSQL::startSynchronization() remote_database_name, connection->getConnectionInfo(), metadata_path + METADATA_SUFFIX, - global_context, + getContext(), settings->postgresql_replica_max_block_size.value, settings->postgresql_replica_allow_minimal_ddl, true, settings->postgresql_replica_tables_list.value); @@ -67,12 +67,12 @@ void DatabaseMaterializePostgreSQL::startSynchronization() for (const auto & table_name : tables_to_replicate) { - auto storage = tryGetTable(table_name, global_context); + 
auto storage = tryGetTable(table_name, getContext()); if (!storage) - storage = StorageMaterializePostgreSQL::create(StorageID(database_name, table_name), global_context); + storage = StorageMaterializePostgreSQL::create(StorageID(database_name, table_name), getContext()); - replication_handler->addStorage(table_name, storage->template as()); + replication_handler->addStorage(table_name, storage->as()); materialized_tables[table_name] = storage; } @@ -88,9 +88,9 @@ void DatabaseMaterializePostgreSQL::shutdown() } -void DatabaseMaterializePostgreSQL::loadStoredObjects(Context & context, bool has_force_restore_data_flag, bool force_attach) +void DatabaseMaterializePostgreSQL::loadStoredObjects(ContextPtr local_context, bool has_force_restore_data_flag, bool force_attach) { - DatabaseAtomic::loadStoredObjects(context, has_force_restore_data_flag, force_attach); + DatabaseAtomic::loadStoredObjects(local_context, has_force_restore_data_flag, force_attach); try { @@ -107,16 +107,16 @@ void DatabaseMaterializePostgreSQL::loadStoredObjects(Context & context, bool ha } -StoragePtr DatabaseMaterializePostgreSQL::tryGetTable(const String & name, const Context & context) const +StoragePtr DatabaseMaterializePostgreSQL::tryGetTable(const String & name, ContextPtr local_context) const { /// When a nested ReplacingMergeTree table is managed from PostgreSQLReplicationHandler, its context is modified /// to show the type of managed table. - if (context.hasQueryContext()) + if (local_context->hasQueryContext()) { - auto storage_set = context.getQueryContext().getQueryFactoriesInfo().storages; + auto storage_set = local_context->getQueryContext()->getQueryFactoriesInfo().storages; if (storage_set.find("ReplacingMergeTree") != storage_set.end()) { - return DatabaseAtomic::tryGetTable(name, context); + return DatabaseAtomic::tryGetTable(name, local_context); } } @@ -132,14 +132,14 @@ StoragePtr DatabaseMaterializePostgreSQL::tryGetTable(const String & name, const } -void DatabaseMaterializePostgreSQL::createTable(const Context & context, const String & name, const StoragePtr & table, const ASTPtr & query) +void DatabaseMaterializePostgreSQL::createTable(ContextPtr local_context, const String & name, const StoragePtr & table, const ASTPtr & query) { - if (context.hasQueryContext()) + if (local_context->hasQueryContext()) { - auto storage_set = context.getQueryContext().getQueryFactoriesInfo().storages; + auto storage_set = local_context->getQueryContext()->getQueryFactoriesInfo().storages; if (storage_set.find("ReplacingMergeTree") != storage_set.end()) { - DatabaseAtomic::createTable(context, name, table, query); + DatabaseAtomic::createTable(local_context, name, table, query); return; } } @@ -156,7 +156,7 @@ void DatabaseMaterializePostgreSQL::stopReplication() } -void DatabaseMaterializePostgreSQL::drop(const Context & context) +void DatabaseMaterializePostgreSQL::drop(ContextPtr local_context) { if (replication_handler) replication_handler->shutdownFinal(); @@ -167,12 +167,12 @@ void DatabaseMaterializePostgreSQL::drop(const Context & context) if (metadata.exists()) metadata.remove(false); - DatabaseAtomic::drop(context); + DatabaseAtomic::drop(local_context); } DatabaseTablesIteratorPtr DatabaseMaterializePostgreSQL::getTablesIterator( - const Context & /* context */, const DatabaseOnDisk::FilterByNameFunction & /* filter_by_table_name */) + ContextPtr /* context */, const DatabaseOnDisk::FilterByNameFunction & /* filter_by_table_name */) { Tables nested_tables; for (const auto & [table_name, storage] : 
materialized_tables) diff --git a/src/Databases/PostgreSQL/DatabaseMaterializePostgreSQL.h b/src/Databases/PostgreSQL/DatabaseMaterializePostgreSQL.h index b87d281d7a5..17288be8fb2 100644 --- a/src/Databases/PostgreSQL/DatabaseMaterializePostgreSQL.h +++ b/src/Databases/PostgreSQL/DatabaseMaterializePostgreSQL.h @@ -20,7 +20,6 @@ namespace DB { -class Context; class PostgreSQLConnection; using PostgreSQLConnectionPtr = std::shared_ptr; @@ -30,7 +29,7 @@ class DatabaseMaterializePostgreSQL : public DatabaseAtomic public: DatabaseMaterializePostgreSQL( - const Context & context_, + ContextPtr context_, const String & metadata_path_, UUID uuid_, const ASTStorage * database_engine_define_, @@ -43,16 +42,16 @@ public: String getMetadataPath() const override { return metadata_path; } - void loadStoredObjects(Context &, bool, bool force_attach) override; + void loadStoredObjects(ContextPtr, bool, bool force_attach) override; DatabaseTablesIteratorPtr getTablesIterator( - const Context & context, const DatabaseOnDisk::FilterByNameFunction & filter_by_table_name) override; + ContextPtr context, const DatabaseOnDisk::FilterByNameFunction & filter_by_table_name) override; - StoragePtr tryGetTable(const String & name, const Context & context) const override; + StoragePtr tryGetTable(const String & name, ContextPtr context) const override; - void createTable(const Context & context, const String & name, const StoragePtr & table, const ASTPtr & query) override; + void createTable(ContextPtr context, const String & name, const StoragePtr & table, const ASTPtr & query) override; - void drop(const Context & context) override; + void drop(ContextPtr local_context) override; void shutdown() override; diff --git a/src/Databases/PostgreSQL/DatabasePostgreSQL.cpp b/src/Databases/PostgreSQL/DatabasePostgreSQL.cpp index 6bee70fcbbb..1ddb8d53e79 100644 --- a/src/Databases/PostgreSQL/DatabasePostgreSQL.cpp +++ b/src/Databases/PostgreSQL/DatabasePostgreSQL.cpp @@ -149,7 +149,7 @@ StoragePtr DatabasePostgreSQL::fetchTable(const String & table_name, ContextPtr if (!table_checked && !checkPostgresTable(table_name)) return StoragePtr{}; - auto use_nulls = context.getSettingsRef().external_databases_use_nulls; + auto use_nulls = local_context->getSettingsRef().external_databases_use_nulls; auto connection = pool->get(); auto columns = fetchPostgreSQLTableStructure(connection->conn(), doubleQuoteString(table_name), use_nulls).columns; diff --git a/src/Interpreters/InterpreterCreateQuery.cpp b/src/Interpreters/InterpreterCreateQuery.cpp index d8c6cf7d1e4..1aeafcab04e 100644 --- a/src/Interpreters/InterpreterCreateQuery.cpp +++ b/src/Interpreters/InterpreterCreateQuery.cpp @@ -216,13 +216,13 @@ BlockIO InterpreterCreateQuery::createDatabase(ASTCreateQuery & create) "Enable allow_experimental_database_replicated to use it.", ErrorCodes::UNKNOWN_DATABASE_ENGINE); } - if (create.storage->engine->name == "MaterializePostgreSQL" && !context.getSettingsRef().allow_experimental_database_materialize_postgresql && !internal) + if (create.storage->engine->name == "MaterializePostgreSQL" && !getContext()->getSettingsRef().allow_experimental_database_materialize_postgresql && !internal) { throw Exception("MaterializePostgreSQL is an experimental database engine. 
" "Enable allow_experimental_database_postgresql_replica to use it.", ErrorCodes::UNKNOWN_DATABASE_ENGINE); } - DatabasePtr database = DatabaseFactory::get(create, metadata_path / "", getContext); + DatabasePtr database = DatabaseFactory::get(create, metadata_path / "", getContext()); if (create.uuid != UUIDHelpers::Nil) create.database = TABLE_WITH_UUID_NAME_PLACEHOLDER; diff --git a/src/Interpreters/InterpreterDropQuery.cpp b/src/Interpreters/InterpreterDropQuery.cpp index 41dc326b838..15e381d9191 100644 --- a/src/Interpreters/InterpreterDropQuery.cpp +++ b/src/Interpreters/InterpreterDropQuery.cpp @@ -436,7 +436,7 @@ void InterpreterDropQuery::extendQueryLogElemImpl(QueryLogElement & elem, const elem.query_kind = "Drop"; } -void InterpreterDropQuery::executeDropQuery(ASTDropQuery::Kind kind, const Context & global_context, const Context & current_context, const StorageID & target_table_id, bool no_delay) +void InterpreterDropQuery::executeDropQuery(ASTDropQuery::Kind kind, ContextPtr global_context, ContextPtr current_context, const StorageID & target_table_id, bool no_delay) { if (DatabaseCatalog::instance().tryGetTable(target_table_id, current_context)) { @@ -452,13 +452,13 @@ void InterpreterDropQuery::executeDropQuery(ASTDropQuery::Kind kind, const Conte /// to avoid "Not enough privileges" error if current user has only DROP VIEW ON mat_view_name privilege /// and not allowed to drop inner table explicitly. Allowing to drop inner table without explicit grant /// looks like expected behaviour and we have tests for it. - auto drop_context = Context(global_context); - drop_context.getClientInfo().query_kind = ClientInfo::QueryKind::SECONDARY_QUERY; - if (auto txn = current_context.getZooKeeperMetadataTransaction()) + auto drop_context = Context::createCopy(global_context); + drop_context->getClientInfo().query_kind = ClientInfo::QueryKind::SECONDARY_QUERY; + if (auto txn = current_context->getZooKeeperMetadataTransaction()) { /// For Replicated database - drop_context.setQueryContext(const_cast(current_context)); - drop_context.initZooKeeperMetadataTransaction(txn, true); + drop_context->setQueryContext(current_context); + drop_context->initZooKeeperMetadataTransaction(txn, true); } InterpreterDropQuery drop_interpreter(ast_drop_query, drop_context); drop_interpreter.execute(); diff --git a/src/Interpreters/InterpreterDropQuery.h b/src/Interpreters/InterpreterDropQuery.h index 891c8378ff5..e50688fb7d4 100644 --- a/src/Interpreters/InterpreterDropQuery.h +++ b/src/Interpreters/InterpreterDropQuery.h @@ -26,7 +26,7 @@ public: void extendQueryLogElemImpl(QueryLogElement & elem, const ASTPtr &, ContextPtr) const override; - static void executeDropQuery(ASTDropQuery::Kind kind, const Context & global_context, const Context & current_context, const StorageID & target_table_id, bool no_delay); + static void executeDropQuery(ASTDropQuery::Kind kind, ContextPtr global_context, ContextPtr current_context, const StorageID & target_table_id, bool no_delay); private: AccessRightsElements getRequiredAccessForDDLOnCluster() const; diff --git a/src/Storages/IStorage.h b/src/Storages/IStorage.h index ea38eedf2b7..f5292c0b094 100644 --- a/src/Storages/IStorage.h +++ b/src/Storages/IStorage.h @@ -344,7 +344,7 @@ public: */ virtual void drop() {} - virtual void dropInnerTableIfAny(bool /* no_delay */, const Context & /* context */) {} + virtual void dropInnerTableIfAny(bool /* no_delay */, ContextPtr /* context */) {} /** Clear the table data and leave it empty. 
* Must be called under exclusive lock (lockExclusively). diff --git a/src/Storages/PostgreSQL/MaterializePostgreSQLConsumer.cpp b/src/Storages/PostgreSQL/MaterializePostgreSQLConsumer.cpp index ed317f03e60..0a597d942f7 100644 --- a/src/Storages/PostgreSQL/MaterializePostgreSQLConsumer.cpp +++ b/src/Storages/PostgreSQL/MaterializePostgreSQLConsumer.cpp @@ -23,7 +23,7 @@ namespace ErrorCodes } MaterializePostgreSQLConsumer::MaterializePostgreSQLConsumer( - const Context & context_, + ContextPtr context_, postgres::ConnectionPtr connection_, const std::string & replication_slot_name_, const std::string & publication_name_, @@ -491,9 +491,9 @@ void MaterializePostgreSQLConsumer::syncTables(std::shared_ptrtable_id = storage->getStorageID(); insert->columns = buffer.columnsAST; - auto insert_context(context); - insert_context.makeQueryContext(); - insert_context.addQueryFactoriesInfo(Context::QueryLogFactories::Storage, "ReplacingMergeTree"); + auto insert_context = Context::createCopy(context); + insert_context->makeQueryContext(); + insert_context->addQueryFactoriesInfo(Context::QueryLogFactories::Storage, "ReplacingMergeTree"); InterpreterInsertQuery interpreter(insert, insert_context, true); auto block_io = interpreter.execute(); diff --git a/src/Storages/PostgreSQL/MaterializePostgreSQLConsumer.h b/src/Storages/PostgreSQL/MaterializePostgreSQLConsumer.h index a52ecd73e07..7eeac16337e 100644 --- a/src/Storages/PostgreSQL/MaterializePostgreSQLConsumer.h +++ b/src/Storages/PostgreSQL/MaterializePostgreSQLConsumer.h @@ -27,7 +27,7 @@ public: using Storages = std::unordered_map; MaterializePostgreSQLConsumer( - const Context & context_, + ContextPtr context_, postgres::ConnectionPtr connection_, const std::string & replication_slot_name_, const std::string & publication_name_, @@ -101,7 +101,7 @@ private: } Poco::Logger * log; - const Context & context; + ContextPtr context; const std::string replication_slot_name, publication_name; MaterializePostgreSQLMetadata metadata; diff --git a/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp b/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp index 4b14035fad7..4ea1dad2b14 100644 --- a/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp +++ b/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp @@ -17,13 +17,11 @@ namespace DB static const auto reschedule_ms = 500; -/// TODO: fetch replica identity index - PostgreSQLReplicationHandler::PostgreSQLReplicationHandler( const std::string & database_name_, const postgres::ConnectionInfo & connection_info_, const std::string & metadata_path_, - const Context & context_, + ContextPtr context_, const size_t max_block_size_, bool allow_minimal_ddl_, bool is_postgresql_replica_database_engine_, @@ -42,8 +40,8 @@ PostgreSQLReplicationHandler::PostgreSQLReplicationHandler( replication_slot = fmt::format("{}_ch_replication_slot", database_name); publication_name = fmt::format("{}_ch_publication", database_name); - startup_task = context.getSchedulePool().createTask("PostgreSQLReplicaStartup", [this]{ waitConnectionAndStart(); }); - consumer_task = context.getSchedulePool().createTask("PostgreSQLReplicaStartup", [this]{ consumerFunc(); }); + startup_task = context->getSchedulePool().createTask("PostgreSQLReplicaStartup", [this]{ waitConnectionAndStart(); }); + consumer_task = context->getSchedulePool().createTask("PostgreSQLReplicaStartup", [this]{ consumerFunc(); }); } @@ -169,7 +167,7 @@ NameSet PostgreSQLReplicationHandler::loadFromSnapshot(std::string & snapshot_na query_str = 
fmt::format("SELECT * FROM {}", storage_data.first); const StorageInMemoryMetadata & storage_metadata = nested_storage->getInMemoryMetadata(); - auto insert_context = storage_data.second->makeNestedTableContext(); + auto insert_context = storage_data.second->getNestedTableContext(); auto insert = std::make_shared(); insert->table_id = nested_storage->getStorageID(); @@ -384,7 +382,7 @@ PostgreSQLTableStructurePtr PostgreSQLReplicationHandler::fetchTableStructure( if (!is_postgresql_replica_database_engine) return nullptr; - auto use_nulls = context.getSettingsRef().external_databases_use_nulls; + auto use_nulls = context->getSettingsRef().external_databases_use_nulls; return std::make_unique(fetchPostgreSQLTableStructure(tx, table_name, use_nulls, true, true)); } diff --git a/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.h b/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.h index 5557ae63f96..5a527179406 100644 --- a/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.h +++ b/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.h @@ -30,7 +30,7 @@ public: const std::string & database_name_, const postgres::ConnectionInfo & connection_info_, const std::string & metadata_path_, - const Context & context_, + ContextPtr context_, const size_t max_block_size_, bool allow_minimal_ddl_, bool is_postgresql_replica_database_engine_, @@ -79,7 +79,7 @@ private: PostgreSQLTableStructurePtr fetchTableStructure(std::shared_ptr tx, const std::string & table_name); Poco::Logger * log; - const Context & context; + ContextPtr context; /// Remote database name. const String database_name; diff --git a/src/Storages/PostgreSQL/StorageMaterializePostgreSQL.cpp b/src/Storages/PostgreSQL/StorageMaterializePostgreSQL.cpp index c5d45a335a2..7a40b2003ed 100644 --- a/src/Storages/PostgreSQL/StorageMaterializePostgreSQL.cpp +++ b/src/Storages/PostgreSQL/StorageMaterializePostgreSQL.cpp @@ -45,11 +45,11 @@ StorageMaterializePostgreSQL::StorageMaterializePostgreSQL( const String & remote_table_name_, const postgres::ConnectionInfo & connection_info, const StorageInMemoryMetadata & storage_metadata, - const Context & context_, + ContextPtr context_, std::unique_ptr replication_settings_) : IStorage(table_id_) + , WithContext(context_->getGlobalContext()) , remote_table_name(remote_table_name_) - , global_context(context_.getGlobalContext()) , replication_settings(std::move(replication_settings_)) , is_postgresql_replica_database( DatabaseCatalog::instance().getDatabase(getStorageID().database_name)->getEngineName() == "MaterializePostgreSQL") @@ -65,7 +65,7 @@ StorageMaterializePostgreSQL::StorageMaterializePostgreSQL( remote_database_name, connection_info, metadata_path, - global_context, + getContext(), replication_settings->postgresql_replica_max_block_size.value, replication_settings->postgresql_replica_allow_minimal_ddl.value, false); } @@ -73,9 +73,9 @@ StorageMaterializePostgreSQL::StorageMaterializePostgreSQL( StorageMaterializePostgreSQL::StorageMaterializePostgreSQL( const StorageID & table_id_, - const Context & context_) + ContextPtr context_) : IStorage(table_id_) - , global_context(context_) + , WithContext(context_->getGlobalContext()) , is_postgresql_replica_database(true) , nested_table_id(table_id_) , nested_context(makeNestedTableContext()) @@ -274,12 +274,11 @@ ASTPtr StorageMaterializePostgreSQL::getCreateNestedTableQuery(PostgreSQLTableSt void StorageMaterializePostgreSQL::createNestedIfNeeded(PostgreSQLTableStructurePtr table_structure) { - auto context = makeNestedTableContext(); 
const auto ast_create = getCreateNestedTableQuery(std::move(table_structure)); try { - InterpreterCreateQuery interpreter(ast_create, context); + InterpreterCreateQuery interpreter(ast_create, nested_context); interpreter.execute(); } catch (...) @@ -289,11 +288,11 @@ void StorageMaterializePostgreSQL::createNestedIfNeeded(PostgreSQLTableStructure } -Context StorageMaterializePostgreSQL::makeNestedTableContext() const +std::shared_ptr StorageMaterializePostgreSQL::makeNestedTableContext() const { - auto context(global_context); - context.makeQueryContext(); - context.addQueryFactoriesInfo(Context::QueryLogFactories::Storage, "ReplacingMergeTree"); + auto context = Context::createCopy(getContext()); + context->makeQueryContext(); + context->addQueryFactoriesInfo(Context::QueryLogFactories::Storage, "ReplacingMergeTree"); return context; } @@ -316,14 +315,14 @@ void StorageMaterializePostgreSQL::shutdown() } -void StorageMaterializePostgreSQL::dropInnerTableIfAny(bool no_delay, const Context & context) +void StorageMaterializePostgreSQL::dropInnerTableIfAny(bool no_delay, ContextPtr local_context) { if (replication_handler) replication_handler->shutdownFinal(); auto nested_table = getNested(); if (nested_table && !is_postgresql_replica_database) - InterpreterDropQuery::executeDropQuery(ASTDropQuery::Kind::Drop, global_context, context, nested_table_id, no_delay); + InterpreterDropQuery::executeDropQuery(ASTDropQuery::Kind::Drop, getContext(), local_context, nested_table_id, no_delay); } @@ -340,7 +339,7 @@ Pipe StorageMaterializePostgreSQL::read( const Names & column_names, const StorageMetadataPtr & metadata_snapshot, SelectQueryInfo & query_info, - const Context & context, + ContextPtr context_, QueryProcessingStage::Enum processed_stage, size_t max_block_size, unsigned num_streams) @@ -355,46 +354,46 @@ Pipe StorageMaterializePostgreSQL::read( column_names, metadata_snapshot, query_info, - context, + context_, processed_stage, max_block_size, num_streams); } -void StorageMaterializePostgreSQL::renameInMemory(const StorageID & new_table_id) -{ - auto old_table_id = getStorageID(); - auto metadata_snapshot = getInMemoryMetadataPtr(); - bool from_atomic_to_atomic_database = old_table_id.hasUUID() && new_table_id.hasUUID(); - - if (has_inner_table && tryGetTargetTable() && !from_atomic_to_atomic_database) - { - auto new_target_table_name = generateInnerTableName(new_table_id); - auto rename = std::make_shared(); - - ASTRenameQuery::Table from; - from.database = target_table_id.database_name; - from.table = target_table_id.table_name; - - ASTRenameQuery::Table to; - to.database = target_table_id.database_name; - to.table = new_target_table_name; - - ASTRenameQuery::Element elem; - elem.from = from; - elem.to = to; - rename->elements.emplace_back(elem); - - InterpreterRenameQuery(rename, global_context).execute(); - target_table_id.table_name = new_target_table_name; - } - - IStorage::renameInMemory(new_table_id); - const auto & select_query = metadata_snapshot->getSelectQuery(); - // TODO Actually we don't need to update dependency if MV has UUID, but then db and table name will be outdated - DatabaseCatalog::instance().updateDependency(select_query.select_table_id, old_table_id, select_query.select_table_id, getStorageID()); -} +//void StorageMaterializePostgreSQL::renameInMemory(const StorageID & new_table_id) +//{ +// auto old_table_id = getStorageID(); +// auto metadata_snapshot = getInMemoryMetadataPtr(); +// bool from_atomic_to_atomic_database = old_table_id.hasUUID() && 
new_table_id.hasUUID(); +// +// if (has_inner_table && tryGetTargetTable() && !from_atomic_to_atomic_database) +// { +// auto new_target_table_name = generateInnerTableName(new_table_id); +// auto rename = std::make_shared(); +// +// ASTRenameQuery::Table from; +// from.database = target_table_id.database_name; +// from.table = target_table_id.table_name; +// +// ASTRenameQuery::Table to; +// to.database = target_table_id.database_name; +// to.table = new_target_table_name; +// +// ASTRenameQuery::Element elem; +// elem.from = from; +// elem.to = to; +// rename->elements.emplace_back(elem); +// +// InterpreterRenameQuery(rename, global_context).execute(); +// target_table_id.table_name = new_target_table_name; +// } +// +// IStorage::renameInMemory(new_table_id); +// const auto & select_query = metadata_snapshot->getSelectQuery(); +// // TODO Actually we don't need to update dependency if MV has UUID, but then db and table name will be outdated +// DatabaseCatalog::instance().updateDependency(select_query.select_table_id, old_table_id, select_query.select_table_id, getStorageID()); +//} void registerStorageMaterializePostgreSQL(StorageFactory & factory) @@ -414,7 +413,7 @@ void registerStorageMaterializePostgreSQL(StorageFactory & factory) ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH); for (auto & engine_arg : engine_args) - engine_arg = evaluateConstantExpressionOrIdentifierAsLiteral(engine_arg, args.local_context); + engine_arg = evaluateConstantExpressionOrIdentifierAsLiteral(engine_arg, args.getContext()); StorageInMemoryMetadata metadata; metadata.setColumns(args.columns); @@ -427,9 +426,9 @@ void registerStorageMaterializePostgreSQL(StorageFactory & factory) throw Exception("Storage MaterializePostgreSQL needs order by key or primary key", ErrorCodes::BAD_ARGUMENTS); if (args.storage_def->primary_key) - metadata.primary_key = KeyDescription::getKeyFromAST(args.storage_def->primary_key->ptr(), metadata.columns, args.context); + metadata.primary_key = KeyDescription::getKeyFromAST(args.storage_def->primary_key->ptr(), metadata.columns, args.getContext()); else - metadata.primary_key = KeyDescription::getKeyFromAST(args.storage_def->order_by->ptr(), metadata.columns, args.context); + metadata.primary_key = KeyDescription::getKeyFromAST(args.storage_def->order_by->ptr(), metadata.columns, args.getContext()); auto parsed_host_port = parseAddress(engine_args[0]->as().value.safeGet(), 5432); const String & remote_table = engine_args[2]->as().value.safeGet(); @@ -445,7 +444,7 @@ void registerStorageMaterializePostgreSQL(StorageFactory & factory) return StorageMaterializePostgreSQL::create( args.table_id, remote_database, remote_table, connection_info, - metadata, args.context, + metadata, args.getContext(), std::move(postgresql_replication_settings)); }; diff --git a/src/Storages/PostgreSQL/StorageMaterializePostgreSQL.h b/src/Storages/PostgreSQL/StorageMaterializePostgreSQL.h index f311414c041..3820ee0d66c 100644 --- a/src/Storages/PostgreSQL/StorageMaterializePostgreSQL.h +++ b/src/Storages/PostgreSQL/StorageMaterializePostgreSQL.h @@ -15,7 +15,6 @@ #include #include #include -#include #include #include #include @@ -24,14 +23,14 @@ namespace DB { -class StorageMaterializePostgreSQL final : public ext::shared_ptr_helper, public IStorage +class StorageMaterializePostgreSQL final : public ext::shared_ptr_helper, public IStorage, WithContext { friend struct ext::shared_ptr_helper; public: StorageMaterializePostgreSQL( const StorageID & table_id_, - const Context & context_); + ContextPtr 
context_); String getName() const override { return "MaterializePostgreSQL"; } @@ -39,7 +38,7 @@ public: void shutdown() override; - void dropInnerTableIfAny(bool no_delay, const Context & context) override; + void dropInnerTableIfAny(bool no_delay, ContextPtr local_context) override; NamesAndTypesList getVirtuals() const override; @@ -47,7 +46,7 @@ public: const Names & column_names, const StorageMetadataPtr & metadata_snapshot, SelectQueryInfo & query_info, - const Context & context, + ContextPtr context_, QueryProcessingStage::Enum processed_stage, size_t max_block_size, unsigned num_streams) override; @@ -58,7 +57,9 @@ public: StoragePtr tryGetNested() const; - Context makeNestedTableContext() const; + ContextPtr getNestedTableContext() const { return nested_context; } + + std::shared_ptr makeNestedTableContext() const; void setNestedStatus(bool loaded) { nested_loaded.store(loaded); } @@ -73,7 +74,7 @@ protected: const String & remote_table_name, const postgres::ConnectionInfo & connection_info, const StorageInMemoryMetadata & storage_metadata, - const Context & context_, + ContextPtr context_, std::unique_ptr replication_settings_); private: @@ -87,7 +88,6 @@ private: std::string getNestedTableName() const; std::string remote_table_name; - const Context global_context; std::unique_ptr replication_settings; std::unique_ptr replication_handler; @@ -95,7 +95,7 @@ private: std::atomic nested_loaded = false; bool is_postgresql_replica_database = false; StorageID nested_table_id; - const Context nested_context; + ContextPtr nested_context; }; } diff --git a/src/Storages/ReadFinalForExternalReplicaStorage.cpp b/src/Storages/ReadFinalForExternalReplicaStorage.cpp index 37b95eb5d6a..985b9104085 100644 --- a/src/Storages/ReadFinalForExternalReplicaStorage.cpp +++ b/src/Storages/ReadFinalForExternalReplicaStorage.cpp @@ -21,13 +21,13 @@ Pipe readFinalFromNestedStorage( const Names & column_names, const StorageMetadataPtr & /*metadata_snapshot*/, SelectQueryInfo & query_info, - const Context & context, + ContextPtr context, QueryProcessingStage::Enum processed_stage, size_t max_block_size, unsigned int num_streams) { NameSet column_names_set = NameSet(column_names.begin(), column_names.end()); - auto lock = nested_storage->lockForShare(context.getCurrentQueryId(), context.getSettingsRef().lock_acquire_timeout); + auto lock = nested_storage->lockForShare(context->getCurrentQueryId(), context->getSettingsRef().lock_acquire_timeout); const StorageMetadataPtr & nested_metadata = nested_storage->getInMemoryMetadataPtr(); Block nested_header = nested_metadata->getSampleBlock(); diff --git a/src/Storages/ReadFinalForExternalReplicaStorage.h b/src/Storages/ReadFinalForExternalReplicaStorage.h index 2062392b22f..b54592159ef 100644 --- a/src/Storages/ReadFinalForExternalReplicaStorage.h +++ b/src/Storages/ReadFinalForExternalReplicaStorage.h @@ -18,7 +18,7 @@ Pipe readFinalFromNestedStorage( const Names & column_names, const StorageMetadataPtr & /*metadata_snapshot*/, SelectQueryInfo & query_info, - const Context & context, + ContextPtr context, QueryProcessingStage::Enum processed_stage, size_t max_block_size, unsigned int num_streams); diff --git a/src/Storages/StorageMaterializedView.cpp b/src/Storages/StorageMaterializedView.cpp index 9d1c172a5cd..666c8ca3749 100644 --- a/src/Storages/StorageMaterializedView.cpp +++ b/src/Storages/StorageMaterializedView.cpp @@ -208,10 +208,10 @@ void StorageMaterializedView::drop() dropInnerTableIfAny(true, getContext()); } -void 
StorageMaterializedView::dropInnerTableIfAny(bool no_delay, const Context & context) +void StorageMaterializedView::dropInnerTableIfAny(bool no_delay, ContextPtr local_context) { if (has_inner_table && tryGetTargetTable()) - InterpreterDropQuery::executeDropQuery(ASTDropQuery::Kind::Drop, getContext(), context, target_table_id, no_delay); + InterpreterDropQuery::executeDropQuery(ASTDropQuery::Kind::Drop, getContext(), local_context, target_table_id, no_delay); } void StorageMaterializedView::truncate(const ASTPtr &, const StorageMetadataPtr &, ContextPtr local_context, TableExclusiveLockHolder &) diff --git a/src/Storages/StorageMaterializedView.h b/src/Storages/StorageMaterializedView.h index 83140106b73..33aa6b9274d 100644 --- a/src/Storages/StorageMaterializedView.h +++ b/src/Storages/StorageMaterializedView.h @@ -37,7 +37,7 @@ public: BlockOutputStreamPtr write(const ASTPtr & query, const StorageMetadataPtr & /*metadata_snapshot*/, ContextPtr context) override; void drop() override; - void dropInnerTableIfAny(bool no_delay, ContextPtr context) override; + void dropInnerTableIfAny(bool no_delay, ContextPtr local_context) override; void truncate(const ASTPtr &, const StorageMetadataPtr &, ContextPtr, TableExclusiveLockHolder &) override; From 027f67affb59293e4cabe13a0fdbceb47d3789b5 Mon Sep 17 00:00:00 2001 From: kssenii Date: Sun, 11 Apr 2021 09:54:10 +0000 Subject: [PATCH 082/931] Fix checks --- .../fetchPostgreSQLTableStructure.cpp | 2 +- .../StorageMaterializePostgreSQL.cpp | 46 +++++++++---------- .../PostgreSQL/StorageMaterializePostgreSQL.h | 8 ++-- 3 files changed, 27 insertions(+), 29 deletions(-) diff --git a/src/Databases/PostgreSQL/fetchPostgreSQLTableStructure.cpp b/src/Databases/PostgreSQL/fetchPostgreSQLTableStructure.cpp index 01ade1da180..7aaa7cc6f2a 100644 --- a/src/Databases/PostgreSQL/fetchPostgreSQLTableStructure.cpp +++ b/src/Databases/PostgreSQL/fetchPostgreSQLTableStructure.cpp @@ -201,7 +201,7 @@ PostgreSQLTableStructure fetchPostgreSQLTableStructure( "and a.attrelid = t.oid " "and a.attnum = ANY(ix.indkey) " "and t.relkind = 'r' " /// simple tables - "and t.relname = '{}' " /// Connection is alread done to a needed database, only table name is needed. + "and t.relname = '{}' " /// Connection is already done to a needed database, only table name is needed. 
"and ix.indisreplident = 't' " /// index is is replica identity index "ORDER BY a.attname", /// column names postgres_table_name); diff --git a/src/Storages/PostgreSQL/StorageMaterializePostgreSQL.cpp b/src/Storages/PostgreSQL/StorageMaterializePostgreSQL.cpp index 7a40b2003ed..c97e3529deb 100644 --- a/src/Storages/PostgreSQL/StorageMaterializePostgreSQL.cpp +++ b/src/Storages/PostgreSQL/StorageMaterializePostgreSQL.cpp @@ -23,7 +23,6 @@ #include #include #include -#include namespace DB @@ -51,10 +50,10 @@ StorageMaterializePostgreSQL::StorageMaterializePostgreSQL( , WithContext(context_->getGlobalContext()) , remote_table_name(remote_table_name_) , replication_settings(std::move(replication_settings_)) - , is_postgresql_replica_database( + , is_materialize_postgresql_database( DatabaseCatalog::instance().getDatabase(getStorageID().database_name)->getEngineName() == "MaterializePostgreSQL") , nested_table_id(StorageID(table_id_.database_name, getNestedTableName())) - , nested_context(makeNestedTableContext()) + , nested_context(makeNestedTableContext(context_->getGlobalContext())) { setInMemoryMetadata(storage_metadata); @@ -76,9 +75,9 @@ StorageMaterializePostgreSQL::StorageMaterializePostgreSQL( ContextPtr context_) : IStorage(table_id_) , WithContext(context_->getGlobalContext()) - , is_postgresql_replica_database(true) + , is_materialize_postgresql_database(true) , nested_table_id(table_id_) - , nested_context(makeNestedTableContext()) + , nested_context(makeNestedTableContext(context_->getGlobalContext())) { } @@ -99,7 +98,7 @@ std::string StorageMaterializePostgreSQL::getNestedTableName() const { auto table_name = getStorageID().table_name; - if (!is_postgresql_replica_database) + if (!is_materialize_postgresql_database) table_name += NESTED_STORAGE_SUFFIX; return table_name; @@ -195,7 +194,7 @@ ASTPtr StorageMaterializePostgreSQL::getCreateNestedTableQuery(PostgreSQLTableSt const auto & columns = metadata_snapshot->getColumns(); NamesAndTypesList ordinary_columns_and_types; - if (!is_postgresql_replica_database) + if (!is_materialize_postgresql_database) { ordinary_columns_and_types = columns.getOrdinary(); } @@ -288,19 +287,19 @@ void StorageMaterializePostgreSQL::createNestedIfNeeded(PostgreSQLTableStructure } -std::shared_ptr StorageMaterializePostgreSQL::makeNestedTableContext() const +std::shared_ptr StorageMaterializePostgreSQL::makeNestedTableContext(ContextPtr from_context) const { - auto context = Context::createCopy(getContext()); - context->makeQueryContext(); - context->addQueryFactoriesInfo(Context::QueryLogFactories::Storage, "ReplacingMergeTree"); + auto new_context = Context::createCopy(from_context); + new_context->makeQueryContext(); + new_context->addQueryFactoriesInfo(Context::QueryLogFactories::Storage, "ReplacingMergeTree"); - return context; + return new_context; } void StorageMaterializePostgreSQL::startup() { - if (!is_postgresql_replica_database) + if (!is_materialize_postgresql_database) { replication_handler->addStorage(remote_table_name, this); replication_handler->startup(); @@ -321,7 +320,7 @@ void StorageMaterializePostgreSQL::dropInnerTableIfAny(bool no_delay, ContextPtr replication_handler->shutdownFinal(); auto nested_table = getNested(); - if (nested_table && !is_postgresql_replica_database) + if (nested_table && !is_materialize_postgresql_database) InterpreterDropQuery::executeDropQuery(ASTDropQuery::Kind::Drop, getContext(), local_context, nested_table_id, no_delay); } @@ -365,31 +364,32 @@ Pipe StorageMaterializePostgreSQL::read( //{ // auto 
old_table_id = getStorageID(); // auto metadata_snapshot = getInMemoryMetadataPtr(); -// bool from_atomic_to_atomic_database = old_table_id.hasUUID() && new_table_id.hasUUID(); // -// if (has_inner_table && tryGetTargetTable() && !from_atomic_to_atomic_database) +// IStorage::renameInMemory(new_table_id); +// auto nested_table = tryGetNested(); +// +// if (nested_table) // { -// auto new_target_table_name = generateInnerTableName(new_table_id); +// auto new_nested_table_name = getNestedTableName(); // auto rename = std::make_shared(); // // ASTRenameQuery::Table from; -// from.database = target_table_id.database_name; -// from.table = target_table_id.table_name; +// from.database = nested_table_id.database_name; +// from.table = nested_table_id.table_name; // // ASTRenameQuery::Table to; -// to.database = target_table_id.database_name; -// to.table = new_target_table_name; +// to.database = nested_table_id.database_name; +// to.table = new_nested_table_name; // // ASTRenameQuery::Element elem; // elem.from = from; // elem.to = to; // rename->elements.emplace_back(elem); // -// InterpreterRenameQuery(rename, global_context).execute(); +// InterpreterRenameQuery(rename, getContext()).execute(); // target_table_id.table_name = new_target_table_name; // } // -// IStorage::renameInMemory(new_table_id); // const auto & select_query = metadata_snapshot->getSelectQuery(); // // TODO Actually we don't need to update dependency if MV has UUID, but then db and table name will be outdated // DatabaseCatalog::instance().updateDependency(select_query.select_table_id, old_table_id, select_query.select_table_id, getStorageID()); diff --git a/src/Storages/PostgreSQL/StorageMaterializePostgreSQL.h b/src/Storages/PostgreSQL/StorageMaterializePostgreSQL.h index 3820ee0d66c..686cee7a1e6 100644 --- a/src/Storages/PostgreSQL/StorageMaterializePostgreSQL.h +++ b/src/Storages/PostgreSQL/StorageMaterializePostgreSQL.h @@ -59,8 +59,6 @@ public: ContextPtr getNestedTableContext() const { return nested_context; } - std::shared_ptr makeNestedTableContext() const; - void setNestedStatus(bool loaded) { nested_loaded.store(loaded); } bool isNestedLoaded() { return nested_loaded.load(); } @@ -87,13 +85,13 @@ private: std::string getNestedTableName() const; - std::string remote_table_name; + std::shared_ptr makeNestedTableContext(ContextPtr from_context) const; + std::string remote_table_name; std::unique_ptr replication_settings; std::unique_ptr replication_handler; - std::atomic nested_loaded = false; - bool is_postgresql_replica_database = false; + bool is_materialize_postgresql_database = false; StorageID nested_table_id; ContextPtr nested_context; }; From c968ccb391f99ddfa55cf41532457301594b9a4e Mon Sep 17 00:00:00 2001 From: kssenii Date: Sun, 11 Apr 2021 17:19:20 +0000 Subject: [PATCH 083/931] Better dependent name for nested in case of single storage --- .../StorageMaterializePostgreSQL.cpp | 51 ++++--------------- .../PostgreSQL/StorageMaterializePostgreSQL.h | 4 +- .../test_storage_postgresql_replica/test.py | 32 ++++++++++++ 3 files changed, 44 insertions(+), 43 deletions(-) diff --git a/src/Storages/PostgreSQL/StorageMaterializePostgreSQL.cpp b/src/Storages/PostgreSQL/StorageMaterializePostgreSQL.cpp index c97e3529deb..72611428326 100644 --- a/src/Storages/PostgreSQL/StorageMaterializePostgreSQL.cpp +++ b/src/Storages/PostgreSQL/StorageMaterializePostgreSQL.cpp @@ -35,7 +35,7 @@ namespace ErrorCodes extern const int BAD_ARGUMENTS; } -static const auto NESTED_STORAGE_SUFFIX = "_ReplacingMergeTree"; +static const 
auto NESTED_TABLE_SUFFIX = "_nested"; StorageMaterializePostgreSQL::StorageMaterializePostgreSQL( @@ -55,6 +55,9 @@ StorageMaterializePostgreSQL::StorageMaterializePostgreSQL( , nested_table_id(StorageID(table_id_.database_name, getNestedTableName())) , nested_context(makeNestedTableContext(context_->getGlobalContext())) { + if (table_id_.uuid == UUIDHelpers::Nil) + throw Exception(ErrorCodes::BAD_ARGUMENTS, "Storage MaterializePostgreSQL is allowed only for Atomic database"); + setInMemoryMetadata(storage_metadata); auto metadata_path = DatabaseCatalog::instance().getDatabase(getStorageID().database_name)->getMetadataPath() @@ -96,12 +99,12 @@ StoragePtr StorageMaterializePostgreSQL::tryGetNested() const std::string StorageMaterializePostgreSQL::getNestedTableName() const { - auto table_name = getStorageID().table_name; + auto table_id = getStorageID(); - if (!is_materialize_postgresql_database) - table_name += NESTED_STORAGE_SUFFIX; + if (is_materialize_postgresql_database) + return table_id.table_name; - return table_name; + return toString(table_id.uuid) + NESTED_TABLE_SUFFIX; } @@ -287,7 +290,7 @@ void StorageMaterializePostgreSQL::createNestedIfNeeded(PostgreSQLTableStructure } -std::shared_ptr StorageMaterializePostgreSQL::makeNestedTableContext(ContextPtr from_context) const +std::shared_ptr StorageMaterializePostgreSQL::makeNestedTableContext(ContextPtr from_context) { auto new_context = Context::createCopy(from_context); new_context->makeQueryContext(); @@ -360,42 +363,6 @@ Pipe StorageMaterializePostgreSQL::read( } -//void StorageMaterializePostgreSQL::renameInMemory(const StorageID & new_table_id) -//{ -// auto old_table_id = getStorageID(); -// auto metadata_snapshot = getInMemoryMetadataPtr(); -// -// IStorage::renameInMemory(new_table_id); -// auto nested_table = tryGetNested(); -// -// if (nested_table) -// { -// auto new_nested_table_name = getNestedTableName(); -// auto rename = std::make_shared(); -// -// ASTRenameQuery::Table from; -// from.database = nested_table_id.database_name; -// from.table = nested_table_id.table_name; -// -// ASTRenameQuery::Table to; -// to.database = nested_table_id.database_name; -// to.table = new_nested_table_name; -// -// ASTRenameQuery::Element elem; -// elem.from = from; -// elem.to = to; -// rename->elements.emplace_back(elem); -// -// InterpreterRenameQuery(rename, getContext()).execute(); -// target_table_id.table_name = new_target_table_name; -// } -// -// const auto & select_query = metadata_snapshot->getSelectQuery(); -// // TODO Actually we don't need to update dependency if MV has UUID, but then db and table name will be outdated -// DatabaseCatalog::instance().updateDependency(select_query.select_table_id, old_table_id, select_query.select_table_id, getStorageID()); -//} - - void registerStorageMaterializePostgreSQL(StorageFactory & factory) { auto creator_fn = [](const StorageFactory::Arguments & args) diff --git a/src/Storages/PostgreSQL/StorageMaterializePostgreSQL.h b/src/Storages/PostgreSQL/StorageMaterializePostgreSQL.h index 686cee7a1e6..079061a69d4 100644 --- a/src/Storages/PostgreSQL/StorageMaterializePostgreSQL.h +++ b/src/Storages/PostgreSQL/StorageMaterializePostgreSQL.h @@ -65,6 +65,8 @@ public: void setStorageMetadata(); + void renameNested(); + protected: StorageMaterializePostgreSQL( const StorageID & table_id_, @@ -85,7 +87,7 @@ private: std::string getNestedTableName() const; - std::shared_ptr makeNestedTableContext(ContextPtr from_context) const; + static std::shared_ptr makeNestedTableContext(ContextPtr 
from_context); std::string remote_table_name; std::unique_ptr replication_settings; diff --git a/tests/integration/test_storage_postgresql_replica/test.py b/tests/integration/test_storage_postgresql_replica/test.py index bca4f159cf6..678c7384c1d 100644 --- a/tests/integration/test_storage_postgresql_replica/test.py +++ b/tests/integration/test_storage_postgresql_replica/test.py @@ -430,6 +430,38 @@ def test_clickhouse_restart(started_cluster): assert(int(result) == 100050) +def test_rename_table(started_cluster): + conn = get_postgres_conn(True) + cursor = conn.cursor() + create_postgres_table(cursor, 'postgresql_replica'); + instance.query("INSERT INTO postgres_database.postgresql_replica SELECT number, number from numbers(50)") + + instance.query(''' + CREATE TABLE test.postgresql_replica (key UInt64, value UInt64, _sign Int8 MATERIALIZED 1, _version UInt64 MATERIALIZED 1) + ENGINE = MaterializePostgreSQL( + 'postgres1:5432', 'postgres_database', 'postgresql_replica', 'postgres', 'mysecretpassword') + PRIMARY KEY key; ''') + + result = instance.query('SELECT count() FROM test.postgresql_replica;') + while int(result) != 50: + time.sleep(0.5) + result = instance.query('SELECT count() FROM test.postgresql_replica;') + + instance.query('RENAME TABLE test.postgresql_replica TO test.postgresql_replica_renamed') + + result = instance.query('SELECT count() FROM test.postgresql_replica_renamed;') + while int(result) != 50: + time.sleep(0.5) + result = instance.query('SELECT count() FROM test.postgresql_replica_renamed;') + + instance.query("INSERT INTO postgres_database.postgresql_replica SELECT number, number from numbers(50, 50)") + + result = instance.query('SELECT count() FROM test.postgresql_replica_renamed;') + while int(result) != 100: + time.sleep(0.5) + result = instance.query('SELECT count() FROM test.postgresql_replica_renamed;') + + if __name__ == '__main__': cluster.start() input("Cluster created, press any key to destroy...") From 01075677cfa53dccf5af33e4b3249a6917ab611e Mon Sep 17 00:00:00 2001 From: kssenii Date: Sun, 11 Apr 2021 19:58:33 +0000 Subject: [PATCH 084/931] Better --- .../materialize-postgresql.md | 5 +- src/Core/PostgreSQL/PostgreSQLConnection.h | 2 +- src/DataStreams/PostgreSQLBlockInputStream.h | 13 +++-- .../DatabaseMaterializePostgreSQL.cpp | 17 ++++-- .../DatabaseMaterializePostgreSQL.h | 2 + .../fetchPostgreSQLTableStructure.cpp | 54 +++++++++++-------- .../fetchPostgreSQLTableStructure.h | 9 ++-- .../PostgreSQLReplicationHandler.cpp | 41 ++++++++------ .../PostgreSQL/PostgreSQLReplicationHandler.h | 14 ++--- src/Storages/StoragePostgreSQL.h | 1 - .../test.py | 50 ----------------- 11 files changed, 92 insertions(+), 116 deletions(-) diff --git a/docs/en/engines/database-engines/materialize-postgresql.md b/docs/en/engines/database-engines/materialize-postgresql.md index b3516001929..79dccabc287 100644 --- a/docs/en/engines/database-engines/materialize-postgresql.md +++ b/docs/en/engines/database-engines/materialize-postgresql.md @@ -7,9 +7,9 @@ toc_title: MaterializePostgreSQL ## Creating a Database {#creating-a-database} -## Requirements +## Requirements {#requirements} -Each replicated table must have one of the following **replica identity**: +- Each replicated table must have one of the following **replica identity**: 1. **default** (primary key) @@ -38,3 +38,4 @@ WHERE oid = 'postgres_table'::regclass; ``` +- Setting `wal_level`to `logical` and `max_replication_slots` to at least `2` in the postgresql config file. 
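For reference, the PostgreSQL-side prerequisites documented above can be applied with `ALTER SYSTEM`; this is a sketch, assuming superuser access, and both parameters have postmaster context so they only take effect after a server restart:

```sql
-- Sketch: apply the prerequisites from the documentation above on the source server.
-- Both parameters require a PostgreSQL server restart to take effect.
ALTER SYSTEM SET wal_level = 'logical';
ALTER SYSTEM SET max_replication_slots = 2;
```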
diff --git a/src/Core/PostgreSQL/PostgreSQLConnection.h b/src/Core/PostgreSQL/PostgreSQLConnection.h index dfed426b462..f884e93669d 100644 --- a/src/Core/PostgreSQL/PostgreSQLConnection.h +++ b/src/Core/PostgreSQL/PostgreSQLConnection.h @@ -25,7 +25,7 @@ class Connection; using ConnectionPtr = std::shared_ptr; -/// Connection string and address without login/password (for error logs) +/// Connection string and address without credentials (for logs) using ConnectionInfo = std::pair; ConnectionInfo formatConnectionString( diff --git a/src/DataStreams/PostgreSQLBlockInputStream.h b/src/DataStreams/PostgreSQLBlockInputStream.h index 5c637015f18..f320e2caeb5 100644 --- a/src/DataStreams/PostgreSQLBlockInputStream.h +++ b/src/DataStreams/PostgreSQLBlockInputStream.h @@ -9,7 +9,6 @@ #include #include #include -#include #include #include @@ -28,6 +27,12 @@ public: const Block & sample_block, const UInt64 max_block_size_); + String getName() const override { return "PostgreSQL"; } + Block getHeader() const override { return description.sample_block.cloneEmpty(); } + + void readPrefix() override; + +protected: PostgreSQLBlockInputStream( std::shared_ptr tx_, const std::string & query_str_, @@ -35,12 +40,6 @@ public: const UInt64 max_block_size_, bool auto_commit_); - String getName() const override { return "PostgreSQL"; } - Block getHeader() const override { return description.sample_block.cloneEmpty(); } - - void readPrefix() override; - -protected: String query_str; std::shared_ptr tx; std::unique_ptr stream; diff --git a/src/Databases/PostgreSQL/DatabaseMaterializePostgreSQL.cpp b/src/Databases/PostgreSQL/DatabaseMaterializePostgreSQL.cpp index e5d61709387..c8e93616ead 100644 --- a/src/Databases/PostgreSQL/DatabaseMaterializePostgreSQL.cpp +++ b/src/Databases/PostgreSQL/DatabaseMaterializePostgreSQL.cpp @@ -60,7 +60,8 @@ void DatabaseMaterializePostgreSQL::startSynchronization() metadata_path + METADATA_SUFFIX, getContext(), settings->postgresql_replica_max_block_size.value, - settings->postgresql_replica_allow_minimal_ddl, true, + settings->postgresql_replica_allow_minimal_ddl, + /* is_materialize_postgresql_database = */ true, settings->postgresql_replica_tables_list.value); std::unordered_set tables_to_replicate = replication_handler->fetchRequiredTables(connection->getRef()); @@ -123,9 +124,9 @@ StoragePtr DatabaseMaterializePostgreSQL::tryGetTable(const String & name, Conte /// Note: In select query we call MaterializePostgreSQL table and it calls tryGetTable from its nested. std::lock_guard lock(tables_mutex); auto table = materialized_tables.find(name); - /// Here it is possible that nested table is temporarily out of reach, but return storage anyway, - /// it will not allow to read if nested is unavailable at the moment - if (table != materialized_tables.end()) + + /// Nested table is not created immediately. Consider that table exists only if nested table exists. 
+ if (table != materialized_tables.end() && table->second->as()->isNestedLoaded()) return table->second; return StoragePtr{}; @@ -177,7 +178,7 @@ DatabaseTablesIteratorPtr DatabaseMaterializePostgreSQL::getTablesIterator( Tables nested_tables; for (const auto & [table_name, storage] : materialized_tables) { - auto nested_storage = storage->template as()->tryGetNested(); + auto nested_storage = storage->as()->tryGetNested(); if (nested_storage) nested_tables[table_name] = nested_storage; @@ -186,6 +187,12 @@ DatabaseTablesIteratorPtr DatabaseMaterializePostgreSQL::getTablesIterator( return std::make_unique(nested_tables, database_name); } + +void DatabaseMaterializePostgreSQL::renameTable(ContextPtr /* context_ */, const String & /* name */, IDatabase & /* to_database */, const String & /* to_name */, bool /* exchange */, bool /* dictionary */) +{ + throw Exception("MaterializePostgreSQL database does not support rename table.", ErrorCodes::NOT_IMPLEMENTED); +} + } #endif diff --git a/src/Databases/PostgreSQL/DatabaseMaterializePostgreSQL.h b/src/Databases/PostgreSQL/DatabaseMaterializePostgreSQL.h index 17288be8fb2..bdfe54ace13 100644 --- a/src/Databases/PostgreSQL/DatabaseMaterializePostgreSQL.h +++ b/src/Databases/PostgreSQL/DatabaseMaterializePostgreSQL.h @@ -51,6 +51,8 @@ public: void createTable(ContextPtr context, const String & name, const StoragePtr & table, const ASTPtr & query) override; + void renameTable(ContextPtr context_, const String & name, IDatabase & to_database, const String & to_name, bool exchange, bool dictionary) override; + void drop(ContextPtr local_context) override; void shutdown() override; diff --git a/src/Databases/PostgreSQL/fetchPostgreSQLTableStructure.cpp b/src/Databases/PostgreSQL/fetchPostgreSQLTableStructure.cpp index 7aaa7cc6f2a..1f8b08d3807 100644 --- a/src/Databases/PostgreSQL/fetchPostgreSQLTableStructure.cpp +++ b/src/Databases/PostgreSQL/fetchPostgreSQLTableStructure.cpp @@ -26,14 +26,14 @@ namespace ErrorCodes } -std::unordered_set fetchPostgreSQLTablesList(pqxx::connection & connection) +template +std::unordered_set fetchPostgreSQLTablesList(T & tx) { std::unordered_set tables; std::string query = "SELECT tablename FROM pg_catalog.pg_tables " "WHERE schemaname != 'pg_catalog' AND schemaname != 'information_schema'"; - pqxx::read_transaction tx(connection); - for (auto table_name : tx.stream(query)) + for (auto table_name : tx.template stream(query)) tables.insert(std::get<0>(table_name)); return tables; @@ -112,13 +112,13 @@ static DataTypePtr convertPostgreSQLDataType(std::string & type, bool is_nullabl template std::shared_ptr readNamesAndTypesList( - std::shared_ptr tx, const String & postgres_table_name, const String & query, bool use_nulls, bool only_names_and_types) + T & tx, const String & postgres_table_name, const String & query, bool use_nulls, bool only_names_and_types) { auto columns = NamesAndTypesList(); try { - pqxx::stream_from stream(*tx, pqxx::from_query, std::string_view(query)); + pqxx::stream_from stream(tx, pqxx::from_query, std::string_view(query)); if (only_names_and_types) { @@ -158,7 +158,7 @@ std::shared_ptr readNamesAndTypesList( template PostgreSQLTableStructure fetchPostgreSQLTableStructure( - std::shared_ptr tx, const String & postgres_table_name, bool use_nulls, bool with_primary_key, bool with_replica_identity_index) + T & tx, const String & postgres_table_name, bool use_nulls, bool with_primary_key, bool with_replica_identity_index) { PostgreSQLTableStructure table; @@ -213,29 +213,37 @@ 
PostgreSQLTableStructure fetchPostgreSQLTableStructure( } -template -PostgreSQLTableStructure fetchPostgreSQLTableStructure( - std::shared_ptr tx, const String & postgres_table_name, bool use_nulls, - bool with_primary_key, bool with_replica_identity_index); - - -template -PostgreSQLTableStructure fetchPostgreSQLTableStructure( - std::shared_ptr tx, const String & postgres_table_name, bool use_nulls, - bool with_primary_key, bool with_replica_identity_index); - - PostgreSQLTableStructure fetchPostgreSQLTableStructure( pqxx::connection & connection, const String & postgres_table_name, bool use_nulls) { - auto tx = std::make_shared(connection); - auto table = fetchPostgreSQLTableStructure(tx, postgres_table_name, use_nulls, false, false); - tx->commit(); - - return table; + postgres::Transaction tx(connection); + return fetchPostgreSQLTableStructure(tx.getRef(), postgres_table_name, use_nulls, false, false); } +std::unordered_set fetchPostgreSQLTablesList(pqxx::connection & connection) +{ + postgres::Transaction tx(connection); + return fetchPostgreSQLTablesList(tx.getRef()); +} + + +template +PostgreSQLTableStructure fetchPostgreSQLTableStructure( + pqxx::ReadTransaction & tx, const String & postgres_table_name, bool use_nulls, + bool with_primary_key, bool with_replica_identity_index); + +template +PostgreSQLTableStructure fetchPostgreSQLTableStructure( + pqxx::ReplicationTransaction & tx, const String & postgres_table_name, bool use_nulls, + bool with_primary_key, bool with_replica_identity_index); + +template +std::unordered_set fetchPostgreSQLTablesList(pqxx::work & tx); + +template +std::unordered_set fetchPostgreSQLTablesList(pqxx::ReadTransaction & tx); + } #endif diff --git a/src/Databases/PostgreSQL/fetchPostgreSQLTableStructure.h b/src/Databases/PostgreSQL/fetchPostgreSQLTableStructure.h index bbcb9cd192f..2853e0a8ea4 100644 --- a/src/Databases/PostgreSQL/fetchPostgreSQLTableStructure.h +++ b/src/Databases/PostgreSQL/fetchPostgreSQLTableStructure.h @@ -12,8 +12,6 @@ namespace DB { -std::unordered_set fetchPostgreSQLTablesList(pqxx::connection & connection); - struct PostgreSQLTableStructure { std::shared_ptr columns; @@ -23,14 +21,19 @@ struct PostgreSQLTableStructure using PostgreSQLTableStructurePtr = std::unique_ptr; +std::unordered_set fetchPostgreSQLTablesList(pqxx::connection & connection); + PostgreSQLTableStructure fetchPostgreSQLTableStructure( pqxx::connection & connection, const String & postgres_table_name, bool use_nulls); template PostgreSQLTableStructure fetchPostgreSQLTableStructure( - std::shared_ptr tx, const String & postgres_table_name, bool use_nulls, + T & tx, const String & postgres_table_name, bool use_nulls, bool with_primary_key = false, bool with_replica_identity_index = false); +template +std::unordered_set fetchPostgreSQLTablesList(T & tx); + } #endif diff --git a/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp b/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp index 4ea1dad2b14..c1056f925bb 100644 --- a/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp +++ b/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp @@ -87,7 +87,10 @@ void PostgreSQLReplicationHandler::shutdown() void PostgreSQLReplicationHandler::startSynchronization() { - createPublicationIfNeeded(connection->getRef()); + { + postgres::Transaction tx(connection->getRef()); + createPublicationIfNeeded(tx.getRef()); + } auto replication_connection = postgres::createReplicationConnection(connection_info); postgres::Transaction tx(replication_connection->getRef()); 
@@ -159,7 +162,7 @@ NameSet PostgreSQLReplicationHandler::loadFromSnapshot(std::string & snapshot_na std::string query_str = fmt::format("SET TRANSACTION SNAPSHOT '{}'", snapshot_name); tx->exec(query_str); - storage_data.second->createNestedIfNeeded(fetchTableStructure(tx, table_name)); + storage_data.second->createNestedIfNeeded(fetchTableStructure(*tx, table_name)); auto nested_storage = storage_data.second->getNested(); /// Load from snapshot, which will show table state before creation of replication slot. @@ -233,14 +236,12 @@ bool PostgreSQLReplicationHandler::isPublicationExist(pqxx::work & tx) } -void PostgreSQLReplicationHandler::createPublicationIfNeeded(pqxx::connection & connection_) +void PostgreSQLReplicationHandler::createPublicationIfNeeded(pqxx::work & tx, bool create_without_check) { if (new_publication_created) return; - postgres::Transaction tx(connection_); - - if (!isPublicationExist(tx.getRef())) + if (create_without_check || !isPublicationExist(tx)) { if (tables_list.empty()) { @@ -349,27 +350,33 @@ void PostgreSQLReplicationHandler::shutdownFinal() } +/// Used by MaterializePostgreSQL database engine. NameSet PostgreSQLReplicationHandler::fetchRequiredTables(pqxx::connection & connection_) { - if (tables_list.empty()) + postgres::Transaction tx(connection_); + bool publication_exists = isPublicationExist(tx.getRef()); + + if (tables_list.empty() && !publication_exists) { - return fetchPostgreSQLTablesList(connection_); - } - else - { - createPublicationIfNeeded(connection_); - return fetchTablesFromPublication(connection_); + /// Fetch all tables list from database. Publication does not exist yet, which means + /// that no replication took place. Publication will be created in + /// startSynchronization method. + return fetchPostgreSQLTablesList(tx.getRef()); } + + if (!publication_exists) + createPublicationIfNeeded(tx.getRef(), /* create_without_check = */ true); + + return fetchTablesFromPublication(tx.getRef()); } -NameSet PostgreSQLReplicationHandler::fetchTablesFromPublication(pqxx::connection & connection_) +NameSet PostgreSQLReplicationHandler::fetchTablesFromPublication(pqxx::work & tx) { std::string query = fmt::format("SELECT tablename FROM pg_publication_tables WHERE pubname = '{}'", publication_name); std::unordered_set tables; - postgres::Transaction tx(connection_); - for (auto table_name : tx.getRef().stream(query)) + for (auto table_name : tx.stream(query)) tables.insert(std::get<0>(table_name)); return tables; @@ -377,7 +384,7 @@ NameSet PostgreSQLReplicationHandler::fetchTablesFromPublication(pqxx::connectio PostgreSQLTableStructurePtr PostgreSQLReplicationHandler::fetchTableStructure( - std::shared_ptr tx, const std::string & table_name) + pqxx::ReplicationTransaction & tx, const std::string & table_name) { if (!is_postgresql_replica_database_engine) return nullptr; diff --git a/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.h b/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.h index 5a527179406..43f0067aed7 100644 --- a/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.h +++ b/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.h @@ -52,18 +52,20 @@ public: private: using Storages = std::unordered_map; - void createPublicationIfNeeded(pqxx::connection & connection_); - bool isPublicationExist(pqxx::work & tx); + void createPublicationIfNeeded(pqxx::work & tx, bool create_without_check = false); + + NameSet fetchTablesFromPublication(pqxx::work & tx); + + void dropPublication(pqxx::nontransaction & ntx); + bool 
isReplicationSlotExist(pqxx::nontransaction & tx, std::string & slot_name); void createReplicationSlot(pqxx::nontransaction & tx, std::string & start_lsn, std::string & snapshot_name, bool temporary = false); void dropReplicationSlot(pqxx::nontransaction & tx, bool temporary = false); - void dropPublication(pqxx::nontransaction & ntx); - void waitConnectionAndStart(); void startSynchronization(); @@ -72,11 +74,9 @@ private: NameSet loadFromSnapshot(std::string & snapshot_name, Storages & sync_storages); - NameSet fetchTablesFromPublication(pqxx::connection & connection_); - std::unordered_map reloadFromSnapshot(const std::vector> & relation_data); - PostgreSQLTableStructurePtr fetchTableStructure(std::shared_ptr tx, const std::string & table_name); + PostgreSQLTableStructurePtr fetchTableStructure(pqxx::ReplicationTransaction & tx, const std::string & table_name); Poco::Logger * log; ContextPtr context; diff --git a/src/Storages/StoragePostgreSQL.h b/src/Storages/StoragePostgreSQL.h index 9bf5a001f1b..fc57aded197 100644 --- a/src/Storages/StoragePostgreSQL.h +++ b/src/Storages/StoragePostgreSQL.h @@ -10,7 +10,6 @@ #include #include #include -#include namespace DB diff --git a/tests/integration/test_postgresql_replica_database_engine/test.py b/tests/integration/test_postgresql_replica_database_engine/test.py index cf93a3e1b1c..503e12c890f 100644 --- a/tests/integration/test_postgresql_replica_database_engine/test.py +++ b/tests/integration/test_postgresql_replica_database_engine/test.py @@ -303,56 +303,6 @@ def test_load_and_sync_subset_of_database_tables(started_cluster): assert 'test_database' not in instance.query('SHOW DATABASES') -@pytest.mark.timeout(320) -def test_table_schema_changes(started_cluster): - instance.query("DROP DATABASE IF EXISTS test_database") - conn = get_postgres_conn(True) - cursor = conn.cursor() - NUM_TABLES = 5 - - for i in range(NUM_TABLES): - create_postgres_table(cursor, 'postgresql_replica_{}'.format(i), template=postgres_table_template_2); - instance.query("INSERT INTO postgres_database.postgresql_replica_{} SELECT number, {}, {}, {} from numbers(25)".format(i, i, i, i)) - - instance.query( - """CREATE DATABASE test_database - ENGINE = MaterializePostgreSQL('postgres1:5432', 'postgres_database', 'postgres', 'mysecretpassword') - SETTINGS postgresql_replica_allow_minimal_ddl = 1; - """) - - for i in range(NUM_TABLES): - instance.query("INSERT INTO postgres_database.postgresql_replica_{} SELECT 25 + number, {}, {}, {} from numbers(25)".format(i, i, i, i)) - - for i in range(NUM_TABLES): - check_tables_are_synchronized('postgresql_replica_{}'.format(i)); - - expected = instance.query("SELECT key, value1, value3 FROM test_database.postgresql_replica_3 ORDER BY key"); - - altered_table = random.randint(0, 4) - cursor.execute("ALTER TABLE postgresql_replica_{} DROP COLUMN value2".format(altered_table)) - - for i in range(NUM_TABLES): - cursor.execute("INSERT INTO postgresql_replica_{} VALUES (50, {}, {})".format(i, i, i)) - cursor.execute("UPDATE postgresql_replica_{} SET value3 = 12 WHERE key%2=0".format(i)) - - for i in range(NUM_TABLES): - check_tables_are_synchronized('postgresql_replica_{}'.format(i)); - - for i in range(NUM_TABLES): - if i != altered_table: - instance.query("INSERT INTO postgres_database.postgresql_replica_{} SELECT 51 + number, {}, {}, {} from numbers(49)".format(i, i, i, i)) - else: - instance.query("INSERT INTO postgres_database.postgresql_replica_{} SELECT 51 + number, {}, {} from numbers(49)".format(i, i, i)) - - for i in 
range(NUM_TABLES):
-        check_tables_are_synchronized('postgresql_replica_{}'.format(i));
-
-    for i in range(NUM_TABLES):
-        cursor.execute('drop table postgresql_replica_{};'.format(i))
-
-    instance.query("DROP DATABASE test_database")
-
-
 @pytest.mark.timeout(120)
 def test_changing_replica_identity_value(started_cluster):
     instance.query("DROP DATABASE IF EXISTS test_database")

From 6413d7bac6b27a8ee268e3993268c99513609331 Mon Sep 17 00:00:00 2001
From: kssenii
Date: Sun, 11 Apr 2021 20:26:59 +0000
Subject: [PATCH 085/931] Doc

---
 .../database-engines/materialize-postgresql.md    |  2 +-
 .../integrations/materialize-postgresql.md        | 16 ++++++++++++++++
 2 files changed, 17 insertions(+), 1 deletion(-)
 create mode 100644 docs/en/engines/table-engines/integrations/materialize-postgresql.md

diff --git a/docs/en/engines/database-engines/materialize-postgresql.md b/docs/en/engines/database-engines/materialize-postgresql.md
index 79dccabc287..5f1ee614704 100644
--- a/docs/en/engines/database-engines/materialize-postgresql.md
+++ b/docs/en/engines/database-engines/materialize-postgresql.md
@@ -35,7 +35,7 @@ postgres# SELECT CASE relreplident
 END AS replica_identity
 FROM pg_class
 WHERE oid = 'postgres_table'::regclass;
-
 ```
 
 - Setting `wal_level` to `logical` and `max_replication_slots` to at least `2` in the postgresql config file.
+

diff --git a/docs/en/engines/table-engines/integrations/materialize-postgresql.md b/docs/en/engines/table-engines/integrations/materialize-postgresql.md
new file mode 100644
index 00000000000..e3cbfbb087b
--- /dev/null
+++ b/docs/en/engines/table-engines/integrations/materialize-postgresql.md
@@ -0,0 +1,16 @@
+---
+toc_priority: 12
+toc_title: MaterializePostgreSQL
+---
+
+# MaterializePostgreSQL {#materialize-postgresql}
+
+## Creating a Table {#creating-a-table}
+
+## Requirements {#requirements}
+
+- A table with engine `MaterializePostgreSQL` must have a primary key - the same as a replica identity index of a postgres table (See [details on replica identity index](../../database-engines/materialize-postgresql.md#requirements)).
+
+- Only database `Atomic` is allowed.
+
+- Setting `wal_level` to `logical` and `max_replication_slots` to at least `2` in the postgresql config file.
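The `Creating a Table` section of the new document above is still a stub. The syntax exercised by the integration test added earlier in this series (`test_rename_table`) looks like the following; host, database names, and credentials are the test's placeholder values:

```sql
CREATE TABLE test.postgresql_replica (key UInt64, value UInt64, _sign Int8 MATERIALIZED 1, _version UInt64 MATERIALIZED 1)
ENGINE = MaterializePostgreSQL('postgres1:5432', 'postgres_database', 'postgresql_replica', 'postgres', 'mysecretpassword')
PRIMARY KEY key;
```

Per the requirements listed above, the primary key must match the replica identity index of the source PostgreSQL table.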
From a681b2484b5695a09d4a77cbf541cd23b21742ad Mon Sep 17 00:00:00 2001 From: Nikolai Kochetov Date: Mon, 12 Apr 2021 13:32:38 +0300 Subject: [PATCH 086/931] Update arrayElement.cpp --- src/Functions/array/arrayElement.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Functions/array/arrayElement.cpp b/src/Functions/array/arrayElement.cpp index 87f91d5446c..9aa1e637b31 100644 --- a/src/Functions/array/arrayElement.cpp +++ b/src/Functions/array/arrayElement.cpp @@ -953,7 +953,7 @@ bool FunctionArrayElement::matchKeyToIndex( || matchKeyToIndexNumber(data, offsets, arguments, matched_idxs) || matchKeyToIndexNumber(data, offsets, arguments, matched_idxs) || matchKeyToIndexNumber(data, offsets, arguments, matched_idxs) - || matchKeyToIndexString(data, offsets, arguments, matched_idxs); + || matchKeyToIndexString(data, offsets, arguments, matched_idxs) || matchKeyToIndexFixedString(data, offsets, arguments, matched_idxs); } From 7e413675a26b959db831f75e5df765d493ca1435 Mon Sep 17 00:00:00 2001 From: hexiaoting Date: Mon, 19 Apr 2021 11:18:46 +0800 Subject: [PATCH 087/931] Fix Map table create error --- src/DataTypes/DataTypeMap.cpp | 15 +++++++++++++-- 1 file changed, 13 insertions(+), 2 deletions(-) diff --git a/src/DataTypes/DataTypeMap.cpp b/src/DataTypes/DataTypeMap.cpp index 1d580761362..4fa92247a6a 100644 --- a/src/DataTypes/DataTypeMap.cpp +++ b/src/DataTypes/DataTypeMap.cpp @@ -6,6 +6,7 @@ #include #include #include +#include #include #include #include @@ -55,9 +56,19 @@ DataTypeMap::DataTypeMap(const DataTypePtr & key_type_, const DataTypePtr & valu void DataTypeMap::assertKeyType() const { - if (!key_type->isValueRepresentedByInteger() && !isStringOrFixedString(*key_type) && !WhichDataType(key_type).isNothing()) + bool type_error = false; + if (key_type->getTypeId() == TypeIndex::LowCardinality) + { + const auto & low_cardinality_data_type = assert_cast(*key_type); + if (!isStringOrFixedString(*(low_cardinality_data_type.getDictionaryType()))) + type_error = true; + } + else if (!key_type->isValueRepresentedByInteger() && !isStringOrFixedString(*key_type) && !WhichDataType(key_type).isNothing()) + type_error = true; + + if (type_error) throw Exception(ErrorCodes::BAD_ARGUMENTS, - "Type of Map key must be a type, that can be represented by integer or string," + "Type of Map key must be a type, that can be represented by integer or [LowCardinality]string," " but {} given", key_type->getName()); } From cf0d8be8aa1cb8bce1810962e7f44ee357a9a7a6 Mon Sep 17 00:00:00 2001 From: Pavel Kruglov Date: Mon, 19 Apr 2021 17:45:46 +0300 Subject: [PATCH 088/931] Add uniqTHetaSketch in performance test --- tests/performance/uniq.xml | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/performance/uniq.xml b/tests/performance/uniq.xml index b4e73733769..f688f1d5a9d 100644 --- a/tests/performance/uniq.xml +++ b/tests/performance/uniq.xml @@ -46,6 +46,7 @@ uniqUpTo(10) uniqUpTo(25) uniqUpTo(100) + uniqThetaSketch From 14be3a07839891512918466cb3ee938cb1a1a7d7 Mon Sep 17 00:00:00 2001 From: kssenii Date: Sat, 1 May 2021 11:49:45 +0000 Subject: [PATCH 089/931] Better reload from snapshot --- .../DatabaseMaterializePostgreSQL.cpp | 1 + .../MaterializePostgreSQLConsumer.cpp | 8 +- .../MaterializePostgreSQLConsumer.h | 2 +- .../PostgreSQLReplicationHandler.cpp | 177 ++++++++-------- .../PostgreSQL/PostgreSQLReplicationHandler.h | 20 +- .../StorageMaterializePostgreSQL.cpp | 190 ++++++++++-------- .../PostgreSQL/StorageMaterializePostgreSQL.h | 3 + 7 files changed, 216 insertions(+), 185 
deletions(-) diff --git a/src/Databases/PostgreSQL/DatabaseMaterializePostgreSQL.cpp b/src/Databases/PostgreSQL/DatabaseMaterializePostgreSQL.cpp index c8e93616ead..7d493d3dcf3 100644 --- a/src/Databases/PostgreSQL/DatabaseMaterializePostgreSQL.cpp +++ b/src/Databases/PostgreSQL/DatabaseMaterializePostgreSQL.cpp @@ -56,6 +56,7 @@ void DatabaseMaterializePostgreSQL::startSynchronization() { replication_handler = std::make_unique( remote_database_name, + database_name, connection->getConnectionInfo(), metadata_path + METADATA_SUFFIX, getContext(), diff --git a/src/Storages/PostgreSQL/MaterializePostgreSQLConsumer.cpp b/src/Storages/PostgreSQL/MaterializePostgreSQLConsumer.cpp index 0a597d942f7..bd4e5c0cbe5 100644 --- a/src/Storages/PostgreSQL/MaterializePostgreSQLConsumer.cpp +++ b/src/Storages/PostgreSQL/MaterializePostgreSQLConsumer.cpp @@ -675,13 +675,9 @@ void MaterializePostgreSQLConsumer::updateNested(const String & table_name, Stor } -void MaterializePostgreSQLConsumer::updateSkipList(const std::unordered_map & tables_with_lsn) +void MaterializePostgreSQLConsumer::updateSkipList(Int32 table_id, const String & table_start_lsn) { - for (const auto & [relation_id, lsn] : tables_with_lsn) - { - if (!lsn.empty()) - skip_list[relation_id] = lsn; /// start_lsn - } + skip_list[table_id] = table_start_lsn; } } diff --git a/src/Storages/PostgreSQL/MaterializePostgreSQLConsumer.h b/src/Storages/PostgreSQL/MaterializePostgreSQLConsumer.h index 7eeac16337e..3bef0c717ba 100644 --- a/src/Storages/PostgreSQL/MaterializePostgreSQLConsumer.h +++ b/src/Storages/PostgreSQL/MaterializePostgreSQLConsumer.h @@ -43,7 +43,7 @@ public: void updateNested(const String & table_name, StoragePtr nested_storage); - void updateSkipList(const std::unordered_map & tables_with_lsn); + void updateSkipList(Int32 table_id, const String & table_start_lsn); private: bool readFromReplicationSlot(); diff --git a/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp b/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp index c1056f925bb..6607bb6d3f7 100644 --- a/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp +++ b/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp @@ -15,10 +15,12 @@ namespace DB { static const auto reschedule_ms = 500; +static const auto TMP_SUFFIX = "_tmp"; PostgreSQLReplicationHandler::PostgreSQLReplicationHandler( - const std::string & database_name_, + const String & remote_database_name_, + const String & current_database_name_, const postgres::ConnectionInfo & connection_info_, const std::string & metadata_path_, ContextPtr context_, @@ -28,7 +30,8 @@ PostgreSQLReplicationHandler::PostgreSQLReplicationHandler( const String tables_list_) : log(&Poco::Logger::get("PostgreSQLReplicationHandler")) , context(context_) - , database_name(database_name_) + , remote_database_name(remote_database_name_) + , current_database_name(current_database_name_) , metadata_path(metadata_path_) , connection_info(connection_info_) , max_block_size(max_block_size_) @@ -37,8 +40,8 @@ PostgreSQLReplicationHandler::PostgreSQLReplicationHandler( , tables_list(tables_list_) , connection(std::make_shared(connection_info_)) { - replication_slot = fmt::format("{}_ch_replication_slot", database_name); - publication_name = fmt::format("{}_ch_publication", database_name); + replication_slot = fmt::format("{}_ch_replication_slot", current_database_name); + publication_name = fmt::format("{}_ch_publication", current_database_name); startup_task = context->getSchedulePool().createTask("PostgreSQLReplicaStartup", 
[this]{ waitConnectionAndStart(); }); consumer_task = context->getSchedulePool().createTask("PostgreSQLReplicaStartup", [this]{ consumerFunc(); }); @@ -47,7 +50,7 @@ PostgreSQLReplicationHandler::PostgreSQLReplicationHandler( void PostgreSQLReplicationHandler::addStorage(const std::string & table_name, StorageMaterializePostgreSQL * storage) { - storages[table_name] = storage; + materialized_storages[table_name] = storage; } @@ -95,40 +98,59 @@ void PostgreSQLReplicationHandler::startSynchronization() auto replication_connection = postgres::createReplicationConnection(connection_info); postgres::Transaction tx(replication_connection->getRef()); + /// List of nested tables (table_name -> nested_storage), which is passed to replication consumer. + std::unordered_map nested_storages; std::string snapshot_name, start_lsn; auto initial_sync = [&]() { createReplicationSlot(tx.getRef(), start_lsn, snapshot_name); - loadFromSnapshot(snapshot_name, storages); + + for (const auto & [table_name, storage] : materialized_storages) + { + try + { + nested_storages[table_name] = loadFromSnapshot(snapshot_name, table_name, storage->as ()); + } + catch (Exception & e) + { + e.addMessage("while loading table {}.{}", remote_database_name, table_name); + tryLogCurrentException(__PRETTY_FUNCTION__); + } + } }; - /// Replication slot should be deleted with drop table only and created only once, reused after detach. + /// There is one replication slot for each replication handler. In case of MaterializePostgreSQL database engine, + /// there is one replication slot per database. Its lifetime must be equal to the lifetime of replication handler. + /// Recreation of a replication slot imposes reloading of all tables. if (!isReplicationSlotExist(tx.getRef(), replication_slot)) { initial_sync(); } else if (!Poco::File(metadata_path).exists() || new_publication_created) { - /// In case of some failure, the following cases are possible (since publication and replication slot are reused): - /// 1. If replication slot exists and metadata file (where last synced version is written) does not exist, it is not ok. - /// 2. If created a new publication and replication slot existed before it was created, it is not ok. + /// There are the following cases, which mean that something non-intentioanal happened. + /// 1. If replication slot exists and metadata file does not exist, it is not ok. + /// 2. If replication slot exists before publication is created. dropReplicationSlot(tx.getRef()); initial_sync(); } else { - LOG_TRACE(log, "Restoring {} tables...", storages.size()); - for (const auto & [table_name, storage] : storages) + /// Synchronization and initial load already took place.c + LOG_TRACE(log, "Loading {} tables...", materialized_storages.size()); + for (const auto & [table_name, storage] : materialized_storages) { + auto materialized_storage = storage->as (); try { - nested_storages[table_name] = storage->getNested(); - storage->setStorageMetadata(); - storage->setNestedStatus(true); + nested_storages[table_name] = materialized_storage->getNested(); + materialized_storage->setStorageMetadata(); + materialized_storage->setNestedStatus(true); } - catch (...) + catch (Exception & e) { + e.addMessage("while loading table {}.{}", remote_database_name, table_name); tryLogCurrentException(__PRETTY_FUNCTION__); } } @@ -146,60 +168,47 @@ void PostgreSQLReplicationHandler::startSynchronization() nested_storages); consumer_task->activateAndSchedule(); + + /// Do not rely anymore on saved storage pointers. 
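+ /// From this point the consumer works with nested tables only; the raw StorageMaterializePostgreSQL + /// pointers collected via addStorage() were only guaranteed to stay alive for the duration of startup, + /// so the handler must not keep them.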
+ materialized_storages.clear(); } -NameSet PostgreSQLReplicationHandler::loadFromSnapshot(std::string & snapshot_name, Storages & sync_storages) +StoragePtr PostgreSQLReplicationHandler::loadFromSnapshot(std::string & snapshot_name, const String & table_name, StorageMaterializePostgreSQL * materialized_storage) { - NameSet success_tables; - for (const auto & storage_data : sync_storages) - { - try - { - auto tx = std::make_shared(connection->getRef()); - const auto & table_name = storage_data.first; + auto tx = std::make_shared(connection->getRef()); - std::string query_str = fmt::format("SET TRANSACTION SNAPSHOT '{}'", snapshot_name); - tx->exec(query_str); + std::string query_str = fmt::format("SET TRANSACTION SNAPSHOT '{}'", snapshot_name); + tx->exec(query_str); - storage_data.second->createNestedIfNeeded(fetchTableStructure(*tx, table_name)); - auto nested_storage = storage_data.second->getNested(); + /// Load from snapshot, which will show table state before creation of replication slot. + /// Already connected to needed database, no need to add it to query. + query_str = fmt::format("SELECT * FROM {}", table_name); - /// Load from snapshot, which will show table state before creation of replication slot. - /// Already connected to needed database, no need to add it to query. - query_str = fmt::format("SELECT * FROM {}", storage_data.first); + /// If table schema has changed, the table stops consuming changed from replication stream. + /// If `allow_minimal_ddl` is true, create a new table in the background, load new table schema + /// and all data from scratch. Then execute REPLACE query with Nested table. + /// This is only allowed for MaterializePostgreSQL database engine. + materialized_storage->createNestedIfNeeded(fetchTableStructure(*tx, table_name)); + auto nested_storage = materialized_storage->getNested(); + auto insert_context = materialized_storage->getNestedTableContext(); - const StorageInMemoryMetadata & storage_metadata = nested_storage->getInMemoryMetadata(); - auto insert_context = storage_data.second->getNestedTableContext(); + auto insert = std::make_shared(); + insert->table_id = nested_storage->getStorageID(); - auto insert = std::make_shared(); - insert->table_id = nested_storage->getStorageID(); + InterpreterInsertQuery interpreter(insert, insert_context); + auto block_io = interpreter.execute(); - InterpreterInsertQuery interpreter(insert, insert_context); - auto block_io = interpreter.execute(); + const StorageInMemoryMetadata & storage_metadata = nested_storage->getInMemoryMetadata(); + auto sample_block = storage_metadata.getSampleBlockNonMaterialized(); - auto sample_block = storage_metadata.getSampleBlockNonMaterialized(); - PostgreSQLTransactionBlockInputStream input(tx, query_str, sample_block, DEFAULT_BLOCK_SIZE); + PostgreSQLTransactionBlockInputStream input(tx, query_str, sample_block, DEFAULT_BLOCK_SIZE); + assertBlocksHaveEqualStructure(input.getHeader(), block_io.out->getHeader(), "postgresql replica load from snapshot"); + copyData(input, *block_io.out); - assertBlocksHaveEqualStructure(input.getHeader(), block_io.out->getHeader(), "postgresql replica load from snapshot"); - copyData(input, *block_io.out); + materialized_storage->setNestedStatus(true); - storage_data.second->setNestedStatus(true); - nested_storages[table_name] = nested_storage; - - /// This is needed if this method is called from reloadFromSnapshot() method below. 
- success_tables.insert(table_name); - if (consumer) - consumer->updateNested(table_name, nested_storage); - } - catch (...) - { - tryLogCurrentException(__PRETTY_FUNCTION__); - } - } - - LOG_DEBUG(log, "Table dump end"); - return success_tables; + return nested_storage; } @@ -210,7 +219,7 @@ void PostgreSQLReplicationHandler::consumerFunc() bool schedule_now = consumer->consume(skipped_tables); if (!skipped_tables.empty()) - consumer->updateSkipList(reloadFromSnapshot(skipped_tables)); + reloadFromSnapshot(skipped_tables); if (stop_synchronization) return; @@ -245,7 +254,7 @@ void PostgreSQLReplicationHandler::createPublicationIfNeeded(pqxx::work & tx, bo { if (tables_list.empty()) { - for (const auto & storage_data : storages) + for (const auto & storage_data : materialized_storages) { if (!tables_list.empty()) tables_list += ", "; @@ -394,43 +403,53 @@ PostgreSQLTableStructurePtr PostgreSQLReplicationHandler::fetchTableStructure( } -std::unordered_map PostgreSQLReplicationHandler::reloadFromSnapshot( - const std::vector> & relation_data) +void PostgreSQLReplicationHandler::reloadFromSnapshot(const std::vector> & relation_data) { - std::unordered_map tables_start_lsn; try { - Storages sync_storages; - for (const auto & relation : relation_data) - { - const auto & table_name = relation.second; - auto * storage = storages[table_name]; - sync_storages[table_name] = storage; - auto nested_storage = storage->getNested(); - storage->setNestedStatus(false); - InterpreterDropQuery::executeDropQuery(ASTDropQuery::Kind::Drop, context, context, nested_storage->getStorageID(), true); - } - auto replication_connection = postgres::createReplicationConnection(connection_info); postgres::Transaction tx(replication_connection->getRef()); std::string snapshot_name, start_lsn; createReplicationSlot(tx.getRef(), start_lsn, snapshot_name, true); - /// This snapshot is valid up to the end of the transaction, which exported it. - auto success_tables = loadFromSnapshot(snapshot_name, sync_storages); - for (const auto & relation : relation_data) + for (const auto & [table_id, table_name] : relation_data) { - if (success_tables.find(relation.second) != success_tables.end()) - tables_start_lsn[relation.first] = start_lsn; + auto materialized_storage = DatabaseCatalog::instance().getTable(StorageID(current_database_name, table_name), context); + StoragePtr temp_materialized_storage = materialized_storage->as ()->createTemporary(); + + /// This snapshot is valid up to the end of the transaction, which exported it. + StoragePtr nested_storage = loadFromSnapshot(snapshot_name, table_name, + temp_materialized_storage->as ()); + consumer->updateNested(table_name, nested_storage); + consumer->updateSkipList(table_id, start_lsn); + replaceMaterializedTable(table_name); } } catch (...) 
{ tryLogCurrentException(__PRETTY_FUNCTION__); } +} - return tables_start_lsn; + +void PostgreSQLReplicationHandler::replaceMaterializedTable(const String & table_name) +{ + auto ast_replace = std::make_shared(); + + auto outdated_storage = materialized_storages[table_name]; + auto table_id = outdated_storage->getStorageID(); + + ast_replace->replace_table = true; + + ast_replace->table = table_id.table_name; + ast_replace->database = table_id.database_name; + + ast_replace->as_table = table_id.table_name + TMP_SUFFIX; + ast_replace->as_database = table_id.database_name; + + InterpreterCreateQuery interpreter(ast_replace, context); + interpreter.execute(); } } diff --git a/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.h b/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.h index 43f0067aed7..76aed35ba80 100644 --- a/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.h +++ b/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.h @@ -27,9 +27,10 @@ class PostgreSQLReplicationHandler { public: PostgreSQLReplicationHandler( - const std::string & database_name_, + const String & remote_database_name_, + const String & current_database_name_, const postgres::ConnectionInfo & connection_info_, - const std::string & metadata_path_, + const String & metadata_path_, ContextPtr context_, const size_t max_block_size_, bool allow_minimal_ddl_, @@ -50,7 +51,7 @@ public: NameSet fetchRequiredTables(pqxx::connection & connection_); private: - using Storages = std::unordered_map; + using MaterializedStorages = std::unordered_map; bool isPublicationExist(pqxx::work & tx); @@ -72,17 +73,19 @@ private: void consumerFunc(); - NameSet loadFromSnapshot(std::string & snapshot_name, Storages & sync_storages); + StoragePtr loadFromSnapshot(std::string & snapshot_name, const String & table_name, StorageMaterializePostgreSQL * materialized_storage); - std::unordered_map reloadFromSnapshot(const std::vector> & relation_data); + void reloadFromSnapshot(const std::vector> & relation_data); PostgreSQLTableStructurePtr fetchTableStructure(pqxx::ReplicationTransaction & tx, const std::string & table_name); + void replaceMaterializedTable(const String & table_name); + Poco::Logger * log; ContextPtr context; /// Remote database name. - const String database_name; + const String remote_database_name, current_database_name; /// Path for replication metadata. const String metadata_path; @@ -119,10 +122,7 @@ private: bool new_publication_created = false; /// MaterializePostgreSQL tables. Used for managing all operations with its internal nested tables. - Storages storages; - - /// List of nested tables, which is passed to replication consumer. 
- std::unordered_map nested_storages; + MaterializedStorages materialized_storages; }; } diff --git a/src/Storages/PostgreSQL/StorageMaterializePostgreSQL.cpp b/src/Storages/PostgreSQL/StorageMaterializePostgreSQL.cpp index 72611428326..253a11607b6 100644 --- a/src/Storages/PostgreSQL/StorageMaterializePostgreSQL.cpp +++ b/src/Storages/PostgreSQL/StorageMaterializePostgreSQL.cpp @@ -36,6 +36,7 @@ namespace ErrorCodes } static const auto NESTED_TABLE_SUFFIX = "_nested"; +static const auto TMP_SUFFIX = "_tmp"; StorageMaterializePostgreSQL::StorageMaterializePostgreSQL( @@ -65,6 +66,7 @@ StorageMaterializePostgreSQL::StorageMaterializePostgreSQL( replication_handler = std::make_unique( remote_database_name, + table_id_.database_name, connection_info, metadata_path, getContext(), @@ -85,6 +87,16 @@ StorageMaterializePostgreSQL::StorageMaterializePostgreSQL( } +/// A temporary clone table might be created for current table in order to update its schema and reload +/// all data in the background while current table will still handle read requests. +StoragePtr StorageMaterializePostgreSQL::createTemporary() const +{ + auto table_id = getStorageID(); + auto new_context = Context::createCopy(context); + return StorageMaterializePostgreSQL::create(StorageID(table_id.database_name, table_id.table_name + TMP_SUFFIX), new_context); +} + + StoragePtr StorageMaterializePostgreSQL::getNested() const { return DatabaseCatalog::instance().getTable(nested_table_id, nested_context); @@ -119,6 +131,95 @@ void StorageMaterializePostgreSQL::setStorageMetadata() } +void StorageMaterializePostgreSQL::createNestedIfNeeded(PostgreSQLTableStructurePtr table_structure) +{ + const auto ast_create = getCreateNestedTableQuery(std::move(table_structure)); + + try + { + InterpreterCreateQuery interpreter(ast_create, nested_context); + interpreter.execute(); + } + catch (...) 
+ { + tryLogCurrentException(__PRETTY_FUNCTION__); + } +} + + +std::shared_ptr StorageMaterializePostgreSQL::makeNestedTableContext(ContextPtr from_context) +{ + auto new_context = Context::createCopy(from_context); + new_context->makeQueryContext(); + new_context->addQueryFactoriesInfo(Context::QueryLogFactories::Storage, "ReplacingMergeTree"); + + return new_context; +} + + +void StorageMaterializePostgreSQL::startup() +{ + if (!is_materialize_postgresql_database) + { + replication_handler->addStorage(remote_table_name, this); + replication_handler->startup(); + } +} + + +void StorageMaterializePostgreSQL::shutdown() +{ + if (replication_handler) + replication_handler->shutdown(); +} + + +void StorageMaterializePostgreSQL::dropInnerTableIfAny(bool no_delay, ContextPtr local_context) +{ + if (replication_handler) + replication_handler->shutdownFinal(); + + auto nested_table = getNested(); + if (nested_table && !is_materialize_postgresql_database) + InterpreterDropQuery::executeDropQuery(ASTDropQuery::Kind::Drop, getContext(), local_context, nested_table_id, no_delay); +} + + +NamesAndTypesList StorageMaterializePostgreSQL::getVirtuals() const +{ + return NamesAndTypesList{ + {"_sign", std::make_shared()}, + {"_version", std::make_shared()} + }; +} + + +Pipe StorageMaterializePostgreSQL::read( + const Names & column_names, + const StorageMetadataPtr & metadata_snapshot, + SelectQueryInfo & query_info, + ContextPtr context_, + QueryProcessingStage::Enum processed_stage, + size_t max_block_size, + unsigned num_streams) +{ + if (!nested_loaded) + return Pipe(); + + auto nested_table = getNested(); + + return readFinalFromNestedStorage( + nested_table, + column_names, + metadata_snapshot, + query_info, + context_, + processed_stage, + max_block_size, + num_streams); +} + + std::shared_ptr StorageMaterializePostgreSQL::getMaterializedColumnsDeclaration( const String name, const String type, UInt64 default_value) { @@ -274,95 +375,6 @@ ASTPtr StorageMaterializePostgreSQL::getCreateNestedTableQuery(PostgreSQLTableSt } -void StorageMaterializePostgreSQL::createNestedIfNeeded(PostgreSQLTableStructurePtr table_structure) -{ - const auto ast_create = getCreateNestedTableQuery(std::move(table_structure)); - - try - { - InterpreterCreateQuery interpreter(ast_create, nested_context); - interpreter.execute(); - } - catch (...) 
- { - tryLogCurrentException(__PRETTY_FUNCTION__); - } -} - - -std::shared_ptr StorageMaterializePostgreSQL::makeNestedTableContext(ContextPtr from_context) -{ - auto new_context = Context::createCopy(from_context); - new_context->makeQueryContext(); - new_context->addQueryFactoriesInfo(Context::QueryLogFactories::Storage, "ReplacingMergeTree"); - - return new_context; -} - - -void StorageMaterializePostgreSQL::startup() -{ - if (!is_materialize_postgresql_database) - { - replication_handler->addStorage(remote_table_name, this); - replication_handler->startup(); - } -} - - -void StorageMaterializePostgreSQL::shutdown() -{ - if (replication_handler) - replication_handler->shutdown(); -} - - -void StorageMaterializePostgreSQL::dropInnerTableIfAny(bool no_delay, ContextPtr local_context) -{ - if (replication_handler) - replication_handler->shutdownFinal(); - - auto nested_table = getNested(); - if (nested_table && !is_materialize_postgresql_database) - InterpreterDropQuery::executeDropQuery(ASTDropQuery::Kind::Drop, getContext(), local_context, nested_table_id, no_delay); -} - - -NamesAndTypesList StorageMaterializePostgreSQL::getVirtuals() const -{ - return NamesAndTypesList{ - {"_sign", std::make_shared()}, - {"_version", std::make_shared()} - }; -} - - -Pipe StorageMaterializePostgreSQL::read( - const Names & column_names, - const StorageMetadataPtr & metadata_snapshot, - SelectQueryInfo & query_info, - ContextPtr context_, - QueryProcessingStage::Enum processed_stage, - size_t max_block_size, - unsigned num_streams) -{ - if (!nested_loaded) - return Pipe(); - - auto nested_table = getNested(); - - return readFinalFromNestedStorage( - nested_table, - column_names, - metadata_snapshot, - query_info, - context_, - processed_stage, - max_block_size, - num_streams); -} - - void registerStorageMaterializePostgreSQL(StorageFactory & factory) { auto creator_fn = [](const StorageFactory::Arguments & args) diff --git a/src/Storages/PostgreSQL/StorageMaterializePostgreSQL.h b/src/Storages/PostgreSQL/StorageMaterializePostgreSQL.h index 079061a69d4..c9523f2fea6 100644 --- a/src/Storages/PostgreSQL/StorageMaterializePostgreSQL.h +++ b/src/Storages/PostgreSQL/StorageMaterializePostgreSQL.h @@ -18,6 +18,7 @@ #include #include #include +#include namespace DB @@ -53,6 +54,8 @@ public: void createNestedIfNeeded(PostgreSQLTableStructurePtr table_structure); + StoragePtr createTemporary() const; + StoragePtr getNested() const; StoragePtr tryGetNested() const; From 14d355a6ac8c9f582c91626832522784609603b9 Mon Sep 17 00:00:00 2001 From: kssenii Date: Sun, 2 May 2021 11:50:29 +0000 Subject: [PATCH 090/931] Fix background update --- .../DatabaseMaterializePostgreSQL.cpp | 5 - .../DatabaseMaterializePostgreSQL.h | 2 - .../PostgreSQLReplicationHandler.cpp | 93 +++++++++++++------ .../PostgreSQL/PostgreSQLReplicationHandler.h | 2 - .../StorageMaterializePostgreSQL.cpp | 21 ++++- .../PostgreSQL/StorageMaterializePostgreSQL.h | 39 ++++++-- .../test.py | 52 +++++++++++ 7 files changed, 166 insertions(+), 48 deletions(-) diff --git a/src/Databases/PostgreSQL/DatabaseMaterializePostgreSQL.cpp b/src/Databases/PostgreSQL/DatabaseMaterializePostgreSQL.cpp index 7d493d3dcf3..c3672fc2b73 100644 --- a/src/Databases/PostgreSQL/DatabaseMaterializePostgreSQL.cpp +++ b/src/Databases/PostgreSQL/DatabaseMaterializePostgreSQL.cpp @@ -189,11 +189,6 @@ DatabaseTablesIteratorPtr DatabaseMaterializePostgreSQL::getTablesIterator( } -void DatabaseMaterializePostgreSQL::renameTable(ContextPtr /* context_ */, const String & /* name */, 
IDatabase & /* to_database */, const String & /* to_name */, bool /* exchange */, bool /* dictionary */) -{ - throw Exception("MaterializePostgreSQL database does not support rename table.", ErrorCodes::NOT_IMPLEMENTED); -} - } #endif diff --git a/src/Databases/PostgreSQL/DatabaseMaterializePostgreSQL.h b/src/Databases/PostgreSQL/DatabaseMaterializePostgreSQL.h index bdfe54ace13..17288be8fb2 100644 --- a/src/Databases/PostgreSQL/DatabaseMaterializePostgreSQL.h +++ b/src/Databases/PostgreSQL/DatabaseMaterializePostgreSQL.h @@ -51,8 +51,6 @@ public: void createTable(ContextPtr context, const String & name, const StoragePtr & table, const ASTPtr & query) override; - void renameTable(ContextPtr context_, const String & name, IDatabase & to_database, const String & to_name, bool exchange, bool dictionary) override; - void drop(ContextPtr local_context) override; void shutdown() override; diff --git a/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp b/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp index 6607bb6d3f7..9cd859b9368 100644 --- a/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp +++ b/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp @@ -6,16 +6,24 @@ #include #include #include +#include #include #include #include +#include +#include +#include +#include +#include +#include +#include +#include namespace DB { static const auto reschedule_ms = 500; -static const auto TMP_SUFFIX = "_tmp"; PostgreSQLReplicationHandler::PostgreSQLReplicationHandler( @@ -174,7 +182,8 @@ void PostgreSQLReplicationHandler::startSynchronization() } -StoragePtr PostgreSQLReplicationHandler::loadFromSnapshot(std::string & snapshot_name, const String & table_name, StorageMaterializePostgreSQL * materialized_storage) +StoragePtr PostgreSQLReplicationHandler::loadFromSnapshot(std::string & snapshot_name, const String & table_name, + StorageMaterializePostgreSQL * materialized_storage) { auto tx = std::make_shared(connection->getRef()); @@ -185,17 +194,14 @@ StoragePtr PostgreSQLReplicationHandler::loadFromSnapshot(std::string & snapshot /// Already connected to needed database, no need to add it to query. query_str = fmt::format("SELECT * FROM {}", table_name); - /// If table schema has changed, the table stops consuming changed from replication stream. - /// If `allow_minimal_ddl` is true, create a new table in the background, load new table schema - /// and all data from scratch. Then execute REPLACE query with Nested table. - /// This is only allowed for MaterializePostgreSQL database engine. 
materialized_storage->createNestedIfNeeded(fetchTableStructure(*tx, table_name)); auto nested_storage = materialized_storage->getNested(); - auto insert_context = materialized_storage->getNestedTableContext(); auto insert = std::make_shared(); insert->table_id = nested_storage->getStorageID(); + auto insert_context = materialized_storage->getNestedTableContext(); + InterpreterInsertQuery interpreter(insert, insert_context); auto block_io = interpreter.execute(); @@ -208,6 +214,10 @@ StoragePtr PostgreSQLReplicationHandler::loadFromSnapshot(std::string & snapshot materialized_storage->setNestedStatus(true); + nested_storage = materialized_storage->getNested(); + auto nested_table_id = nested_storage->getStorageID(); + LOG_TRACE(log, "Loaded table {}.{} (uuid: {})", nested_table_id.database_name, nested_table_id.table_name, toString(nested_table_id.uuid)); + return nested_storage; } @@ -405,6 +415,10 @@ PostgreSQLTableStructurePtr PostgreSQLReplicationHandler::fetchTableStructure( void PostgreSQLReplicationHandler::reloadFromSnapshot(const std::vector> & relation_data) { + /// If table schema has changed, the table stops consuming changes from replication stream. + /// If `allow_automatic_update` is true, create a new table in the background, load new table schema + /// and all data from scratch. Then execute REPLACE query. + /// This is only allowed for MaterializePostgreSQL database engine. try { auto replication_connection = postgres::createReplicationConnection(connection_info); @@ -415,15 +429,53 @@ void PostgreSQLReplicationHandler::reloadFromSnapshot(const std::vectorlockExclusively(String(), context->getSettingsRef().lock_acquire_timeout); + StoragePtr temp_materialized_storage = materialized_storage->as ()->createTemporary(); + auto from_table_id = materialized_storage->as ()->getNestedStorageID(); + auto to_table_id = temp_materialized_storage->as ()->getNestedStorageID(); + + LOG_TRACE(log, "Starting background update of table {}.{}, uuid {} with table {}.{} uuid {}", + from_table_id.database_name, from_table_id.table_name, toString(from_table_id.uuid), + to_table_id.database_name, to_table_id.table_name, toString(to_table_id.uuid)); + /// This snapshot is valid up to the end of the transaction, which exported it. 
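+ /// For reference, a sketch of the underlying PostgreSQL mechanism (identifiers and the snapshot id are illustrative): + /// CREATE_REPLICATION_SLOT ch_slot LOGICAL pgoutput EXPORT_SNAPSHOT; + /// -- in another transaction (REPEATABLE READ), while the exporting transaction is still open: + /// SET TRANSACTION SNAPSHOT '00000003-0000001B-1'; + /// SELECT * FROM some_table; + /// Rows read under the exported snapshot are exactly those committed before start_lsn, so streaming + /// changes from start_lsn afterwards neither duplicates nor loses data.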
StoragePtr nested_storage = loadFromSnapshot(snapshot_name, table_name, temp_materialized_storage->as ()); - consumer->updateNested(table_name, nested_storage); - consumer->updateSkipList(table_id, start_lsn); - replaceMaterializedTable(table_name); + auto nested_context = materialized_storage->as ()->getNestedTableContext(); + + to_table_id = nested_storage->getStorageID(); + + auto ast_rename = std::make_shared(); + ASTRenameQuery::Element elem + { + ASTRenameQuery::Table{from_table_id.database_name, from_table_id.table_name}, + ASTRenameQuery::Table{to_table_id.database_name, to_table_id.table_name} + }; + ast_rename->elements.push_back(std::move(elem)); + ast_rename->exchange = true; + + try + { + InterpreterRenameQuery(ast_rename, nested_context).execute(); + + nested_storage = materialized_storage->as ()->getNested(); + materialized_storage->setInMemoryMetadata(nested_storage->getInMemoryMetadata()); + + auto nested_table_id = nested_storage->getStorageID(); + LOG_TRACE(log, "Updated table {}.{} ({})", nested_table_id.database_name, nested_table_id.table_name, toString(nested_table_id.uuid)); + + consumer->updateNested(table_name, nested_storage); + consumer->updateSkipList(table_id, start_lsn); + + InterpreterDropQuery::executeDropQuery(ASTDropQuery::Kind::Drop, nested_context, nested_context, to_table_id, true); + } + catch (...) + { + tryLogCurrentException(__PRETTY_FUNCTION__); + } } } catch (...) @@ -433,25 +485,6 @@ void PostgreSQLReplicationHandler::reloadFromSnapshot(const std::vector(); - - auto outdated_storage = materialized_storages[table_name]; - auto table_id = outdated_storage->getStorageID(); - - ast_replace->replace_table = true; - - ast_replace->table = table_id.table_name; - ast_replace->database = table_id.database_name; - - ast_replace->as_table = table_id.table_name + TMP_SUFFIX; - ast_replace->as_database = table_id.database_name; - - InterpreterCreateQuery interpreter(ast_replace, context); - interpreter.execute(); -} - } #endif diff --git a/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.h b/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.h index 76aed35ba80..39167f00579 100644 --- a/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.h +++ b/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.h @@ -79,8 +79,6 @@ private: PostgreSQLTableStructurePtr fetchTableStructure(pqxx::ReplicationTransaction & tx, const std::string & table_name); - void replaceMaterializedTable(const String & table_name); - Poco::Logger * log; ContextPtr context; diff --git a/src/Storages/PostgreSQL/StorageMaterializePostgreSQL.cpp b/src/Storages/PostgreSQL/StorageMaterializePostgreSQL.cpp index 253a11607b6..b82474cf3be 100644 --- a/src/Storages/PostgreSQL/StorageMaterializePostgreSQL.cpp +++ b/src/Storages/PostgreSQL/StorageMaterializePostgreSQL.cpp @@ -53,7 +53,7 @@ StorageMaterializePostgreSQL::StorageMaterializePostgreSQL( , replication_settings(std::move(replication_settings_)) , is_materialize_postgresql_database( DatabaseCatalog::instance().getDatabase(getStorageID().database_name)->getEngineName() == "MaterializePostgreSQL") - , nested_table_id(StorageID(table_id_.database_name, getNestedTableName())) + , nested_table_id(StorageID(table_id_.database_name, getNestedTableName(), table_id_.uuid)) , nested_context(makeNestedTableContext(context_->getGlobalContext())) { if (table_id_.uuid == UUIDHelpers::Nil) @@ -87,13 +87,27 @@ StorageMaterializePostgreSQL::StorageMaterializePostgreSQL( } +StorageMaterializePostgreSQL::StorageMaterializePostgreSQL( + StoragePtr 
nested_storage_, ContextPtr context_) + : IStorage(nested_storage_->getStorageID()) + , WithContext(context_->getGlobalContext()) + , is_materialize_postgresql_database(true) + , nested_table_id(nested_storage_->getStorageID()) + , nested_context(makeNestedTableContext(context_->getGlobalContext())) +{ + setInMemoryMetadata(nested_storage_->getInMemoryMetadata()); +} + + /// A temporary clone table might be created for current table in order to update its schema and reload /// all data in the background while current table will still handle read requests. StoragePtr StorageMaterializePostgreSQL::createTemporary() const { auto table_id = getStorageID(); auto new_context = Context::createCopy(context); - return StorageMaterializePostgreSQL::create(StorageID(table_id.database_name, table_id.table_name + TMP_SUFFIX), new_context); + const String temp_storage_name = table_id.table_name + TMP_SUFFIX; + auto temp_storage = StorageMaterializePostgreSQL::create(StorageID(table_id.database_name, temp_storage_name, UUIDHelpers::generateV4()), new_context); + return std::move(temp_storage); } @@ -109,7 +123,7 @@ StoragePtr StorageMaterializePostgreSQL::tryGetNested() const } -std::string StorageMaterializePostgreSQL::getNestedTableName() const +String StorageMaterializePostgreSQL::getNestedTableName() const { auto table_id = getStorageID(); @@ -289,6 +303,7 @@ ASTPtr StorageMaterializePostgreSQL::getCreateNestedTableQuery(PostgreSQLTableSt auto table_id = getStorageID(); create_table_query->table = getNestedTableName(); create_table_query->database = table_id.database_name; + create_table_query->uuid = table_id.uuid; auto columns_declare_list = std::make_shared(); auto columns_expression_list = std::make_shared(); diff --git a/src/Storages/PostgreSQL/StorageMaterializePostgreSQL.h b/src/Storages/PostgreSQL/StorageMaterializePostgreSQL.h index c9523f2fea6..6bc18d681d4 100644 --- a/src/Storages/PostgreSQL/StorageMaterializePostgreSQL.h +++ b/src/Storages/PostgreSQL/StorageMaterializePostgreSQL.h @@ -24,14 +24,39 @@ namespace DB { +/** Case of MaterializePostgreSQL database engine. + * There is a table with engine MaterializePostgreSQL. It has a nested table with engine ReplacingMergeTree. + * Both tables shared table_id.table_name and table_id.database_name (probably they automatically have the same uuid?). + * + * MaterializePostgreSQL table does not actually exists only in memory and acts as a wrapper for nested table. + * + * Also it has the same InMemoryMetadata as its nested table, so if metadata of nested table changes - main table also has + * to update its metadata, because all read requests are passed to MaterializePostgreSQL table and then it redirects read + * into nested table. + * + * When there is a need to update table structure, there will be created a new MaterializePostgreSQL table with its own nested table, + * it will have upadated table schema and all data will be loaded from scratch in the background, while previos table with outadted table + * structure will still serve read requests. When data is loaded, a replace query will be done, to swap tables atomically. + * + * In order to update MaterializePostgreSQL table: + * 1. need to update InMemoryMetadata of MaterializePostgreSQL table; + * 2. need to have a new updated ReplacingMergeTree table on disk. + * + * At the point before replace query there are: + * 1. In-memory MaterializePostgreSQL table `databae_name`.`table_name` -- outdated + * 2. On-disk ReplacingMergeTree table with `databae_name`.`table_name` -- outdated + * 3. 
In-memory MaterializePostgreSQL table `databae_name`.`table_name_tmp` -- updated + * 4. On-disk ReplacingMergeTree table with `databae_name`.`table_name_tmp` -- updated +**/ + class StorageMaterializePostgreSQL final : public ext::shared_ptr_helper, public IStorage, WithContext { friend struct ext::shared_ptr_helper; public: - StorageMaterializePostgreSQL( - const StorageID & table_id_, - ContextPtr context_); + StorageMaterializePostgreSQL(const StorageID & table_id_, ContextPtr context_); + + StorageMaterializePostgreSQL(StoragePtr nested_table, ContextPtr context); String getName() const override { return "MaterializePostgreSQL"; } @@ -70,6 +95,10 @@ public: void renameNested(); + StorageID getNestedStorageID() { return nested_table_id; } + + static std::shared_ptr makeNestedTableContext(ContextPtr from_context); + protected: StorageMaterializePostgreSQL( const StorageID & table_id_, @@ -88,9 +117,7 @@ private: ASTPtr getCreateNestedTableQuery(PostgreSQLTableStructurePtr table_structure); - std::string getNestedTableName() const; - - static std::shared_ptr makeNestedTableContext(ContextPtr from_context); + String getNestedTableName() const; std::string remote_table_name; std::unique_ptr replication_settings; diff --git a/tests/integration/test_postgresql_replica_database_engine/test.py b/tests/integration/test_postgresql_replica_database_engine/test.py index 503e12c890f..91f4c963d30 100644 --- a/tests/integration/test_postgresql_replica_database_engine/test.py +++ b/tests/integration/test_postgresql_replica_database_engine/test.py @@ -372,6 +372,58 @@ def test_replica_identity_index(started_cluster): check_tables_are_synchronized('postgresql_replica', order_by='key1'); +@pytest.mark.timeout(320) +def test_table_schema_changes(started_cluster): + instance.query("DROP DATABASE IF EXISTS test_database") + conn = get_postgres_conn(True) + cursor = conn.cursor() + NUM_TABLES = 5 + + for i in range(NUM_TABLES): + create_postgres_table(cursor, 'postgresql_replica_{}'.format(i), template=postgres_table_template_2); + instance.query("INSERT INTO postgres_database.postgresql_replica_{} SELECT number, {}, {}, {} from numbers(25)".format(i, i, i, i)) + + instance.query( + """CREATE DATABASE test_database + ENGINE = MaterializePostgreSQL('postgres1:5432', 'postgres_database', 'postgres', 'mysecretpassword') + SETTINGS postgresql_replica_allow_minimal_ddl = 1; + """) + + for i in range(NUM_TABLES): + instance.query("INSERT INTO postgres_database.postgresql_replica_{} SELECT 25 + number, {}, {}, {} from numbers(25)".format(i, i, i, i)) + + for i in range(NUM_TABLES): + check_tables_are_synchronized('postgresql_replica_{}'.format(i)); + + expected = instance.query("SELECT key, value1, value3 FROM test_database.postgresql_replica_3 ORDER BY key"); + + altered_table = random.randint(0, 4) + cursor.execute("ALTER TABLE postgresql_replica_{} DROP COLUMN value2".format(altered_table)) + + for i in range(NUM_TABLES): + cursor.execute("INSERT INTO postgresql_replica_{} VALUES (50, {}, {})".format(i, i, i)) + cursor.execute("UPDATE postgresql_replica_{} SET value3 = 12 WHERE key%2=0".format(i)) + + check_tables_are_synchronized('postgresql_replica_{}'.format(altered_table)); + for i in range(NUM_TABLES): + check_tables_are_synchronized('postgresql_replica_{}'.format(i)); + + for i in range(NUM_TABLES): + if i != altered_table: + instance.query("INSERT INTO postgres_database.postgresql_replica_{} SELECT 51 + number, {}, {}, {} from numbers(49)".format(i, i, i, i)) + else: + instance.query("INSERT INTO 
postgres_database.postgresql_replica_{} SELECT 51 + number, {}, {} from numbers(49)".format(i, i, i)) + + check_tables_are_synchronized('postgresql_replica_{}'.format(altered_table)); + for i in range(NUM_TABLES): + check_tables_are_synchronized('postgresql_replica_{}'.format(i)); + + for i in range(NUM_TABLES): + cursor.execute('drop table postgresql_replica_{};'.format(i)) + + instance.query("DROP DATABASE test_database") + + if __name__ == '__main__': cluster.start() input("Cluster created, press any key to destroy...") From 1d243e11e5bdb443551437aebfb52399b7fdfa2f Mon Sep 17 00:00:00 2001 From: kssenii Date: Sun, 2 May 2021 11:53:20 +0000 Subject: [PATCH 091/931] Update read for materialized --- src/Storages/ReadFinalForExternalReplicaStorage.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Storages/ReadFinalForExternalReplicaStorage.cpp b/src/Storages/ReadFinalForExternalReplicaStorage.cpp index 985b9104085..fb96bb01936 100644 --- a/src/Storages/ReadFinalForExternalReplicaStorage.cpp +++ b/src/Storages/ReadFinalForExternalReplicaStorage.cpp @@ -71,7 +71,7 @@ Pipe readFinalFromNestedStorage( { Block pipe_header = pipe.getHeader(); auto syntax = TreeRewriter(context).analyze(expressions, pipe_header.getNamesAndTypesList()); - ExpressionActionsPtr expression_actions = ExpressionAnalyzer(expressions, syntax, context).getActions(true); + ExpressionActionsPtr expression_actions = ExpressionAnalyzer(expressions, syntax, context).getActions(true /* add_aliases */, false /* project_result */); pipe.addSimpleTransform([&](const Block & header) { From 01adfb7b3d113de0a893a131e5f66de43f0f60a3 Mon Sep 17 00:00:00 2001 From: kssenii Date: Sun, 2 May 2021 13:49:28 +0000 Subject: [PATCH 092/931] Fix single storage case --- src/Storages/PostgreSQL/StorageMaterializePostgreSQL.cpp | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/src/Storages/PostgreSQL/StorageMaterializePostgreSQL.cpp b/src/Storages/PostgreSQL/StorageMaterializePostgreSQL.cpp index b82474cf3be..a0dc6922d1f 100644 --- a/src/Storages/PostgreSQL/StorageMaterializePostgreSQL.cpp +++ b/src/Storages/PostgreSQL/StorageMaterializePostgreSQL.cpp @@ -53,7 +53,7 @@ StorageMaterializePostgreSQL::StorageMaterializePostgreSQL( , replication_settings(std::move(replication_settings_)) , is_materialize_postgresql_database( DatabaseCatalog::instance().getDatabase(getStorageID().database_name)->getEngineName() == "MaterializePostgreSQL") - , nested_table_id(StorageID(table_id_.database_name, getNestedTableName(), table_id_.uuid)) + , nested_table_id(StorageID(table_id_.database_name, getNestedTableName())) , nested_context(makeNestedTableContext(context_->getGlobalContext())) { if (table_id_.uuid == UUIDHelpers::Nil) @@ -303,7 +303,8 @@ ASTPtr StorageMaterializePostgreSQL::getCreateNestedTableQuery(PostgreSQLTableSt auto table_id = getStorageID(); create_table_query->table = getNestedTableName(); create_table_query->database = table_id.database_name; - create_table_query->uuid = table_id.uuid; + if (is_materialize_postgresql_database) + create_table_query->uuid = table_id.uuid; auto columns_declare_list = std::make_shared(); auto columns_expression_list = std::make_shared(); From 3e3396bd9c9e93092d65a90bf47439e63a95735d Mon Sep 17 00:00:00 2001 From: kssenii Date: Mon, 3 May 2021 09:52:13 +0000 Subject: [PATCH 093/931] Refactor code, add comments --- .../DatabaseMaterializePostgreSQL.cpp | 61 ++++++---- .../DatabaseMaterializePostgreSQL.h | 2 + .../fetchPostgreSQLTableStructure.h | 6 +- 
src/Storages/IStorage.cpp | 5 +- .../MaterializePostgreSQLConsumer.cpp | 17 ++- .../PostgreSQLReplicationHandler.cpp | 60 +++++----- .../PostgreSQL/PostgreSQLReplicationHandler.h | 4 +- .../StorageMaterializePostgreSQL.cpp | 77 ++++++------- .../PostgreSQL/StorageMaterializePostgreSQL.h | 87 ++++++++++----- .../test.py | 105 +++++++++--------- 10 files changed, 237 insertions(+), 187 deletions(-) diff --git a/src/Databases/PostgreSQL/DatabaseMaterializePostgreSQL.cpp b/src/Databases/PostgreSQL/DatabaseMaterializePostgreSQL.cpp index c3672fc2b73..b472e151092 100644 --- a/src/Databases/PostgreSQL/DatabaseMaterializePostgreSQL.cpp +++ b/src/Databases/PostgreSQL/DatabaseMaterializePostgreSQL.cpp @@ -69,13 +69,25 @@ void DatabaseMaterializePostgreSQL::startSynchronization() for (const auto & table_name : tables_to_replicate) { - auto storage = tryGetTable(table_name, getContext()); + /// Check nested ReplacingMergeTree table. + auto storage = DatabaseAtomic::tryGetTable(table_name, getContext()); if (!storage) + { + /// Nested table does not exist and will be created by replication thread. storage = StorageMaterializePostgreSQL::create(StorageID(database_name, table_name), getContext()); + } + else + { + /// Nested table was already created and synchronized. + storage = StorageMaterializePostgreSQL::create(storage, getContext()); + } - replication_handler->addStorage(table_name, storage->as()); + /// Cache MaterializePostgreSQL wrapper over nested table. materialized_tables[table_name] = storage; + + /// Let replication thread know which tables it needs to keep in sync.
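+ /// The handler dereferences these raw pointers only while startSynchronization() is running; once the + /// consumer is set up it clears its own map (see materialized_storages.clear() above), and the wrappers + /// cached here remain the owners.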
+ if (table != materialized_tables.end() && table->second->as ()->hasNested()) + { return table->second; + } return StoragePtr{}; } -void DatabaseMaterializePostgreSQL::createTable(ContextPtr local_context, const String & name, const StoragePtr & table, const ASTPtr & query) +void DatabaseMaterializePostgreSQL::createTable(ContextPtr local_context, const String & table_name, const StoragePtr & table, const ASTPtr & query) { if (local_context->hasQueryContext()) { auto storage_set = local_context->getQueryContext()->getQueryFactoriesInfo().storages; if (storage_set.find("ReplacingMergeTree") != storage_set.end()) { - DatabaseAtomic::createTable(local_context, name, table, query); + DatabaseAtomic::createTable(local_context, table_name, table, query); return; } } @@ -155,6 +168,15 @@ void DatabaseMaterializePostgreSQL::stopReplication() { if (replication_handler) replication_handler->shutdown(); + + /// Clear wrappers over nested, all access is not done to nested tables directly. + materialized_tables.clear(); +} + + +void DatabaseMaterializePostgreSQL::dropTable(ContextPtr local_context, const String & table_name, bool no_delay) +{ + DatabaseAtomic::dropTable(StorageMaterializePostgreSQL::makeNestedTableContext(local_context), table_name, no_delay); } @@ -169,23 +191,14 @@ void DatabaseMaterializePostgreSQL::drop(ContextPtr local_context) if (metadata.exists()) metadata.remove(false); - DatabaseAtomic::drop(local_context); + DatabaseAtomic::drop(StorageMaterializePostgreSQL::makeNestedTableContext(local_context)); } DatabaseTablesIteratorPtr DatabaseMaterializePostgreSQL::getTablesIterator( - ContextPtr /* context */, const DatabaseOnDisk::FilterByNameFunction & /* filter_by_table_name */) + ContextPtr local_context, const DatabaseOnDisk::FilterByNameFunction & filter_by_table_name) { - Tables nested_tables; - for (const auto & [table_name, storage] : materialized_tables) - { - auto nested_storage = storage->as()->tryGetNested(); - - if (nested_storage) - nested_tables[table_name] = nested_storage; - } - - return std::make_unique(nested_tables, database_name); + return DatabaseAtomic::getTablesIterator(StorageMaterializePostgreSQL::makeNestedTableContext(local_context), filter_by_table_name); } diff --git a/src/Databases/PostgreSQL/DatabaseMaterializePostgreSQL.h b/src/Databases/PostgreSQL/DatabaseMaterializePostgreSQL.h index 17288be8fb2..931ef6836d5 100644 --- a/src/Databases/PostgreSQL/DatabaseMaterializePostgreSQL.h +++ b/src/Databases/PostgreSQL/DatabaseMaterializePostgreSQL.h @@ -51,6 +51,8 @@ public: void createTable(ContextPtr context, const String & name, const StoragePtr & table, const ASTPtr & query) override; + void dropTable(ContextPtr context_, const String & name, bool no_delay) override; + void drop(ContextPtr local_context) override; void shutdown() override; diff --git a/src/Databases/PostgreSQL/fetchPostgreSQLTableStructure.h b/src/Databases/PostgreSQL/fetchPostgreSQLTableStructure.h index 2853e0a8ea4..97066f575d8 100644 --- a/src/Databases/PostgreSQL/fetchPostgreSQLTableStructure.h +++ b/src/Databases/PostgreSQL/fetchPostgreSQLTableStructure.h @@ -14,9 +14,9 @@ namespace DB struct PostgreSQLTableStructure { - std::shared_ptr columns; - std::shared_ptr primary_key_columns; - std::shared_ptr replica_identity_columns; + std::shared_ptr columns = nullptr; + std::shared_ptr primary_key_columns = nullptr; + std::shared_ptr replica_identity_columns = nullptr; }; using PostgreSQLTableStructurePtr = std::unique_ptr; diff --git a/src/Storages/IStorage.cpp 
b/src/Storages/IStorage.cpp index f7fb359432e..2b8ac3a28f0 100644 --- a/src/Storages/IStorage.cpp +++ b/src/Storages/IStorage.cpp @@ -51,7 +51,10 @@ TableLockHolder IStorage::lockForShare(const String & query_id, const std::chron TableLockHolder result = tryLockTimed(drop_lock, RWLockImpl::Read, query_id, acquire_timeout); if (is_dropped) - throw Exception("Table is dropped", ErrorCodes::TABLE_IS_DROPPED); + { + auto table_id = getStorageID(); + throw Exception(ErrorCodes::TABLE_IS_DROPPED, "Table {}.{} is dropped", table_id.database_name, table_id.table_name); + } return result; } diff --git a/src/Storages/PostgreSQL/MaterializePostgreSQLConsumer.cpp b/src/Storages/PostgreSQL/MaterializePostgreSQLConsumer.cpp index bd4e5c0cbe5..7ab6a6f126d 100644 --- a/src/Storages/PostgreSQL/MaterializePostgreSQLConsumer.cpp +++ b/src/Storages/PostgreSQL/MaterializePostgreSQLConsumer.cpp @@ -308,23 +308,20 @@ void MaterializePostgreSQLConsumer::processReplicationMessage(const char * repli bool read_next = true; switch (identifier) { - case 'K': - { - /// Only if changed column(s) are part of replica identity index (for now it can be only - /// be primary key - default values for replica identity index). In this case, first comes a tuple - /// with old replica identity indexes and all other values will come as nulls. Then comes a full new row. - readTupleData(buffer->second, replication_message, pos, size, PostgreSQLQuery::UPDATE, true); - break; - } + /// Only if changed column(s) are part of replica identity index (or primary keys if they are used instead). + /// In this case, first comes a tuple with old replica identity indexes and all other values will come as + /// nulls. Then comes a full new row. + case 'K': [[fallthrough]]; + /// Old row. Only if replica identity is set to full. Does not really make sense to use it as + /// it is much more efficient to use replica identity index, but support all possible cases. case 'O': { - /// Old row. Only if replica identity is set to full. (For the case when a table does not have any - /// primary key, for now not supported, requires to find suitable order by key(s) for nested table.) readTupleData(buffer->second, replication_message, pos, size, PostgreSQLQuery::UPDATE, true); break; } case 'N': { + /// New row.
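+ /// For reference: a pgoutput Update message carries the relation id, an optional old tuple tagged + /// 'K' (replica identity key) or 'O' (full old row), and then the new tuple tagged 'N'; 'K'/'O' + /// therefore leave read_next set and loop once more, while 'N' completes the message.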
readTupleData(buffer->second, replication_message, pos, size, PostgreSQLQuery::UPDATE); read_next = false; break; diff --git a/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp b/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp index 9cd859b9368..9ad43ddcc94 100644 --- a/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp +++ b/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp @@ -11,14 +11,6 @@ #include #include -#include -#include -#include -#include -#include -#include -#include -#include namespace DB { @@ -34,7 +26,7 @@ PostgreSQLReplicationHandler::PostgreSQLReplicationHandler( ContextPtr context_, const size_t max_block_size_, bool allow_minimal_ddl_, - bool is_postgresql_replica_database_engine_, + bool is_materialize_postgresql_database_, const String tables_list_) : log(&Poco::Logger::get("PostgreSQLReplicationHandler")) , context(context_) @@ -44,7 +36,7 @@ PostgreSQLReplicationHandler::PostgreSQLReplicationHandler( , connection_info(connection_info_) , max_block_size(max_block_size_) , allow_minimal_ddl(allow_minimal_ddl_) - , is_postgresql_replica_database_engine(is_postgresql_replica_database_engine_) + , is_materialize_postgresql_database(is_materialize_postgresql_database_) , tables_list(tables_list_) , connection(std::make_shared(connection_info_)) { @@ -128,6 +120,19 @@ void PostgreSQLReplicationHandler::startSynchronization() } }; + /// TODO: think for more cases + bool force_reload = false; + if (is_materialize_postgresql_database) + { + force_reload = !Poco::File(metadata_path).exists(); + } + else + { + assert(materialized_storages.size() == 1); + auto materialized_storage = materialized_storages.begin()->second; + force_reload = !materialized_storage->tryGetNested(); + } + /// There is one replication slot for each replication handler. In case of MaterializePostgreSQL database engine, /// there is one replication slot per database. Its lifetime must be equal to the lifetime of replication handler. /// Recreation of a replication slot imposes reloading of all tables. @@ -135,7 +140,7 @@ void PostgreSQLReplicationHandler::startSynchronization() { initial_sync(); } - else if (!Poco::File(metadata_path).exists() || new_publication_created) + else if (new_publication_created || force_reload) { /// There are the following cases, which mean that something non-intentioanal happened. /// 1. If replication slot exists and metadata file does not exist, it is not ok. @@ -145,16 +150,15 @@ void PostgreSQLReplicationHandler::startSynchronization() } else { - /// Synchronization and initial load already took place.c + /// Synchronization and initial load already took place. LOG_TRACE(log, "Loading {} tables...", materialized_storages.size()); for (const auto & [table_name, storage] : materialized_storages) { auto materialized_storage = storage->as (); try { - nested_storages[table_name] = materialized_storage->getNested(); - materialized_storage->setStorageMetadata(); - materialized_storage->setNestedStatus(true); + /// Try load nested table, set materialized table metadata. 
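+ /// prepare() (defined in StorageMaterializePostgreSQL in this patch) copies the nested table's + /// in-memory metadata into the wrapper and raises its has_nested flag, after which reads are allowed.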
+ nested_storages[table_name] = materialized_storage->prepare(); } catch (Exception & e) { @@ -212,9 +216,7 @@ StoragePtr PostgreSQLReplicationHandler::loadFromSnapshot(std::string & snapshot assertBlocksHaveEqualStructure(input.getHeader(), block_io.out->getHeader(), "postgresql replica load from snapshot"); copyData(input, *block_io.out); - materialized_storage->setNestedStatus(true); - - nested_storage = materialized_storage->getNested(); + nested_storage = materialized_storage->prepare(); auto nested_table_id = nested_storage->getStorageID(); LOG_TRACE(log, "Loaded table {}.{} (uuid: {})", nested_table_id.database_name, nested_table_id.table_name, toString(nested_table_id.uuid)); @@ -405,7 +407,7 @@ NameSet PostgreSQLReplicationHandler::fetchTablesFromPublication(pqxx::work & tx PostgreSQLTableStructurePtr PostgreSQLReplicationHandler::fetchTableStructure( pqxx::ReplicationTransaction & tx, const std::string & table_name) { - if (!is_postgresql_replica_database_engine) + if (!is_materialize_postgresql_database) return nullptr; auto use_nulls = context->getSettingsRef().external_databases_use_nulls; @@ -429,13 +431,15 @@ void PostgreSQLReplicationHandler::reloadFromSnapshot(const std::vectoras (); + auto table_lock = materialized_storage->lockExclusively(String(), context->getSettingsRef().lock_acquire_timeout); + auto temp_materialized_storage = materialized_storage->createTemporary()->as (); - StoragePtr temp_materialized_storage = materialized_storage->as ()->createTemporary(); - - auto from_table_id = materialized_storage->as ()->getNestedStorageID(); - auto to_table_id = temp_materialized_storage->as ()->getNestedStorageID(); + auto from_table_id = materialized_storage->getNestedStorageID(); + auto to_table_id = temp_materialized_storage->getNestedStorageID(); LOG_TRACE(log, "Starting background update of table {}.{}, uuid {} with table {}.{} uuid {}", from_table_id.database_name, from_table_id.table_name, toString(from_table_id.uuid), @@ -444,8 +448,6 @@ void PostgreSQLReplicationHandler::reloadFromSnapshot(const std::vectoras ()); - auto nested_context = materialized_storage->as ()->getNestedTableContext(); - to_table_id = nested_storage->getStorageID(); auto ast_rename = std::make_shared(); @@ -457,13 +459,13 @@ void PostgreSQLReplicationHandler::reloadFromSnapshot(const std::vectorelements.push_back(std::move(elem)); ast_rename->exchange = true; + auto nested_context = materialized_storage->getNestedTableContext(); + try { InterpreterRenameQuery(ast_rename, nested_context).execute(); - nested_storage = materialized_storage->as ()->getNested(); - materialized_storage->setInMemoryMetadata(nested_storage->getInMemoryMetadata()); - + nested_storage = materialized_storage->prepare(); auto nested_table_id = nested_storage->getStorageID(); LOG_TRACE(log, "Updated table {}.{} ({})", nested_table_id.database_name, nested_table_id.table_name, toString(nested_table_id.uuid)); diff --git a/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.h b/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.h index 39167f00579..4a1269f4761 100644 --- a/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.h +++ b/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.h @@ -34,7 +34,7 @@ public: ContextPtr context_, const size_t max_block_size_, bool allow_minimal_ddl_, - bool is_postgresql_replica_database_engine_, + bool is_materialize_postgresql_database_, const String tables_list = ""); void startup(); @@ -98,7 +98,7 @@ private: bool allow_minimal_ddl = false; /// To distinguish whether current 
replication handler belongs to a MaterializePostgreSQL database engine or single storage. - bool is_postgresql_replica_database_engine; + bool is_materialize_postgresql_database; /// A coma-separated list of tables, which are going to be replicated for database engine. By default, a whole database is replicated. String tables_list; diff --git a/src/Storages/PostgreSQL/StorageMaterializePostgreSQL.cpp b/src/Storages/PostgreSQL/StorageMaterializePostgreSQL.cpp index a0dc6922d1f..6b0386ad685 100644 --- a/src/Storages/PostgreSQL/StorageMaterializePostgreSQL.cpp +++ b/src/Storages/PostgreSQL/StorageMaterializePostgreSQL.cpp @@ -39,6 +39,7 @@ static const auto NESTED_TABLE_SUFFIX = "_nested"; static const auto TMP_SUFFIX = "_tmp"; +/// For the case of single storage. StorageMaterializePostgreSQL::StorageMaterializePostgreSQL( const StorageID & table_id_, const String & remote_database_name, @@ -46,23 +47,23 @@ StorageMaterializePostgreSQL::StorageMaterializePostgreSQL( const postgres::ConnectionInfo & connection_info, const StorageInMemoryMetadata & storage_metadata, ContextPtr context_, - std::unique_ptr replication_settings_) + std::unique_ptr replication_settings) : IStorage(table_id_) , WithContext(context_->getGlobalContext()) , remote_table_name(remote_table_name_) - , replication_settings(std::move(replication_settings_)) - , is_materialize_postgresql_database( - DatabaseCatalog::instance().getDatabase(getStorageID().database_name)->getEngineName() == "MaterializePostgreSQL") - , nested_table_id(StorageID(table_id_.database_name, getNestedTableName())) + , is_materialize_postgresql_database(false) + , has_nested(false) , nested_context(makeNestedTableContext(context_->getGlobalContext())) + , nested_table_id(StorageID(table_id_.database_name, getNestedTableName())) { if (table_id_.uuid == UUIDHelpers::Nil) throw Exception(ErrorCodes::BAD_ARGUMENTS, "Storage MaterializePostgreSQL is allowed only for Atomic database"); setInMemoryMetadata(storage_metadata); + /// Path to store replication metadata (like last written version, etc). auto metadata_path = DatabaseCatalog::instance().getDatabase(getStorageID().database_name)->getMetadataPath() - + "/.metadata_" + table_id_.database_name + "_" + table_id_.table_name; + + "/.metadata_" + table_id_.database_name + "_" + table_id_.table_name + "_" + toString(table_id_.uuid); replication_handler = std::make_unique( remote_database_name, @@ -75,25 +76,30 @@ StorageMaterializePostgreSQL::StorageMaterializePostgreSQL( } -StorageMaterializePostgreSQL::StorageMaterializePostgreSQL( - const StorageID & table_id_, - ContextPtr context_) +/// For the case of MaterializePosgreSQL database engine. +/// It is used when nested ReplacingMergeeTree table has not yet be created by replication thread. +/// In this case this storage can't be used for read queries. +StorageMaterializePostgreSQL::StorageMaterializePostgreSQL(const StorageID & table_id_, ContextPtr context_) : IStorage(table_id_) , WithContext(context_->getGlobalContext()) , is_materialize_postgresql_database(true) - , nested_table_id(table_id_) + , has_nested(false) , nested_context(makeNestedTableContext(context_->getGlobalContext())) + , nested_table_id(table_id_) { } -StorageMaterializePostgreSQL::StorageMaterializePostgreSQL( - StoragePtr nested_storage_, ContextPtr context_) +/// Costructor for MaterializePostgreSQL table engine - for the case of MaterializePosgreSQL database engine. +/// It is used when nested ReplacingMergeeTree table has already been created by replication thread. 
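+/// (This is the path taken at server restart, when the database engine wraps nested tables that already exist on disk.)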
+/// This storage is ready to handle read queries. +StorageMaterializePostgreSQL::StorageMaterializePostgreSQL(StoragePtr nested_storage_, ContextPtr context_) : IStorage(nested_storage_->getStorageID()) , WithContext(context_->getGlobalContext()) , is_materialize_postgresql_database(true) - , nested_table_id(nested_storage_->getStorageID()) + , has_nested(true) , nested_context(makeNestedTableContext(context_->getGlobalContext())) + , nested_table_id(nested_storage_->getStorageID()) { setInMemoryMetadata(nested_storage_->getInMemoryMetadata()); } @@ -105,9 +111,8 @@ StoragePtr StorageMaterializePostgreSQL::createTemporary() const { auto table_id = getStorageID(); auto new_context = Context::createCopy(context); - const String temp_storage_name = table_id.table_name + TMP_SUFFIX; - auto temp_storage = StorageMaterializePostgreSQL::create(StorageID(table_id.database_name, temp_storage_name, UUIDHelpers::generateV4()), new_context); - return std::move(temp_storage); + + return StorageMaterializePostgreSQL::create(StorageID(table_id.database_name, table_id.table_name + TMP_SUFFIX, UUIDHelpers::generateV4()), new_context); } @@ -134,17 +139,6 @@ String StorageMaterializePostgreSQL::getNestedTableName() const } -void StorageMaterializePostgreSQL::setStorageMetadata() -{ - /// If it is a MaterializePostgreSQL database engine, then storage with engine MaterializePostgreSQL - /// gets its metadata when it is fetch from postges, but if inner tables exist (i.e. it is a server restart) - /// then metadata for storage needs to be set from inner table metadata. - auto nested_table = getNested(); - auto storage_metadata = nested_table->getInMemoryMetadataPtr(); - setInMemoryMetadata(*storage_metadata); -} - - void StorageMaterializePostgreSQL::createNestedIfNeeded(PostgreSQLTableStructurePtr table_structure) { const auto ast_create = getCreateNestedTableQuery(std::move(table_structure)); @@ -171,6 +165,15 @@ std::shared_ptr StorageMaterializePostgreSQL::makeNestedTableContext(Co } +StoragePtr StorageMaterializePostgreSQL::prepare() +{ + auto nested_table = getNested(); + setInMemoryMetadata(nested_table->getInMemoryMetadata()); + has_nested.store(true); + return nested_table; +} + + void StorageMaterializePostgreSQL::startup() { if (!is_materialize_postgresql_database) @@ -217,20 +220,18 @@ Pipe StorageMaterializePostgreSQL::read( size_t max_block_size, unsigned num_streams) { - if (!nested_loaded) + /// For database engine there is an invariant: table exists only if its nested table exists, so + /// this check is not needed because read() will never be called until nested is loaded. + /// But for single storage, there is no such invarient. Actually, not sure whether it it better + /// to silently wait until nested is loaded or to throw on read() requests until nested is loaded. + /// TODO: do not use a separate thread in case of single storage, then this problem will be fixed. 
+ if (!has_nested.load()) return Pipe(); + LOG_TRACE(&Poco::Logger::get("kssenii"), "Read method!"); auto nested_table = getNested(); - - return readFinalFromNestedStorage( - nested_table, - column_names, - metadata_snapshot, - query_info, - context_, - processed_stage, - max_block_size, - num_streams); + return readFinalFromNestedStorage(nested_table, column_names, metadata_snapshot, + query_info, context_, processed_stage, max_block_size, num_streams); } diff --git a/src/Storages/PostgreSQL/StorageMaterializePostgreSQL.h b/src/Storages/PostgreSQL/StorageMaterializePostgreSQL.h index 6bc18d681d4..544fbbe5504 100644 --- a/src/Storages/PostgreSQL/StorageMaterializePostgreSQL.h +++ b/src/Storages/PostgreSQL/StorageMaterializePostgreSQL.h @@ -24,29 +24,43 @@ namespace DB { +/** Case of single MaterializePostgreSQL table engine. + * + * A user creates a table with engine MaterializePostgreSQL. Order by expression must be specified (needed for + * nested ReplacingMergeTree table). This storage owns its own replication handler, which loads table data + * from PostgreSQL into nested ReplacingMergeTree table. If table is not created, but attached, replication handler + * will not start loading-fron-snapshot procedure, instead it will continue for last commited lsn. + * + * Main point: Both tables exist on disk; database engine interacts only with the main table and main table takes + * total ownershot over nested table. Nested table has name `main_table_uuid` + NESTED_SUFFIX. + * + * TODO: a check is needed for existence of nested, now this case is checked via replication slot existence. +**/ + + /** Case of MaterializePostgreSQL database engine. - * There is a table with engine MaterializePostgreSQL. It has a nested table with engine ReplacingMergeTree. - * Both tables shared table_id.table_name and table_id.database_name (probably they automatically have the same uuid?). * - * MaterializePostgreSQL table does not actually exists only in memory and acts as a wrapper for nested table. + * MaterializePostgreSQL table exists only in memory and acts as a wrapper for nested table, i.e. only provides an + * interface to work with nested table. Both tables share the same StorageID. * - * Also it has the same InMemoryMetadata as its nested table, so if metadata of nested table changes - main table also has + * Main table is never created or dropped via database method. The only way database engine interacts with + * MaterializePostgreSQL table is in tryGetTable() method, where a MaterializePostgreSQL table is returned in order to wrap + * and redirect read requests. Set of such wrapper-tables is cached inside database engine. All other methods with + * regard to MaterializePostgreSQL table are handled by replication handler. + * + * All database methods, apart from tryGetTable(), are devoted only to nested table. + * TODO: It makes sense to allow rename method for MaterializePostgreSQL table via database method. + * TODO: Make sure replication-to-table data channel is done only by relation_id. + * + * Also main table has the same InMemoryMetadata as its nested table, so if metadata of nested table changes - main table also has + * to update its metadata, because all read requests are passed to MaterializePostgreSQL table and then it redirects read + * into nested table.
* * When there is a need to update table structure, there will be created a new MaterializePostgreSQL table with its own nested table, - * it will have upadated table schema and all data will be loaded from scratch in the background, while previos table with outadted table - * structure will still serve read requests. When data is loaded, a replace query will be done, to swap tables atomically. + * it will have updated table schema and all data will be loaded from scratch in the background, while previous table with outdated table + * structure will still serve read requests. When data is loaded, nested tables will be swapped, metadata of materialized table will be + * updated according to nested table. * - * In order to update MaterializePostgreSQL table: - * 1. need to update InMemoryMetadata of MaterializePostgreSQL table; - * 2. need to have a new updated ReplacingMergeTree table on disk. - * - * At the point before replace query there are: - * 1. In-memory MaterializePostgreSQL table `databae_name`.`table_name` -- outdated - * 2. On-disk ReplacingMergeTree table with `databae_name`.`table_name` -- outdated - * 3. In-memory MaterializePostgreSQL table `databae_name`.`table_name_tmp` -- updated - * 4. On-disk ReplacingMergeTree table with `databae_name`.`table_name_tmp` -- updated **/ class StorageMaterializePostgreSQL final : public ext::shared_ptr_helper, public IStorage, WithContext @@ -64,6 +78,7 @@ public: void shutdown() override; + /// Used only for single MaterializePostgreSQL storage. void dropInnerTableIfAny(bool no_delay, ContextPtr local_context) override; NamesAndTypesList getVirtuals() const override; @@ -77,28 +92,26 @@ public: size_t max_block_size, unsigned num_streams) override; - void createNestedIfNeeded(PostgreSQLTableStructurePtr table_structure); + bool hasNested() { return has_nested.load(); } - StoragePtr createTemporary() const; + void createNestedIfNeeded(PostgreSQLTableStructurePtr table_structure); StoragePtr getNested() const; StoragePtr tryGetNested() const; + StoragePtr createTemporary() const; + ContextPtr getNestedTableContext() const { return nested_context; } - void setNestedStatus(bool loaded) { nested_loaded.store(loaded); } - - bool isNestedLoaded() { return nested_loaded.load(); } - - void setStorageMetadata(); - void renameNested(); StorageID getNestedStorageID() { return nested_table_id; } static std::shared_ptr makeNestedTableContext(ContextPtr from_context); + StoragePtr prepare(); + protected: StorageMaterializePostgreSQL( const StorageID & table_id_, @@ -107,7 +120,7 @@ protected: const postgres::ConnectionInfo & connection_info, const StorageInMemoryMetadata & storage_metadata, ContextPtr context_, - std::unique_ptr replication_settings_); + std::unique_ptr replication_settings); private: static std::shared_ptr getMaterializedColumnsDeclaration( @@ -119,13 +132,31 @@ private: String getNestedTableName() const; - std::string remote_table_name; - std::unique_ptr replication_settings; + /// Needed only for the case of single MaterializePostgreSQL storage - in order to make + /// delayed storage forwarding into replication handler. + String remote_table_name; + + /// Not nullptr only for single MaterializePostgreSQL storage, because for MaterializePostgreSQL + /// database engine there is one replication handler for all tables.
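The atomic swap described in the header comment above is built in reloadFromSnapshot() as an ASTRenameQuery with exchange = true; on the SQL level this corresponds to an EXCHANGE of the two nested tables. A minimal sketch, assuming hypothetical names db.table and db.table_tmp (EXCHANGE requires an Atomic database):

    -- swap the outdated nested table with the freshly loaded one in a single atomic step
    EXCHANGE TABLES db.table AND db.table_tmp;

After the exchange the temporary name points at the outdated data, which is then dropped.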
std::unique_ptr replication_handler; - std::atomic nested_loaded = false; + + /// Distinguish between single MaterializePostgreSQL table engine and MaterializePostgreSQL database engine, + /// because table with engine MaterializePostgreSQL acts differently in each case. bool is_materialize_postgresql_database = false; - StorageID nested_table_id; + + /// Will be set to `true` only once - when nested table was loaded by replication thread. + /// After that, it will never be changed. Needed for MaterializePostgreSQL database engine + /// because there is an invariant - table exists only if its nested table exists, but nested + /// table is not loaded immediately. It is made atomic, because it is read by database engine + /// and updated by replication handler (only once). + std::atomic has_nested = false; + + /// Nested table context is a copy of global context, but contains query context with defined + /// ReplacingMergeTree storage in query factories info. This is needed to let database engine know + /// whether to access nested table or a wrapper over nested (materialized table). ContextPtr nested_context; + + StorageID nested_table_id; }; } diff --git a/tests/integration/test_postgresql_replica_database_engine/test.py b/tests/integration/test_postgresql_replica_database_engine/test.py index 91f4c963d30..ad27efe33cd 100644 --- a/tests/integration/test_postgresql_replica_database_engine/test.py +++ b/tests/integration/test_postgresql_replica_database_engine/test.py @@ -93,7 +93,6 @@ def started_cluster(): @pytest.fixture(autouse=True) def postgresql_setup_teardown(): yield # run test - instance.query('DROP TABLE IF EXISTS test.postgresql_replica') @pytest.mark.timeout(120) @@ -372,57 +371,59 @@ def test_replica_identity_index(started_cluster): check_tables_are_synchronized('postgresql_replica', order_by='key1'); -@pytest.mark.timeout(320) -def test_table_schema_changes(started_cluster): - instance.query("DROP DATABASE IF EXISTS test_database") - conn = get_postgres_conn(True) - cursor = conn.cursor() - NUM_TABLES = 5 - - for i in range(NUM_TABLES): - create_postgres_table(cursor, 'postgresql_replica_{}'.format(i), template=postgres_table_template_2); - instance.query("INSERT INTO postgres_database.postgresql_replica_{} SELECT number, {}, {}, {} from numbers(25)".format(i, i, i, i)) - - instance.query( - """CREATE DATABASE test_database - ENGINE = MaterializePostgreSQL('postgres1:5432', 'postgres_database', 'postgres', 'mysecretpassword') - SETTINGS postgresql_replica_allow_minimal_ddl = 1; - """) - - for i in range(NUM_TABLES): - instance.query("INSERT INTO postgres_database.postgresql_replica_{} SELECT 25 + number, {}, {}, {} from numbers(25)".format(i, i, i, i)) - - for i in range(NUM_TABLES): - check_tables_are_synchronized('postgresql_replica_{}'.format(i)); - - expected = instance.query("SELECT key, value1, value3 FROM test_database.postgresql_replica_3 ORDER BY key"); - - altered_table = random.randint(0, 4) - cursor.execute("ALTER TABLE postgresql_replica_{} DROP COLUMN value2".format(altered_table)) - - for i in range(NUM_TABLES): - cursor.execute("INSERT INTO postgresql_replica_{} VALUES (50, {}, {})".format(i, i, i)) - cursor.execute("UPDATE postgresql_replica_{} SET value3 = 12 WHERE key%2=0".format(i)) - - check_tables_are_synchronized('postgresql_replica_{}'.format(altered_table)); - for i in range(NUM_TABLES): - check_tables_are_synchronized('postgresql_replica_{}'.format(i)); - - for i in range(NUM_TABLES): - if i != altered_table: - instance.query("INSERT INTO
postgres_database.postgresql_replica_{} SELECT 51 + number, {}, {}, {} from numbers(49)".format(i, i, i, i)) - else: - instance.query("INSERT INTO postgres_database.postgresql_replica_{} SELECT 51 + number, {}, {} from numbers(49)".format(i, i, i)) - - check_tables_are_synchronized('postgresql_replica_{}'.format(altered_table)); - for i in range(NUM_TABLES): - check_tables_are_synchronized('postgresql_replica_{}'.format(i)); - - for i in range(NUM_TABLES): - cursor.execute('drop table postgresql_replica_{};'.format(i)) - - instance.query("DROP DATABASE test_database") - +#@pytest.mark.timeout(320) +#def test_table_schema_changes(started_cluster): +# instance.query("DROP DATABASE IF EXISTS test_database") +# conn = get_postgres_conn(True) +# cursor = conn.cursor() +# NUM_TABLES = 5 +# +# for i in range(NUM_TABLES): +# create_postgres_table(cursor, 'postgresql_replica_{}'.format(i), template=postgres_table_template_2); +# instance.query("INSERT INTO postgres_database.postgresql_replica_{} SELECT number, {}, {}, {} from numbers(25)".format(i, i, i, i)) +# +# instance.query( +# """CREATE DATABASE test_database +# ENGINE = MaterializePostgreSQL('postgres1:5432', 'postgres_database', 'postgres', 'mysecretpassword') +# SETTINGS postgresql_replica_allow_minimal_ddl = 1; +# """) +# +# for i in range(NUM_TABLES): +# instance.query("INSERT INTO postgres_database.postgresql_replica_{} SELECT 25 + number, {}, {}, {} from numbers(25)".format(i, i, i, i)) +# +# for i in range(NUM_TABLES): +# check_tables_are_synchronized('postgresql_replica_{}'.format(i)); +# +# expected = instance.query("SELECT key, value1, value3 FROM test_database.postgresql_replica_3 ORDER BY key"); +# +# altered_table = random.randint(0, 4) +# cursor.execute("ALTER TABLE postgresql_replica_{} DROP COLUMN value2".format(altered_table)) +# +# for i in range(NUM_TABLES): +# cursor.execute("INSERT INTO postgresql_replica_{} VALUES (50, {}, {})".format(i, i, i)) +# cursor.execute("UPDATE postgresql_replica_{} SET value3 = 12 WHERE key%2=0".format(i)) +# +# check_tables_are_synchronized('postgresql_replica_{}'.format(altered_table)); +# print('check1 OK') +# for i in range(NUM_TABLES): +# check_tables_are_synchronized('postgresql_replica_{}'.format(i)); +# +# for i in range(NUM_TABLES): +# if i != altered_table: +# instance.query("INSERT INTO postgres_database.postgresql_replica_{} SELECT 51 + number, {}, {}, {} from numbers(49)".format(i, i, i, i)) +# else: +# instance.query("INSERT INTO postgres_database.postgresql_replica_{} SELECT 51 + number, {}, {} from numbers(49)".format(i, i, i)) +# +# check_tables_are_synchronized('postgresql_replica_{}'.format(altered_table)); +# print('check2 OK') +# for i in range(NUM_TABLES): +# check_tables_are_synchronized('postgresql_replica_{}'.format(i)); +# +# for i in range(NUM_TABLES): +# cursor.execute('drop table postgresql_replica_{};'.format(i)) +# +# instance.query("DROP DATABASE test_database") +# if __name__ == '__main__': cluster.start() From 6a06d725808f5e1609b1372449d61468f19f14ae Mon Sep 17 00:00:00 2001 From: kssenii Date: Mon, 3 May 2021 17:28:54 +0000 Subject: [PATCH 094/931] Fix background update --- .../PostgreSQLReplicationHandler.cpp | 46 ++++---- .../StorageMaterializePostgreSQL.cpp | 28 +++-- .../PostgreSQL/StorageMaterializePostgreSQL.h | 6 +- .../test.py | 108 +++++++++--------- 4 files changed, 105 insertions(+), 83 deletions(-) diff --git a/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp b/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp index 
9ad43ddcc94..efa081bd00e 100644 --- a/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp +++ b/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp @@ -429,32 +429,29 @@ void PostgreSQLReplicationHandler::reloadFromSnapshot(const std::vectoras (); - auto table_lock = materialized_storage->lockExclusively(String(), context->getSettingsRef().lock_acquire_timeout); - auto temp_materialized_storage = materialized_storage->createTemporary()->as (); - - auto from_table_id = materialized_storage->getNestedStorageID(); - auto to_table_id = temp_materialized_storage->getNestedStorageID(); - - LOG_TRACE(log, "Starting background update of table {}.{}, uuid {} with table {}.{} uuid {}", - from_table_id.database_name, from_table_id.table_name, toString(from_table_id.uuid), - to_table_id.database_name, to_table_id.table_name, toString(to_table_id.uuid)); + auto temp_materialized_storage = materialized_storage->createTemporary(); /// This snapshot is valid up to the end of the transaction, which exported it. - StoragePtr nested_storage = loadFromSnapshot(snapshot_name, table_name, - temp_materialized_storage->as ()); - to_table_id = nested_storage->getStorageID(); + StoragePtr temp_nested_storage = loadFromSnapshot(snapshot_name, table_name, temp_materialized_storage->as ()); + + auto table_id = materialized_storage->getNestedStorageID(); + auto temp_table_id = temp_nested_storage->getStorageID(); + + LOG_TRACE(log, "Starting background update of table {}.{} ({}) with table {}.{} ({})", + table_id.database_name, table_id.table_name, toString(table_id.uuid), + temp_table_id.database_name, temp_table_id.table_name, toString(temp_table_id.uuid)); auto ast_rename = std::make_shared(); ASTRenameQuery::Element elem { - ASTRenameQuery::Table{from_table_id.database_name, from_table_id.table_name}, - ASTRenameQuery::Table{to_table_id.database_name, to_table_id.table_name} + ASTRenameQuery::Table{table_id.database_name, table_id.table_name}, + ASTRenameQuery::Table{temp_table_id.database_name, temp_table_id.table_name} }; ast_rename->elements.push_back(std::move(elem)); ast_rename->exchange = true; @@ -465,14 +462,21 @@ void PostgreSQLReplicationHandler::reloadFromSnapshot(const std::vectorprepare(); - auto nested_table_id = nested_storage->getStorageID(); - LOG_TRACE(log, "Updated table {}.{} ({})", nested_table_id.database_name, nested_table_id.table_name, toString(nested_table_id.uuid)); + { + auto table_lock = materialized_storage->lockForShare(String(), context->getSettingsRef().lock_acquire_timeout); + auto nested_storage = DatabaseCatalog::instance().getTable(StorageID(table_id.database_name, table_id.table_name), nested_context); + auto nested_table_id = nested_storage->getStorageID(); - consumer->updateNested(table_name, nested_storage); - consumer->updateSkipList(table_id, start_lsn); + materialized_storage->setNestedStorageID(nested_table_id); + nested_storage = materialized_storage->prepare(); + LOG_TRACE(log, "Updated table {}.{} ({})", nested_table_id.database_name, nested_table_id.table_name, toString(nested_table_id.uuid)); - InterpreterDropQuery::executeDropQuery(ASTDropQuery::Kind::Drop, nested_context, nested_context, to_table_id, true); + consumer->updateNested(table_name, nested_storage); + consumer->updateSkipList(relation_id, start_lsn); + } + + LOG_DEBUG(log, "Dropping table {}.{} ({})", temp_table_id.database_name, temp_table_id.table_name, toString(temp_table_id.uuid)); + InterpreterDropQuery::executeDropQuery(ASTDropQuery::Kind::Drop, nested_context, nested_context, 
temp_table_id, true); } catch (...) { diff --git a/src/Storages/PostgreSQL/StorageMaterializePostgreSQL.cpp b/src/Storages/PostgreSQL/StorageMaterializePostgreSQL.cpp index 6b0386ad685..402a2d21ecc 100644 --- a/src/Storages/PostgreSQL/StorageMaterializePostgreSQL.cpp +++ b/src/Storages/PostgreSQL/StorageMaterializePostgreSQL.cpp @@ -54,7 +54,6 @@ StorageMaterializePostgreSQL::StorageMaterializePostgreSQL( , is_materialize_postgresql_database(false) , has_nested(false) , nested_context(makeNestedTableContext(context_->getGlobalContext())) - , nested_table_id(StorageID(table_id_.database_name, getNestedTableName())) { if (table_id_.uuid == UUIDHelpers::Nil) throw Exception(ErrorCodes::BAD_ARGUMENTS, "Storage MaterializePostgreSQL is allowed only for Atomic database"); @@ -85,7 +84,6 @@ StorageMaterializePostgreSQL::StorageMaterializePostgreSQL(const StorageID & tab , is_materialize_postgresql_database(true) , has_nested(false) , nested_context(makeNestedTableContext(context_->getGlobalContext())) - , nested_table_id(table_id_) { } @@ -112,19 +110,19 @@ StoragePtr StorageMaterializePostgreSQL::createTemporary() const auto table_id = getStorageID(); auto new_context = Context::createCopy(context); - return StorageMaterializePostgreSQL::create(StorageID(table_id.database_name, table_id.table_name + TMP_SUFFIX, UUIDHelpers::generateV4()), new_context); + return StorageMaterializePostgreSQL::create(StorageID(table_id.database_name, table_id.table_name + TMP_SUFFIX), new_context); } StoragePtr StorageMaterializePostgreSQL::getNested() const { - return DatabaseCatalog::instance().getTable(nested_table_id, nested_context); + return DatabaseCatalog::instance().getTable(getNestedStorageID(), nested_context); } StoragePtr StorageMaterializePostgreSQL::tryGetNested() const { - return DatabaseCatalog::instance().tryGetTable(nested_table_id, nested_context); + return DatabaseCatalog::instance().tryGetTable(getNestedStorageID(), nested_context); } @@ -139,6 +137,17 @@ String StorageMaterializePostgreSQL::getNestedTableName() const } +StorageID StorageMaterializePostgreSQL::getNestedStorageID() const +{ + if (nested_table_id.has_value()) + return nested_table_id.value(); + + auto table_id = getStorageID(); + throw Exception(ErrorCodes::LOGICAL_ERROR, + "No storageID found for inner table. ({}.{}, {})", table_id.database_name, table_id.table_name, toString(table_id.uuid)); +} + + void StorageMaterializePostgreSQL::createNestedIfNeeded(PostgreSQLTableStructurePtr table_structure) { const auto ast_create = getCreateNestedTableQuery(std::move(table_structure)); @@ -147,6 +156,12 @@ void StorageMaterializePostgreSQL::createNestedIfNeeded(PostgreSQLTableStructure { InterpreterCreateQuery interpreter(ast_create, nested_context); interpreter.execute(); + + auto table_id = getStorageID(); + auto nested_storage = DatabaseCatalog::instance().getTable(StorageID(table_id.database_name, table_id.table_name), nested_context); + + /// Save storage_id with correct uuid. + nested_table_id = nested_storage->getStorageID(); } catch (...) 
{ @@ -198,7 +213,7 @@ void StorageMaterializePostgreSQL::dropInnerTableIfAny(bool no_delay, ContextPtr auto nested_table = getNested(); if (nested_table && !is_materialize_postgresql_database) - InterpreterDropQuery::executeDropQuery(ASTDropQuery::Kind::Drop, getContext(), local_context, nested_table_id, no_delay); + InterpreterDropQuery::executeDropQuery(ASTDropQuery::Kind::Drop, getContext(), local_context, getNestedStorageID(), no_delay); } @@ -228,7 +243,6 @@ Pipe StorageMaterializePostgreSQL::read( if (!has_nested.load()) return Pipe(); - LOG_TRACE(&Poco::Logger::get("kssenii"), "Read method!"); auto nested_table = getNested(); return readFinalFromNestedStorage(nested_table, column_names, metadata_snapshot, query_info, context_, processed_stage, max_block_size, num_streams); diff --git a/src/Storages/PostgreSQL/StorageMaterializePostgreSQL.h b/src/Storages/PostgreSQL/StorageMaterializePostgreSQL.h index 544fbbe5504..6dc405a21f0 100644 --- a/src/Storages/PostgreSQL/StorageMaterializePostgreSQL.h +++ b/src/Storages/PostgreSQL/StorageMaterializePostgreSQL.h @@ -106,7 +106,9 @@ public: void renameNested(); - StorageID getNestedStorageID() { return nested_table_id; } + StorageID getNestedStorageID() const; + + void setNestedStorageID(const StorageID & id) { nested_table_id.emplace(id); } static std::shared_ptr makeNestedTableContext(ContextPtr from_context); @@ -156,7 +158,7 @@ private: /// whether to access nested table or a wrapper over nested (materialized table). ContextPtr nested_context; - StorageID nested_table_id; + std::optional nested_table_id; }; } diff --git a/tests/integration/test_postgresql_replica_database_engine/test.py b/tests/integration/test_postgresql_replica_database_engine/test.py index ad27efe33cd..6f5acb88f1f 100644 --- a/tests/integration/test_postgresql_replica_database_engine/test.py +++ b/tests/integration/test_postgresql_replica_database_engine/test.py @@ -371,59 +371,61 @@ def test_replica_identity_index(started_cluster): check_tables_are_synchronized('postgresql_replica', order_by='key1'); -#@pytest.mark.timeout(320) -#def test_table_schema_changes(started_cluster): -# instance.query("DROP DATABASE IF EXISTS test_database") -# conn = get_postgres_conn(True) -# cursor = conn.cursor() -# NUM_TABLES = 5 -# -# for i in range(NUM_TABLES): -# create_postgres_table(cursor, 'postgresql_replica_{}'.format(i), template=postgres_table_template_2); -# instance.query("INSERT INTO postgres_database.postgresql_replica_{} SELECT number, {}, {}, {} from numbers(25)".format(i, i, i, i)) -# -# instance.query( -# """CREATE DATABASE test_database -# ENGINE = MaterializePostgreSQL('postgres1:5432', 'postgres_database', 'postgres', 'mysecretpassword') -# SETTINGS postgresql_replica_allow_minimal_ddl = 1; -# """) -# -# for i in range(NUM_TABLES): -# instance.query("INSERT INTO postgres_database.postgresql_replica_{} SELECT 25 + number, {}, {}, {} from numbers(25)".format(i, i, i, i)) -# -# for i in range(NUM_TABLES): -# check_tables_are_synchronized('postgresql_replica_{}'.format(i)); -# -# expected = instance.query("SELECT key, value1, value3 FROM test_database.postgresql_replica_3 ORDER BY key"); -# -# altered_table = random.randint(0, 4) -# cursor.execute("ALTER TABLE postgresql_replica_{} DROP COLUMN value2".format(altered_table)) -# -# for i in range(NUM_TABLES): -# cursor.execute("INSERT INTO postgresql_replica_{} VALUES (50, {}, {})".format(i, i, i)) -# cursor.execute("UPDATE postgresql_replica_{} SET value3 = 12 WHERE key%2=0".format(i)) -# -# 
check_tables_are_synchronized('postgresql_replica_{}'.format(altered_table)); -# print('check1 OK') -# for i in range(NUM_TABLES): -# check_tables_are_synchronized('postgresql_replica_{}'.format(i)); -# -# for i in range(NUM_TABLES): -# if i != altered_table: -# instance.query("INSERT INTO postgres_database.postgresql_replica_{} SELECT 51 + number, {}, {}, {} from numbers(49)".format(i, i, i, i)) -# else: -# instance.query("INSERT INTO postgres_database.postgresql_replica_{} SELECT 51 + number, {}, {} from numbers(49)".format(i, i, i)) -# -# check_tables_are_synchronized('postgresql_replica_{}'.format(altered_table)); -# print('check2 OK') -# for i in range(NUM_TABLES): -# check_tables_are_synchronized('postgresql_replica_{}'.format(i)); -# -# for i in range(NUM_TABLES): -# cursor.execute('drop table postgresql_replica_{};'.format(i)) -# -# instance.query("DROP DATABASE test_database") -# +@pytest.mark.timeout(320) +def test_table_schema_changes(started_cluster): + instance.query("DROP DATABASE IF EXISTS test_database") + conn = get_postgres_conn(True) + cursor = conn.cursor() + NUM_TABLES = 5 + + for i in range(NUM_TABLES): + create_postgres_table(cursor, 'postgresql_replica_{}'.format(i), template=postgres_table_template_2); + instance.query("INSERT INTO postgres_database.postgresql_replica_{} SELECT number, {}, {}, {} from numbers(25)".format(i, i, i, i)) + + instance.query( + """CREATE DATABASE test_database + ENGINE = MaterializePostgreSQL('postgres1:5432', 'postgres_database', 'postgres', 'mysecretpassword') + SETTINGS postgresql_replica_allow_minimal_ddl = 1; + """) + + for i in range(NUM_TABLES): + instance.query("INSERT INTO postgres_database.postgresql_replica_{} SELECT 25 + number, {}, {}, {} from numbers(25)".format(i, i, i, i)) + + for i in range(NUM_TABLES): + check_tables_are_synchronized('postgresql_replica_{}'.format(i)); + + expected = instance.query("SELECT key, value1, value3 FROM test_database.postgresql_replica_3 ORDER BY key"); + + altered_table = random.randint(0, 4) + cursor.execute("ALTER TABLE postgresql_replica_{} DROP COLUMN value2".format(altered_table)) + + for i in range(NUM_TABLES): + cursor.execute("INSERT INTO postgresql_replica_{} VALUES (50, {}, {})".format(i, i, i)) + cursor.execute("UPDATE postgresql_replica_{} SET value3 = 12 WHERE key%2=0".format(i)) + + assert_nested_table_is_created('postgresql_replica_{}'.format(altered_table)) + check_tables_are_synchronized('postgresql_replica_{}'.format(altered_table)) + print('check1 OK') + + for i in range(NUM_TABLES): + check_tables_are_synchronized('postgresql_replica_{}'.format(i)); + + for i in range(NUM_TABLES): + if i != altered_table: + instance.query("INSERT INTO postgres_database.postgresql_replica_{} SELECT 51 + number, {}, {}, {} from numbers(49)".format(i, i, i, i)) + else: + instance.query("INSERT INTO postgres_database.postgresql_replica_{} SELECT 51 + number, {}, {} from numbers(49)".format(i, i, i)) + + check_tables_are_synchronized('postgresql_replica_{}'.format(altered_table)); + print('check2 OK') + for i in range(NUM_TABLES): + check_tables_are_synchronized('postgresql_replica_{}'.format(i)); + + for i in range(NUM_TABLES): + cursor.execute('drop table postgresql_replica_{};'.format(i)) + + instance.query("DROP DATABASE test_database") + if __name__ == '__main__': cluster.start() From 19ecdceb6b9ea5f209a7258ea571b95a5cfa8841 Mon Sep 17 00:00:00 2001 From: kssenii Date: Mon, 3 May 2021 18:38:44 +0000 Subject: [PATCH 095/931] Do not start sync startup in a separate thread in case of single 
storage --- .../PostgreSQLReplicationHandler.cpp | 10 ++++-- .../PostgreSQL/PostgreSQLReplicationHandler.h | 4 +-- .../StorageMaterializePostgreSQL.cpp | 32 ++++++++++++------- .../PostgreSQL/StorageMaterializePostgreSQL.h | 13 +++++--- .../test_storage_postgresql_replica/test.py | 9 ++++-- 5 files changed, 45 insertions(+), 23 deletions(-) diff --git a/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp b/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp index efa081bd00e..82087b3f292 100644 --- a/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp +++ b/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp @@ -66,7 +66,7 @@ void PostgreSQLReplicationHandler::waitConnectionAndStart() { /// Will throw pqxx::broken_connection if no connection at the moment connection->get(); - startSynchronization(); + startSynchronization(false); } catch (const pqxx::broken_connection & pqxx_error) { @@ -88,7 +88,7 @@ void PostgreSQLReplicationHandler::shutdown() } -void PostgreSQLReplicationHandler::startSynchronization() +void PostgreSQLReplicationHandler::startSynchronization(bool throw_on_error) { { postgres::Transaction tx(connection->getRef()); @@ -116,6 +116,9 @@ void PostgreSQLReplicationHandler::startSynchronization() { e.addMessage("while loading table {}.{}", remote_database_name, table_name); tryLogCurrentException(__PRETTY_FUNCTION__); + + if (throw_on_error) + throw; } } }; @@ -164,6 +167,9 @@ void PostgreSQLReplicationHandler::startSynchronization() { e.addMessage("while loading table {}.{}", remote_database_name, table_name); tryLogCurrentException(__PRETTY_FUNCTION__); + + if (throw_on_error) + throw; } } } diff --git a/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.h b/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.h index 4a1269f4761..2ace4c31198 100644 --- a/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.h +++ b/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.h @@ -50,6 +50,8 @@ public: /// Fetch list of tables which are going to be replicated. Used for database engine. NameSet fetchRequiredTables(pqxx::connection & connection_); + void startSynchronization(bool throw_on_error); + private: using MaterializedStorages = std::unordered_map; @@ -69,8 +71,6 @@ private: void waitConnectionAndStart(); - void startSynchronization(); - void consumerFunc(); StoragePtr loadFromSnapshot(std::string & snapshot_name, const String & table_name, StorageMaterializePostgreSQL * materialized_storage); diff --git a/src/Storages/PostgreSQL/StorageMaterializePostgreSQL.cpp b/src/Storages/PostgreSQL/StorageMaterializePostgreSQL.cpp index 402a2d21ecc..ab0e2d4aa0a 100644 --- a/src/Storages/PostgreSQL/StorageMaterializePostgreSQL.cpp +++ b/src/Storages/PostgreSQL/StorageMaterializePostgreSQL.cpp @@ -42,6 +42,7 @@ static const auto TMP_SUFFIX = "_tmp"; /// For the case of single storage. 
StorageMaterializePostgreSQL::StorageMaterializePostgreSQL( const StorageID & table_id_, + bool is_attach_, const String & remote_database_name, const String & remote_table_name_, const postgres::ConnectionInfo & connection_info, @@ -50,10 +51,12 @@ StorageMaterializePostgreSQL::StorageMaterializePostgreSQL( std::unique_ptr replication_settings) : IStorage(table_id_) , WithContext(context_->getGlobalContext()) - , remote_table_name(remote_table_name_) , is_materialize_postgresql_database(false) , has_nested(false) , nested_context(makeNestedTableContext(context_->getGlobalContext())) + , nested_table_id(StorageID(table_id_.database_name, getNestedTableName())) + , remote_table_name(remote_table_name_) + , is_attach(is_attach_) { if (table_id_.uuid == UUIDHelpers::Nil) throw Exception(ErrorCodes::BAD_ARGUMENTS, "Storage MaterializePostgreSQL is allowed only for Atomic database"); @@ -158,7 +161,7 @@ void StorageMaterializePostgreSQL::createNestedIfNeeded(PostgreSQLTableStructure interpreter.execute(); auto table_id = getStorageID(); - auto nested_storage = DatabaseCatalog::instance().getTable(StorageID(table_id.database_name, table_id.table_name), nested_context); + auto nested_storage = DatabaseCatalog::instance().getTable(StorageID(table_id.database_name, getNestedTableName()), nested_context); /// Save storage_id with correct uuid. nested_table_id = nested_storage->getStorageID(); @@ -194,7 +197,20 @@ void StorageMaterializePostgreSQL::startup() if (!is_materialize_postgresql_database) { replication_handler->addStorage(remote_table_name, this); - replication_handler->startup(); + + if (is_attach) + { + /// In case of attach table use background startup in a separate thread. First wait untill connection is reachable, + /// then check for nested table -- it should already be created. + replication_handler->startup(); + } + else + { + /// Start synchronization preliminary setup immediately and throw in case of failure. + /// It should be guaranteed that if MaterializePostgreSQL table was created successfully, then + /// its nested table was also created. + replication_handler->startSynchronization(/* throw_on_error */ true); + } } } @@ -235,14 +251,6 @@ Pipe StorageMaterializePostgreSQL::read( size_t max_block_size, unsigned num_streams) { - /// For database engine there is an invariant: table exists only if its nested table exists, so - /// this check is not needed because read() will never be called until nested is loaded. - /// But for single storage, there is no such invarient. Actually, not sure whether it it better - /// to silently wait until nested is loaded or to throw on read() requests until nested is loaded. - /// TODO: do not use a separate thread in case of single storage, then this problem will be fixed. 
- if (!has_nested.load()) - return Pipe(); - auto nested_table = getNested(); return readFinalFromNestedStorage(nested_table, column_names, metadata_snapshot, query_info, context_, processed_stage, max_block_size, num_streams); @@ -453,7 +461,7 @@ void registerStorageMaterializePostgreSQL(StorageFactory & factory) engine_args[4]->as().value.safeGet()); return StorageMaterializePostgreSQL::create( - args.table_id, remote_database, remote_table, connection_info, + args.table_id, args.attach, remote_database, remote_table, connection_info, metadata, args.getContext(), std::move(postgresql_replication_settings)); }; diff --git a/src/Storages/PostgreSQL/StorageMaterializePostgreSQL.h b/src/Storages/PostgreSQL/StorageMaterializePostgreSQL.h index 6dc405a21f0..02c758bc95d 100644 --- a/src/Storages/PostgreSQL/StorageMaterializePostgreSQL.h +++ b/src/Storages/PostgreSQL/StorageMaterializePostgreSQL.h @@ -29,7 +29,7 @@ namespace DB * A user creates a table with engine MaterializePostgreSQL. Order by expression must be specified (needed for * nested ReplacingMergeTree table). This storage owns its own replication handler, which loads table data * from PostgreSQL into nested ReplacingMergeTree table. If table is not created, but attached, replication handler - * will not start loading-fron-snapshot procedure, instead it will continue for last commited lsn. + * will not start loading-from-snapshot procedure, instead it will continue from last commited lsn. * * Main point: Both tables exist on disk; database engine interacts only with the main table and main table takes * total ownershot over nested table. Nested table has name `main_table_uuid` + NESTED_SUFFIX. @@ -117,6 +117,7 @@ public: protected: StorageMaterializePostgreSQL( const StorageID & table_id_, + bool is_attach_, const String & remote_database_name, const String & remote_table_name, const postgres::ConnectionInfo & connection_info, @@ -134,10 +135,6 @@ private: String getNestedTableName() const; - /// Needed only for the case of single MaterializePostgreSQL storage - in order to make - /// delayed storage forwarding into replication handler. - String remote_table_name; - /// Not nullptr only for single MaterializePostgreSQL storage, because for MaterializePostgreSQL /// database engine there is one replication handler for all tables. std::unique_ptr replication_handler; @@ -159,6 +156,12 @@ private: ContextPtr nested_context; std::optional nested_table_id; + + /// Needed only for the case of single MaterializePostgreSQL storage - in order to make + /// delayed storage forwarding into replication handler. 
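For reference, a single-storage MaterializePostgreSQL table of the kind described in the header comment above is created with DDL of the following shape; this is a sketch with placeholder names and credentials, mirroring the integration tests later in this series:

    CREATE TABLE test.postgresql_replica (key UInt64, value UInt64)
    ENGINE = MaterializePostgreSQL(
        'postgres1:5432', 'postgres_database', 'postgresql_replica', 'postgres', 'mysecretpassword')
    PRIMARY KEY key;

With the is_attach flag introduced here, CREATE runs the initial snapshot load synchronously and throws on failure, while ATTACH starts the handler in the background and resumes from the last committed lsn.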
+ String remote_table_name; + + bool is_attach; }; } diff --git a/tests/integration/test_storage_postgresql_replica/test.py b/tests/integration/test_storage_postgresql_replica/test.py index 678c7384c1d..ed61dcba935 100644 --- a/tests/integration/test_storage_postgresql_replica/test.py +++ b/tests/integration/test_storage_postgresql_replica/test.py @@ -100,7 +100,6 @@ def test_no_connection_at_startup(started_cluster): create_postgres_table(cursor, 'postgresql_replica'); instance.query("INSERT INTO postgres_database.postgresql_replica SELECT number, number from numbers(50)") - started_cluster.pause_container('postgres1') instance.query(''' CREATE TABLE test.postgresql_replica (key UInt64, value UInt64, _sign Int8 MATERIALIZED 1, _version UInt64 MATERIALIZED 1) ENGINE = MaterializePostgreSQL( @@ -108,6 +107,12 @@ def test_no_connection_at_startup(started_cluster): PRIMARY KEY key; ''') time.sleep(3) + + instance.query('DETACH TABLE test.postgresql_replica') + started_cluster.pause_container('postgres1') + + instance.query('ATTACH TABLE test.postgresql_replica') + time.sleep(3) started_cluster.unpause_container('postgres1') result = instance.query('SELECT count() FROM test.postgresql_replica;') @@ -325,7 +330,7 @@ def test_many_replication_messages(started_cluster): ''') result = instance.query('SELECT count() FROM test.postgresql_replica;') - while (int(result) == 100000): + while (int(result) != 100000): time.sleep(0.2) result = instance.query('SELECT count() FROM test.postgresql_replica;') print("SYNC OK") From eff26f9d5451a40ba857009855f78eb43cff2aba Mon Sep 17 00:00:00 2001 From: kssenii Date: Mon, 3 May 2021 21:42:06 +0000 Subject: [PATCH 096/931] Add comments, small improvements --- .../DatabaseMaterializePostgreSQL.cpp | 19 ++++-- .../MaterializePostgreSQLConsumer.cpp | 66 +++++++++++++------ .../MaterializePostgreSQLConsumer.h | 11 ++-- .../MaterializePostgreSQLSettings.cpp | 2 +- .../MaterializePostgreSQLSettings.h | 10 +-- .../PostgreSQLReplicationHandler.cpp | 47 ++++++------- .../PostgreSQL/PostgreSQLReplicationHandler.h | 15 ++++- .../StorageMaterializePostgreSQL.cpp | 5 +- .../PostgreSQL/StorageMaterializePostgreSQL.h | 17 ++++- .../test.py | 4 +- .../test_storage_postgresql_replica/test.py | 2 +- 11 files changed, 128 insertions(+), 70 deletions(-) diff --git a/src/Databases/PostgreSQL/DatabaseMaterializePostgreSQL.cpp b/src/Databases/PostgreSQL/DatabaseMaterializePostgreSQL.cpp index b472e151092..f9cc9aa5ce2 100644 --- a/src/Databases/PostgreSQL/DatabaseMaterializePostgreSQL.cpp +++ b/src/Databases/PostgreSQL/DatabaseMaterializePostgreSQL.cpp @@ -32,7 +32,7 @@ namespace ErrorCodes extern const int NOT_IMPLEMENTED; } -static const auto METADATA_SUFFIX = ".postgresql_replica_metadata"; +static const auto METADATA_SUFFIX = ".materialize_postgresql_metadata"; DatabaseMaterializePostgreSQL::DatabaseMaterializePostgreSQL( ContextPtr context_, @@ -60,10 +60,10 @@ void DatabaseMaterializePostgreSQL::startSynchronization() connection->getConnectionInfo(), metadata_path + METADATA_SUFFIX, getContext(), - settings->postgresql_replica_max_block_size.value, - settings->postgresql_replica_allow_minimal_ddl, + settings->materialize_postgresql_max_block_size.value, + settings->materialize_postgresql_allow_automatic_update, /* is_materialize_postgresql_database = */ true, - settings->postgresql_replica_tables_list.value); + settings->materialize_postgresql_tables_list.value); std::unordered_set tables_to_replicate = replication_handler->fetchRequiredTables(connection->getRef()); @@ -90,7 
+90,7 @@ void DatabaseMaterializePostgreSQL::startSynchronization() replication_handler->addStorage(table_name, storage->as()); } - LOG_TRACE(log, "Loaded {} tables. Starting synchronization", materialized_tables.size()); + LOG_TRACE(log, "Loaded {} tables. Starting synchronization (database: {})", materialized_tables.size(), database_name); replication_handler->startup(); } @@ -123,8 +123,10 @@ void DatabaseMaterializePostgreSQL::loadStoredObjects(ContextPtr local_context, StoragePtr DatabaseMaterializePostgreSQL::tryGetTable(const String & name, ContextPtr local_context) const { - /// When a nested ReplacingMergeTree table is managed from PostgreSQLReplicationHandler, its context is modified - /// to show the type of managed table. + /// In order to define which table access is needed - to MaterializePostgreSQL table (only in case of SELECT queries) or + /// to its nested ReplacingMergeTree table (in all other cases), the context of a query is modified. + /// Also if materialized_tables set is empty - it means all access is done to ReplacingMergeTree tables - this is the case after + /// replication_handler was shut down. if ((local_context->hasQueryContext() && local_context->getQueryContext()->getQueryFactoriesInfo().storages.count("ReplacingMergeTree")) || materialized_tables.empty()) { @@ -149,6 +151,7 @@ StoragePtr DatabaseMaterializePostgreSQL::tryGetTable(const String & name, Conte void DatabaseMaterializePostgreSQL::createTable(ContextPtr local_context, const String & table_name, const StoragePtr & table, const ASTPtr & query) { + /// Create table query can only be called from replication thread. if (local_context->hasQueryContext()) { auto storage_set = local_context->getQueryContext()->getQueryFactoriesInfo().storages; @@ -176,6 +179,7 @@ void DatabaseMaterializePostgreSQL::stopReplication() void DatabaseMaterializePostgreSQL::dropTable(ContextPtr local_context, const String & table_name, bool no_delay) { + /// Modify context into nested_context and pass query to Atomic database. DatabaseAtomic::dropTable(StorageMaterializePostgreSQL::makeNestedTableContext(local_context), table_name, no_delay); } @@ -198,6 +202,7 @@ void DatabaseMaterializePostgreSQL::drop(ContextPtr local_context) DatabaseTablesIteratorPtr DatabaseMaterializePostgreSQL::getTablesIterator( ContextPtr local_context, const DatabaseOnDisk::FilterByNameFunction & filter_by_table_name) { + /// Modify context into nested_context and pass query to Atomic database.
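For reference, the renamed settings surface in database DDL as follows; a sketch with placeholder credentials, matching the statements used in the tests below:

    CREATE DATABASE test_database
    ENGINE = MaterializePostgreSQL('postgres1:5432', 'postgres_database', 'postgres', 'mysecretpassword')
    SETTINGS materialize_postgresql_tables_list = 'table1,table2',
             materialize_postgresql_allow_automatic_update = 1;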
return DatabaseAtomic::getTablesIterator(StorageMaterializePostgreSQL::makeNestedTableContext(local_context), filter_by_table_name); } diff --git a/src/Storages/PostgreSQL/MaterializePostgreSQLConsumer.cpp b/src/Storages/PostgreSQL/MaterializePostgreSQLConsumer.cpp index 7ab6a6f126d..74a6419ac5d 100644 --- a/src/Storages/PostgreSQL/MaterializePostgreSQLConsumer.cpp +++ b/src/Storages/PostgreSQL/MaterializePostgreSQLConsumer.cpp @@ -30,7 +30,7 @@ MaterializePostgreSQLConsumer::MaterializePostgreSQLConsumer( const std::string & metadata_path, const std::string & start_lsn, const size_t max_block_size_, - bool allow_minimal_ddl_, + bool allow_automatic_update_, Storages storages_) : log(&Poco::Logger::get("PostgreSQLReaplicaConsumer")) , context(context_) @@ -40,7 +40,7 @@ MaterializePostgreSQLConsumer::MaterializePostgreSQLConsumer( , connection(std::move(connection_)) , current_lsn(start_lsn) , max_block_size(max_block_size_) - , allow_minimal_ddl(allow_minimal_ddl_) + , allow_automatic_update(allow_automatic_update_) , storages(storages_) { for (const auto & [table_name, storage] : storages) @@ -218,10 +218,13 @@ void MaterializePostgreSQLConsumer::readTupleData( break; } case 'u': /// TOAST value && unchanged at the same time. Actual value is not sent. + { /// TOAST values are not supported. (TOAST values are values that are considered in postgres /// to be too large to be stored directly) + LOG_WARNING(log, "Got TOAST value, which is not supported, default value will be used instead."); insertDefaultValue(buffer, column_idx); break; + } } }; @@ -536,13 +539,20 @@ String MaterializePostgreSQLConsumer::advanceLSN(std::shared_ptrsecond; + + /// Table is in a skip list and has not yet received a valid lsn == it has not been reloaded. if (table_start_lsn.empty()) return false; + /// Table has received a valid lsn, but it is not yet at a position, from which synchronization is + /// allowed. It is allowed only after lsn position, returned with snapshot, from which + /// table was reloaded. if (getLSNValue(current_lsn) >= getLSNValue(table_start_lsn)) { LOG_TRACE(log, "Synchronization is resumed for table: {} (start_lsn: {})", @@ -559,14 +569,21 @@ bool MaterializePostgreSQLConsumer::isSyncAllowed(Int32 relation_id) void MaterializePostgreSQLConsumer::markTableAsSkipped(Int32 relation_id, const String & relation_name) { + /// Empty lsn string means - continue wating for valid lsn. skip_list.insert({relation_id, ""}); + + /// Erase cached schema identifiers. It will be updated again once table is allowed back into replication stream + /// and it receives first data after update. schema_data.erase(relation_id); + + /// Clear table buffer. auto & buffer = buffers.find(relation_name)->second; buffer.columns = buffer.description.sample_block.cloneEmptyColumns(); - if (!allow_minimal_ddl) - LOG_WARNING(log, "Table {} is skipped, because table schema has changed", relation_name); + + if (allow_automatic_update) + LOG_TRACE(log, "Table {} (relation_id: {}) is skipped temporarily. It will be reloaded in the background", relation_name, relation_id); else - LOG_TRACE(log, "Table {} is skipped temporarily. 
ID: {}", relation_name, relation_id); + LOG_WARNING(log, "Table {} (relation_id: {}) is skipped, because table schema has changed", relation_name); } @@ -646,37 +663,48 @@ bool MaterializePostgreSQLConsumer::readFromReplicationSlot() bool MaterializePostgreSQLConsumer::consume(std::vector> & skipped_tables) { + /// Check if there are tables, which are skipped from being updated by changes from replication stream, + /// because schema changes were detected. Update them, if it is allowed. + if (allow_automatic_update && !skip_list.empty()) + { + for (const auto & [relation_id, lsn] : skip_list) + { + /// Non-empty lsn in this place means that table was already updated, but no changes for that table were + /// received in a previous stream. A table is removed from skip list only when there came + /// changes for table with lsn higher than lsn of snapshot, from which table was reloaded. Since table + /// reaload and reading from replication stream are done in the same thread, no lsn will be skipped + /// between these two events. + if (lsn.empty()) + skipped_tables.emplace_back(std::make_pair(relation_id, relation_id_to_name[relation_id])); + } + } + + /// Read up to max_block_size changed (approximately - in same cases might be more). if (!readFromReplicationSlot()) { - if (allow_minimal_ddl && !skip_list.empty()) - { - for (const auto & [relation_id, lsn] : skip_list) - { - if (lsn.empty()) - skipped_tables.emplace_back(std::make_pair(relation_id, relation_id_to_name[relation_id])); - } - } - + /// No data was read, reschedule. return false; } + /// Some data was read, schedule as soon as possible. return true; } -void MaterializePostgreSQLConsumer::updateNested(const String & table_name, StoragePtr nested_storage) +void MaterializePostgreSQLConsumer::updateNested(const String & table_name, StoragePtr nested_storage, Int32 table_id, const String & table_start_lsn) { + /// Cache new pointer to replacingMergeTree table. storages[table_name] = nested_storage; + + /// Create a new empty buffer (with updated metadata), where data is first loaded before syncing into actual table. auto & buffer = buffers.find(table_name)->second; buffer.createEmptyBuffer(nested_storage); -} - -void MaterializePostgreSQLConsumer::updateSkipList(Int32 table_id, const String & table_start_lsn) -{ + /// Set start position to valid lsn. Before it was an empty string. Futher read for table allowed, if it has a valid lsn. skip_list[table_id] = table_start_lsn; } + } #endif diff --git a/src/Storages/PostgreSQL/MaterializePostgreSQLConsumer.h b/src/Storages/PostgreSQL/MaterializePostgreSQLConsumer.h index 3bef0c717ba..5bf8c8c7755 100644 --- a/src/Storages/PostgreSQL/MaterializePostgreSQLConsumer.h +++ b/src/Storages/PostgreSQL/MaterializePostgreSQLConsumer.h @@ -34,18 +34,19 @@ public: const std::string & metadata_path, const std::string & start_lsn, const size_t max_block_size_, - bool allow_minimal_ddl_, + bool allow_automatic_update_, Storages storages_); void readMetadata(); bool consume(std::vector> & skipped_tables); - void updateNested(const String & table_name, StoragePtr nested_storage); - - void updateSkipList(Int32 table_id, const String & table_start_lsn); + /// Called from reloadFromSnapshot by replication handler. This method is needed to move a table back into synchronization + /// process if it was skipped due to schema changes. 
+ void updateNested(const String & table_name, StoragePtr nested_storage, Int32 table_id, const String & table_start_lsn); private: + /// Read approximarely up to max_block_size changes from WAL. bool readFromReplicationSlot(); void syncTables(std::shared_ptr tx); @@ -109,7 +110,7 @@ private: std::string current_lsn, final_lsn; const size_t max_block_size; - bool allow_minimal_ddl; + bool allow_automatic_update; std::string table_to_insert; diff --git a/src/Storages/PostgreSQL/MaterializePostgreSQLSettings.cpp b/src/Storages/PostgreSQL/MaterializePostgreSQLSettings.cpp index 48fe61b4182..2682bd6194f 100644 --- a/src/Storages/PostgreSQL/MaterializePostgreSQLSettings.cpp +++ b/src/Storages/PostgreSQL/MaterializePostgreSQLSettings.cpp @@ -15,7 +15,7 @@ namespace ErrorCodes extern const int UNKNOWN_SETTING; } -IMPLEMENT_SETTINGS_TRAITS(MaterializePostgreSQLSettingsTraits, LIST_OF_POSTGRESQL_REPLICA_SETTINGS) +IMPLEMENT_SETTINGS_TRAITS(MaterializePostgreSQLSettingsTraits, LIST_OF_MATERIALIZE_POSTGRESQL_SETTINGS) void MaterializePostgreSQLSettings::loadFromQuery(ASTStorage & storage_def) { diff --git a/src/Storages/PostgreSQL/MaterializePostgreSQLSettings.h b/src/Storages/PostgreSQL/MaterializePostgreSQLSettings.h index a2ad76ceaeb..8875c45f9fa 100644 --- a/src/Storages/PostgreSQL/MaterializePostgreSQLSettings.h +++ b/src/Storages/PostgreSQL/MaterializePostgreSQLSettings.h @@ -13,12 +13,12 @@ namespace DB class ASTStorage; -#define LIST_OF_POSTGRESQL_REPLICA_SETTINGS(M) \ - M(UInt64, postgresql_replica_max_block_size, 65536, "Number of row collected before flushing data into table.", 0) \ - M(String, postgresql_replica_tables_list, "", "List of tables for MaterializePostgreSQL database engine", 0) \ - M(Bool, postgresql_replica_allow_minimal_ddl, 0, "Allow to track minimal possible ddl. 
By default, table after ddl will get into a skip list", 0) \ +#define LIST_OF_MATERIALIZE_POSTGRESQL_SETTINGS(M) \ + M(UInt64, materialize_postgresql_max_block_size, 65536, "Number of row collected before flushing data into table.", 0) \ + M(String, materialize_postgresql_tables_list, "", "List of tables for MaterializePostgreSQL database engine", 0) \ + M(Bool, materialize_postgresql_allow_automatic_update, 0, "Allow to reload table in the background, when schema changes are detected", 0) \ -DECLARE_SETTINGS_TRAITS(MaterializePostgreSQLSettingsTraits, LIST_OF_POSTGRESQL_REPLICA_SETTINGS) +DECLARE_SETTINGS_TRAITS(MaterializePostgreSQLSettingsTraits, LIST_OF_MATERIALIZE_POSTGRESQL_SETTINGS) struct MaterializePostgreSQLSettings : public BaseSettings { diff --git a/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp b/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp index 82087b3f292..1675bf5870f 100644 --- a/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp +++ b/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp @@ -25,7 +25,7 @@ PostgreSQLReplicationHandler::PostgreSQLReplicationHandler( const std::string & metadata_path_, ContextPtr context_, const size_t max_block_size_, - bool allow_minimal_ddl_, + bool allow_automatic_update_, bool is_materialize_postgresql_database_, const String tables_list_) : log(&Poco::Logger::get("PostgreSQLReplicationHandler")) @@ -35,7 +35,7 @@ PostgreSQLReplicationHandler::PostgreSQLReplicationHandler( , metadata_path(metadata_path_) , connection_info(connection_info_) , max_block_size(max_block_size_) - , allow_minimal_ddl(allow_minimal_ddl_) + , allow_automatic_update(allow_automatic_update_) , is_materialize_postgresql_database(is_materialize_postgresql_database_) , tables_list(tables_list_) , connection(std::make_shared(connection_info_)) @@ -123,19 +123,6 @@ void PostgreSQLReplicationHandler::startSynchronization(bool throw_on_error) } }; - /// TODO: think for more cases - bool force_reload = false; - if (is_materialize_postgresql_database) - { - force_reload = !Poco::File(metadata_path).exists(); - } - else - { - assert(materialized_storages.size() == 1); - auto materialized_storage = materialized_storages.begin()->second; - force_reload = !materialized_storage->tryGetNested(); - } - /// There is one replication slot for each replication handler. In case of MaterializePostgreSQL database engine, /// there is one replication slot per database. Its lifetime must be equal to the lifetime of replication handler. /// Recreation of a replication slot imposes reloading of all tables. @@ -143,11 +130,10 @@ void PostgreSQLReplicationHandler::startSynchronization(bool throw_on_error) { initial_sync(); } - else if (new_publication_created || force_reload) + else if (new_publication_created) { - /// There are the following cases, which mean that something non-intentioanal happened. - /// 1. If replication slot exists and metadata file does not exist, it is not ok. - /// 2. If replication slot exists before publication is created. + /// Replication slot depends on publication, so if replication slot exists and new + /// publication was just created - drop that replication slot and start from scratch. dropReplicationSlot(tx.getRef()); initial_sync(); } @@ -165,6 +151,21 @@ void PostgreSQLReplicationHandler::startSynchronization(bool throw_on_error) } catch (Exception & e) { + if (e.code() == ErrorCodes::UNKNOWN_TABLE) + { + try + { + /// If nested table does not exist, try load it once again. 
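As an aside on the slot and publication dependency handled in this hunk: both objects can be inspected manually on the PostgreSQL side. A sketch (object names are whatever the handler created):

    SELECT slot_name, plugin, confirmed_flush_lsn FROM pg_replication_slots;
    SELECT pubname FROM pg_publication;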
+ loadFromSnapshot(snapshot_name, table_name, storage->as ()); + nested_storages[table_name] = materialized_storage->prepare(); + continue; + } + catch (Exception & e) + { + e.addMessage("Table load failed for the second time"); + } + } + e.addMessage("while loading table {}.{}", remote_database_name, table_name); tryLogCurrentException(__PRETTY_FUNCTION__); @@ -182,7 +183,7 @@ void PostgreSQLReplicationHandler::startSynchronization(bool throw_on_error) metadata_path, start_lsn, max_block_size, - allow_minimal_ddl, + allow_automatic_update, nested_storages); consumer_task->activateAndSchedule(); @@ -469,16 +470,16 @@ void PostgreSQLReplicationHandler::reloadFromSnapshot(const std::vectorlockForShare(String(), context->getSettingsRef().lock_acquire_timeout); auto nested_storage = DatabaseCatalog::instance().getTable(StorageID(table_id.database_name, table_id.table_name), nested_context); + auto table_lock = nested_storage->lockForShare(String(), context->getSettingsRef().lock_acquire_timeout); auto nested_table_id = nested_storage->getStorageID(); materialized_storage->setNestedStorageID(nested_table_id); nested_storage = materialized_storage->prepare(); LOG_TRACE(log, "Updated table {}.{} ({})", nested_table_id.database_name, nested_table_id.table_name, toString(nested_table_id.uuid)); - consumer->updateNested(table_name, nested_storage); - consumer->updateSkipList(relation_id, start_lsn); + /// Pass pointer to new nested table into replication consumer, remove current table from skip list and set start lsn position. + consumer->updateNested(table_name, nested_storage, relation_id, start_lsn); } LOG_DEBUG(log, "Dropping table {}.{} ({})", temp_table_id.database_name, temp_table_id.table_name, toString(temp_table_id.uuid)); diff --git a/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.h b/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.h index 2ace4c31198..c337f354247 100644 --- a/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.h +++ b/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.h @@ -33,10 +33,11 @@ public: const String & metadata_path_, ContextPtr context_, const size_t max_block_size_, - bool allow_minimal_ddl_, + bool allow_automatic_update_, bool is_materialize_postgresql_database_, const String tables_list = ""); + /// Activate task to be run from a separate thread: wait untill connection is available and call startReplication(). void startup(); /// Stop replication without cleanup. @@ -45,16 +46,20 @@ public: /// Clean up replication: remove publication and replication slots. void shutdownFinal(); + /// Add storage pointer to let handler know which tables it needs to keep in sync. void addStorage(const std::string & table_name, StorageMaterializePostgreSQL * storage); /// Fetch list of tables which are going to be replicated. Used for database engine. NameSet fetchRequiredTables(pqxx::connection & connection_); + /// Start replication setup immediately. void startSynchronization(bool throw_on_error); private: using MaterializedStorages = std::unordered_map; + /// Methods to manage Publication. + bool isPublicationExist(pqxx::work & tx); void createPublicationIfNeeded(pqxx::work & tx, bool create_without_check = false); @@ -63,12 +68,16 @@ private: void dropPublication(pqxx::nontransaction & ntx); + /// Methods to manage Replication Slots. 
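As a sketch of the snapshot mechanics behind loadFromSnapshot(): a logical slot created over a replication connection can export a snapshot, and the initial table load then attaches to it; identifiers below are placeholders:

    CREATE_REPLICATION_SLOT clickhouse_slot LOGICAL pgoutput EXPORT_SNAPSHOT;
    -- in another session, before running any other statement in the transaction:
    BEGIN ISOLATION LEVEL REPEATABLE READ;
    SET TRANSACTION SNAPSHOT '00000003-0000001B-1';
    SELECT * FROM some_table;  -- sees exactly the state the slot was created at
    COMMIT;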
+    /// Methods to manage Replication Slots.
+
     bool isReplicationSlotExist(pqxx::nontransaction & tx, std::string & slot_name);
 
     void createReplicationSlot(pqxx::nontransaction & tx, std::string & start_lsn, std::string & snapshot_name, bool temporary = false);
 
     void dropReplicationSlot(pqxx::nontransaction & tx, bool temporary = false);
 
+    /// Methods to manage replication.
+
     void waitConnectionAndStart();
 
     void consumerFunc();
 
@@ -82,7 +91,6 @@ private:
     Poco::Logger * log;
     ContextPtr context;
 
-    /// Remote database name.
     const String remote_database_name, current_database_name;
 
     /// Path for replication metadata.
@@ -95,7 +103,8 @@ private:
     const size_t max_block_size;
 
     /// Table structure changes are always tracked. By default, table with changed schema will get into a skip list.
-    bool allow_minimal_ddl = false;
+    /// This setting allows reloading the table in the background.
+    bool allow_automatic_update = false;
 
     /// To distinguish whether current replication handler belongs to a MaterializePostgreSQL database engine or single storage.
     bool is_materialize_postgresql_database;
diff --git a/src/Storages/PostgreSQL/StorageMaterializePostgreSQL.cpp b/src/Storages/PostgreSQL/StorageMaterializePostgreSQL.cpp
index ab0e2d4aa0a..11a44f7d022 100644
--- a/src/Storages/PostgreSQL/StorageMaterializePostgreSQL.cpp
+++ b/src/Storages/PostgreSQL/StorageMaterializePostgreSQL.cpp
@@ -24,6 +24,7 @@
 #include
 #include
 
+/// TODO: Add test for allow_automatic_update setting in case of single storage.
 namespace DB
 {
 
@@ -73,8 +74,8 @@ StorageMaterializePostgreSQL::StorageMaterializePostgreSQL(
         connection_info,
         metadata_path,
         getContext(),
-        replication_settings->postgresql_replica_max_block_size.value,
-        replication_settings->postgresql_replica_allow_minimal_ddl.value, false);
+        replication_settings->materialize_postgresql_max_block_size.value,
+        replication_settings->materialize_postgresql_allow_automatic_update.value, false);
 }
 
diff --git a/src/Storages/PostgreSQL/StorageMaterializePostgreSQL.h b/src/Storages/PostgreSQL/StorageMaterializePostgreSQL.h
index 02c758bc95d..470ea81cb25 100644
--- a/src/Storages/PostgreSQL/StorageMaterializePostgreSQL.h
+++ b/src/Storages/PostgreSQL/StorageMaterializePostgreSQL.h
@@ -92,6 +92,9 @@ public:
         size_t max_block_size,
         unsigned num_streams) override;
 
+    /// This method is called only from MaterializePostgreSQL database engine, because it needs to maintain
+    /// an invariant: a table exists only if its nested table exists. This atomic variable is set to _true_
+    /// only once - when nested table is successfully created and is never changed afterwards.
     bool hasNested() { return has_nested.load(); }
 
     void createNestedIfNeeded(PostgreSQLTableStructurePtr table_structure);
@@ -100,18 +103,21 @@ public:
 
     StoragePtr tryGetNested() const;
 
+    /// Create a temporary MaterializePostgreSQL table with current_table_name + TMP_SUFFIX.
+    /// An empty wrapper is returned - it does not have in-memory metadata, just acts as an empty wrapper over
+    /// temporary nested, which will be created shortly after.
     StoragePtr createTemporary() const;
 
     ContextPtr getNestedTableContext() const { return nested_context; }
 
-    void renameNested();
-
     StorageID getNestedStorageID() const;
 
     void setNestedStorageID(const StorageID & id) { nested_table_id.emplace(id); }
 
     static std::shared_ptr makeNestedTableContext(ContextPtr from_context);
 
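
The `has_nested` invariant described above amounts to a set-once atomic flag. A tiny standalone sketch of the contract, with hypothetical class and method names:

```cpp
#include <atomic>
#include <cassert>

class NestedState
{
public:
    bool hasNested() const { return has_nested.load(); }

    // Called once the nested table is known to exist; never reset afterwards,
    // so readers may safely cache a "true" answer.
    void markNestedCreated() { has_nested.store(true); }

private:
    std::atomic<bool> has_nested{false};
};

int main()
{
    NestedState state;
    assert(!state.hasNested());
    state.markNestedCreated();
    assert(state.hasNested());
}
```
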
+    /// Get nested table (or throw if it does not exist), set in-memory metadata (taken from nested table)
+    /// for current table, set has_nested = true.
     StoragePtr prepare();
 
 protected:
@@ -155,12 +161,19 @@ private:
     /// whether to access nested table or a wrapper over nested (materialized table).
     ContextPtr nested_context;
 
+    /// Save nested storageID to be able to fetch it. It is set once nested is created and will be
+    /// updated only when nested is reloaded or renamed.
     std::optional nested_table_id;
 
     /// Needed only for the case of single MaterializePostgreSQL storage - in order to make
     /// delayed storage forwarding into replication handler.
     String remote_table_name;
 
+    /// Needed only for the case of single MaterializePostgreSQL storage, because for a create
+    /// query (not attach) the initial setup will be done immediately and an error message is thrown at once.
+    /// As a result, a single MaterializePostgreSQL storage is created only if its nested table is created.
+    /// In case of attach, this setup will be done in a separate thread in the background. It will also
+    /// check for the nested table and attempt to load it if it does not exist for some reason.
     bool is_attach;
 };
 
diff --git a/tests/integration/test_postgresql_replica_database_engine/test.py b/tests/integration/test_postgresql_replica_database_engine/test.py
index 6f5acb88f1f..abfb3427c99 100644
--- a/tests/integration/test_postgresql_replica_database_engine/test.py
+++ b/tests/integration/test_postgresql_replica_database_engine/test.py
@@ -270,7 +270,7 @@ def test_load_and_sync_subset_of_database_tables(started_cluster):
     instance.query('''
         CREATE DATABASE test_database
         ENGINE = MaterializePostgreSQL('postgres1:5432', 'postgres_database', 'postgres', 'mysecretpassword')
-        SETTINGS postgresql_replica_tables_list = '{}';
+        SETTINGS materialize_postgresql_tables_list = '{}';
         '''.format(publication_tables))
 
     assert 'test_database' in instance.query('SHOW DATABASES')
@@ -385,7 +385,7 @@ def test_table_schema_changes(started_cluster):
     instance.query(
         """CREATE DATABASE test_database
                ENGINE = MaterializePostgreSQL('postgres1:5432', 'postgres_database', 'postgres', 'mysecretpassword')
-               SETTINGS postgresql_replica_allow_minimal_ddl = 1;
+               SETTINGS materialize_postgresql_allow_automatic_update = 1;
         """)
 
     for i in range(NUM_TABLES):
diff --git a/tests/integration/test_storage_postgresql_replica/test.py b/tests/integration/test_storage_postgresql_replica/test.py
index ed61dcba935..20d21008629 100644
--- a/tests/integration/test_storage_postgresql_replica/test.py
+++ b/tests/integration/test_storage_postgresql_replica/test.py
@@ -326,7 +326,7 @@ def test_many_replication_messages(started_cluster):
             PRIMARY KEY(key))
         ENGINE = MaterializePostgreSQL(
             'postgres1:5432', 'postgres_database', 'postgresql_replica', 'postgres', 'mysecretpassword')
-        SETTINGS postgresql_replica_max_block_size = 50000;
+        SETTINGS materialize_postgresql_max_block_size = 50000;
         ''')
 
     result = instance.query('SELECT count() FROM test.postgresql_replica;')

From 89144ba8438aec054a408a1249925fe33e08b694 Mon Sep 17 00:00:00 2001
From: kssenii
Date: Mon, 3 May 2021 22:06:21 +0000
Subject: [PATCH 097/931] Try adding lock for storage

---
 src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp | 3 ++-
 src/Storages/PostgreSQL/StorageMaterializePostgreSQL.cpp | 4 ++--
 2 files changed, 4 insertions(+), 3 deletions(-)

diff --git a/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp b/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp
index 1675bf5870f..5b6e0ceb16c 100644
--- a/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp
+++ b/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp
@@ -467,11
+467,12 @@ void PostgreSQLReplicationHandler::reloadFromSnapshot(const std::vector
+        auto materialized_table_lock = materialized_storage->lockForShare(String(), context->getSettingsRef().lock_acquire_timeout);
         InterpreterRenameQuery(ast_rename, nested_context).execute();
 
         {
             auto nested_storage = DatabaseCatalog::instance().getTable(StorageID(table_id.database_name, table_id.table_name), nested_context);
-            auto table_lock = nested_storage->lockForShare(String(), context->getSettingsRef().lock_acquire_timeout);
+            auto nested_table_lock = nested_storage->lockForShare(String(), context->getSettingsRef().lock_acquire_timeout);
 
             auto nested_table_id = nested_storage->getStorageID();
             materialized_storage->setNestedStorageID(nested_table_id);
diff --git a/src/Storages/PostgreSQL/StorageMaterializePostgreSQL.cpp b/src/Storages/PostgreSQL/StorageMaterializePostgreSQL.cpp
index 11a44f7d022..38d4790a826 100644
--- a/src/Storages/PostgreSQL/StorageMaterializePostgreSQL.cpp
+++ b/src/Storages/PostgreSQL/StorageMaterializePostgreSQL.cpp
@@ -24,7 +24,6 @@
 #include
 #include
 
-/// TODO: Add test for allow_automatic_update setting in case of single storage.
 namespace DB
 {
 
@@ -75,7 +74,7 @@ StorageMaterializePostgreSQL::StorageMaterializePostgreSQL(
         metadata_path,
         getContext(),
         replication_settings->materialize_postgresql_max_block_size.value,
-        replication_settings->materialize_postgresql_allow_automatic_update.value, false);
+        /* allow_automatic_update */ false, /* is_materialize_postgresql_database */ false);
 }
 
@@ -252,6 +251,7 @@ Pipe StorageMaterializePostgreSQL::read(
     size_t max_block_size,
     unsigned num_streams)
 {
+    auto materialized_table_lock = lockForShare(String(), context_->getSettingsRef().lock_acquire_timeout);
     auto nested_table = getNested();
     return readFinalFromNestedStorage(nested_table, column_names, metadata_snapshot, query_info, context_, processed_stage, max_block_size, num_streams);
 

From ed5c2321cc0b542e379371a61f781c3deb452ba1 Mon Sep 17 00:00:00 2001
From: kssenii
Date: Tue, 4 May 2021 10:43:21 +0000
Subject: [PATCH 098/931] Some fixes

---
 .../materialize-postgresql.md                 | 31 +++++++++++++++++--
 .../integrations/materialize-postgresql.md    | 24 ++++++++++++--
 .../DatabaseMaterializePostgreSQL.cpp         |  2 +-
 .../DatabaseMaterializePostgreSQL.h           |  2 +-
 .../MaterializePostgreSQLConsumer.cpp         | 30 +++++++++---------
 .../PostgreSQLReplicationHandler.cpp          | 15 ++++++---
 .../PostgreSQL/PostgreSQLReplicationHandler.h |  2 +-
 .../StorageMaterializePostgreSQL.cpp          |  4 +--
 .../PostgreSQL/StorageMaterializePostgreSQL.h | 11 +++----
 9 files changed, 86 insertions(+), 35 deletions(-)

diff --git a/docs/en/engines/database-engines/materialize-postgresql.md b/docs/en/engines/database-engines/materialize-postgresql.md
index 5f1ee614704..f657035d050 100644
--- a/docs/en/engines/database-engines/materialize-postgresql.md
+++ b/docs/en/engines/database-engines/materialize-postgresql.md
@@ -7,8 +7,36 @@ toc_title: MaterializePostgreSQL
 
 ## Creating a Database {#creating-a-database}
 
+``` sql
+CREATE DATABASE test_database
+ENGINE = MaterializePostgreSQL('postgres1:5432', 'postgres_database', 'postgres_user', 'postgres_password');
+
+SELECT * FROM test_database.postgres_table;
+```
+
+
+## Settings {#settings}
+
+1. `materialize_postgresql_max_block_size` - Number of rows collected before flushing data into table. Default: `65536`.
+
+2. `materialize_postgresql_tables_list` - List of tables for MaterializePostgreSQL database engine. Default: empty (the whole database is replicated).
+
+3. 
`materialize_postgresql_allow_automatic_update` - Allow to reload table in the background when schema changes are detected. Default: `0` (`false`).
+
+``` sql
+CREATE DATABASE test_database
+ENGINE = MaterializePostgreSQL('postgres1:5432', 'postgres_database', 'postgres_user', 'postgres_password')
+SETTINGS materialize_postgresql_max_block_size = 65536,
+         materialize_postgresql_tables_list = 'table1,table2,table3';
+
+SELECT * FROM test_database.table1;
+```
+
+
 ## Requirements {#requirements}
 
+- Setting `wal_level` to `logical` and `max_replication_slots` to at least `2` in the postgresql config file.
+
 - Each replicated table must have one of the following **replica identity**:
 
 1. **default** (primary key)
@@ -36,6 +64,3 @@ postgres# SELECT CASE relreplident
 FROM pg_class
 WHERE oid = 'postgres_table'::regclass;
 ```
-
-- Setting `wal_level`to `logical` and `max_replication_slots` to at least `2` in the postgresql config file.
-
diff --git a/docs/en/engines/table-engines/integrations/materialize-postgresql.md b/docs/en/engines/table-engines/integrations/materialize-postgresql.md
index e3cbfbb087b..c40ea6b72db 100644
--- a/docs/en/engines/table-engines/integrations/materialize-postgresql.md
+++ b/docs/en/engines/table-engines/integrations/materialize-postgresql.md
@@ -7,10 +7,30 @@ toc_title: MateriaziePostgreSQL
 
 ## Creating a Table {#creating-a-table}
 
+``` sql
+CREATE TABLE test.postgresql_replica (key UInt64, value UInt64, _sign Int8 MATERIALIZED 1, _version UInt64 MATERIALIZED 1)
+ENGINE = MaterializePostgreSQL('postgres1:5432', 'postgres_database', 'postgresql_replica', 'postgres_user', 'postgres_password')
+PRIMARY KEY key;
+```
+
+
 ## Requirements {#requirements}
 
-- A table with engine `MaterializePostgreSQL` must have a primary key - the same as a replica identity index of a postgres table (See [details on replica identity index](../../database-engines/materialize-postgresql.md#requirements)).
+- Setting `wal_level` to `logical` and `max_replication_slots` to at least `2` in the postgresql config file.
+
+- A table with engine `MaterializePostgreSQL` must have a primary key - the same as a replica identity index (default: primary key) of a postgres table (see [details on replica identity index](../../database-engines/materialize-postgresql.md#requirements)).
 
 - Only database `Atomic` is allowed.
 
-- Setting `wal_level`to `logical` and `max_replication_slots` to at least `2` in the postgresql config file.
+
+## Virtual columns {#virtual-columns}
+
+- `_version`
+
+- `_sign`
+
+``` sql
+CREATE TABLE test.postgresql_replica (key UInt64, value UInt64, _sign Int8 MATERIALIZED 1, _version UInt64 MATERIALIZED 1)
+ENGINE = MaterializePostgreSQL('postgres1:5432', 'postgres_database', 'postgresql_replica', 'postgres_user', 'postgres_password')
+PRIMARY KEY key;
+```
diff --git a/src/Databases/PostgreSQL/DatabaseMaterializePostgreSQL.cpp b/src/Databases/PostgreSQL/DatabaseMaterializePostgreSQL.cpp
index f9cc9aa5ce2..d954c7ebe7f 100644
--- a/src/Databases/PostgreSQL/DatabaseMaterializePostgreSQL.cpp
+++ b/src/Databases/PostgreSQL/DatabaseMaterializePostgreSQL.cpp
@@ -79,7 +79,7 @@ void DatabaseMaterializePostgreSQL::startSynchronization()
         }
         else
         {
-            /// Nested table was already created and syncronized.
storage = StorageMaterializePostgreSQL::create(storage, getContext()); } diff --git a/src/Databases/PostgreSQL/DatabaseMaterializePostgreSQL.h b/src/Databases/PostgreSQL/DatabaseMaterializePostgreSQL.h index 931ef6836d5..c04564fccbb 100644 --- a/src/Databases/PostgreSQL/DatabaseMaterializePostgreSQL.h +++ b/src/Databases/PostgreSQL/DatabaseMaterializePostgreSQL.h @@ -51,7 +51,7 @@ public: void createTable(ContextPtr context, const String & name, const StoragePtr & table, const ASTPtr & query) override; - void dropTable(ContextPtr context_, const String & name, bool no_delay) override; + void dropTable(ContextPtr local_context, const String & name, bool no_delay) override; void drop(ContextPtr local_context) override; diff --git a/src/Storages/PostgreSQL/MaterializePostgreSQLConsumer.cpp b/src/Storages/PostgreSQL/MaterializePostgreSQLConsumer.cpp index 74a6419ac5d..664bdde7d70 100644 --- a/src/Storages/PostgreSQL/MaterializePostgreSQLConsumer.cpp +++ b/src/Storages/PostgreSQL/MaterializePostgreSQLConsumer.cpp @@ -53,12 +53,17 @@ MaterializePostgreSQLConsumer::MaterializePostgreSQLConsumer( void MaterializePostgreSQLConsumer::Buffer::createEmptyBuffer(StoragePtr storage) { const auto storage_metadata = storage->getInMemoryMetadataPtr(); - description.init(storage_metadata->getSampleBlock()); + const Block sample_block = storage_metadata->getSampleBlock(); + description.init(sample_block); columns = description.sample_block.cloneEmptyColumns(); const auto & storage_columns = storage_metadata->getColumns().getAllPhysical(); auto insert_columns = std::make_shared(); + auto table_id = storage->getStorageID(); + LOG_TRACE(&Poco::Logger::get("MaterializePostgreSQLBuffer"), "New buffer for table {}.{} ({}), structure: {}", + table_id.database_name, table_id.table_name, toString(table_id.uuid), sample_block.dumpStructure()); + assert(description.sample_block.columns() == storage_columns.size()); size_t idx = 0; @@ -158,7 +163,7 @@ T MaterializePostgreSQLConsumer::unhexN(const char * message, size_t pos, size_t Int64 MaterializePostgreSQLConsumer::readInt64(const char * message, size_t & pos, [[maybe_unused]] size_t size) { - assert(size > pos + 16); + assert(size >= pos + 16); Int64 result = unhexN(message, pos, 8); pos += 16; return result; @@ -167,7 +172,7 @@ Int64 MaterializePostgreSQLConsumer::readInt64(const char * message, size_t & po Int32 MaterializePostgreSQLConsumer::readInt32(const char * message, size_t & pos, [[maybe_unused]] size_t size) { - assert(size > pos + 8); + assert(size >= pos + 8); Int32 result = unhexN(message, pos, 4); pos += 8; return result; @@ -176,7 +181,7 @@ Int32 MaterializePostgreSQLConsumer::readInt32(const char * message, size_t & po Int16 MaterializePostgreSQLConsumer::readInt16(const char * message, size_t & pos, [[maybe_unused]] size_t size) { - assert(size > pos + 4); + assert(size >= pos + 4); Int16 result = unhexN(message, pos, 2); pos += 4; return result; @@ -185,7 +190,7 @@ Int16 MaterializePostgreSQLConsumer::readInt16(const char * message, size_t & po Int8 MaterializePostgreSQLConsumer::readInt8(const char * message, size_t & pos, [[maybe_unused]] size_t size) { - assert(size > pos + 2); + assert(size >= pos + 2); Int8 result = unhex2(message + pos); pos += 2; return result; @@ -569,7 +574,7 @@ bool MaterializePostgreSQLConsumer::isSyncAllowed(Int32 relation_id) void MaterializePostgreSQLConsumer::markTableAsSkipped(Int32 relation_id, const String & relation_name) { - /// Empty lsn string means - continue wating for valid lsn. 
+ /// Empty lsn string means - continue waiting for valid lsn. skip_list.insert({relation_id, ""}); /// Erase cached schema identifiers. It will be updated again once table is allowed back into replication stream @@ -680,14 +685,9 @@ bool MaterializePostgreSQLConsumer::consume(std::vector } /// Read up to max_block_size changed (approximately - in same cases might be more). - if (!readFromReplicationSlot()) - { - /// No data was read, reschedule. - return false; - } - - /// Some data was read, schedule as soon as possible. - return true; + /// false: no data was read, reschedule. + /// true: some data was read, schedule as soon as possible. + return readFromReplicationSlot(); } @@ -700,7 +700,7 @@ void MaterializePostgreSQLConsumer::updateNested(const String & table_name, Stor auto & buffer = buffers.find(table_name)->second; buffer.createEmptyBuffer(nested_storage); - /// Set start position to valid lsn. Before it was an empty string. Futher read for table allowed, if it has a valid lsn. + /// Set start position to valid lsn. Before it was an empty string. Further read for table allowed, if it has a valid lsn. skip_list[table_id] = table_start_lsn; } diff --git a/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp b/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp index 5b6e0ceb16c..179764564f2 100644 --- a/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp +++ b/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp @@ -17,6 +17,10 @@ namespace DB static const auto reschedule_ms = 500; +namespace ErrorCodes +{ + extern const int UNKNOWN_TABLE; +} PostgreSQLReplicationHandler::PostgreSQLReplicationHandler( const String & remote_database_name_, @@ -143,7 +147,7 @@ void PostgreSQLReplicationHandler::startSynchronization(bool throw_on_error) LOG_TRACE(log, "Loading {} tables...", materialized_storages.size()); for (const auto & [table_name, storage] : materialized_storages) { - auto materialized_storage = storage->as (); + auto * materialized_storage = storage->as (); try { /// Try load nested table, set materialized table metadata. @@ -160,7 +164,7 @@ void PostgreSQLReplicationHandler::startSynchronization(bool throw_on_error) nested_storages[table_name] = materialized_storage->prepare(); continue; } - catch (Exception & e) + catch (...) { e.addMessage("Table load failed for the second time"); } @@ -438,9 +442,10 @@ void PostgreSQLReplicationHandler::reloadFromSnapshot(const std::vectoras (); + context); + auto * materialized_storage = storage->as (); auto temp_materialized_storage = materialized_storage->createTemporary(); @@ -485,6 +490,8 @@ void PostgreSQLReplicationHandler::reloadFromSnapshot(const std::vectorstartup(); } diff --git a/src/Storages/PostgreSQL/StorageMaterializePostgreSQL.h b/src/Storages/PostgreSQL/StorageMaterializePostgreSQL.h index 470ea81cb25..6b896c24dfa 100644 --- a/src/Storages/PostgreSQL/StorageMaterializePostgreSQL.h +++ b/src/Storages/PostgreSQL/StorageMaterializePostgreSQL.h @@ -29,12 +29,11 @@ namespace DB * A user creates a table with engine MaterializePostgreSQL. Order by expression must be specified (needed for * nested ReplacingMergeTree table). This storage owns its own replication handler, which loads table data * from PostgreSQL into nested ReplacingMergeTree table. If table is not created, but attached, replication handler - * will not start loading-from-snapshot procedure, instead it will continue from last commited lsn. + * will not start loading-from-snapshot procedure, instead it will continue from last committed lsn. 
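
The nested ReplacingMergeTree table mentioned here resolves the replicated change stream through the `_sign`/`_version` columns documented earlier in this patch. A compact, self-contained model of that resolution rule, not taken from the codebase:

```cpp
// Keep, per key, the row with the highest _version; a final SELECT then drops
// keys whose latest state is a delete (_sign = -1). Illustrative only.
#include <cstdint>
#include <iostream>
#include <iterator>
#include <map>
#include <vector>

struct Row
{
    uint64_t key;
    int64_t value;
    int8_t sign;       // +1 = state, -1 = delete
    uint64_t version;  // grows with every change coming from PostgreSQL
};

std::map<uint64_t, Row> finalState(const std::vector<Row> & stream)
{
    std::map<uint64_t, Row> latest;
    for (const auto & row : stream)
    {
        auto it = latest.find(row.key);
        if (it == latest.end() || it->second.version < row.version)
            latest[row.key] = row;
    }
    for (auto it = latest.begin(); it != latest.end();)
        it = (it->second.sign < 0) ? latest.erase(it) : std::next(it);
    return latest;
}

int main()
{
    auto state = finalState({{1, 10, 1, 1}, {1, 11, 1, 2}, {2, 20, 1, 1}, {2, 20, -1, 2}});
    for (const auto & [key, row] : state)
        std::cout << key << " -> " << row.value << '\n';  // prints only: 1 -> 11
}
```
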
* * Main point: Both tables exist on disk; database engine interacts only with the main table and main table takes * total ownershot over nested table. Nested table has name `main_table_uuid` + NESTED_SUFFIX. * - * TODO: a check is needed for existance of nested, now this case is checked via replication slot existance. **/ @@ -43,13 +42,13 @@ namespace DB * MaterializePostgreSQL table exists only in memory and acts as a wrapper for nested table, i.e. only provides an * interface to work with nested table. Both tables share the same StorageID. * - * Main table is never created or droppped via database method. The only way database engine interacts with + * Main table is never created or dropped via database method. The only way database engine interacts with * MaterializePostgreSQL table - in tryGetTable() method, a MaterializePostgreSQL table is returned in order to wrap * and redirect read requests. Set of such wrapper-tables is cached inside database engine. All other methods in * regard to materializePostgreSQL table are handled by replication handler. * * All database methods, apart from tryGetTable(), are devoted only to nested table. - * TODO: It makes sence to allow rename method for MaterializePostgreSQL table via database method. + * NOTE: It makes sense to allow rename method for MaterializePostgreSQL table via database method. * TODO: Make sure replication-to-table data channel is done only by relation_id. * * Also main table has the same InMemoryMetadata as its nested table, so if metadata of nested table changes - main table also has @@ -70,7 +69,7 @@ class StorageMaterializePostgreSQL final : public ext::shared_ptr_helper Date: Wed, 5 May 2021 09:00:24 +0000 Subject: [PATCH 099/931] Add untested ProcessorStatisticsOS class --- src/Common/ProcessorStatisticsOS.cpp | 208 +++++++++++++++++++++++++++ src/Common/ProcessorStatisticsOS.h | 80 +++++++++++ 2 files changed, 288 insertions(+) create mode 100644 src/Common/ProcessorStatisticsOS.cpp create mode 100644 src/Common/ProcessorStatisticsOS.h diff --git a/src/Common/ProcessorStatisticsOS.cpp b/src/Common/ProcessorStatisticsOS.cpp new file mode 100644 index 00000000000..e83cc0bdf6f --- /dev/null +++ b/src/Common/ProcessorStatisticsOS.cpp @@ -0,0 +1,208 @@ +#if defined(OS_LINUX) + +#include +#include +#include +#include +#include +#include + +#include "ProcessorStatisticsOS.h" + + +#include + +#include + +#include + +#include +#include +#include + +namespace DB +{ + +namespace ErrorCodes +{ + extern const int FILE_DOESNT_EXIST; + extern const int CANNOT_OPEN_FILE; + extern const int CANNOT_READ_FROM_FILE_DESCRIPTOR; + extern const int CANNOT_CLOSE_FILE; +} + +static constexpr auto loadavg_filename = "/proc/loadavg"; +static constexpr auto procst_filename = "/proc/stat"; +static constexpr auto cpuinfo_filename = "/proc/cpuinfo"; + +ProcessorStatisticsOS::ProcessorStatisticsOS() + : loadavg_fd(openWithCheck(loadavg_filename, O_RDONLY | O_CLOEXEC)) + , procst_fd(openWithCheck(procst_filename, O_RDONLY | O_CLOEXEC)) + , cpuinfo_fd(openWithCheck(cpuinfo_filename, O_RDONLY | O_CLOEXEC)) +{} + +ProcessorStatisticsOS::~ProcessorStatisticsOS() +{ + closeFD(loadavg_fd, String(loadavg_filename)); + closeFD(procst_fd, String(procst_filename)); + closeFD(cpuinfo_fd, String(cpuinfo_filename)); +} + +int ProcessorStatisticsOS::openWithCheck(const String & filename, int flags) +{ + int fd = ::open(filename.c_str(), flags); + checkFDAfterOpen(fd, filename); + return fd; +} + +void ProcessorStatisticsOS::checkFDAfterOpen(int fd, const String & 
filename) +{ + if (-1 == fd) + throwFromErrno( + "Cannot open file" + String(filename), + errno == ENOENT ? ErrorCodes::FILE_DOESNT_EXIST : ErrorCodes::CANNOT_OPEN_FILE); +} + +void ProcessorStatisticsOS::closeFD(int fd, const String & filename) +{ + if (0 != ::close(fd)) + { + try + { + throwFromErrno( + "File descriptor for \"" + filename + "\" could not be closed. " + "Something seems to have gone wrong. Inspect errno.", ErrorCodes::CANNOT_CLOSE_FILE); + } catch(const ErrnoException&) + { + DB::tryLogCurrentException(__PRETTY_FUNCTION__); + } + } +} + +ProcessorStatisticsOS::Data ProcessorStatisticsOS::ProcessorStatisticsOS::get() const +{ + Data data; + readLoadavg(data); + readProcst(data); + readCpuinfo(data); + return data; +} + +void ProcessorStatisticsOS::readLoadavg(Data & data) const +{ + constexpr size_t buf_size = 1024; + char buf[buf_size]; + + ssize_t res = 0; + + do + { + res = ::pread(loadavg_fd, buf, buf_size, 0); + + if (-1 == res) + { + if (errno == EINTR) + continue; + + throwFromErrno("Cannot read from file " + String(loadavg_filename), + ErrorCodes::CANNOT_READ_FROM_FILE_DESCRIPTOR); + } + + assert(res >= 0); + break; + } while (true); + + ReadBufferFromMemory in(buf, res); + + readFloatAndSkipWhitespaceIfAny(data.avg1, in); + readFloatAndSkipWhitespaceIfAny(data.avg5, in); + readFloatAndSkipWhitespaceIfAny(data.avg15, in); +} + +void ProcessorStatisticsOS::readProcst(Data & data) const +{ + MMappedFileDescriptor mapped_procst(procst_fd, 0); + ReadBufferFromMemory in(mapped_procst.getData(), + mapped_procst.getLength()); + + String field_name, field_val; + uint64_t unused; + + readStringUntilWhitespaceAndSkipWhitespaceIfAny(field_name, in); + + readIntTextAndSkipWhitespaceIfAny(data.user_time, in); + readIntTextAndSkipWhitespaceIfAny(data.nice_time, in); + readIntTextAndSkipWhitespaceIfAny(data.system_time, in); + readIntTextAndSkipWhitespaceIfAny(data.idle_time, in); + readIntTextAndSkipWhitespaceIfAny(data.iowait_time, in); + + readIntTextAndSkipWhitespaceIfAny(unused, in); + readIntTextAndSkipWhitespaceIfAny(unused, in); + + readIntTextAndSkipWhitespaceIfAny(data.steal_time, in); + readIntTextAndSkipWhitespaceIfAny(data.guest_time, in); + readIntTextAndSkipWhitespaceIfAny(data.nice_time, in); + + do + { + readStringUntilWhitespaceAndSkipWhitespaceIfAny(field_name, in); + readStringUntilWhitespaceAndSkipWhitespaceIfAny(field_val, in); + } while (field_name != String("processes")); + + data.processes = static_cast(std::stoul(field_val)); + + readStringUntilWhitespaceAndSkipWhitespaceIfAny(field_name, in); + readIntTextAndSkipWhitespaceIfAny(data.procs_running, in); + + readStringUntilWhitespaceAndSkipWhitespaceIfAny(field_name, in); + readIntTextAndSkipWhitespaceIfAny(data.procs_blocked, in); +} + +void ProcessorStatisticsOS::readCpuinfo(Data & data) const +{ + MMappedFileDescriptor mapped_cpuinfo(cpuinfo_fd, 0); + ReadBufferFromMemory in(mapped_cpuinfo.getData(), + mapped_cpuinfo.getLength()); + + String field_name, field_val; + char unused; + + do + { + + readStringUntilWhitespaceAndSkipWhitespaceIfAny(field_name, in); + readCharAndSkipWhitespaceIfAny(unused, in); + readStringUntilWhitespaceAndSkipWhitespaceIfAny(field_val, in); + } while (field_name != String("cpu MHz")); + + data.freq = stof(field_val); +} + +template +void ProcessorStatisticsOS::readIntTextAndSkipWhitespaceIfAny(T& x, ReadBuffer& buf) +{ + readIntText(x, buf); + skipWhitespaceIfAny(buf); +} + +void ProcessorStatisticsOS::readStringUntilWhitespaceAndSkipWhitespaceIfAny(String & s, ReadBuffer & 
buf) +{ + readStringUntilWhitespace(s, buf); + skipWhitespaceIfAny(buf); +} + +void ProcessorStatisticsOS::readCharAndSkipWhitespaceIfAny(char & c, ReadBuffer & buf) +{ + readChar(c, buf); + skipWhitespaceIfAny(buf); +} + +void ProcessorStatisticsOS::readFloatAndSkipWhitespaceIfAny(float & f, ReadBuffer & buf) +{ + readFloatText(f, buf); + skipWhitespaceIfAny(buf); +} + +} + +#endif diff --git a/src/Common/ProcessorStatisticsOS.h b/src/Common/ProcessorStatisticsOS.h new file mode 100644 index 00000000000..1ae9f6ba760 --- /dev/null +++ b/src/Common/ProcessorStatisticsOS.h @@ -0,0 +1,80 @@ +#pragma once +#if defined(OS_LINUX) + +#include +#include + +#include + +#include + +namespace DB +{ + +/** Opens files: /proc/loadav, /proc/stat, /proc/cpuinfo. Keeps it open and reads processor statistics. + * This is Linux specific. + * See: man procfs + */ + +class ProcessorStatisticsOS +{ +public: + struct Data + { + float avg1; + float avg5; + float avg15; + + /** The amount of time, measured in units of USER_HZ + * (1/100ths of a second on most architectures, use sysconf(_SC_CLK_TCK) to obtain the right value) + */ + uint64_t user_time; + uint64_t nice_time; + uint64_t system_time; + uint64_t idle_time; + uint64_t iowait_time; + uint64_t steal_time; + uint64_t guest_time; + uint64_t guest_nice_time; + + uint32_t processes; + uint32_t procs_running; + uint32_t procs_blocked; + + float freq; + }; + + ProcessorStatisticsOS(); + ~ProcessorStatisticsOS(); + + Data get() const; + +private: + static int openWithCheck(const String & filename, int flags); + + static void checkFDAfterOpen(int fd, const String & filename); + + static void closeFD(int fd, const String & filename); + + template + static void readIntTextAndSkipWhitespaceIfAny(T & x, ReadBuffer & buf); + + static void readStringUntilWhitespaceAndSkipWhitespaceIfAny(String & s, ReadBuffer & buf); + + static void readCharAndSkipWhitespaceIfAny(char & c, ReadBuffer & buf); + + static void readFloatAndSkipWhitespaceIfAny(float & f, ReadBuffer & buf); + + void readLoadavg(Data & data) const; + void readProcst(Data & data) const; + void readCpuinfo(Data & data) const; + +private: + int loadavg_fd; + int procst_fd; + int cpuinfo_fd; +}; + +} + +#endif From 6765858e96ca3c2a99ccf67a1375878acae0f2d1 Mon Sep 17 00:00:00 2001 From: elevankoff Date: Thu, 6 May 2021 10:12:01 +0000 Subject: [PATCH 100/931] Update logic and functionality (untested) --- src/Common/ProcessorStatisticsOS.cpp | 212 +++++++++++++-------------- src/Common/ProcessorStatisticsOS.h | 76 ++++++---- 2 files changed, 152 insertions(+), 136 deletions(-) diff --git a/src/Common/ProcessorStatisticsOS.cpp b/src/Common/ProcessorStatisticsOS.cpp index e83cc0bdf6f..252b6b776e9 100644 --- a/src/Common/ProcessorStatisticsOS.cpp +++ b/src/Common/ProcessorStatisticsOS.cpp @@ -1,11 +1,9 @@ #if defined(OS_LINUX) -#include -#include -#include #include #include #include +#include #include "ProcessorStatisticsOS.h" @@ -16,9 +14,8 @@ #include -#include +#include #include -#include namespace DB { @@ -35,147 +32,144 @@ static constexpr auto loadavg_filename = "/proc/loadavg"; static constexpr auto procst_filename = "/proc/stat"; static constexpr auto cpuinfo_filename = "/proc/cpuinfo"; +static const long USER_HZ = sysconf(_SC_CLK_TCK); + ProcessorStatisticsOS::ProcessorStatisticsOS() - : loadavg_fd(openWithCheck(loadavg_filename, O_RDONLY | O_CLOEXEC)) - , procst_fd(openWithCheck(procst_filename, O_RDONLY | O_CLOEXEC)) - , cpuinfo_fd(openWithCheck(cpuinfo_filename, O_RDONLY | O_CLOEXEC)) -{} - 
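
The `USER_HZ` constant introduced above is the key to interpreting `/proc/stat`: the kernel reports CPU times in clock ticks, and dividing by `sysconf(_SC_CLK_TCK)` converts them to seconds (PATCH 102 below corrects the initial `*= USER_HZ` to `/= USER_HZ` accordingly). A standalone Linux demo of the conversion, illustrative only, with minimal error handling:

```cpp
#include <unistd.h>

#include <cstdint>
#include <fstream>
#include <iostream>
#include <string>

int main()
{
    // Ticks per second, usually 100 on Linux.
    const auto user_hz = static_cast<uint64_t>(sysconf(_SC_CLK_TCK));

    std::ifstream proc_stat("/proc/stat");
    std::string cpu_label;
    uint64_t user_ticks = 0, nice_ticks = 0, system_ticks = 0, idle_ticks = 0;
    // The aggregate "cpu" line comes first: cpu user nice system idle ...
    proc_stat >> cpu_label >> user_ticks >> nice_ticks >> system_ticks >> idle_ticks;

    std::cout << "user:   " << user_ticks / user_hz << " s\n"
              << "system: " << system_ticks / user_hz << " s\n"
              << "idle:   " << idle_ticks / user_hz << " s\n";
}
```
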
-ProcessorStatisticsOS::~ProcessorStatisticsOS() + : loadavg_in(loadavg_filename, DBMS_DEFAULT_BUFFER_SIZE, O_RDONLY | O_CLOEXEC) + , procst_in(procst_filename, DBMS_DEFAULT_BUFFER_SIZE, O_RDONLY | O_CLOEXEC) + , cpuinfo_in(cpuinfo_filename, DBMS_DEFAULT_BUFFER_SIZE, O_RDONLY | O_CLOEXEC) { - closeFD(loadavg_fd, String(loadavg_filename)); - closeFD(procst_fd, String(procst_filename)); - closeFD(cpuinfo_fd, String(cpuinfo_filename)); + ProcStLoad unused; + calcStLoad(unused); } -int ProcessorStatisticsOS::openWithCheck(const String & filename, int flags) -{ - int fd = ::open(filename.c_str(), flags); - checkFDAfterOpen(fd, filename); - return fd; -} +ProcessorStatisticsOS::~ProcessorStatisticsOS() {} -void ProcessorStatisticsOS::checkFDAfterOpen(int fd, const String & filename) -{ - if (-1 == fd) - throwFromErrno( - "Cannot open file" + String(filename), - errno == ENOENT ? ErrorCodes::FILE_DOESNT_EXIST : ErrorCodes::CANNOT_OPEN_FILE); -} - -void ProcessorStatisticsOS::closeFD(int fd, const String & filename) -{ - if (0 != ::close(fd)) - { - try - { - throwFromErrno( - "File descriptor for \"" + filename + "\" could not be closed. " - "Something seems to have gone wrong. Inspect errno.", ErrorCodes::CANNOT_CLOSE_FILE); - } catch(const ErrnoException&) - { - DB::tryLogCurrentException(__PRETTY_FUNCTION__); - } - } -} - -ProcessorStatisticsOS::Data ProcessorStatisticsOS::ProcessorStatisticsOS::get() const +ProcessorStatisticsOS::Data ProcessorStatisticsOS::ProcessorStatisticsOS::get() { Data data; - readLoadavg(data); - readProcst(data); - readCpuinfo(data); + readLoadavg(data.loadavg); + calcStLoad(data.stload); + readFreq(data.freq); return data; } -void ProcessorStatisticsOS::readLoadavg(Data & data) const +void ProcessorStatisticsOS::readLoadavg(ProcLoadavg& loadavg) { - constexpr size_t buf_size = 1024; - char buf[buf_size]; - - ssize_t res = 0; - - do - { - res = ::pread(loadavg_fd, buf, buf_size, 0); - - if (-1 == res) - { - if (errno == EINTR) - continue; - - throwFromErrno("Cannot read from file " + String(loadavg_filename), - ErrorCodes::CANNOT_READ_FROM_FILE_DESCRIPTOR); - } - - assert(res >= 0); - break; - } while (true); - - ReadBufferFromMemory in(buf, res); + loadavg_in.seek(0, SEEK_SET); - readFloatAndSkipWhitespaceIfAny(data.avg1, in); - readFloatAndSkipWhitespaceIfAny(data.avg5, in); - readFloatAndSkipWhitespaceIfAny(data.avg15, in); + readFloatAndSkipWhitespaceIfAny(loadavg.avg1, loadavg_in); + readFloatAndSkipWhitespaceIfAny(loadavg.avg5, loadavg_in); + readFloatAndSkipWhitespaceIfAny(loadavg.avg15, loadavg_in); } -void ProcessorStatisticsOS::readProcst(Data & data) const +void ProcessorStatisticsOS::calcStLoad(ProcStLoad & stload) { - MMappedFileDescriptor mapped_procst(procst_fd, 0); - ReadBufferFromMemory in(mapped_procst.getData(), - mapped_procst.getLength()); + ProcTime cur_proc_time; + readProcTimeAndProcesses(cur_proc_time, stload); + + std::time_t cur_time = std::time(nullptr); + float time_dif = static_cast(cur_time - last_stload_call_time); + + stload.user_time = + (cur_proc_time.user - last_proc_time.user) / time_dif; + stload.nice_time = + (cur_proc_time.nice - last_proc_time.nice) / time_dif; + stload.system_time = + (cur_proc_time.system - last_proc_time.system) / time_dif; + stload.idle_time = + (cur_proc_time.idle - last_proc_time.idle) / time_dif; + stload.iowait_time = + (cur_proc_time.iowait - last_proc_time.iowait) / time_dif; + stload.steal_time = + (cur_proc_time.steal - last_proc_time.steal) / time_dif; + stload.guest_time = + (cur_proc_time.guest - 
last_proc_time.guest) / time_dif; + stload.guest_nice_time = + (cur_proc_time.guest_nice - last_proc_time.guest_nice) / time_dif; + + last_stload_call_time = cur_time; + last_proc_time = cur_proc_time; +} + +void ProcessorStatisticsOS::readProcTimeAndProcesses(ProcTime & proc_time, ProcStLoad& stload) +{ + procst_in.seek(0, SEEK_SET); String field_name, field_val; uint64_t unused; - readStringUntilWhitespaceAndSkipWhitespaceIfAny(field_name, in); + readStringUntilWhitespaceAndSkipWhitespaceIfAny(field_name, procst_in); - readIntTextAndSkipWhitespaceIfAny(data.user_time, in); - readIntTextAndSkipWhitespaceIfAny(data.nice_time, in); - readIntTextAndSkipWhitespaceIfAny(data.system_time, in); - readIntTextAndSkipWhitespaceIfAny(data.idle_time, in); - readIntTextAndSkipWhitespaceIfAny(data.iowait_time, in); + readIntTextAndSkipWhitespaceIfAny(proc_time.user, procst_in); + readIntTextAndSkipWhitespaceIfAny(proc_time.nice, procst_in); + readIntTextAndSkipWhitespaceIfAny(proc_time.system, procst_in); + readIntTextAndSkipWhitespaceIfAny(proc_time.idle, procst_in); + readIntTextAndSkipWhitespaceIfAny(proc_time.iowait, procst_in); + proc_time.user *= USER_HZ; + proc_time.nice *= USER_HZ; + proc_time.system *= USER_HZ; + proc_time.idle *= USER_HZ; + proc_time.iowait *= USER_HZ; - readIntTextAndSkipWhitespaceIfAny(unused, in); - readIntTextAndSkipWhitespaceIfAny(unused, in); + readIntTextAndSkipWhitespaceIfAny(unused, procst_in); + readIntTextAndSkipWhitespaceIfAny(unused, procst_in); - readIntTextAndSkipWhitespaceIfAny(data.steal_time, in); - readIntTextAndSkipWhitespaceIfAny(data.guest_time, in); - readIntTextAndSkipWhitespaceIfAny(data.nice_time, in); + readIntTextAndSkipWhitespaceIfAny(proc_time.steal, procst_in); + readIntTextAndSkipWhitespaceIfAny(proc_time.guest, procst_in); + readIntTextAndSkipWhitespaceIfAny(proc_time.guest_nice, procst_in); + proc_time.steal *= USER_HZ; + proc_time.guest *= USER_HZ; + proc_time.guest_nice *= USER_HZ; do { - readStringUntilWhitespaceAndSkipWhitespaceIfAny(field_name, in); - readStringUntilWhitespaceAndSkipWhitespaceIfAny(field_val, in); + readStringUntilWhitespaceAndSkipWhitespaceIfAny(field_name, procst_in); + readStringUntilWhitespaceAndSkipWhitespaceIfAny(field_val, procst_in); } while (field_name != String("processes")); - data.processes = static_cast(std::stoul(field_val)); + stload.processes = static_cast(std::stoul(field_val)); - readStringUntilWhitespaceAndSkipWhitespaceIfAny(field_name, in); - readIntTextAndSkipWhitespaceIfAny(data.procs_running, in); + readStringUntilWhitespaceAndSkipWhitespaceIfAny(field_name, procst_in); + readIntTextAndSkipWhitespaceIfAny(stload.procs_running, procst_in); - readStringUntilWhitespaceAndSkipWhitespaceIfAny(field_name, in); - readIntTextAndSkipWhitespaceIfAny(data.procs_blocked, in); + readStringUntilWhitespaceAndSkipWhitespaceIfAny(field_name, procst_in); + readIntTextAndSkipWhitespaceIfAny(stload.procs_blocked, procst_in); } -void ProcessorStatisticsOS::readCpuinfo(Data & data) const -{ - MMappedFileDescriptor mapped_cpuinfo(cpuinfo_fd, 0); - ReadBufferFromMemory in(mapped_cpuinfo.getData(), - mapped_cpuinfo.getLength()); +void ProcessorStatisticsOS::readFreq(ProcFreq & freq) +{ + cpuinfo_in.seek(0, SEEK_SET); String field_name, field_val; char unused; + int cpu_count = 0; - do + do { + do + { + readStringUntilWhitespaceAndSkipWhitespaceIfAny(field_name, cpuinfo_in); + } while (!cpuinfo_in.eof() && field_name != String("cpu MHz")); - readStringUntilWhitespaceAndSkipWhitespaceIfAny(field_name, in); - 
readCharAndSkipWhitespaceIfAny(unused, in); - readStringUntilWhitespaceAndSkipWhitespaceIfAny(field_val, in); - } while (field_name != String("cpu MHz")); - - data.freq = stof(field_val); + if (cpuinfo_in.eof()) + break; + + readCharAndSkipWhitespaceIfAny(unused, cpuinfo_in); + readStringUntilWhitespaceAndSkipWhitespaceIfAny(field_val, cpuinfo_in); + + cpu_count++; + + float cur_cpu_freq = stof(field_val); + + freq.avg += cur_cpu_freq; + freq.max = (cpu_count == 1 ? cur_cpu_freq : + std::max(freq.max, cur_cpu_freq)); + freq.min = (cpu_count == 1 ? cur_cpu_freq : + std::min(freq.min, cur_cpu_freq)); + } while (true); + + freq.avg /= static_cast(cpu_count); } template diff --git a/src/Common/ProcessorStatisticsOS.h b/src/Common/ProcessorStatisticsOS.h index 1ae9f6ba760..7bc77496f4a 100644 --- a/src/Common/ProcessorStatisticsOS.h +++ b/src/Common/ProcessorStatisticsOS.h @@ -6,7 +6,7 @@ #include -#include +#include namespace DB { @@ -19,42 +19,59 @@ namespace DB class ProcessorStatisticsOS { public: - struct Data - { + struct ProcLoadavg { float avg1; float avg5; float avg15; + }; - /** The amount of time, measured in units of USER_HZ - * (1/100ths of a second on most architectures, use sysconf(_SC_CLK_TCK) to obtain the right value) - */ - uint64_t user_time; - uint64_t nice_time; - uint64_t system_time; - uint64_t idle_time; - uint64_t iowait_time; - uint64_t steal_time; - uint64_t guest_time; - uint64_t guest_nice_time; + struct ProcStLoad { + float user_time; + float nice_time; + float system_time; + float idle_time; + float iowait_time; + float steal_time; + float guest_time; + float guest_nice_time; uint32_t processes; uint32_t procs_running; uint32_t procs_blocked; + }; - float freq; + struct ProcFreq { + float max; + float min; + float avg; + }; + + struct Data + { + ProcLoadavg loadavg; + ProcStLoad stload; + ProcFreq freq; }; ProcessorStatisticsOS(); ~ProcessorStatisticsOS(); - Data get() const; + Data get(); private: - static int openWithCheck(const String & filename, int flags); - - static void checkFDAfterOpen(int fd, const String & filename); - - static void closeFD(int fd, const String & filename); + struct ProcTime { + /** The amount of time, measured in units of USER_HZ + * (1/100ths of a second on most architectures, use sysconf(_SC_CLK_TCK) to obtain the right value) + */ + uint64_t user; + uint64_t nice; + uint64_t system; + uint64_t idle; + uint64_t iowait; + uint64_t steal; + uint64_t guest; + uint64_t guest_nice; + }; template static void readIntTextAndSkipWhitespaceIfAny(T & x, ReadBuffer & buf); @@ -65,14 +82,19 @@ private: static void readFloatAndSkipWhitespaceIfAny(float & f, ReadBuffer & buf); - void readLoadavg(Data & data) const; - void readProcst(Data & data) const; - void readCpuinfo(Data & data) const; + void readLoadavg(ProcLoadavg & loadavg); + void calcStLoad(ProcStLoad & stload); + void readFreq(ProcFreq & freq); + + void readProcTimeAndProcesses(ProcTime & proc_time, ProcStLoad& stload); private: - int loadavg_fd; - int procst_fd; - int cpuinfo_fd; + ReadBufferFromFile loadavg_in; + ReadBufferFromFile procst_in; + ReadBufferFromFile cpuinfo_in; + + std::time_t last_stload_call_time; + ProcTime last_proc_time; }; } From 69ccdb3aa97d038a3857ad0d9773f61f6b2b59a3 Mon Sep 17 00:00:00 2001 From: elevankoff Date: Thu, 6 May 2021 11:14:51 +0000 Subject: [PATCH 101/931] Changed comment for ProcTime structure --- src/Common/ProcessorStatisticsOS.h | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/src/Common/ProcessorStatisticsOS.h 
b/src/Common/ProcessorStatisticsOS.h index 7bc77496f4a..51bb8c3a157 100644 --- a/src/Common/ProcessorStatisticsOS.h +++ b/src/Common/ProcessorStatisticsOS.h @@ -60,9 +60,7 @@ public: private: struct ProcTime { - /** The amount of time, measured in units of USER_HZ - * (1/100ths of a second on most architectures, use sysconf(_SC_CLK_TCK) to obtain the right value) - */ + // The amount of time, measured in seconds uint64_t user; uint64_t nice; uint64_t system; From e4f2f36c1dfd019c3eaf67720e1aaf6072fc6e15 Mon Sep 17 00:00:00 2001 From: elevankoff Date: Thu, 6 May 2021 11:35:11 +0000 Subject: [PATCH 102/931] Changed "*= USER_HZ" to "/= USER_HZ" --- src/Common/ProcessorStatisticsOS.cpp | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/src/Common/ProcessorStatisticsOS.cpp b/src/Common/ProcessorStatisticsOS.cpp index 252b6b776e9..e05cb589f95 100644 --- a/src/Common/ProcessorStatisticsOS.cpp +++ b/src/Common/ProcessorStatisticsOS.cpp @@ -106,11 +106,11 @@ void ProcessorStatisticsOS::readProcTimeAndProcesses(ProcTime & proc_time, ProcS readIntTextAndSkipWhitespaceIfAny(proc_time.system, procst_in); readIntTextAndSkipWhitespaceIfAny(proc_time.idle, procst_in); readIntTextAndSkipWhitespaceIfAny(proc_time.iowait, procst_in); - proc_time.user *= USER_HZ; - proc_time.nice *= USER_HZ; - proc_time.system *= USER_HZ; - proc_time.idle *= USER_HZ; - proc_time.iowait *= USER_HZ; + proc_time.user /= USER_HZ; + proc_time.nice /= USER_HZ; + proc_time.system /= USER_HZ; + proc_time.idle /= USER_HZ; + proc_time.iowait /= USER_HZ; readIntTextAndSkipWhitespaceIfAny(unused, procst_in); readIntTextAndSkipWhitespaceIfAny(unused, procst_in); @@ -118,9 +118,9 @@ void ProcessorStatisticsOS::readProcTimeAndProcesses(ProcTime & proc_time, ProcS readIntTextAndSkipWhitespaceIfAny(proc_time.steal, procst_in); readIntTextAndSkipWhitespaceIfAny(proc_time.guest, procst_in); readIntTextAndSkipWhitespaceIfAny(proc_time.guest_nice, procst_in); - proc_time.steal *= USER_HZ; - proc_time.guest *= USER_HZ; - proc_time.guest_nice *= USER_HZ; + proc_time.steal /= USER_HZ; + proc_time.guest /= USER_HZ; + proc_time.guest_nice /= USER_HZ; do { From a163eeb12e7cd0690b1ca2cb2ae39138e4440cab Mon Sep 17 00:00:00 2001 From: elevankoff Date: Thu, 6 May 2021 11:55:14 +0000 Subject: [PATCH 103/931] Delete whitespace --- src/Common/ProcessorStatisticsOS.cpp | 1 - 1 file changed, 1 deletion(-) diff --git a/src/Common/ProcessorStatisticsOS.cpp b/src/Common/ProcessorStatisticsOS.cpp index e05cb589f95..9046db431e7 100644 --- a/src/Common/ProcessorStatisticsOS.cpp +++ b/src/Common/ProcessorStatisticsOS.cpp @@ -7,7 +7,6 @@ #include "ProcessorStatisticsOS.h" - #include #include From cca53e59f54ee17751d67dbb6cbb20a4402d6169 Mon Sep 17 00:00:00 2001 From: hexiaoting Date: Fri, 7 May 2021 17:17:43 +0800 Subject: [PATCH 104/931] Fix bug --- src/Interpreters/OpenTelemetrySpanLog.cpp | 1 - 1 file changed, 1 deletion(-) diff --git a/src/Interpreters/OpenTelemetrySpanLog.cpp b/src/Interpreters/OpenTelemetrySpanLog.cpp index e3688f46ac8..e19e7b8ffaa 100644 --- a/src/Interpreters/OpenTelemetrySpanLog.cpp +++ b/src/Interpreters/OpenTelemetrySpanLog.cpp @@ -49,7 +49,6 @@ void OpenTelemetrySpanLogElement::appendToBlock(MutableColumns & columns) const columns[i++]->insert(start_time_us); columns[i++]->insert(finish_time_us); columns[i++]->insert(DateLUT::instance().toDayNum(finish_time_us / 1000000).toUnderType()); - columns[i++]->insert(attribute_names); // The user might add some ints values, and we will have Int Field, and the // 
insert will fail because the column requires Strings. Convert the fields // here, because it's hard to remember to convert them in all other places. From 505b0516778cb62cc1ffec0c5bf1948932a43f2b Mon Sep 17 00:00:00 2001 From: elevankoff Date: Fri, 7 May 2021 10:16:32 +0000 Subject: [PATCH 105/931] Fixed some bugs --- src/Common/ProcessorStatisticsOS.cpp | 19 ++++++++++++++----- src/Common/ProcessorStatisticsOS.h | 2 ++ 2 files changed, 16 insertions(+), 5 deletions(-) diff --git a/src/Common/ProcessorStatisticsOS.cpp b/src/Common/ProcessorStatisticsOS.cpp index 9046db431e7..7b341cbea44 100644 --- a/src/Common/ProcessorStatisticsOS.cpp +++ b/src/Common/ProcessorStatisticsOS.cpp @@ -7,6 +7,8 @@ #include "ProcessorStatisticsOS.h" +#include "Poco/String.h" + #include #include @@ -31,7 +33,7 @@ static constexpr auto loadavg_filename = "/proc/loadavg"; static constexpr auto procst_filename = "/proc/stat"; static constexpr auto cpuinfo_filename = "/proc/cpuinfo"; -static const long USER_HZ = sysconf(_SC_CLK_TCK); +static const uint64_t USER_HZ = static_cast(sysconf(_SC_CLK_TCK)); ProcessorStatisticsOS::ProcessorStatisticsOS() : loadavg_in(loadavg_filename, DBMS_DEFAULT_BUFFER_SIZE, O_RDONLY | O_CLOEXEC) @@ -97,7 +99,7 @@ void ProcessorStatisticsOS::readProcTimeAndProcesses(ProcTime & proc_time, ProcS String field_name, field_val; uint64_t unused; - + readStringUntilWhitespaceAndSkipWhitespaceIfAny(field_name, procst_in); readIntTextAndSkipWhitespaceIfAny(proc_time.user, procst_in); @@ -124,7 +126,8 @@ void ProcessorStatisticsOS::readProcTimeAndProcesses(ProcTime & proc_time, ProcS do { readStringUntilWhitespaceAndSkipWhitespaceIfAny(field_name, procst_in); - readStringUntilWhitespaceAndSkipWhitespaceIfAny(field_val, procst_in); + readString(field_val, procst_in); + skipWhitespaceIfAny(procst_in); } while (field_name != String("processes")); stload.processes = static_cast(std::stoul(field_val)); @@ -148,7 +151,7 @@ void ProcessorStatisticsOS::readFreq(ProcFreq & freq) { do { - readStringUntilWhitespaceAndSkipWhitespaceIfAny(field_name, cpuinfo_in); + readStringAndSkipWhitespaceIfAny(field_name, cpuinfo_in); } while (!cpuinfo_in.eof() && field_name != String("cpu MHz")); if (cpuinfo_in.eof()) @@ -172,12 +175,18 @@ void ProcessorStatisticsOS::readFreq(ProcFreq & freq) } template -void ProcessorStatisticsOS::readIntTextAndSkipWhitespaceIfAny(T& x, ReadBuffer& buf) +void ProcessorStatisticsOS::readIntTextAndSkipWhitespaceIfAny(T & x, ReadBuffer & buf) { readIntText(x, buf); skipWhitespaceIfAny(buf); } +void ProcessorStatisticsOS::readStringAndSkipWhitespaceIfAny(String & s, ReadBuffer & buf) +{ + readString(s, buf); + skipWhitespaceIfAny(buf); +} + void ProcessorStatisticsOS::readStringUntilWhitespaceAndSkipWhitespaceIfAny(String & s, ReadBuffer & buf) { readStringUntilWhitespace(s, buf); diff --git a/src/Common/ProcessorStatisticsOS.h b/src/Common/ProcessorStatisticsOS.h index 51bb8c3a157..cd0d15770ed 100644 --- a/src/Common/ProcessorStatisticsOS.h +++ b/src/Common/ProcessorStatisticsOS.h @@ -75,6 +75,8 @@ private: static void readIntTextAndSkipWhitespaceIfAny(T & x, ReadBuffer & buf); static void readStringUntilWhitespaceAndSkipWhitespaceIfAny(String & s, ReadBuffer & buf); + + static void readStringAndSkipWhitespaceIfAny(String & s, ReadBuffer& buf); static void readCharAndSkipWhitespaceIfAny(char & c, ReadBuffer & buf); From 44fb1ebc37c106f6abc8c9b8b47cd1073c2f2e16 Mon Sep 17 00:00:00 2001 From: elevankoff Date: Fri, 7 May 2021 12:39:20 +0000 Subject: [PATCH 106/931] Small fix --- 
src/Common/ProcessorStatisticsOS.cpp | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/src/Common/ProcessorStatisticsOS.cpp b/src/Common/ProcessorStatisticsOS.cpp index 7b341cbea44..d3124ebddd3 100644 --- a/src/Common/ProcessorStatisticsOS.cpp +++ b/src/Common/ProcessorStatisticsOS.cpp @@ -126,8 +126,7 @@ void ProcessorStatisticsOS::readProcTimeAndProcesses(ProcTime & proc_time, ProcS do { readStringUntilWhitespaceAndSkipWhitespaceIfAny(field_name, procst_in); - readString(field_val, procst_in); - skipWhitespaceIfAny(procst_in); + readStringAndSkipWhitespaceIfAny(field_val, procst_in); } while (field_name != String("processes")); stload.processes = static_cast(std::stoul(field_val)); From 69efc15f2ac8ab798cca3eeb8e77faae6eecc50a Mon Sep 17 00:00:00 2001 From: elevankoff Date: Fri, 7 May 2021 18:06:12 +0000 Subject: [PATCH 107/931] Add untested "MemoryInfo" class --- src/Common/MemoryInfoOS.cpp | 78 +++++++++++++++++++++++++++++++++++++ src/Common/MemoryInfoOS.h | 55 ++++++++++++++++++++++++++ 2 files changed, 133 insertions(+) create mode 100644 src/Common/MemoryInfoOS.cpp create mode 100644 src/Common/MemoryInfoOS.h diff --git a/src/Common/MemoryInfoOS.cpp b/src/Common/MemoryInfoOS.cpp new file mode 100644 index 00000000000..02edccf579f --- /dev/null +++ b/src/Common/MemoryInfoOS.cpp @@ -0,0 +1,78 @@ +#if defined(OS_LINUX) + +#include +#include +#include + +#include "MemoryInfoOS.h" + +#include + +#include +#include + +namespace DB +{ + +static constexpr auto meminfo_filename = "/proc/meminfo"; + +MemoryInfoOS::MemoryInfoOS() + : meminfo_in(meminfo_filename, DBMS_DEFAULT_BUFFER_SIZE, O_RDONLY | O_CLOEXEC) +{} + +MemoryInfoOS::~MemoryInfoOS() {} + +MemoryInfoOS::Data MemoryInfoOS::get() +{ + meminfo_in.seek(0, SEEK_SET); + + MemoryInfoOS::Data data; + String field_name; + + assert(readField(data.total, String("MemTotal"))); + assert(readField(data.free, String("MemFree"))); + skipField(); + assert(readField(data.buffers, String("Buffers"))); + assert(readField(data.cached, String("Cached"))); + + data.free_and_cached = data.free + data.cached; + + assert(readField(data.swap_cached, String("SwapCached"))); + + while (!readField(data.swap_total, String("SwapTotal"))) {} + + assert(readField(data.swap_free, String("SwapFree"))); + + return data; +} + +bool MemoryInfoOS::readField(unsigned long & field_val, const String & field_name_target) +{ + String field_name; + + readStringAndSkipWhitespaceIfAny(field_name, meminfo_in); + readIntTextAndSkipWhitespaceIfAny(field_val, meminfo_in); + return (field_name == (field_name_target + String(":"))); +} + +void MemoryInfoOS::skipField() +{ + skipToNextLineOrEOF(meminfo_in); +} + +void MemoryInfoOS::readStringAndSkipWhitespaceIfAny(String & s, ReadBuffer & buf) +{ + readString(s, buf); + skipWhitespaceIfAny(buf); +} + +template +void MemoryInfoOS::readIntTextAndSkipWhitespaceIfAny(T & x, ReadBuffer & buf) +{ + readIntText(x, buf); + skipWhitespaceIfAny(buf); +} + +} + +#endif diff --git a/src/Common/MemoryInfoOS.h b/src/Common/MemoryInfoOS.h new file mode 100644 index 00000000000..d6d07850ead --- /dev/null +++ b/src/Common/MemoryInfoOS.h @@ -0,0 +1,55 @@ +#pragma once +#if defined(OS_LINUX) + +#include +#include + +#include + +#include + +namespace DB +{ + +/** Opens file /proc/meminfo. Keeps it open and reads statistics about memory usage. + * This is Linux specific. 
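
MemoryInfoOS reads `/proc/meminfo`, which is a plain `Name:   value kB` listing. A self-contained sketch of the same parse, using the map-based, order-independent approach that PATCH 111 below also adopts; illustrative only, standard library instead of ClickHouse's ReadBuffer:

```cpp
#include <cstdint>
#include <fstream>
#include <iostream>
#include <sstream>
#include <string>
#include <unordered_map>

std::unordered_map<std::string, uint64_t> readMeminfo()
{
    std::unordered_map<std::string, uint64_t> result;
    std::ifstream in("/proc/meminfo");
    std::string line;
    while (std::getline(in, line))
    {
        std::istringstream fields(line);
        std::string key;
        uint64_t value_kb = 0;
        if (fields >> key >> value_kb)
        {
            key.pop_back();  // strip the trailing ':'
            result.emplace(key, value_kb);
        }
    }
    return result;
}

int main()
{
    auto meminfo = readMeminfo();
    std::cout << "MemTotal: " << meminfo["MemTotal"] << " kB\n"
              << "MemFree + Cached: " << meminfo["MemFree"] + meminfo["Cached"] << " kB\n";
}
```
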
+ * See: man procfs + */ + +class MemoryInfoOS +{ +public: + // In kB + struct Data { + unsigned long total; + unsigned long free; + unsigned long buffers; + unsigned long cached; + unsigned long free_and_cached; + + unsigned long swap_total; + unsigned long swap_free; + unsigned long swap_cached; + }; + + MemoryInfoOS(); + ~MemoryInfoOS(); + + Data get(); + +private: + ReadBufferFromFile meminfo_in; + + bool readField(unsigned long & field_val, const String & field_name_target); + + void skipField(); + + static void readStringAndSkipWhitespaceIfAny(String & s, ReadBuffer & buf); + + template + static void readIntTextAndSkipWhitespaceIfAny(T & x, ReadBuffer & buf); +}; + +} + +#endif From 6ab7dd9f29f47eaaef944f77ed12839869243f17 Mon Sep 17 00:00:00 2001 From: elevankoff Date: Fri, 7 May 2021 19:36:19 +0000 Subject: [PATCH 108/931] Change unsigned long -> uint64_t; delete private static functions from .h; another small fixes --- src/Common/MemoryInfoOS.cpp | 34 +++++++++++++++++----------------- src/Common/MemoryInfoOS.h | 23 +++++++++-------------- 2 files changed, 26 insertions(+), 31 deletions(-) diff --git a/src/Common/MemoryInfoOS.cpp b/src/Common/MemoryInfoOS.cpp index 02edccf579f..d2e3929b264 100644 --- a/src/Common/MemoryInfoOS.cpp +++ b/src/Common/MemoryInfoOS.cpp @@ -1,7 +1,5 @@ #if defined(OS_LINUX) -#include -#include #include #include "MemoryInfoOS.h" @@ -15,9 +13,24 @@ namespace DB { static constexpr auto meminfo_filename = "/proc/meminfo"; - + +static constexpr int READ_BUFFER_BUF_SIZE = (64 << 10); + +void readStringAndSkipWhitespaceIfAny(String & s, ReadBuffer & buf) +{ + readString(s, buf); + skipWhitespaceIfAny(buf); +} + +template +void readIntTextAndSkipWhitespaceIfAny(T & x, ReadBuffer & buf) +{ + readIntText(x, buf); + skipWhitespaceIfAny(buf); +} + MemoryInfoOS::MemoryInfoOS() - : meminfo_in(meminfo_filename, DBMS_DEFAULT_BUFFER_SIZE, O_RDONLY | O_CLOEXEC) + : meminfo_in(meminfo_filename, READ_BUFFER_BUF_SIZE, O_RDONLY | O_CLOEXEC) {} MemoryInfoOS::~MemoryInfoOS() {} @@ -60,19 +73,6 @@ void MemoryInfoOS::skipField() skipToNextLineOrEOF(meminfo_in); } -void MemoryInfoOS::readStringAndSkipWhitespaceIfAny(String & s, ReadBuffer & buf) -{ - readString(s, buf); - skipWhitespaceIfAny(buf); -} - -template -void MemoryInfoOS::readIntTextAndSkipWhitespaceIfAny(T & x, ReadBuffer & buf) -{ - readIntText(x, buf); - skipWhitespaceIfAny(buf); -} - } #endif diff --git a/src/Common/MemoryInfoOS.h b/src/Common/MemoryInfoOS.h index d6d07850ead..8c98a11692d 100644 --- a/src/Common/MemoryInfoOS.h +++ b/src/Common/MemoryInfoOS.h @@ -21,15 +21,15 @@ class MemoryInfoOS public: // In kB struct Data { - unsigned long total; - unsigned long free; - unsigned long buffers; - unsigned long cached; - unsigned long free_and_cached; + uint64_t total; + uint64_t free; + uint64_t buffers; + uint64_t cached; + uint64_t free_and_cached; - unsigned long swap_total; - unsigned long swap_free; - unsigned long swap_cached; + uint64_t swap_total; + uint64_t swap_free; + uint64_t swap_cached; }; MemoryInfoOS(); @@ -40,14 +40,9 @@ public: private: ReadBufferFromFile meminfo_in; - bool readField(unsigned long & field_val, const String & field_name_target); + bool readField(uint64_t & field_val, const String & field_name_target); void skipField(); - - static void readStringAndSkipWhitespaceIfAny(String & s, ReadBuffer & buf); - - template - static void readIntTextAndSkipWhitespaceIfAny(T & x, ReadBuffer & buf); }; } From fd5827f735767bd7c05e979a76a6dfb34cd4c527 Mon Sep 17 00:00:00 2001 From: elevankoff Date: 
Fri, 7 May 2021 19:38:22 +0000 Subject: [PATCH 109/931] Change data type of READ_BUFFER_BUF_SIZE --- src/Common/MemoryInfoOS.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Common/MemoryInfoOS.cpp b/src/Common/MemoryInfoOS.cpp index d2e3929b264..b2cfd1609e3 100644 --- a/src/Common/MemoryInfoOS.cpp +++ b/src/Common/MemoryInfoOS.cpp @@ -14,7 +14,7 @@ namespace DB static constexpr auto meminfo_filename = "/proc/meminfo"; -static constexpr int READ_BUFFER_BUF_SIZE = (64 << 10); +static constexpr size_t READ_BUFFER_BUF_SIZE = (64 << 10); void readStringAndSkipWhitespaceIfAny(String & s, ReadBuffer & buf) { From c5c9e95744e712fbdaa452afee329bfe2da90f3c Mon Sep 17 00:00:00 2001 From: elevankoff Date: Sat, 8 May 2021 06:37:06 +0000 Subject: [PATCH 110/931] Change size of beffer for ReadBuffer; delete private static functions from class --- src/Common/ProcessorStatisticsOS.cpp | 70 ++++++++++++++-------------- src/Common/ProcessorStatisticsOS.h | 11 ----- 2 files changed, 36 insertions(+), 45 deletions(-) diff --git a/src/Common/ProcessorStatisticsOS.cpp b/src/Common/ProcessorStatisticsOS.cpp index d3124ebddd3..d7d308916b7 100644 --- a/src/Common/ProcessorStatisticsOS.cpp +++ b/src/Common/ProcessorStatisticsOS.cpp @@ -35,10 +35,43 @@ static constexpr auto cpuinfo_filename = "/proc/cpuinfo"; static const uint64_t USER_HZ = static_cast(sysconf(_SC_CLK_TCK)); +static constexpr size_t READ_BUFFER_BUF_SIZE = (64 << 10); + +template +void readIntTextAndSkipWhitespaceIfAny(T & x, ReadBuffer & buf) +{ + readIntText(x, buf); + skipWhitespaceIfAny(buf); +} + +void readStringAndSkipWhitespaceIfAny(String & s, ReadBuffer & buf) +{ + readString(s, buf); + skipWhitespaceIfAny(buf); +} + +void readStringUntilWhitespaceAndSkipWhitespaceIfAny(String & s, ReadBuffer & buf) +{ + readStringUntilWhitespace(s, buf); + skipWhitespaceIfAny(buf); +} + +void readCharAndSkipWhitespaceIfAny(char & c, ReadBuffer & buf) +{ + readChar(c, buf); + skipWhitespaceIfAny(buf); +} + +void readFloatAndSkipWhitespaceIfAny(float & f, ReadBuffer & buf) +{ + readFloatText(f, buf); + skipWhitespaceIfAny(buf); +} + ProcessorStatisticsOS::ProcessorStatisticsOS() - : loadavg_in(loadavg_filename, DBMS_DEFAULT_BUFFER_SIZE, O_RDONLY | O_CLOEXEC) - , procst_in(procst_filename, DBMS_DEFAULT_BUFFER_SIZE, O_RDONLY | O_CLOEXEC) - , cpuinfo_in(cpuinfo_filename, DBMS_DEFAULT_BUFFER_SIZE, O_RDONLY | O_CLOEXEC) + : loadavg_in(loadavg_filename, READ_BUFFER_BUF_SIZE, O_RDONLY | O_CLOEXEC) + , procst_in(procst_filename, READ_BUFFER_BUF_SIZE, O_RDONLY | O_CLOEXEC) + , cpuinfo_in(cpuinfo_filename, READ_BUFFER_BUF_SIZE, O_RDONLY | O_CLOEXEC) { ProcStLoad unused; calcStLoad(unused); @@ -173,37 +206,6 @@ void ProcessorStatisticsOS::readFreq(ProcFreq & freq) freq.avg /= static_cast(cpu_count); } -template -void ProcessorStatisticsOS::readIntTextAndSkipWhitespaceIfAny(T & x, ReadBuffer & buf) -{ - readIntText(x, buf); - skipWhitespaceIfAny(buf); -} - -void ProcessorStatisticsOS::readStringAndSkipWhitespaceIfAny(String & s, ReadBuffer & buf) -{ - readString(s, buf); - skipWhitespaceIfAny(buf); -} - -void ProcessorStatisticsOS::readStringUntilWhitespaceAndSkipWhitespaceIfAny(String & s, ReadBuffer & buf) -{ - readStringUntilWhitespace(s, buf); - skipWhitespaceIfAny(buf); -} - -void ProcessorStatisticsOS::readCharAndSkipWhitespaceIfAny(char & c, ReadBuffer & buf) -{ - readChar(c, buf); - skipWhitespaceIfAny(buf); -} - -void ProcessorStatisticsOS::readFloatAndSkipWhitespaceIfAny(float & f, ReadBuffer & buf) -{ - readFloatText(f, buf); - 
From 6066d557a8819b0cf5a04dcaf62d4debaef81f79 Mon Sep 17 00:00:00 2001
From: elevankoff
Date: Sat, 8 May 2021 07:41:47 +0000
Subject: [PATCH 111/931] Make the "get()" method order-independent and fix a reading bug in the "readField" method

---
 src/Common/MemoryInfoOS.cpp | 47 ++++++++++++++++++++-----------------
 src/Common/MemoryInfoOS.h   |  5 ++--
 2 files changed, 27 insertions(+), 25 deletions(-)

diff --git a/src/Common/MemoryInfoOS.cpp b/src/Common/MemoryInfoOS.cpp
index b2cfd1609e3..d8160561ee8 100644
--- a/src/Common/MemoryInfoOS.cpp
+++ b/src/Common/MemoryInfoOS.cpp
@@ -1,6 +1,8 @@
 #if defined(OS_LINUX)
 
 #include
+#include
+#include
 
 #include "MemoryInfoOS.h"
 
@@ -16,9 +18,9 @@ static constexpr auto meminfo_filename = "/proc/meminfo";
 
 static constexpr size_t READ_BUFFER_BUF_SIZE = (64 << 10);
 
-void readStringAndSkipWhitespaceIfAny(String & s, ReadBuffer & buf)
+void readStringUntilWhitespaceAndSkipWhitespaceIfAny(String & s, ReadBuffer & buf)
 {
-    readString(s, buf);
+    readStringUntilWhitespace(s, buf);
     skipWhitespaceIfAny(buf);
 }
 
@@ -42,35 +44,36 @@ MemoryInfoOS::Data MemoryInfoOS::get()
     MemoryInfoOS::Data data;
     String field_name;
 
-    assert(readField(data.total, String("MemTotal")));
-    assert(readField(data.free, String("MemFree")));
-    skipField();
-    assert(readField(data.buffers, String("Buffers")));
-    assert(readField(data.cached, String("Cached")));
+    std::unordered_map<String, uint64_t> meminfo;
+
+    while (!meminfo_in.eof())
+        meminfo.insert(readField());
+
+    data.total = meminfo["MemTotal"];
+    data.free = meminfo["MemFree"];
+    data.buffers = meminfo["Buffers"];
+    data.cached = meminfo["Cached"];
+    data.swap_total = meminfo["SwapTotal"];
+    data.swap_cached = meminfo["SwapCached"];
+    data.swap_free = meminfo["SwapFree"];
 
     data.free_and_cached = data.free + data.cached;
 
-    assert(readField(data.swap_cached, String("SwapCached")));
-
-    while (!readField(data.swap_total, String("SwapTotal"))) {}
-
-    assert(readField(data.swap_free, String("SwapFree")));
-
     return data;
 }
 
-bool MemoryInfoOS::readField(unsigned long & field_val, const String & field_name_target)
+std::pair<String, uint64_t> MemoryInfoOS::readField()
 {
-    String field_name;
+    String key;
+    uint64_t val;
 
-    readStringAndSkipWhitespaceIfAny(field_name, meminfo_in);
-    readIntTextAndSkipWhitespaceIfAny(field_val, meminfo_in);
-    return (field_name == (field_name_target + String(":")));
-}
+    readStringUntilWhitespaceAndSkipWhitespaceIfAny(key, meminfo_in);
+    readIntTextAndSkipWhitespaceIfAny(val, meminfo_in);
 
-void MemoryInfoOS::skipField()
-{
-    skipToNextLineOrEOF(meminfo_in);
+    // Remove the trailing ':' that was read as part of the key
+    key.pop_back();
+
+    return std::make_pair(key, val);
 }
 
 }
diff --git a/src/Common/MemoryInfoOS.h b/src/Common/MemoryInfoOS.h
index 8c98a11692d..e1bf1dcfde4 100644
--- a/src/Common/MemoryInfoOS.h
+++ b/src/Common/MemoryInfoOS.h
@@ -3,6 +3,7 @@
 
 #include
 #include
+#include
 
 #include
 
@@ -40,9 +41,7 @@ public:
 private:
     ReadBufferFromFile meminfo_in;
 
-    bool readField(uint64_t & field_val, const String & field_name_target);
-
-    void skipField();
+    std::pair<String, uint64_t> readField();
 };
 
 }
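[Editor's note, not part of the patch series: the rewrite above matters because /proc/meminfo does not guarantee a fixed set or order of lines across kernel versions; collecting every field into a map first makes the lookup order-independent. A std-only sketch of the same idea, written here for illustration (this is not ClickHouse code):]

#include <cstdint>
#include <istream>
#include <string>
#include <unordered_map>

// Collect every "Key: value [kB]" line, then pick fields by name.
std::unordered_map<std::string, uint64_t> parseMeminfo(std::istream & in)
{
    std::unordered_map<std::string, uint64_t> fields;
    std::string key, rest_of_line;
    uint64_t value;
    while (in >> key >> value)
    {
        key.pop_back();                  // drop the trailing ':'
        fields[key] = value;
        std::getline(in, rest_of_line);  // skip the optional " kB" unit
    }
    return fields;
}

[End of editor's note.]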
From dc7b84a3cc55ad73c8a8c9e6a397bf2e556555b6 Mon Sep 17 00:00:00 2001
From: elevankoff
Date: Sat, 8 May 2021 13:04:08 +0000
Subject: [PATCH 112/931] Fix bug

---
 src/Common/MemoryInfoOS.cpp | 1 +
 1 file changed, 1 insertion(+)

diff --git a/src/Common/MemoryInfoOS.cpp b/src/Common/MemoryInfoOS.cpp
index d8160561ee8..5eb2501e322 100644
--- a/src/Common/MemoryInfoOS.cpp
+++ b/src/Common/MemoryInfoOS.cpp
@@ -69,6 +69,7 @@ std::pair<String, uint64_t> MemoryInfoOS::readField()
 
     readStringUntilWhitespaceAndSkipWhitespaceIfAny(key, meminfo_in);
     readIntTextAndSkipWhitespaceIfAny(val, meminfo_in);
+    skipToNextLineOrEOF(meminfo_in);
 
     // Remove the trailing ':' that was read as part of the key
     key.pop_back();

From 7bc0d846b7a6804e212c7485775a89679524f9bb Mon Sep 17 00:00:00 2001
From: elevankoff
Date: Sat, 8 May 2021 20:38:10 +0000
Subject: [PATCH 113/931] Add "DiskStatisticsOS" class

---
 src/Common/DiskStatisticsOS.cpp | 76 +++++++++++++++++++++++++++++++++
 src/Common/DiskStatisticsOS.h   | 38 +++++++++++++++++
 2 files changed, 114 insertions(+)
 create mode 100644 src/Common/DiskStatisticsOS.cpp
 create mode 100644 src/Common/DiskStatisticsOS.h

diff --git a/src/Common/DiskStatisticsOS.cpp b/src/Common/DiskStatisticsOS.cpp
new file mode 100644
index 00000000000..40ba15ac6b8
--- /dev/null
+++ b/src/Common/DiskStatisticsOS.cpp
@@ -0,0 +1,76 @@
+#if defined(OS_LINUX)
+
+#include "DiskStatisticsOS.h"
+
+#include
+
+#include
+
+#include
+#include
+
+namespace DB
+{
+
+namespace ErrorCodes
+{
+    extern const int CANNOT_STATVFS;
+}
+
+static constexpr auto mounts_filename = "/proc/mounts";
+
+static constexpr std::size_t READ_BUFFER_BUF_SIZE = (64 << 10);
+
+void readStringUntilWhitespaceAndSkipWhitespaceIfAny(String & s, ReadBuffer & buf)
+{
+    readStringUntilWhitespace(s, buf);
+    skipWhitespaceIfAny(buf);
+}
+
+DiskStatisticsOS::DiskStatisticsOS()
+    : mounts_in(mounts_filename, READ_BUFFER_BUF_SIZE, O_RDONLY | O_CLOEXEC)
+{}
+
+DiskStatisticsOS::~DiskStatisticsOS() {}
+
+DiskStatisticsOS::Data DiskStatisticsOS::get()
+{
+    mounts_in.seek(0, SEEK_SET);
+
+    DiskStatisticsOS::Data data = {0, 0};
+
+    while (!mounts_in.eof())
+    {
+        String filesystem = readNextFilesystem();
+
+        struct statvfs stat;
+
+        if (statvfs(filesystem.c_str(), &stat))
+            throwFromErrno("Cannot statvfs", ErrorCodes::CANNOT_STATVFS);
+
+        uint64_t total_blocks = static_cast<uint64_t>(stat.f_blocks);
+        uint64_t free_blocks = static_cast<uint64_t>(stat.f_bfree);
+        uint64_t used_blocks = total_blocks - free_blocks;
+        uint64_t block_size = static_cast<uint64_t>(stat.f_bsize);
+
+        data.total += total_blocks * block_size;
+        data.used += used_blocks * block_size;
+    }
+
+    return data;
+}
+
+String DiskStatisticsOS::readNextFilesystem()
+{
+    String filesystem, unused;
+
+    readStringUntilWhitespaceAndSkipWhitespaceIfAny(unused, mounts_in);
+    readStringUntilWhitespace(filesystem, mounts_in);
+    skipToNextLineOrEOF(mounts_in);
+
+    return filesystem;
+}
+
+}
+
+#endif
diff --git a/src/Common/DiskStatisticsOS.h b/src/Common/DiskStatisticsOS.h
new file mode 100644
index 00000000000..a1c260f24c3
--- /dev/null
+++ b/src/Common/DiskStatisticsOS.h
@@ -0,0 +1,38 @@
+#if defined (OS_LINUX)
+
+#include
+
+#include
+
+#include
+
+namespace DB
+{
+
+/** Opens file /proc/mounts. Keeps it open, reads all mounted filesystems and
+ *  calculates disk usage.
+ */
+class DiskStatisticsOS
+{
+public:
+    // In bytes
+    struct Data {
+        uint64_t total;
+        uint64_t used;
+    };
+
+    DiskStatisticsOS();
+    ~DiskStatisticsOS();
+
+    Data get();
+
+private:
+    String readNextFilesystem();
+
+private:
+    ReadBufferFromFile mounts_in;
+};
+
+}
+
+#endif
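[Editor's note, not part of the patch series: `statvfs` reports sizes in blocks, and `DiskStatisticsOS::get()` above multiplies by `f_bsize` to obtain bytes. Strictly, POSIX defines `f_frsize` as the unit of `f_blocks`, though the two normally coincide on Linux. A standalone check of the same arithmetic; "/" is just an example mount point:]

#include <sys/statvfs.h>
#include <cstdint>
#include <cstdio>

int main()
{
    struct statvfs st;
    if (statvfs("/", &st) != 0)
        return 1;

    // Same computation as DiskStatisticsOS::get(), for a single filesystem.
    uint64_t total = static_cast<uint64_t>(st.f_blocks) * st.f_bsize;
    uint64_t used  = static_cast<uint64_t>(st.f_blocks - st.f_bfree) * st.f_bsize;
    std::printf("total=%llu bytes, used=%llu bytes\n",
                static_cast<unsigned long long>(total),
                static_cast<unsigned long long>(used));
    return 0;
}

[End of editor's note.]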
From 1b827ac4249c6f891037a48e62fbf943e24ccc84 Mon Sep 17 00:00:00 2001
From: kssenii
Date: Sat, 8 May 2021 14:55:53 +0000
Subject: [PATCH 114/931] Correct merge, finish refactoring

---
 src/Core/PostgreSQL/Connection.cpp            |  35 ++++
 src/Core/PostgreSQL/Connection.h              |  30 ++++
 src/Core/PostgreSQL/ConnectionHolder.h        |  37 +++++
 src/Core/PostgreSQL/PoolWithFailover.cpp      | 138 ++++++++++++++++
 src/Core/PostgreSQL/PoolWithFailover.h        |  65 ++++++++
 src/Core/PostgreSQL/Utils.cpp                 |  19 +++
 src/Core/PostgreSQL/Utils.h                   |  37 +++++
 .../MaterializePostgreSQLConsumer.cpp         |   8 +-
 .../MaterializePostgreSQLConsumer.h           |   4 +-
 .../PostgreSQLReplicationHandler.cpp          | 156 +++++++++---------
 .../PostgreSQL/PostgreSQLReplicationHandler.h |   3 +-
 .../PostgreSQL/StorageMaterializePostgreSQL.h |   2 +-
 .../TableFunctionPostgreSQL.cpp               |   4 +-
 src/TableFunctions/TableFunctionPostgreSQL.h  |   2 +-
 .../__init__.py                               |   0
 .../configs/config.xml                        |  30 ++++
 .../configs/users.xml                         |  23 +++
 17 files changed, 505 insertions(+), 88 deletions(-)
 create mode 100644 src/Core/PostgreSQL/Connection.cpp
 create mode 100644 src/Core/PostgreSQL/Connection.h
 create mode 100644 src/Core/PostgreSQL/ConnectionHolder.h
 create mode 100644 src/Core/PostgreSQL/PoolWithFailover.cpp
 create mode 100644 src/Core/PostgreSQL/PoolWithFailover.h
 create mode 100644 src/Core/PostgreSQL/Utils.cpp
 create mode 100644 src/Core/PostgreSQL/Utils.h
 create mode 100644 tests/integration/test_dictionaries_update_field/__init__.py
 create mode 100644 tests/integration/test_dictionaries_update_field/configs/config.xml
 create mode 100644 tests/integration/test_dictionaries_update_field/configs/users.xml

diff --git a/src/Core/PostgreSQL/Connection.cpp b/src/Core/PostgreSQL/Connection.cpp
new file mode 100644
index 00000000000..ff6197d1390
--- /dev/null
+++ b/src/Core/PostgreSQL/Connection.cpp
@@ -0,0 +1,35 @@
+#include "Connection.h"
+#include
+
+namespace postgres
+{
+
+Connection::Connection(const ConnectionInfo & connection_info_, bool replication_)
+    : connection_info(connection_info_), replication(replication_)
+{
+    if (replication)
+    {
+        connection_info = std::make_pair(
+            fmt::format("{} replication=database", connection_info.first), connection_info.second);
+    }
+}
+
+pqxx::connection & Connection::getRef()
+{
+    connect();
+    assert(connection != nullptr);
+    return *connection;
+}
+
+void Connection::connect()
+{
+    if (!connection || !connection->is_open())
+    {
+        /// Always throws if there is no connection.
+ connection = std::make_unique(connection_info.first); + if (replication) + connection->set_variable("default_transaction_isolation", "'repeatable read'"); + LOG_DEBUG(&Poco::Logger::get("PostgreSQLConnection"), "New connection to {}", connection_info.second); + } +} +} diff --git a/src/Core/PostgreSQL/Connection.h b/src/Core/PostgreSQL/Connection.h new file mode 100644 index 00000000000..1e9334eace5 --- /dev/null +++ b/src/Core/PostgreSQL/Connection.h @@ -0,0 +1,30 @@ +#pragma once + +#include // Y_IGNORE +#include + + +namespace postgres +{ +using ConnectionInfo = std::pair; +using ConnectionPtr = std::unique_ptr; + +class Connection +{ +public: + Connection(const ConnectionInfo & connection_info_, bool replication_ = false); + + Connection(const Connection & other) = delete; + + pqxx::connection & getRef(); + + void connect(); + + const ConnectionInfo & getConnectionInfo() { return connection_info; } + +private: + ConnectionPtr connection; + ConnectionInfo connection_info; + bool replication; +}; +} diff --git a/src/Core/PostgreSQL/ConnectionHolder.h b/src/Core/PostgreSQL/ConnectionHolder.h new file mode 100644 index 00000000000..98ab7df182d --- /dev/null +++ b/src/Core/PostgreSQL/ConnectionHolder.h @@ -0,0 +1,37 @@ +#pragma once + +#include // Y_IGNORE +#include +#include + + +namespace postgres +{ + +using ConnectionPtr = std::unique_ptr; +using Pool = BorrowedObjectPool; +using PoolPtr = std::shared_ptr; + +class ConnectionHolder +{ + +public: + ConnectionHolder(PoolPtr pool_, ConnectionPtr connection_) : pool(pool_), connection(std::move(connection_)) {} + + ConnectionHolder(const ConnectionHolder & other) = delete; + + ~ConnectionHolder() { pool->returnObject(std::move(connection)); } + + pqxx::connection & get() + { + assert(connection != nullptr); + return *connection; + } + +private: + PoolPtr pool; + ConnectionPtr connection; +}; + +using ConnectionHolderPtr = std::unique_ptr; +} diff --git a/src/Core/PostgreSQL/PoolWithFailover.cpp b/src/Core/PostgreSQL/PoolWithFailover.cpp new file mode 100644 index 00000000000..6bf756b8a12 --- /dev/null +++ b/src/Core/PostgreSQL/PoolWithFailover.cpp @@ -0,0 +1,138 @@ +#include "PoolWithFailover.h" +#include "Utils.h" +#include +#include + +namespace DB +{ +namespace ErrorCodes +{ + extern const int POSTGRESQL_CONNECTION_FAILURE; +} +} + +namespace postgres +{ + +PoolWithFailover::PoolWithFailover( + const Poco::Util::AbstractConfiguration & config, const String & config_prefix, + size_t pool_size, size_t pool_wait_timeout_, size_t max_tries_) + : pool_wait_timeout(pool_wait_timeout_) + , max_tries(max_tries_) +{ + LOG_TRACE(&Poco::Logger::get("PostgreSQLConnectionPool"), "PostgreSQL connection pool size: {}, connection wait timeout: {}, max failover tries: {}", + pool_size, pool_wait_timeout, max_tries_); + + auto db = config.getString(config_prefix + ".db", ""); + auto host = config.getString(config_prefix + ".host", ""); + auto port = config.getUInt(config_prefix + ".port", 0); + auto user = config.getString(config_prefix + ".user", ""); + auto password = config.getString(config_prefix + ".password", ""); + + if (config.has(config_prefix + ".replica")) + { + Poco::Util::AbstractConfiguration::Keys config_keys; + config.keys(config_prefix, config_keys); + + for (const auto & config_key : config_keys) + { + if (config_key.starts_with("replica")) + { + std::string replica_name = config_prefix + "." 
+ config_key; + size_t priority = config.getInt(replica_name + ".priority", 0); + + auto replica_host = config.getString(replica_name + ".host", host); + auto replica_port = config.getUInt(replica_name + ".port", port); + auto replica_user = config.getString(replica_name + ".user", user); + auto replica_password = config.getString(replica_name + ".password", password); + + auto connection_string = formatConnectionString(db, replica_host, replica_port, replica_user, replica_password).first; + replicas_with_priority[priority].emplace_back(connection_string, pool_size); + } + } + } + else + { + auto connection_string = formatConnectionString(db, host, port, user, password).first; + replicas_with_priority[0].emplace_back(connection_string, pool_size); + } +} + +PoolWithFailover::PoolWithFailover( + const std::string & database, + const RemoteDescription & addresses, + const std::string & user, const std::string & password, + size_t pool_size, size_t pool_wait_timeout_, size_t max_tries_) + : pool_wait_timeout(pool_wait_timeout_) + , max_tries(max_tries_) +{ + LOG_TRACE(&Poco::Logger::get("PostgreSQLConnectionPool"), "PostgreSQL connection pool size: {}, connection wait timeout: {}, max failover tries: {}", + pool_size, pool_wait_timeout, max_tries_); + + /// Replicas have the same priority, but traversed replicas are moved to the end of the queue. + for (const auto & [host, port] : addresses) + { + LOG_DEBUG(&Poco::Logger::get("PostgreSQLPoolWithFailover"), "Adding address host: {}, port: {} to connection pool", host, port); + auto connection_string = formatConnectionString(database, host, port, user, password).first; + replicas_with_priority[0].emplace_back(connection_string, pool_size); + } +} + +ConnectionHolderPtr PoolWithFailover::get() +{ + std::lock_guard lock(mutex); + + for (size_t try_idx = 0; try_idx < max_tries; ++try_idx) + { + for (auto & priority : replicas_with_priority) + { + auto & replicas = priority.second; + for (size_t i = 0; i < replicas.size(); ++i) + { + auto & replica = replicas[i]; + + ConnectionPtr connection; + auto connection_available = replica.pool->tryBorrowObject(connection, []() { return nullptr; }, pool_wait_timeout); + + if (!connection_available) + { + LOG_WARNING(log, "Unable to fetch connection within the timeout"); + continue; + } + + try + { + /// Create a new connection or reopen an old connection if it became invalid. + if (!connection || !connection->is_open()) + { + connection = std::make_unique(replica.connection_string); + LOG_DEBUG(log, "New connection to {}:{}", connection->hostname(), connection->port()); + } + } + catch (const pqxx::broken_connection & pqxx_error) + { + LOG_ERROR(log, "Connection error: {}", pqxx_error.what()); + + replica.pool->returnObject(std::move(connection)); + continue; + } + catch (...) + { + replica.pool->returnObject(std::move(connection)); + throw; + } + + auto connection_holder = std::make_unique(replica.pool, std::move(connection)); + + /// Move all traversed replicas to the end. 
+ if (replicas.size() > 1) + std::rotate(replicas.begin(), replicas.begin() + i + 1, replicas.end()); + + return connection_holder; + } + } + } + + throw DB::Exception(DB::ErrorCodes::POSTGRESQL_CONNECTION_FAILURE, "Unable to connect to any of the replicas"); +} +} diff --git a/src/Core/PostgreSQL/PoolWithFailover.h b/src/Core/PostgreSQL/PoolWithFailover.h new file mode 100644 index 00000000000..f4ae2c6cd1b --- /dev/null +++ b/src/Core/PostgreSQL/PoolWithFailover.h @@ -0,0 +1,65 @@ +#pragma once + +#include "ConnectionHolder.h" +#include +#include +#include + + +namespace postgres +{ + +class PoolWithFailover +{ + +using RemoteDescription = std::vector>; + +public: + static constexpr inline auto POSTGRESQL_POOL_DEFAULT_SIZE = 16; + static constexpr inline auto POSTGRESQL_POOL_WAIT_TIMEOUT = 5000; + static constexpr inline auto POSTGRESQL_POOL_WITH_FAILOVER_DEFAULT_MAX_TRIES = 5; + + PoolWithFailover( + const Poco::Util::AbstractConfiguration & config, + const std::string & config_prefix, + size_t pool_size = POSTGRESQL_POOL_DEFAULT_SIZE, + size_t pool_wait_timeout = POSTGRESQL_POOL_WAIT_TIMEOUT, + size_t max_tries_ = POSTGRESQL_POOL_WITH_FAILOVER_DEFAULT_MAX_TRIES); + + PoolWithFailover( + const std::string & database, + const RemoteDescription & addresses, + const std::string & user, + const std::string & password, + size_t pool_size = POSTGRESQL_POOL_DEFAULT_SIZE, + size_t pool_wait_timeout = POSTGRESQL_POOL_WAIT_TIMEOUT, + size_t max_tries_ = POSTGRESQL_POOL_WITH_FAILOVER_DEFAULT_MAX_TRIES); + + PoolWithFailover(const PoolWithFailover & other) = delete; + + ConnectionHolderPtr get(); + +private: + struct PoolHolder + { + String connection_string; + PoolPtr pool; + + PoolHolder(const String & connection_string_, size_t pool_size) + : connection_string(connection_string_), pool(std::make_shared(pool_size)) {} + }; + + /// Highest priority is 0, the bigger the number in map, the less the priority + using Replicas = std::vector; + using ReplicasWithPriority = std::map; + + ReplicasWithPriority replicas_with_priority; + size_t pool_wait_timeout; + size_t max_tries; + std::mutex mutex; + Poco::Logger * log = &Poco::Logger::get("PostgreSQLConnectionPool"); +}; + +using PoolWithFailoverPtr = std::shared_ptr; + +} diff --git a/src/Core/PostgreSQL/Utils.cpp b/src/Core/PostgreSQL/Utils.cpp new file mode 100644 index 00000000000..98e76da99d2 --- /dev/null +++ b/src/Core/PostgreSQL/Utils.cpp @@ -0,0 +1,19 @@ +#include "Utils.h" +#include + +namespace postgres +{ + +ConnectionInfo formatConnectionString(String dbname, String host, UInt16 port, String user, String password) +{ + DB::WriteBufferFromOwnString out; + out << "dbname=" << DB::quote << dbname + << " host=" << DB::quote << host + << " port=" << port + << " user=" << DB::quote << user + << " password=" << DB::quote << password + << " connect_timeout=10"; + return std::make_pair(out.str(), host + ':' + DB::toString(port)); +} + +} diff --git a/src/Core/PostgreSQL/Utils.h b/src/Core/PostgreSQL/Utils.h new file mode 100644 index 00000000000..ccb133112d9 --- /dev/null +++ b/src/Core/PostgreSQL/Utils.h @@ -0,0 +1,37 @@ +#pragma once + +#include // Y_IGNORE +#include +#include "Connection.h" + +namespace pqxx +{ + using ReadTransaction = pqxx::read_transaction; + using ReplicationTransaction = pqxx::transaction; +} + + +namespace postgres +{ + +ConnectionInfo formatConnectionString(String dbname, String host, UInt16 port, String user, String password); + +Connection createReplicationConnection(const ConnectionInfo & connection_info); + +template 
+class Transaction +{ +public: + Transaction(pqxx::connection & connection) : transaction(connection) {} + + ~Transaction() { transaction.commit(); } + + T & getRef() { return transaction; } + + void exec(const String & query) { transaction.exec(query); } + +private: + T transaction; +}; + +} diff --git a/src/Storages/PostgreSQL/MaterializePostgreSQLConsumer.cpp b/src/Storages/PostgreSQL/MaterializePostgreSQLConsumer.cpp index 7eabce7c270..79a98b7b070 100644 --- a/src/Storages/PostgreSQL/MaterializePostgreSQLConsumer.cpp +++ b/src/Storages/PostgreSQL/MaterializePostgreSQLConsumer.cpp @@ -24,7 +24,7 @@ namespace ErrorCodes MaterializePostgreSQLConsumer::MaterializePostgreSQLConsumer( ContextPtr context_, - postgres::Connection && connection_, + std::shared_ptr connection_, const std::string & replication_slot_name_, const std::string & publication_name_, const std::string & metadata_path, @@ -37,7 +37,7 @@ MaterializePostgreSQLConsumer::MaterializePostgreSQLConsumer( , replication_slot_name(replication_slot_name_) , publication_name(publication_name_) , metadata(metadata_path) - , connection(std::move(connection_)) + , connection(connection_) , current_lsn(start_lsn) , max_block_size(max_block_size_) , allow_automatic_update(allow_automatic_update_) @@ -88,7 +88,7 @@ void MaterializePostgreSQLConsumer::readMetadata() if (!metadata.lsn().empty()) { - auto tx = std::make_shared(connection.getRef()); + auto tx = std::make_shared(connection->getRef()); final_lsn = metadata.lsn(); final_lsn = advanceLSN(tx); tx->commit(); @@ -600,7 +600,7 @@ bool MaterializePostgreSQLConsumer::readFromReplicationSlot() try { - tx = std::make_shared(connection.getRef()); + tx = std::make_shared(connection->getRef()); /// Read up to max_block_size rows changes (upto_n_changes parameter). It might return larger number as the limit /// is checked only after each transaction block. 
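[Editor's note, not part of the patch series: the theme of this commit is that the replication handler no longer moves its `postgres::Connection` into the consumer; both now hold a `std::shared_ptr` to one connection object, which is safe only because the two never run concurrently. The shape of that ownership change, reduced to a sketch with stand-in types (none of these names are the real classes):]

#include <memory>

struct Connection {};  // stand-in for postgres::Connection

struct Consumer
{
    explicit Consumer(std::shared_ptr<Connection> connection_)
        : connection(std::move(connection_)) {}
    std::shared_ptr<Connection> connection;
};

struct Handler
{
    std::shared_ptr<Connection> connection = std::make_shared<Connection>();

    // The handler keeps using `connection` after creating the consumer;
    // handler and consumer run strictly one after another, so no lock is needed.
    Consumer makeConsumer() const { return Consumer(connection); }
};

[End of editor's note.]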
diff --git a/src/Storages/PostgreSQL/MaterializePostgreSQLConsumer.h b/src/Storages/PostgreSQL/MaterializePostgreSQLConsumer.h
index ffe80c93ca6..afb39519715 100644
--- a/src/Storages/PostgreSQL/MaterializePostgreSQLConsumer.h
+++ b/src/Storages/PostgreSQL/MaterializePostgreSQLConsumer.h
@@ -28,7 +28,7 @@ public:
     MaterializePostgreSQLConsumer(
         ContextPtr context_,
-        postgres::Connection && connection_,
+        std::shared_ptr<postgres::Connection> connection_,
         const std::string & replication_slot_name_,
         const std::string & publication_name_,
         const std::string & metadata_path,
@@ -106,7 +106,7 @@ private:
     const std::string replication_slot_name, publication_name;
     MaterializePostgreSQLMetadata metadata;
 
-    postgres::Connection connection;
+    std::shared_ptr<postgres::Connection> connection;
 
     std::string current_lsn, final_lsn;
     const size_t max_block_size;
diff --git a/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp b/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp
index dc38d18759e..30d3f1e6e97 100644
--- a/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp
+++ b/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp
@@ -9,7 +9,7 @@
 #include
 #include
 #include
-#include
+#include
 
 namespace DB
 
@@ -42,7 +42,7 @@ PostgreSQLReplicationHandler::PostgreSQLReplicationHandler(
     , allow_automatic_update(allow_automatic_update_)
     , is_materialize_postgresql_database(is_materialize_postgresql_database_)
     , tables_list(tables_list_)
-    , connection(connection_info_)
+    , connection(std::make_shared<postgres::Connection>(connection_info_))
 {
     replication_slot = fmt::format("{}_ch_replication_slot", current_database_name);
     publication_name = fmt::format("{}_ch_publication", current_database_name);
@@ -68,8 +68,7 @@ void PostgreSQLReplicationHandler::waitConnectionAndStart()
 {
     try
     {
-        /// Will throw pqxx::broken_connection if no connection at the moment
-        connection.isValid();
+        connection->connect(); /// Will throw pqxx::broken_connection if no connection at the moment
         startSynchronization(false);
     }
     catch (const pqxx::broken_connection & pqxx_error)
@@ -95,7 +94,7 @@ void PostgreSQLReplicationHandler::shutdown()
 void PostgreSQLReplicationHandler::startSynchronization(bool throw_on_error)
 {
     {
-        postgres::Transaction<pqxx::work> tx(connection.getRef());
+        postgres::Transaction<pqxx::work> tx(connection->getRef());
         createPublicationIfNeeded(tx.getRef());
     }
@@ -121,6 +120,8 @@
             e.addMessage("while loading table {}.{}", remote_database_name, table_name);
             tryLogCurrentException(__PRETTY_FUNCTION__);
 
+            /// Throw in case of single MaterializePostgreSQL storage, because initial setup is done immediately
+            /// (unlike database engine where it is done in a separate thread).
             if (throw_on_error)
                 throw;
         }
@@ -134,16 +135,17 @@
     {
         initial_sync();
     }
+    /// Replication slot depends on publication, so if replication slot exists and new
+    /// publication was just created - drop that replication slot and start from scratch.
     else if (new_publication_created)
     {
-        /// Replication slot depends on publication, so if replication slot exists and new
-        /// publication was just created - drop that replication slot and start from scratch.
         dropReplicationSlot(tx.getRef());
         initial_sync();
     }
+    /// Synchronization and initial load already took place - do not create any new tables, just fetch StoragePtr's
+    /// and pass them to replication consumer.
     else
     {
-        /// Synchronization and initial load already took place.
LOG_TRACE(log, "Loading {} tables...", materialized_storages.size()); for (const auto & [table_name, storage] : materialized_storages) { @@ -179,9 +181,12 @@ void PostgreSQLReplicationHandler::startSynchronization(bool throw_on_error) } } + /// Pass current connection to consumer. It is not std::moved implicitly, but a shared_ptr is passed. + /// Consumer and replication handler are always executed one after another (not concurrently) and share the same connection. + /// Handler uses it only for loadFromSnapshot and shutdown methods. consumer = std::make_shared( context, - std::move(connection), + connection, replication_slot, publication_name, metadata_path, @@ -197,10 +202,10 @@ void PostgreSQLReplicationHandler::startSynchronization(bool throw_on_error) } -StoragePtr PostgreSQLReplicationHandler::loadFromSnapshot(std::string & snapshot_name, const String & table_name, +StoragePtr PostgreSQLReplicationHandler::loadFromSnapshot(String & snapshot_name, const String & table_name, StorageMaterializePostgreSQL * materialized_storage) { - auto tx = std::make_shared(connection.getRef()); + auto tx = std::make_shared(connection->getRef()); std::string query_str = fmt::format("SET TRANSACTION SNAPSHOT '{}'", snapshot_name); tx->exec(query_str); @@ -242,7 +247,16 @@ void PostgreSQLReplicationHandler::consumerFunc() bool schedule_now = consumer->consume(skipped_tables); if (!skipped_tables.empty()) - reloadFromSnapshot(skipped_tables); + { + try + { + reloadFromSnapshot(skipped_tables); + } + catch (...) + { + tryLogCurrentException(__PRETTY_FUNCTION__); + } + } if (stop_synchronization) return; @@ -270,6 +284,7 @@ bool PostgreSQLReplicationHandler::isPublicationExist(pqxx::work & tx) void PostgreSQLReplicationHandler::createPublicationIfNeeded(pqxx::work & tx, bool create_without_check) { + /// For database engine a publication can be created earlier than in startReplication(). if (new_publication_created) return; @@ -370,12 +385,10 @@ void PostgreSQLReplicationHandler::dropPublication(pqxx::nontransaction & tx) void PostgreSQLReplicationHandler::shutdownFinal() { - if (Poco::File(metadata_path).exists()) - Poco::File(metadata_path).remove(); - - postgres::Connection connection_(connection_info); - postgres::Transaction tx(connection_.getRef()); + if (std::filesystem::exists(metadata_path)) + std::filesystem::remove(metadata_path); + postgres::Transaction tx(connection->getRef()); dropPublication(tx.getRef()); if (isReplicationSlotExist(tx.getRef(), replication_slot)) dropReplicationSlot(tx.getRef()); @@ -432,80 +445,69 @@ void PostgreSQLReplicationHandler::reloadFromSnapshot(const std::vector tx(replication_connection.getRef()); + + std::string snapshot_name, start_lsn; + createReplicationSlot(tx.getRef(), start_lsn, snapshot_name, true); + + for (const auto & [relation_id, table_name] : relation_data) { - postgres::Connection replication_connection(connection_info, /* replication */true); - postgres::Transaction tx(replication_connection.getRef()); + auto storage = DatabaseCatalog::instance().getTable(StorageID(current_database_name, table_name), context); + auto * materialized_storage = storage->as (); - std::string snapshot_name, start_lsn; - createReplicationSlot(tx.getRef(), start_lsn, snapshot_name, true); + auto temp_materialized_storage = materialized_storage->createTemporary(); - for (const auto & [relation_id, table_name] : relation_data) + /// This snapshot is valid up to the end of the transaction, which exported it. 
+ StoragePtr temp_nested_storage = loadFromSnapshot(snapshot_name, table_name, temp_materialized_storage->as ()); + + auto table_id = materialized_storage->getNestedStorageID(); + auto temp_table_id = temp_nested_storage->getStorageID(); + + LOG_TRACE(log, "Starting background update of table {}.{} ({}) with table {}.{} ({})", + table_id.database_name, table_id.table_name, toString(table_id.uuid), + temp_table_id.database_name, temp_table_id.table_name, toString(temp_table_id.uuid)); + + auto ast_rename = std::make_shared(); + ASTRenameQuery::Element elem { - auto storage = DatabaseCatalog::instance().getTable( - StorageID(current_database_name, table_name), - context); - auto * materialized_storage = storage->as (); + ASTRenameQuery::Table{table_id.database_name, table_id.table_name}, + ASTRenameQuery::Table{temp_table_id.database_name, temp_table_id.table_name} + }; + ast_rename->elements.push_back(std::move(elem)); + ast_rename->exchange = true; - auto temp_materialized_storage = materialized_storage->createTemporary(); + auto nested_context = materialized_storage->getNestedTableContext(); - /// This snapshot is valid up to the end of the transaction, which exported it. - StoragePtr temp_nested_storage = loadFromSnapshot(snapshot_name, table_name, temp_materialized_storage->as ()); + try + { + auto materialized_table_lock = materialized_storage->lockForShare(String(), context->getSettingsRef().lock_acquire_timeout); + InterpreterRenameQuery(ast_rename, nested_context).execute(); - auto table_id = materialized_storage->getNestedStorageID(); - auto temp_table_id = temp_nested_storage->getStorageID(); - - LOG_TRACE(log, "Starting background update of table {}.{} ({}) with table {}.{} ({})", - table_id.database_name, table_id.table_name, toString(table_id.uuid), - temp_table_id.database_name, temp_table_id.table_name, toString(temp_table_id.uuid)); - - auto ast_rename = std::make_shared(); - ASTRenameQuery::Element elem { - ASTRenameQuery::Table{table_id.database_name, table_id.table_name}, - ASTRenameQuery::Table{temp_table_id.database_name, temp_table_id.table_name} - }; - ast_rename->elements.push_back(std::move(elem)); - ast_rename->exchange = true; + auto nested_storage = DatabaseCatalog::instance().getTable(StorageID(table_id.database_name, table_id.table_name), nested_context); + auto nested_table_lock = nested_storage->lockForShare(String(), context->getSettingsRef().lock_acquire_timeout); + auto nested_table_id = nested_storage->getStorageID(); - auto nested_context = materialized_storage->getNestedTableContext(); + materialized_storage->setNestedStorageID(nested_table_id); + nested_storage = materialized_storage->prepare(); + LOG_TRACE(log, "Updated table {}.{} ({})", nested_table_id.database_name, nested_table_id.table_name, toString(nested_table_id.uuid)); - try - { - auto materialized_table_lock = materialized_storage->lockForShare(String(), context->getSettingsRef().lock_acquire_timeout); - InterpreterRenameQuery(ast_rename, nested_context).execute(); - - { - auto nested_storage = DatabaseCatalog::instance().getTable(StorageID(table_id.database_name, table_id.table_name), nested_context); - auto nested_table_lock = nested_storage->lockForShare(String(), context->getSettingsRef().lock_acquire_timeout); - auto nested_table_id = nested_storage->getStorageID(); - - materialized_storage->setNestedStorageID(nested_table_id); - nested_storage = materialized_storage->prepare(); - LOG_TRACE(log, "Updated table {}.{} ({})", nested_table_id.database_name, nested_table_id.table_name, 
toString(nested_table_id.uuid)); - - /// Pass pointer to new nested table into replication consumer, remove current table from skip list and set start lsn position. - consumer->updateNested(table_name, nested_storage, relation_id, start_lsn); - } - - LOG_DEBUG(log, "Dropping table {}.{} ({})", temp_table_id.database_name, temp_table_id.table_name, toString(temp_table_id.uuid)); - InterpreterDropQuery::executeDropQuery(ASTDropQuery::Kind::Drop, nested_context, nested_context, temp_table_id, true); - - dropReplicationSlot(tx.getRef(), /* temporary */true); - } - catch (...) - { - tryLogCurrentException(__PRETTY_FUNCTION__); + /// Pass pointer to new nested table into replication consumer, remove current table from skip list and set start lsn position. + consumer->updateNested(table_name, nested_storage, relation_id, start_lsn); } + + LOG_DEBUG(log, "Dropping table {}.{} ({})", temp_table_id.database_name, temp_table_id.table_name, toString(temp_table_id.uuid)); + InterpreterDropQuery::executeDropQuery(ASTDropQuery::Kind::Drop, nested_context, nested_context, temp_table_id, true); + } + catch (...) + { + tryLogCurrentException(__PRETTY_FUNCTION__); } } - catch (...) - { - tryLogCurrentException(__PRETTY_FUNCTION__); - } + + dropReplicationSlot(tx.getRef(), /* temporary */true); } - - } #endif diff --git a/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.h b/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.h index ddea1d03763..c955b2fbe3a 100644 --- a/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.h +++ b/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.h @@ -113,7 +113,8 @@ private: String replication_slot, publication_name; - postgres::Connection connection; + /// Shared between replication_consumer and replication_handler, but never accessed concurrently. + std::shared_ptr connection; /// Replication consumer. Manages decoding of replication stream and syncing into tables. std::shared_ptr consumer; diff --git a/src/Storages/PostgreSQL/StorageMaterializePostgreSQL.h b/src/Storages/PostgreSQL/StorageMaterializePostgreSQL.h index 6b896c24dfa..e38041a1b78 100644 --- a/src/Storages/PostgreSQL/StorageMaterializePostgreSQL.h +++ b/src/Storages/PostgreSQL/StorageMaterializePostgreSQL.h @@ -173,7 +173,7 @@ private: /// It results in the fact: single MaterializePostgreSQL storage is created only if its nested table is created. /// In case of attach - this setup will be done in a separate thread in the background. It will also /// be checked for nested table and attempted to load it if it does not exist for some reason. - bool is_attach; + bool is_attach = true; }; } diff --git a/src/TableFunctions/TableFunctionPostgreSQL.cpp b/src/TableFunctions/TableFunctionPostgreSQL.cpp index db609cd6081..54facb9ca0b 100644 --- a/src/TableFunctions/TableFunctionPostgreSQL.cpp +++ b/src/TableFunctions/TableFunctionPostgreSQL.cpp @@ -43,9 +43,9 @@ StoragePtr TableFunctionPostgreSQL::executeImpl(const ASTPtr & /*ast_function*/, ColumnsDescription TableFunctionPostgreSQL::getActualTableStructure(ContextPtr context) const { const bool use_nulls = context->getSettingsRef().external_table_functions_use_nulls; - auto connection = connection_pool->get(); + auto connection_holder = connection_pool->get(); auto columns = fetchPostgreSQLTableStructure( - connection->conn(), + connection_holder->get(), remote_table_schema.empty() ? doubleQuoteString(remote_table_name) : doubleQuoteString(remote_table_schema) + '.' 
+ doubleQuoteString(remote_table_name), use_nulls).columns; diff --git a/src/TableFunctions/TableFunctionPostgreSQL.h b/src/TableFunctions/TableFunctionPostgreSQL.h index a3d024c1a50..c31d02fa955 100644 --- a/src/TableFunctions/TableFunctionPostgreSQL.h +++ b/src/TableFunctions/TableFunctionPostgreSQL.h @@ -5,7 +5,7 @@ #if USE_LIBPQXX #include -#include +#include namespace DB diff --git a/tests/integration/test_dictionaries_update_field/__init__.py b/tests/integration/test_dictionaries_update_field/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/integration/test_dictionaries_update_field/configs/config.xml b/tests/integration/test_dictionaries_update_field/configs/config.xml new file mode 100644 index 00000000000..a1518083be3 --- /dev/null +++ b/tests/integration/test_dictionaries_update_field/configs/config.xml @@ -0,0 +1,30 @@ + + + + trace + /var/log/clickhouse-server/clickhouse-server.log + /var/log/clickhouse-server/clickhouse-server.err.log + 1000M + 10 + + + 9000 + 127.0.0.1 + + + + true + none + + AcceptCertificateHandler + + + + + 500 + 5368709120 + ./clickhouse/ + users.xml + + /etc/clickhouse-server/config.d/*.xml + diff --git a/tests/integration/test_dictionaries_update_field/configs/users.xml b/tests/integration/test_dictionaries_update_field/configs/users.xml new file mode 100644 index 00000000000..6061af8e33d --- /dev/null +++ b/tests/integration/test_dictionaries_update_field/configs/users.xml @@ -0,0 +1,23 @@ + + + + + + + + + + + + ::/0 + + default + default + + + + + + + + From 4ac023e511ad5b3afbb16c1286e37ac131ea7ce8 Mon Sep 17 00:00:00 2001 From: kssenii Date: Sun, 9 May 2021 22:18:16 +0000 Subject: [PATCH 115/931] Small fixes --- src/Core/PostgreSQL/Connection.h | 5 ++--- src/Core/PostgreSQL/insertPostgreSQLValue.cpp | 3 ++- src/Core/ya.make.in | 2 +- .../PostgreSQL/DatabaseMaterializePostgreSQL.cpp | 16 ++++++++-------- .../PostgreSQL/fetchPostgreSQLTableStructure.cpp | 6 +++--- .../PostgreSQL/MaterializePostgreSQLConsumer.cpp | 6 +++--- .../PostgreSQL/MaterializePostgreSQLMetadata.cpp | 1 - .../PostgreSQL/MaterializePostgreSQLMetadata.h | 2 +- 8 files changed, 20 insertions(+), 21 deletions(-) diff --git a/src/Core/PostgreSQL/Connection.h b/src/Core/PostgreSQL/Connection.h index 1e9334eace5..46646ea6f35 100644 --- a/src/Core/PostgreSQL/Connection.h +++ b/src/Core/PostgreSQL/Connection.h @@ -2,6 +2,7 @@ #include // Y_IGNORE #include +#include namespace postgres @@ -9,13 +10,11 @@ namespace postgres using ConnectionInfo = std::pair; using ConnectionPtr = std::unique_ptr; -class Connection +class Connection : private boost::noncopyable { public: Connection(const ConnectionInfo & connection_info_, bool replication_ = false); - Connection(const Connection & other) = delete; - pqxx::connection & getRef(); void connect(); diff --git a/src/Core/PostgreSQL/insertPostgreSQLValue.cpp b/src/Core/PostgreSQL/insertPostgreSQLValue.cpp index 70537767dc5..07f2404fdc3 100644 --- a/src/Core/PostgreSQL/insertPostgreSQLValue.cpp +++ b/src/Core/PostgreSQL/insertPostgreSQLValue.cpp @@ -131,7 +131,8 @@ void insertPostgreSQLValue( { max_dimension = std::max(max_dimension, dimension); - if (--dimension == 0) + --dimension; + if (dimension == 0) break; dimensions[dimension].emplace_back(Array(dimensions[dimension + 1].begin(), dimensions[dimension + 1].end())); diff --git a/src/Core/ya.make.in b/src/Core/ya.make.in index f17aef89c54..6b89dc18482 100644 --- a/src/Core/ya.make.in +++ b/src/Core/ya.make.in @@ -10,7 +10,7 @@ PEERDIR( SRCS( - + ) END() diff --git 
a/src/Databases/PostgreSQL/DatabaseMaterializePostgreSQL.cpp b/src/Databases/PostgreSQL/DatabaseMaterializePostgreSQL.cpp index 163e38f79f2..af6e2c1edfd 100644 --- a/src/Databases/PostgreSQL/DatabaseMaterializePostgreSQL.cpp +++ b/src/Databases/PostgreSQL/DatabaseMaterializePostgreSQL.cpp @@ -42,7 +42,7 @@ DatabaseMaterializePostgreSQL::DatabaseMaterializePostgreSQL( const String & postgres_database_name, const postgres::ConnectionInfo & connection_info_, std::unique_ptr settings_) - : DatabaseAtomic(database_name_, metadata_path_, uuid_, "DatabaseMaterializePostgreSQL (" + database_name_ + ")", context_) + : DatabaseAtomic(database_name_, metadata_path_, uuid_, "DatabaseMaterializePostgreSQL (" + database_name_ + ")", context_) , database_engine_define(database_engine_define_->clone()) , remote_database_name(postgres_database_name) , connection_info(connection_info_) @@ -72,16 +72,16 @@ void DatabaseMaterializePostgreSQL::startSynchronization() /// Check nested ReplacingMergeTree table. auto storage = DatabaseAtomic::tryGetTable(table_name, getContext()); - if (!storage) - { - /// Nested table does not exist and will be created by replication thread. - storage = StorageMaterializePostgreSQL::create(StorageID(database_name, table_name), getContext()); - } - else + if (storage) { /// Nested table was already created and synchronized. storage = StorageMaterializePostgreSQL::create(storage, getContext()); } + else + { + /// Nested table does not exist and will be created by replication thread. + storage = StorageMaterializePostgreSQL::create(StorageID(database_name, table_name), getContext()); + } /// Cache MaterializePostgreSQL wrapper over nested table. materialized_tables[table_name] = storage; @@ -90,7 +90,7 @@ void DatabaseMaterializePostgreSQL::startSynchronization() replication_handler->addStorage(table_name, storage->as()); } - LOG_TRACE(log, "Loaded {} tables. Starting synchronization, (database: {})", materialized_tables.size(), database_name); + LOG_TRACE(log, "Loaded {} tables. Starting synchronization", materialized_tables.size()); replication_handler->startup(); } diff --git a/src/Databases/PostgreSQL/fetchPostgreSQLTableStructure.cpp b/src/Databases/PostgreSQL/fetchPostgreSQLTableStructure.cpp index 8b933f1e6a5..199b66b7160 100644 --- a/src/Databases/PostgreSQL/fetchPostgreSQLTableStructure.cpp +++ b/src/Databases/PostgreSQL/fetchPostgreSQLTableStructure.cpp @@ -179,7 +179,7 @@ PostgreSQLTableStructure fetchPostgreSQLTableStructure( "FROM pg_index i " "JOIN pg_attribute a ON a.attrelid = i.indrelid " "AND a.attnum = ANY(i.indkey) " - "WHERE i.indrelid = '{}'::regclass AND i.indisprimary", postgres_table_name); + "WHERE i.indrelid = {}::regclass AND i.indisprimary", quoteString(postgres_table_name)); table.primary_key_columns = readNamesAndTypesList(tx, postgres_table_name, query, use_nulls, true); } @@ -201,10 +201,10 @@ PostgreSQLTableStructure fetchPostgreSQLTableStructure( "and a.attrelid = t.oid " "and a.attnum = ANY(ix.indkey) " "and t.relkind = 'r' " /// simple tables - "and t.relname = '{}' " /// Connection is already done to a needed database, only table name is needed. + "and t.relname = {} " /// Connection is already done to a needed database, only table name is needed. 
"and ix.indisreplident = 't' " /// index is is replica identity index "ORDER BY a.attname", /// column names - postgres_table_name); + quoteString(postgres_table_name)); table.replica_identity_columns = readNamesAndTypesList(tx, postgres_table_name, query, use_nulls, true); } diff --git a/src/Storages/PostgreSQL/MaterializePostgreSQLConsumer.cpp b/src/Storages/PostgreSQL/MaterializePostgreSQLConsumer.cpp index 79a98b7b070..43f6f48a4ef 100644 --- a/src/Storages/PostgreSQL/MaterializePostgreSQLConsumer.cpp +++ b/src/Storages/PostgreSQL/MaterializePostgreSQLConsumer.cpp @@ -241,14 +241,14 @@ void MaterializePostgreSQLConsumer::readTupleData( case PostgreSQLQuery::INSERT: { buffer.columns[num_columns]->insert(Int8(1)); - buffer.columns[num_columns + 1]->insert(UInt64(metadata.version())); + buffer.columns[num_columns + 1]->insert(UInt64(metadata.getAndIncrementVersion())); break; } case PostgreSQLQuery::DELETE: { buffer.columns[num_columns]->insert(Int8(-1)); - buffer.columns[num_columns + 1]->insert(UInt64(metadata.version())); + buffer.columns[num_columns + 1]->insert(UInt64(metadata.getAndIncrementVersion())); break; } @@ -260,7 +260,7 @@ void MaterializePostgreSQLConsumer::readTupleData( else buffer.columns[num_columns]->insert(Int8(1)); - buffer.columns[num_columns + 1]->insert(UInt64(metadata.version())); + buffer.columns[num_columns + 1]->insert(UInt64(metadata.getAndIncrementVersion())); break; } diff --git a/src/Storages/PostgreSQL/MaterializePostgreSQLMetadata.cpp b/src/Storages/PostgreSQL/MaterializePostgreSQLMetadata.cpp index ee945c67c1a..66fee26a5e8 100644 --- a/src/Storages/PostgreSQL/MaterializePostgreSQLMetadata.cpp +++ b/src/Storages/PostgreSQL/MaterializePostgreSQLMetadata.cpp @@ -78,7 +78,6 @@ void MaterializePostgreSQLMetadata::commitMetadata(std::string & lsn, const std: try { actual_lsn = finalizeStreamFunc(); - /// This is not supposed to happen if (actual_lsn != last_lsn) { diff --git a/src/Storages/PostgreSQL/MaterializePostgreSQLMetadata.h b/src/Storages/PostgreSQL/MaterializePostgreSQLMetadata.h index d09adb61363..8895707450b 100644 --- a/src/Storages/PostgreSQL/MaterializePostgreSQLMetadata.h +++ b/src/Storages/PostgreSQL/MaterializePostgreSQLMetadata.h @@ -14,7 +14,7 @@ public: void readMetadata(); - size_t version() { return last_version++; } + size_t getAndIncrementVersion() { return last_version++; } std::string lsn() { return last_lsn; } From ae1191d0c069d1a1d37990a905219437b4e4755f Mon Sep 17 00:00:00 2001 From: kssenii Date: Mon, 10 May 2021 09:10:02 +0000 Subject: [PATCH 116/931] Get rid of metadata file, rely only on lsn --- .../DatabaseMaterializePostgreSQL.cpp | 9 -- .../MaterializePostgreSQLConsumer.cpp | 71 ++++--------- .../MaterializePostgreSQLConsumer.h | 26 ++--- .../MaterializePostgreSQLMetadata.cpp | 100 ------------------ .../MaterializePostgreSQLMetadata.h | 31 ------ .../PostgreSQLReplicationHandler.cpp | 40 +++---- .../PostgreSQL/PostgreSQLReplicationHandler.h | 19 +--- .../StorageMaterializePostgreSQL.cpp | 5 - 8 files changed, 54 insertions(+), 247 deletions(-) delete mode 100644 src/Storages/PostgreSQL/MaterializePostgreSQLMetadata.cpp delete mode 100644 src/Storages/PostgreSQL/MaterializePostgreSQLMetadata.h diff --git a/src/Databases/PostgreSQL/DatabaseMaterializePostgreSQL.cpp b/src/Databases/PostgreSQL/DatabaseMaterializePostgreSQL.cpp index af6e2c1edfd..dda21937fa2 100644 --- a/src/Databases/PostgreSQL/DatabaseMaterializePostgreSQL.cpp +++ b/src/Databases/PostgreSQL/DatabaseMaterializePostgreSQL.cpp @@ -31,8 +31,6 @@ namespace 
ErrorCodes extern const int NOT_IMPLEMENTED; } -static const auto METADATA_SUFFIX = ".materialize_postgresql_metadata"; - DatabaseMaterializePostgreSQL::DatabaseMaterializePostgreSQL( ContextPtr context_, const String & metadata_path_, @@ -57,7 +55,6 @@ void DatabaseMaterializePostgreSQL::startSynchronization() remote_database_name, database_name, connection_info, - metadata_path + METADATA_SUFFIX, getContext(), settings->materialize_postgresql_max_block_size.value, settings->materialize_postgresql_allow_automatic_update, @@ -189,12 +186,6 @@ void DatabaseMaterializePostgreSQL::drop(ContextPtr local_context) if (replication_handler) replication_handler->shutdownFinal(); - /// Remove metadata - Poco::File metadata(getMetadataPath() + METADATA_SUFFIX); - - if (metadata.exists()) - metadata.remove(false); - DatabaseAtomic::drop(StorageMaterializePostgreSQL::makeNestedTableContext(local_context)); } diff --git a/src/Storages/PostgreSQL/MaterializePostgreSQLConsumer.cpp b/src/Storages/PostgreSQL/MaterializePostgreSQLConsumer.cpp index 43f6f48a4ef..1a6e635f336 100644 --- a/src/Storages/PostgreSQL/MaterializePostgreSQLConsumer.cpp +++ b/src/Storages/PostgreSQL/MaterializePostgreSQLConsumer.cpp @@ -1,8 +1,6 @@ #include "MaterializePostgreSQLConsumer.h" -#if USE_LIBPQXX #include "StorageMaterializePostgreSQL.h" - #include #include #include @@ -27,7 +25,6 @@ MaterializePostgreSQLConsumer::MaterializePostgreSQLConsumer( std::shared_ptr connection_, const std::string & replication_slot_name_, const std::string & publication_name_, - const std::string & metadata_path, const std::string & start_lsn, const size_t max_block_size_, bool allow_automatic_update_, @@ -36,7 +33,6 @@ MaterializePostgreSQLConsumer::MaterializePostgreSQLConsumer( , context(context_) , replication_slot_name(replication_slot_name_) , publication_name(publication_name_) - , metadata(metadata_path) , connection(connection_) , current_lsn(start_lsn) , max_block_size(max_block_size_) @@ -80,27 +76,6 @@ void MaterializePostgreSQLConsumer::Buffer::createEmptyBuffer(StoragePtr storage } -void MaterializePostgreSQLConsumer::readMetadata() -{ - try - { - metadata.readMetadata(); - - if (!metadata.lsn().empty()) - { - auto tx = std::make_shared(connection->getRef()); - final_lsn = metadata.lsn(); - final_lsn = advanceLSN(tx); - tx->commit(); - } - } - catch (...) 
-    {
-        tryLogCurrentException(__PRETTY_FUNCTION__);
-    }
-}
 
 void MaterializePostgreSQLConsumer::insertValue(Buffer & buffer, const std::string & value, size_t column_idx)
 {
     const auto & sample = buffer.description.sample_block.getByPosition(column_idx);
@@ -241,14 +216,14 @@ void MaterializePostgreSQLConsumer::readTupleData(
         case PostgreSQLQuery::INSERT:
         {
             buffer.columns[num_columns]->insert(Int8(1));
-            buffer.columns[num_columns + 1]->insert(UInt64(metadata.getAndIncrementVersion()));
+            buffer.columns[num_columns + 1]->insert(lsn_value);
             break;
         }
         case PostgreSQLQuery::DELETE:
         {
             buffer.columns[num_columns]->insert(Int8(-1));
-            buffer.columns[num_columns + 1]->insert(UInt64(metadata.getAndIncrementVersion()));
+            buffer.columns[num_columns + 1]->insert(lsn_value);
             break;
         }
@@ -260,7 +235,7 @@
             else
                 buffer.columns[num_columns]->insert(Int8(1));
 
-            buffer.columns[num_columns + 1]->insert(UInt64(metadata.getAndIncrementVersion()));
+            buffer.columns[num_columns + 1]->insert(lsn_value);
             break;
         }
@@ -488,30 +463,27 @@ void MaterializePostgreSQLConsumer::syncTables(std::shared_ptr<pqxx::nontransaction> tx)
 
-            metadata.commitMetadata(final_lsn, [&]()
-            {
-                auto insert = std::make_shared<ASTInsertQuery>();
-                insert->table_id = storage->getStorageID();
-                insert->columns = buffer.columnsAST;
+            auto insert = std::make_shared<ASTInsertQuery>();
+            insert->table_id = storage->getStorageID();
+            insert->columns = buffer.columnsAST;
 
-                auto insert_context = Context::createCopy(context);
-                insert_context->makeQueryContext();
-                insert_context->addQueryFactoriesInfo(Context::QueryLogFactories::Storage, "ReplacingMergeTree");
+            auto insert_context = Context::createCopy(context);
+            insert_context->makeQueryContext();
+            insert_context->addQueryFactoriesInfo(Context::QueryLogFactories::Storage, "ReplacingMergeTree");
 
-                InterpreterInsertQuery interpreter(insert, insert_context, true);
-                auto block_io = interpreter.execute();
-                OneBlockInputStream input(result_rows);
+            InterpreterInsertQuery interpreter(insert, insert_context, true);
+            auto block_io = interpreter.execute();
+            OneBlockInputStream input(result_rows);
 
-                assertBlocksHaveEqualStructure(input.getHeader(), block_io.out->getHeader(), "postgresql replica table sync");
-                copyData(input, *block_io.out);
+            assertBlocksHaveEqualStructure(input.getHeader(), block_io.out->getHeader(), "postgresql replica table sync");
+            copyData(input, *block_io.out);
 
-                auto actual_lsn = advanceLSN(tx);
-                buffer.columns = buffer.description.sample_block.cloneEmptyColumns();
-
-                return actual_lsn;
-            });
+            /// The next attempt to read data will start with actual_lsn, returned from advanceLSN. current_lsn acts as
+            /// a version for rows in ReplacingMergeTree table.
+            current_lsn = advanceLSN(tx);
+            buffer.columns = buffer.description.sample_block.cloneEmptyColumns();
         }
     }
     catch (...)
@@ -632,6 +604,8 @@ bool MaterializePostgreSQLConsumer::readFromReplicationSlot() slot_empty = false; current_lsn = (*row)[0]; + lsn_value = getLSNValue(current_lsn); + LOG_DEBUG(log, "Current lsn: {}, value: {}", current_lsn, lsn_value); processReplicationMessage((*row)[1].c_str(), (*row)[1].size()); } @@ -704,7 +678,4 @@ void MaterializePostgreSQLConsumer::updateNested(const String & table_name, Stor skip_list[table_id] = table_start_lsn; } - } - -#endif diff --git a/src/Storages/PostgreSQL/MaterializePostgreSQLConsumer.h b/src/Storages/PostgreSQL/MaterializePostgreSQLConsumer.h index afb39519715..43ac919f119 100644 --- a/src/Storages/PostgreSQL/MaterializePostgreSQLConsumer.h +++ b/src/Storages/PostgreSQL/MaterializePostgreSQLConsumer.h @@ -1,11 +1,5 @@ #pragma once -#if !defined(ARCADIA_BUILD) -#include "config_core.h" -#endif - -#if USE_LIBPQXX -#include "MaterializePostgreSQLMetadata.h" #include #include @@ -15,7 +9,6 @@ #include #include #include -#include "pqxx/pqxx" // Y_IGNORE namespace DB @@ -29,16 +22,13 @@ public: MaterializePostgreSQLConsumer( ContextPtr context_, std::shared_ptr connection_, - const std::string & replication_slot_name_, - const std::string & publication_name_, - const std::string & metadata_path, - const std::string & start_lsn, + const String & replication_slot_name_, + const String & publication_name_, + const String & start_lsn, const size_t max_block_size_, bool allow_automatic_update_, Storages storages_); - void readMetadata(); - bool consume(std::vector> & skipped_tables); /// Called from reloadFromSnapshot by replication handler. This method is needed to move a table back into synchronization @@ -105,14 +95,17 @@ private: ContextPtr context; const std::string replication_slot_name, publication_name; - MaterializePostgreSQLMetadata metadata; std::shared_ptr connection; std::string current_lsn, final_lsn; + + /// current_lsn converted from String to Int64 via getLSNValue(). + UInt64 lsn_value; + const size_t max_block_size; bool allow_automatic_update; - std::string table_to_insert; + String table_to_insert; /// List of tables which need to be synced after last replication stream. std::unordered_set tables_to_sync; @@ -147,7 +140,4 @@ private: /// i.e. we will not miss the first start_lsn position for reloaded table. 
std::unordered_map skip_list; }; - } - -#endif diff --git a/src/Storages/PostgreSQL/MaterializePostgreSQLMetadata.cpp b/src/Storages/PostgreSQL/MaterializePostgreSQLMetadata.cpp deleted file mode 100644 index 66fee26a5e8..00000000000 --- a/src/Storages/PostgreSQL/MaterializePostgreSQLMetadata.cpp +++ /dev/null @@ -1,100 +0,0 @@ -#include "MaterializePostgreSQLMetadata.h" - -#if USE_LIBPQXX -#include -#include -#include -#include -#include -#include - - -namespace DB -{ - -MaterializePostgreSQLMetadata::MaterializePostgreSQLMetadata(const std::string & metadata_file_path) - : metadata_file(metadata_file_path) - , tmp_metadata_file(metadata_file_path + ".tmp") - , last_version(1) -{ -} - - -void MaterializePostgreSQLMetadata::readMetadata() -{ - if (Poco::File(metadata_file).exists()) - { - ReadBufferFromFile in(metadata_file, DBMS_DEFAULT_BUFFER_SIZE); - - assertString("\nLast version:\t", in); - readIntText(last_version, in); - - assertString("\nLast LSN:\t", in); - readString(last_lsn, in); - - if (checkString("\nActual LSN:\t", in)) - { - std::string actual_lsn; - readString(actual_lsn, in); - - if (!actual_lsn.empty()) - last_lsn = actual_lsn; - } - - LOG_DEBUG(&Poco::Logger::get("MaterializePostgreSQLMetadata"), - "Last written version is {}. (From metadata file {})", last_version, metadata_file); - } -} - - -void MaterializePostgreSQLMetadata::writeMetadata(bool append_metadata) -{ - WriteBufferFromFile out(tmp_metadata_file, DBMS_DEFAULT_BUFFER_SIZE, O_WRONLY | O_TRUNC | O_CREAT); - - if (append_metadata) - { - writeString("\nActual LSN:\t" + toString(last_lsn), out); - } - else - { - writeString("\nLast version:\t" + toString(last_version), out); - writeString("\nLast LSN:\t" + toString(last_lsn), out); - } - - out.next(); - out.sync(); - out.close(); -} - - -/// While data is received, version is updated. Before table sync, write last version to tmp file. -/// Then sync data to table and rename tmp to non-tmp. -void MaterializePostgreSQLMetadata::commitMetadata(std::string & lsn, const std::function & finalizeStreamFunc) -{ - std::string actual_lsn; - last_lsn = lsn; - writeMetadata(); - - try - { - actual_lsn = finalizeStreamFunc(); - /// This is not supposed to happen - if (actual_lsn != last_lsn) - { - writeMetadata(true); - LOG_WARNING(&Poco::Logger::get("MaterializePostgreSQLMetadata"), - "Last written LSN {} is not equal to actual LSN {}", last_lsn, actual_lsn); - } - - Poco::File(tmp_metadata_file).renameTo(metadata_file); - } - catch (...) 
- { - Poco::File(tmp_metadata_file).remove(); - throw; - } -} - -} - -#endif diff --git a/src/Storages/PostgreSQL/MaterializePostgreSQLMetadata.h b/src/Storages/PostgreSQL/MaterializePostgreSQLMetadata.h deleted file mode 100644 index 8895707450b..00000000000 --- a/src/Storages/PostgreSQL/MaterializePostgreSQLMetadata.h +++ /dev/null @@ -1,31 +0,0 @@ -#pragma once -#include - - -namespace DB -{ - -class MaterializePostgreSQLMetadata -{ -public: - MaterializePostgreSQLMetadata(const std::string & metadata_file_path); - - void commitMetadata(std::string & lsn, const std::function & finalizeStreamFunc); - - void readMetadata(); - - size_t getAndIncrementVersion() { return last_version++; } - - std::string lsn() { return last_lsn; } - -private: - void writeMetadata(bool append_metadata = false); - - const std::string metadata_file; - const std::string tmp_metadata_file; - - uint64_t last_version; - std::string last_lsn; -}; - -} diff --git a/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp b/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp index 30d3f1e6e97..f9ed5eb9bbb 100644 --- a/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp +++ b/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp @@ -1,6 +1,5 @@ #include "PostgreSQLReplicationHandler.h" -#if USE_LIBPQXX #include #include #include @@ -8,8 +7,8 @@ #include #include #include +#include #include -#include namespace DB @@ -26,7 +25,6 @@ PostgreSQLReplicationHandler::PostgreSQLReplicationHandler( const String & remote_database_name_, const String & current_database_name_, const postgres::ConnectionInfo & connection_info_, - const std::string & metadata_path_, ContextPtr context_, const size_t max_block_size_, bool allow_automatic_update_, @@ -36,7 +34,6 @@ PostgreSQLReplicationHandler::PostgreSQLReplicationHandler( , context(context_) , remote_database_name(remote_database_name_) , current_database_name(current_database_name_) - , metadata_path(metadata_path_) , connection_info(connection_info_) , max_block_size(max_block_size_) , allow_automatic_update(allow_automatic_update_) @@ -103,7 +100,15 @@ void PostgreSQLReplicationHandler::startSynchronization(bool throw_on_error) /// List of nested tables (table_name -> nested_storage), which is passed to replication consumer. std::unordered_map nested_storages; - std::string snapshot_name, start_lsn; + + /// snapshot_name is initialized only if a new replication slot is created. + /// start_lsn is initialized in two places: + /// 1. if replication slot does not exist, start_lsn will be returned with its creation return parameters; + /// 2. if replication slot already exist, start_lsn is read from pg_replication_slots as + /// `confirmed_flush_lsn` - the address (LSN) up to which the logical slot's consumer has confirmed receiving data. + /// Data older than this is not available anymore. + /// TODO: more tests + String snapshot_name, start_lsn; auto initial_sync = [&]() { @@ -131,12 +136,13 @@ void PostgreSQLReplicationHandler::startSynchronization(bool throw_on_error) /// There is one replication slot for each replication handler. In case of MaterializePostgreSQL database engine, /// there is one replication slot per database. Its lifetime must be equal to the lifetime of replication handler. /// Recreation of a replication slot imposes reloading of all tables. 
- if (!isReplicationSlotExist(tx.getRef(), replication_slot)) + if (!isReplicationSlotExist(tx.getRef(), replication_slot, start_lsn)) { initial_sync(); } /// Replication slot depends on publication, so if replication slot exists and new /// publication was just created - drop that replication slot and start from scratch. + /// TODO: tests else if (new_publication_created) { dropReplicationSlot(tx.getRef()); @@ -189,7 +195,6 @@ void PostgreSQLReplicationHandler::startSynchronization(bool throw_on_error) connection, replication_slot, publication_name, - metadata_path, start_lsn, max_block_size, allow_automatic_update, @@ -317,24 +322,26 @@ void PostgreSQLReplicationHandler::createPublicationIfNeeded(pqxx::work & tx, bo } -bool PostgreSQLReplicationHandler::isReplicationSlotExist(pqxx::nontransaction & tx, std::string & slot_name) +bool PostgreSQLReplicationHandler::isReplicationSlotExist(pqxx::nontransaction & tx, String & slot_name, String & start_lsn) { - std::string query_str = fmt::format("SELECT active, restart_lsn FROM pg_replication_slots WHERE slot_name = '{}'", slot_name); + std::string query_str = fmt::format("SELECT active, restart_lsn, confirmed_flush_lsn FROM pg_replication_slots WHERE slot_name = '{}'", slot_name); pqxx::result result{tx.exec(query_str)}; /// Replication slot does not exist if (result.empty()) return false; - LOG_TRACE(log, "Replication slot {} already exists (active: {}). Restart lsn position is {}", - slot_name, result[0][0].as(), result[0][0].as()); + start_lsn = result[0][2].as(); + + LOG_TRACE(log, "Replication slot {} already exists (active: {}). Restart lsn position: {}, confirmed flush lsn: {}", + slot_name, result[0][0].as(), result[0][1].as(), start_lsn); return true; } void PostgreSQLReplicationHandler::createReplicationSlot( - pqxx::nontransaction & tx, std::string & start_lsn, std::string & snapshot_name, bool temporary) + pqxx::nontransaction & tx, String & start_lsn, String & snapshot_name, bool temporary) { std::string query_str; @@ -385,12 +392,10 @@ void PostgreSQLReplicationHandler::dropPublication(pqxx::nontransaction & tx) void PostgreSQLReplicationHandler::shutdownFinal() { - if (std::filesystem::exists(metadata_path)) - std::filesystem::remove(metadata_path); - postgres::Transaction tx(connection->getRef()); dropPublication(tx.getRef()); - if (isReplicationSlotExist(tx.getRef(), replication_slot)) + String last_committed_lsn; + if (isReplicationSlotExist(tx.getRef(), replication_slot, last_committed_lsn)) dropReplicationSlot(tx.getRef()); } @@ -508,6 +513,5 @@ void PostgreSQLReplicationHandler::reloadFromSnapshot(const std::vector #include @@ -29,7 +22,6 @@ public: const String & remote_database_name_, const String & current_database_name_, const postgres::ConnectionInfo & connection_info_, - const String & metadata_path_, ContextPtr context_, const size_t max_block_size_, bool allow_automatic_update_, @@ -69,9 +61,9 @@ private: /// Methods to manage Replication Slots. 
- bool isReplicationSlotExist(pqxx::nontransaction & tx, std::string & slot_name); + bool isReplicationSlotExist(pqxx::nontransaction & tx, String & slot_name, String & start_lsn); - void createReplicationSlot(pqxx::nontransaction & tx, std::string & start_lsn, std::string & snapshot_name, bool temporary = false); + void createReplicationSlot(pqxx::nontransaction & tx, String & start_lsn, String & snapshot_name, bool temporary = false); void dropReplicationSlot(pqxx::nontransaction & tx, bool temporary = false); @@ -85,16 +77,13 @@ private: void reloadFromSnapshot(const std::vector> & relation_data); - PostgreSQLTableStructurePtr fetchTableStructure(pqxx::ReplicationTransaction & tx, const std::string & table_name); + PostgreSQLTableStructurePtr fetchTableStructure(pqxx::ReplicationTransaction & tx, const String & table_name); Poco::Logger * log; ContextPtr context; const String remote_database_name, current_database_name; - /// Path for replication metadata. - const String metadata_path; - /// Connection string and address for logs. postgres::ConnectionInfo connection_info; @@ -133,5 +122,3 @@ private: }; } - -#endif diff --git a/src/Storages/PostgreSQL/StorageMaterializePostgreSQL.cpp b/src/Storages/PostgreSQL/StorageMaterializePostgreSQL.cpp index 618f31c9c1b..4a7c9655149 100644 --- a/src/Storages/PostgreSQL/StorageMaterializePostgreSQL.cpp +++ b/src/Storages/PostgreSQL/StorageMaterializePostgreSQL.cpp @@ -63,15 +63,10 @@ StorageMaterializePostgreSQL::StorageMaterializePostgreSQL( setInMemoryMetadata(storage_metadata); - /// Path to store replication metadata (like last written version, etc). - auto metadata_path = DatabaseCatalog::instance().getDatabase(getStorageID().database_name)->getMetadataPath() - + "/.metadata_" + table_id_.database_name + "_" + table_id_.table_name + "_" + toString(table_id_.uuid); - replication_handler = std::make_unique( remote_database_name, table_id_.database_name, connection_info, - metadata_path, getContext(), replication_settings->materialize_postgresql_max_block_size.value, /* allow_automatic_update */ false, /* is_materialize_postgresql_database */false); From 65c574db937936669aa1a600d58c915ff1876f03 Mon Sep 17 00:00:00 2001 From: kssenii Date: Mon, 10 May 2021 11:31:06 +0000 Subject: [PATCH 117/931] Add test --- .../MaterializePostgreSQLConsumer.cpp | 5 +- .../PostgreSQLReplicationHandler.cpp | 2 +- .../TableFunctionPostgreSQL.cpp | 1 - .../test.py | 76 +++++++++++++++++++ 4 files changed, 79 insertions(+), 5 deletions(-) diff --git a/src/Storages/PostgreSQL/MaterializePostgreSQLConsumer.cpp b/src/Storages/PostgreSQL/MaterializePostgreSQLConsumer.cpp index 1a6e635f336..5e41e20550e 100644 --- a/src/Storages/PostgreSQL/MaterializePostgreSQLConsumer.cpp +++ b/src/Storages/PostgreSQL/MaterializePostgreSQLConsumer.cpp @@ -35,6 +35,7 @@ MaterializePostgreSQLConsumer::MaterializePostgreSQLConsumer( , publication_name(publication_name_) , connection(connection_) , current_lsn(start_lsn) + , lsn_value(getLSNValue(start_lsn)) , max_block_size(max_block_size_) , allow_automatic_update(allow_automatic_update_) , storages(storages_) @@ -249,6 +250,7 @@ void MaterializePostgreSQLConsumer::processReplicationMessage(const char * repli /// Skip '\x' size_t pos = 2; char type = readInt8(replication_message, pos, size); + //LOG_DEBUG(log, "Message type: {}, lsn string: {}, lsn value {}", type, current_lsn, lsn_value); switch (type) { @@ -480,8 +482,6 @@ void MaterializePostgreSQLConsumer::syncTables(std::shared_ptrgetHeader(), "postgresql replica table sync"); 
copyData(input, *block_io.out); - /// The next attempt to read data will start with actual_lsn, returned from advanceLSN. current_lsn acts as - /// a version for rows in RelplacingMergeTree table. current_lsn = advanceLSN(tx); buffer.columns = buffer.description.sample_block.cloneEmptyColumns(); } @@ -605,7 +605,6 @@ bool MaterializePostgreSQLConsumer::readFromReplicationSlot() slot_empty = false; current_lsn = (*row)[0]; lsn_value = getLSNValue(current_lsn); - LOG_DEBUG(log, "Current lsn: {}, value: {}", current_lsn, lsn_value); processReplicationMessage((*row)[1].c_str(), (*row)[1].size()); } diff --git a/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp b/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp index f9ed5eb9bbb..51b70bf1b5b 100644 --- a/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp +++ b/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp @@ -125,7 +125,7 @@ void PostgreSQLReplicationHandler::startSynchronization(bool throw_on_error) e.addMessage("while loading table {}.{}", remote_database_name, table_name); tryLogCurrentException(__PRETTY_FUNCTION__); - /// Throw in case of single MaterializePostgreSQL storage, becuase initial setup is done immediately + /// Throw in case of single MaterializePostgreSQL storage, because initial setup is done immediately /// (unlike database engine where it is done in a separate thread). if (throw_on_error) throw; diff --git a/src/TableFunctions/TableFunctionPostgreSQL.cpp b/src/TableFunctions/TableFunctionPostgreSQL.cpp index 54facb9ca0b..de04f1fa7bd 100644 --- a/src/TableFunctions/TableFunctionPostgreSQL.cpp +++ b/src/TableFunctions/TableFunctionPostgreSQL.cpp @@ -14,7 +14,6 @@ #include "registerTableFunctions.h" #include #include -#include namespace DB diff --git a/tests/integration/test_postgresql_replica_database_engine/test.py b/tests/integration/test_postgresql_replica_database_engine/test.py index abfb3427c99..2bf3673c3a5 100644 --- a/tests/integration/test_postgresql_replica_database_engine/test.py +++ b/tests/integration/test_postgresql_replica_database_engine/test.py @@ -9,6 +9,9 @@ from helpers.test_tools import assert_eq_with_retry from psycopg2.extensions import ISOLATION_LEVEL_AUTOCOMMIT from helpers.test_tools import TSV +from random import randrange +import threading + cluster = ClickHouseCluster(__file__) instance = cluster.add_instance('instance', main_configs = ['configs/log_conf.xml'], @@ -427,6 +430,79 @@ def test_table_schema_changes(started_cluster): instance.query("DROP DATABASE test_database") +@pytest.mark.timeout(120) +def test_random_queries(started_cluster): + instance.query("DROP DATABASE IF EXISTS test_database") + conn = get_postgres_conn(True) + cursor = conn.cursor() + NUM_TABLES = 5 + + for i in range(NUM_TABLES): + create_postgres_table(cursor, 'postgresql_replica_{}'.format(i)); + instance.query('INSERT INTO postgres_database.postgresql_replica_{} SELECT number, number from numbers(10000)'.format(i)) + n = [10000] + + query = ['DELETE FROM postgresql_replica_{} WHERE (value*value) % 3 = 0;', + 'UPDATE postgresql_replica_{} SET value = value - 125 WHERE key > 6000;', + 'DELETE FROM postgresql_replica_{} WHERE key % 10 = 0;', + 'UPDATE postgresql_replica_{} SET value = value*value WHERE key < 5000;', + 'DELETE FROM postgresql_replica_{} WHERE value % 2 = 0;', + 'UPDATE postgresql_replica_{} SET value = value + 2000 WHERE key < 5000;', + 'DELETE FROM postgresql_replica_{} WHERE value % 3 = 0;', + 'UPDATE postgresql_replica_{} SET value = value * 2 WHERE key % 3 == 0;', + 
'DELETE FROM postgresql_replica_{} WHERE value % 9 = 2;', + 'UPDATE postgresql_replica_{} SET value = value + 2 WHERE key >= 5000;', + 'DELETE FROM postgresql_replica_{} WHERE value-3 = 3;'] + + def attack(thread_id): + print('thread {}'.format(thread_id)) + k = 10000 + for i in range(10): + query_id = random.randrange(0, len(query)-1) + table_id = random.randrange(0, 5) # num tables + + # random update / delete query + cursor.execute(query[query_id].format(table_id)) + print("table {} query {} ok".format(table_id, query_id)) + + # allow some thread to do inserts (not to violate key constraints) + if thread_id < 5: + instance.query('INSERT INTO postgres_database.postgresql_replica_{} SELECT {} + number, number from numbers(1000)'.format(thread_id, k)) + k += 1 + print("insert table {} ok".format(thread_id)) + + threads = [] + threads_num = 16 + + for i in range(threads_num): + threads.append(threading.Thread(target=attack, args=(i,))) + for thread in threads: + time.sleep(random.uniform(0, 1)) + thread.start() + + instance.query( + "CREATE DATABASE test_database ENGINE = MaterializePostgreSQL('postgres1:5432', 'postgres_database', 'postgres', 'mysecretpassword')") + + n[0] = 50000 + for table_id in range(NUM_TABLES): + n[0] += 1 + instance.query('INSERT INTO postgres_database.postgresql_replica_{} SELECT {} + number, number from numbers(5000)'.format(table_id, n[0])) + + for thread in threads: + thread.join() + + for i in range(NUM_TABLES): + check_tables_are_synchronized('postgresql_replica_{}'.format(i)); + count = instance.query('SELECT count() FROM test_database.postgresql_replica_{}'.format(i)) + print(count) + + for i in range(NUM_TABLES): + cursor.execute('drop table postgresql_replica_{};'.format(i)) + + instance.query("DROP DATABASE test_database") + assert 'test_database' not in instance.query('SHOW DATABASES') + + if __name__ == '__main__': cluster.start() input("Cluster created, press any key to destroy...") From 626e87bae50c82380a19795fc18f2ad00d407b7a Mon Sep 17 00:00:00 2001 From: kssenii Date: Mon, 10 May 2021 13:51:05 +0000 Subject: [PATCH 118/931] Some fixes --- src/Core/PostgreSQL/Utils.h | 13 ++++++++- .../DatabaseMaterializePostgreSQL.cpp | 7 ----- .../DatabaseMaterializePostgreSQL.h | 2 -- .../PostgreSQLReplicationHandler.cpp | 28 ++++++++++++------- .../PostgreSQL/PostgreSQLReplicationHandler.h | 2 +- .../test.py | 22 ++++++++++----- 6 files changed, 46 insertions(+), 28 deletions(-) diff --git a/src/Core/PostgreSQL/Utils.h b/src/Core/PostgreSQL/Utils.h index ccb133112d9..b3c035ebac4 100644 --- a/src/Core/PostgreSQL/Utils.h +++ b/src/Core/PostgreSQL/Utils.h @@ -3,6 +3,7 @@ #include // Y_IGNORE #include #include "Connection.h" +#include namespace pqxx { @@ -24,7 +25,17 @@ class Transaction public: Transaction(pqxx::connection & connection) : transaction(connection) {} - ~Transaction() { transaction.commit(); } + ~Transaction() + { + try + { + transaction.commit(); + } + catch (...) 
+ { + DB::tryLogCurrentException(__PRETTY_FUNCTION__); + } + } T & getRef() { return transaction; } diff --git a/src/Databases/PostgreSQL/DatabaseMaterializePostgreSQL.cpp b/src/Databases/PostgreSQL/DatabaseMaterializePostgreSQL.cpp index dda21937fa2..080069c3f44 100644 --- a/src/Databases/PostgreSQL/DatabaseMaterializePostgreSQL.cpp +++ b/src/Databases/PostgreSQL/DatabaseMaterializePostgreSQL.cpp @@ -92,13 +92,6 @@ void DatabaseMaterializePostgreSQL::startSynchronization() } -void DatabaseMaterializePostgreSQL::shutdown() -{ - if (replication_handler) - replication_handler->shutdown(); -} - - void DatabaseMaterializePostgreSQL::loadStoredObjects(ContextPtr local_context, bool has_force_restore_data_flag, bool force_attach) { DatabaseAtomic::loadStoredObjects(local_context, has_force_restore_data_flag, force_attach); diff --git a/src/Databases/PostgreSQL/DatabaseMaterializePostgreSQL.h b/src/Databases/PostgreSQL/DatabaseMaterializePostgreSQL.h index 97399adf1ff..6a62491f940 100644 --- a/src/Databases/PostgreSQL/DatabaseMaterializePostgreSQL.h +++ b/src/Databases/PostgreSQL/DatabaseMaterializePostgreSQL.h @@ -55,8 +55,6 @@ public: void drop(ContextPtr local_context) override; - void shutdown() override; - void stopReplication(); private: diff --git a/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp b/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp index 51b70bf1b5b..44eb48deec5 100644 --- a/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp +++ b/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp @@ -136,7 +136,7 @@ void PostgreSQLReplicationHandler::startSynchronization(bool throw_on_error) /// There is one replication slot for each replication handler. In case of MaterializePostgreSQL database engine, /// there is one replication slot per database. Its lifetime must be equal to the lifetime of replication handler. /// Recreation of a replication slot imposes reloading of all tables. 
- if (!isReplicationSlotExist(tx.getRef(), replication_slot, start_lsn)) + if (!isReplicationSlotExist(tx.getRef(), start_lsn, /* temporary */false)) { initial_sync(); } @@ -322,9 +322,15 @@ void PostgreSQLReplicationHandler::createPublicationIfNeeded(pqxx::work & tx, bo } -bool PostgreSQLReplicationHandler::isReplicationSlotExist(pqxx::nontransaction & tx, String & slot_name, String & start_lsn) +bool PostgreSQLReplicationHandler::isReplicationSlotExist(pqxx::nontransaction & tx, String & start_lsn, bool temporary) { - std::string query_str = fmt::format("SELECT active, restart_lsn, confirmed_flush_lsn FROM pg_replication_slots WHERE slot_name = '{}'", slot_name); + String slot_name; + if (temporary) + slot_name = replication_slot + "_tmp"; + else + slot_name = replication_slot; + + String query_str = fmt::format("SELECT active, restart_lsn, confirmed_flush_lsn FROM pg_replication_slots WHERE slot_name = '{}'", slot_name); pqxx::result result{tx.exec(query_str)}; /// Replication slot does not exist @@ -343,9 +349,7 @@ bool PostgreSQLReplicationHandler::isReplicationSlotExist(pqxx::nontransaction & void PostgreSQLReplicationHandler::createReplicationSlot( pqxx::nontransaction & tx, String & start_lsn, String & snapshot_name, bool temporary) { - std::string query_str; - - std::string slot_name; + String query_str, slot_name; if (temporary) slot_name = replication_slot + "_tmp"; else @@ -395,8 +399,10 @@ void PostgreSQLReplicationHandler::shutdownFinal() postgres::Transaction tx(connection->getRef()); dropPublication(tx.getRef()); String last_committed_lsn; - if (isReplicationSlotExist(tx.getRef(), replication_slot, last_committed_lsn)) - dropReplicationSlot(tx.getRef()); + if (isReplicationSlotExist(tx.getRef(), last_committed_lsn, /* temporary */false)) + dropReplicationSlot(tx.getRef(), /* temporary */false); + if (isReplicationSlotExist(tx.getRef(), last_committed_lsn, /* temporary */true)) + dropReplicationSlot(tx.getRef(), /* temporary */true); } @@ -453,8 +459,10 @@ void PostgreSQLReplicationHandler::reloadFromSnapshot(const std::vector tx(replication_connection.getRef()); - std::string snapshot_name, start_lsn; - createReplicationSlot(tx.getRef(), start_lsn, snapshot_name, true); + String snapshot_name, start_lsn; + if (isReplicationSlotExist(tx.getRef(), start_lsn, /* temporary */true)) + dropReplicationSlot(tx.getRef(), /* temporary */true); + createReplicationSlot(tx.getRef(), start_lsn, snapshot_name, /* temporary */true); for (const auto & [relation_id, table_name] : relation_data) { diff --git a/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.h b/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.h index 7cf475603b1..61132d0c0fc 100644 --- a/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.h +++ b/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.h @@ -61,7 +61,7 @@ private: /// Methods to manage Replication Slots. 
- bool isReplicationSlotExist(pqxx::nontransaction & tx, String & slot_name, String & start_lsn); + bool isReplicationSlotExist(pqxx::nontransaction & tx, String & start_lsn, bool temporary = false); void createReplicationSlot(pqxx::nontransaction & tx, String & start_lsn, String & snapshot_name, bool temporary = false); diff --git a/tests/integration/test_postgresql_replica_database_engine/test.py b/tests/integration/test_postgresql_replica_database_engine/test.py index 2bf3673c3a5..e641523f9b1 100644 --- a/tests/integration/test_postgresql_replica_database_engine/test.py +++ b/tests/integration/test_postgresql_replica_database_engine/test.py @@ -443,16 +443,16 @@ def test_random_queries(started_cluster): n = [10000] query = ['DELETE FROM postgresql_replica_{} WHERE (value*value) % 3 = 0;', - 'UPDATE postgresql_replica_{} SET value = value - 125 WHERE key > 6000;', + 'UPDATE postgresql_replica_{} SET value = value - 125 WHERE key % 2 = 0;', 'DELETE FROM postgresql_replica_{} WHERE key % 10 = 0;', - 'UPDATE postgresql_replica_{} SET value = value*value WHERE key < 5000;', + 'UPDATE postgresql_replica_{} SET value = value*value WHERE key % 2 = 1;', 'DELETE FROM postgresql_replica_{} WHERE value % 2 = 0;', - 'UPDATE postgresql_replica_{} SET value = value + 2000 WHERE key < 5000;', + 'UPDATE postgresql_replica_{} SET value = value + 2000 WHERE key % 5 = 0;', 'DELETE FROM postgresql_replica_{} WHERE value % 3 = 0;', - 'UPDATE postgresql_replica_{} SET value = value * 2 WHERE key % 3 == 0;', + 'UPDATE postgresql_replica_{} SET value = value * 2 WHERE key % 3 = 0;', 'DELETE FROM postgresql_replica_{} WHERE value % 9 = 2;', - 'UPDATE postgresql_replica_{} SET value = value + 2 WHERE key >= 5000;', - 'DELETE FROM postgresql_replica_{} WHERE value-3 = 3;'] + 'UPDATE postgresql_replica_{} SET value = value + 2 WHERE key % 3 = 1;', + 'DELETE FROM postgresql_replica_{} WHERE value%5 = 0;'] def attack(thread_id): print('thread {}'.format(thread_id)) @@ -467,10 +467,17 @@ def test_random_queries(started_cluster): # allow some thread to do inserts (not to violate key constraints) if thread_id < 5: - instance.query('INSERT INTO postgres_database.postgresql_replica_{} SELECT {} + number, number from numbers(1000)'.format(thread_id, k)) + print("try insert table {}".format(thread_id)) + instance.query('INSERT INTO postgres_database.postgresql_replica_{} SELECT {}*10000*({} + number), number from numbers(1000)'.format(i, thread_id, k)) k += 1 print("insert table {} ok".format(thread_id)) + if i == 5: + # also change primary key value + print("try update primary key {}".format(thread_id)) + cursor.execute("UPDATE postgresql_replica_{} SET key=key%100000+100000*{} WHERE key%{}=0".format(thread_id, i+1, i+1)) + print("update primary key {} ok".format(thread_id)) + threads = [] threads_num = 16 @@ -487,6 +494,7 @@ def test_random_queries(started_cluster): for table_id in range(NUM_TABLES): n[0] += 1 instance.query('INSERT INTO postgres_database.postgresql_replica_{} SELECT {} + number, number from numbers(5000)'.format(table_id, n[0])) + #cursor.execute("UPDATE postgresql_replica_{} SET key=key%100000+100000*{} WHERE key%{}=0".format(table_id, table_id+1, table_id+1)) for thread in threads: thread.join() From bd4a0934e4459158efdc29c9717a0a499c145bf5 Mon Sep 17 00:00:00 2001 From: kssenii Date: Mon, 10 May 2021 14:51:17 +0000 Subject: [PATCH 119/931] Add one more test --- .../test.py | 56 +++++++++++++++++-- 1 file changed, 52 insertions(+), 4 deletions(-) diff --git 
a/tests/integration/test_postgresql_replica_database_engine/test.py b/tests/integration/test_postgresql_replica_database_engine/test.py index e641523f9b1..f16e0125efd 100644 --- a/tests/integration/test_postgresql_replica_database_engine/test.py +++ b/tests/integration/test_postgresql_replica_database_engine/test.py @@ -31,14 +31,15 @@ postgres_table_template_3 = """ key1 Integer NOT NULL, value1 Integer, key2 Integer NOT NULL, value2 Integer NOT NULL) """ -def get_postgres_conn(database=False): +def get_postgres_conn(database=False, auto_commit=True): if database == True: conn_string = "host='localhost' dbname='postgres_database' user='postgres' password='mysecretpassword'" else: conn_string = "host='localhost' user='postgres' password='mysecretpassword'" conn = psycopg2.connect(conn_string) - conn.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT) - conn.autocommit = True + if auto_commit: + conn.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT) + conn.autocommit = True return conn @@ -445,7 +446,7 @@ def test_random_queries(started_cluster): query = ['DELETE FROM postgresql_replica_{} WHERE (value*value) % 3 = 0;', 'UPDATE postgresql_replica_{} SET value = value - 125 WHERE key % 2 = 0;', 'DELETE FROM postgresql_replica_{} WHERE key % 10 = 0;', - 'UPDATE postgresql_replica_{} SET value = value*value WHERE key % 2 = 1;', + 'UPDATE postgresql_replica_{} SET value = value*5 WHERE key % 2 = 1;', 'DELETE FROM postgresql_replica_{} WHERE value % 2 = 0;', 'UPDATE postgresql_replica_{} SET value = value + 2000 WHERE key % 5 = 0;', 'DELETE FROM postgresql_replica_{} WHERE value % 3 = 0;', @@ -511,6 +512,53 @@ def test_random_queries(started_cluster): assert 'test_database' not in instance.query('SHOW DATABASES') +@pytest.mark.timeout(120) +def test_single_transaction(started_cluster): + instance.query("DROP DATABASE IF EXISTS test_database") + conn = get_postgres_conn(database=True, auto_commit=False) + cursor = conn.cursor() + + create_postgres_table(cursor, 'postgresql_replica_0'); + conn.commit() + instance.query( + "CREATE DATABASE test_database ENGINE = MaterializePostgreSQL('postgres1:5432', 'postgres_database', 'postgres', 'mysecretpassword')") + assert_nested_table_is_created('postgresql_replica_0') + + queries = [ + 'INSERT INTO postgresql_replica_{} select i, i from generate_series(0, 10000) as t(i);', + 'DELETE FROM postgresql_replica_{} WHERE (value*value) % 3 = 0;', + 'UPDATE postgresql_replica_{} SET value = value - 125 WHERE key % 2 = 0;', + "UPDATE postgresql_replica_{} SET key=key+20000 WHERE key%2=0", + 'INSERT INTO postgresql_replica_{} select i, i from generate_series(40000, 50000) as t(i);', + 'DELETE FROM postgresql_replica_{} WHERE key % 10 = 0;', + 'UPDATE postgresql_replica_{} SET value = value + 101 WHERE key % 2 = 1;', + "UPDATE postgresql_replica_{} SET key=key+80000 WHERE key%2=1", + 'DELETE FROM postgresql_replica_{} WHERE value % 2 = 0;', + 'UPDATE postgresql_replica_{} SET value = value + 2000 WHERE key % 5 = 0;', + 'INSERT INTO postgresql_replica_{} select i, i from generate_series(200000, 250000) as t(i);', + 'DELETE FROM postgresql_replica_{} WHERE value % 3 = 0;', + 'UPDATE postgresql_replica_{} SET value = value * 2 WHERE key % 3 = 0;', + "UPDATE postgresql_replica_{} SET key=key+500000 WHERE key%2=1", + 'INSERT INTO postgresql_replica_{} select i, i from generate_series(1000000, 1050000) as t(i);', + 'DELETE FROM postgresql_replica_{} WHERE value % 9 = 2;', + "UPDATE postgresql_replica_{} SET key=key+10000000", + 'UPDATE postgresql_replica_{} SET value = value + 
2 WHERE key % 3 = 1;', + 'DELETE FROM postgresql_replica_{} WHERE value%5 = 0;'] + + for query in queries: + print('query {}'.format(query)) + cursor.execute(query.format(0)) + + time.sleep(5) + result = instance.query("select count() from test_database.postgresql_replica_0") + # no commit yet + assert(int(result) == 0) + + conn.commit() + check_tables_are_synchronized('postgresql_replica_{}'.format(0)); + instance.query("DROP DATABASE test_database") + + if __name__ == '__main__': cluster.start() input("Cluster created, press any key to destroy...") From f570b1274e5ffb28e994ea90b4dfc218b55f209a Mon Sep 17 00:00:00 2001 From: elevankoff Date: Tue, 11 May 2021 11:53:25 +0000 Subject: [PATCH 120/931] Fix typo in the comment --- src/Common/ProcessorStatisticsOS.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Common/ProcessorStatisticsOS.h b/src/Common/ProcessorStatisticsOS.h index 123f9385113..20ba680b6dd 100644 --- a/src/Common/ProcessorStatisticsOS.h +++ b/src/Common/ProcessorStatisticsOS.h @@ -11,7 +11,7 @@ namespace DB { -/** Opens files: /proc/loadav, /proc/stat, /proc/cpuinfo. Keeps it open and reads processor statistics. +/** Opens files: /proc/loadavg, /proc/stat, /proc/cpuinfo. Keeps it open and reads processor statistics. * This is Linux specific. * See: man procfs */ From 407db17e2ef16b1611e9bd22992c6c6cc5c4a00b Mon Sep 17 00:00:00 2001 From: kssenii Date: Wed, 12 May 2021 21:05:43 +0000 Subject: [PATCH 121/931] Remove setting, use native transaction --- src/Core/PostgreSQL/Utils.h | 31 ---------- src/Core/Settings.h | 1 - .../PostgreSQL/DatabasePostgreSQL.cpp | 3 +- .../fetchPostgreSQLTableStructure.cpp | 17 ++--- .../fetchPostgreSQLTableStructure.h | 4 +- .../PostgreSQLReplicationHandler.cpp | 62 +++++++++++-------- 6 files changed, 49 insertions(+), 69 deletions(-) diff --git a/src/Core/PostgreSQL/Utils.h b/src/Core/PostgreSQL/Utils.h index b3c035ebac4..34d66fefb70 100644 --- a/src/Core/PostgreSQL/Utils.h +++ b/src/Core/PostgreSQL/Utils.h @@ -11,38 +11,7 @@ namespace pqxx using ReplicationTransaction = pqxx::transaction; } - namespace postgres { - ConnectionInfo formatConnectionString(String dbname, String host, UInt16 port, String user, String password); - -Connection createReplicationConnection(const ConnectionInfo & connection_info); - -template -class Transaction -{ -public: - Transaction(pqxx::connection & connection) : transaction(connection) {} - - ~Transaction() - { - try - { - transaction.commit(); - } - catch (...) - { - DB::tryLogCurrentException(__PRETTY_FUNCTION__); - } - } - - T & getRef() { return transaction; } - - void exec(const String & query) { transaction.exec(query); } - -private: - T transaction; -}; - } diff --git a/src/Core/Settings.h b/src/Core/Settings.h index 77bef1210ba..3c41ac6da0d 100644 --- a/src/Core/Settings.h +++ b/src/Core/Settings.h @@ -421,7 +421,6 @@ class IColumn; M(Bool, alter_partition_verbose_result, false, "Output information about affected parts. Currently works only for FREEZE and ATTACH commands.", 0) \ M(Bool, allow_experimental_database_materialize_mysql, false, "Allow to create database with Engine=MaterializeMySQL(...).", 0) \ M(Bool, allow_experimental_database_materialize_postgresql, false, "Allow to create database with Engine=MaterializePostgreSQL(...).", 0) \ - M(Bool, external_databases_use_nulls, true, "If set to false, external databases will use default values instead of NULLs. 
(Sopported for PostgreSQL/MaterializePostgreSQL database engine)", 0) \ M(Bool, system_events_show_zero_values, false, "Include all metrics, even with zero values", 0) \ M(MySQLDataTypesSupport, mysql_datatypes_support_level, 0, "Which MySQL types should be converted to corresponding ClickHouse types (rather than being represented as String). Can be empty or any combination of 'decimal' or 'datetime64'. When empty MySQL's DECIMAL and DATETIME/TIMESTAMP with non-zero precision are seen as String on ClickHouse's side.", 0) \ M(Bool, optimize_trivial_insert_select, true, "Optimize trivial 'INSERT INTO table SELECT ... FROM TABLES' query", 0) \ diff --git a/src/Databases/PostgreSQL/DatabasePostgreSQL.cpp b/src/Databases/PostgreSQL/DatabasePostgreSQL.cpp index 6d4206d1b99..346dca576b2 100644 --- a/src/Databases/PostgreSQL/DatabasePostgreSQL.cpp +++ b/src/Databases/PostgreSQL/DatabasePostgreSQL.cpp @@ -149,9 +149,8 @@ StoragePtr DatabasePostgreSQL::fetchTable(const String & table_name, ContextPtr if (!table_checked && !checkPostgresTable(table_name)) return StoragePtr{}; - auto use_nulls = local_context->getSettingsRef().external_databases_use_nulls; auto connection_holder = pool->get(); - auto columns = fetchPostgreSQLTableStructure(connection_holder->get(), doubleQuoteString(table_name), use_nulls).columns; + auto columns = fetchPostgreSQLTableStructure(connection_holder->get(), doubleQuoteString(table_name)).columns; if (!columns) return StoragePtr{}; diff --git a/src/Databases/PostgreSQL/fetchPostgreSQLTableStructure.cpp b/src/Databases/PostgreSQL/fetchPostgreSQLTableStructure.cpp index 199b66b7160..e9edf5b3f5f 100644 --- a/src/Databases/PostgreSQL/fetchPostgreSQLTableStructure.cpp +++ b/src/Databases/PostgreSQL/fetchPostgreSQLTableStructure.cpp @@ -135,7 +135,7 @@ std::shared_ptr readNamesAndTypesList( std::get<0>(row), /// column name convertPostgreSQLDataType( std::get<1>(row), /// data type - use_nulls && (std::get<2>(row) == "f"), /// 'f' means that postgres `not_null` is false + use_nulls && (std::get<2>(row) == "f"), /// 'f' means that postgres `not_null` is false == nullable std::get<3>(row)))); /// number of dimensions if data type is array } } @@ -213,18 +213,21 @@ PostgreSQLTableStructure fetchPostgreSQLTableStructure( } -PostgreSQLTableStructure fetchPostgreSQLTableStructure( - pqxx::connection & connection, const String & postgres_table_name, bool use_nulls) +PostgreSQLTableStructure fetchPostgreSQLTableStructure(pqxx::connection & connection, const String & postgres_table_name, bool use_nulls) { - postgres::Transaction tx(connection); - return fetchPostgreSQLTableStructure(tx.getRef(), postgres_table_name, use_nulls, false, false); + pqxx::ReadTransaction tx(connection); + auto result = fetchPostgreSQLTableStructure(tx, postgres_table_name, use_nulls, false, false); + tx.commit(); + return result; } std::unordered_set fetchPostgreSQLTablesList(pqxx::connection & connection) { - postgres::Transaction tx(connection); - return fetchPostgreSQLTablesList(tx.getRef()); + pqxx::ReadTransaction tx(connection); + auto result = fetchPostgreSQLTablesList(tx); + tx.commit(); + return result; } diff --git a/src/Databases/PostgreSQL/fetchPostgreSQLTableStructure.h b/src/Databases/PostgreSQL/fetchPostgreSQLTableStructure.h index 2b099f02440..07562cd69fa 100644 --- a/src/Databases/PostgreSQL/fetchPostgreSQLTableStructure.h +++ b/src/Databases/PostgreSQL/fetchPostgreSQLTableStructure.h @@ -24,11 +24,11 @@ using PostgreSQLTableStructurePtr = std::unique_ptr; std::unordered_set 
fetchPostgreSQLTablesList(pqxx::connection & connection); PostgreSQLTableStructure fetchPostgreSQLTableStructure( - pqxx::connection & connection, const String & postgres_table_name, bool use_nulls); + pqxx::connection & connection, const String & postgres_table_name, bool use_nulls = true); template PostgreSQLTableStructure fetchPostgreSQLTableStructure( - T & tx, const String & postgres_table_name, bool use_nulls, + T & tx, const String & postgres_table_name, bool use_nulls = true, bool with_primary_key = false, bool with_replica_identity_index = false); template diff --git a/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp b/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp index 44eb48deec5..412a38755e3 100644 --- a/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp +++ b/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp @@ -91,12 +91,13 @@ void PostgreSQLReplicationHandler::shutdown() void PostgreSQLReplicationHandler::startSynchronization(bool throw_on_error) { { - postgres::Transaction tx(connection->getRef()); - createPublicationIfNeeded(tx.getRef()); + pqxx::work tx(connection->getRef()); + createPublicationIfNeeded(tx); + tx.commit(); } postgres::Connection replication_connection(connection_info, /* replication */true); - postgres::Transaction tx(replication_connection.getRef()); + pqxx::nontransaction tx(replication_connection.getRef()); /// List of nested tables (table_name -> nested_storage), which is passed to replication consumer. std::unordered_map nested_storages; @@ -112,7 +113,7 @@ void PostgreSQLReplicationHandler::startSynchronization(bool throw_on_error) auto initial_sync = [&]() { - createReplicationSlot(tx.getRef(), start_lsn, snapshot_name); + createReplicationSlot(tx, start_lsn, snapshot_name); for (const auto & [table_name, storage] : materialized_storages) { @@ -136,7 +137,7 @@ void PostgreSQLReplicationHandler::startSynchronization(bool throw_on_error) /// There is one replication slot for each replication handler. In case of MaterializePostgreSQL database engine, /// there is one replication slot per database. Its lifetime must be equal to the lifetime of replication handler. /// Recreation of a replication slot imposes reloading of all tables. - if (!isReplicationSlotExist(tx.getRef(), start_lsn, /* temporary */false)) + if (!isReplicationSlotExist(tx, start_lsn, /* temporary */false)) { initial_sync(); } @@ -145,7 +146,7 @@ void PostgreSQLReplicationHandler::startSynchronization(bool throw_on_error) /// TODO: tests else if (new_publication_created) { - dropReplicationSlot(tx.getRef()); + dropReplicationSlot(tx); initial_sync(); } /// Synchronization and initial load already took place - do not create any new tables, just fetch StoragePtr's @@ -187,6 +188,8 @@ void PostgreSQLReplicationHandler::startSynchronization(bool throw_on_error) } } + tx.commit(); + /// Pass current connection to consumer. It is not std::moved implicitly, but a shared_ptr is passed. /// Consumer and replication handler are always executed one after another (not concurrently) and share the same connection. /// Handler uses it only for loadFromSnapshot and shutdown methods. 
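The hunks above and below replace the RAII `postgres::Transaction` wrapper, which committed in its destructor, with plain pqxx transactions that are committed explicitly: a commit that can fail should not run in a destructor, where the error can only be logged, while an explicit `tx.commit()` surfaces the failure at a well-defined point. Below is a minimal sketch of the idiom this series moves to, assuming libpqxx; the function names and queries are illustrative, not the actual handler methods.

``` cpp
#include <pqxx/pqxx>
#include <string>

/// Illustrative only: a regular transaction with an explicit commit,
/// so a failed commit propagates to the caller instead of being
/// swallowed inside a destructor.
void createPublicationSketch(pqxx::connection & connection)
{
    pqxx::work tx(connection);
    tx.exec("CREATE PUBLICATION ch_publication FOR ALL TABLES");
    tx.commit();
}

/// Illustrative only: a nontransaction runs each statement in
/// autocommit mode, which suits catalog lookups such as reading
/// confirmed_flush_lsn from pg_replication_slots.
std::string readConfirmedFlushLsnSketch(pqxx::connection & connection, const std::string & slot_name)
{
    pqxx::nontransaction tx(connection);
    pqxx::result result = tx.exec(
        "SELECT confirmed_flush_lsn FROM pg_replication_slots WHERE slot_name = " + tx.quote(slot_name));
    return result.empty() ? "" : result[0][0].as<std::string>();
}
```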
@@ -396,34 +399,41 @@ void PostgreSQLReplicationHandler::dropPublication(pqxx::nontransaction & tx) void PostgreSQLReplicationHandler::shutdownFinal() { - postgres::Transaction tx(connection->getRef()); - dropPublication(tx.getRef()); + pqxx::nontransaction tx(connection->getRef()); + dropPublication(tx); String last_committed_lsn; - if (isReplicationSlotExist(tx.getRef(), last_committed_lsn, /* temporary */false)) - dropReplicationSlot(tx.getRef(), /* temporary */false); - if (isReplicationSlotExist(tx.getRef(), last_committed_lsn, /* temporary */true)) - dropReplicationSlot(tx.getRef(), /* temporary */true); + if (isReplicationSlotExist(tx, last_committed_lsn, /* temporary */false)) + dropReplicationSlot(tx, /* temporary */false); + if (isReplicationSlotExist(tx, last_committed_lsn, /* temporary */true)) + dropReplicationSlot(tx, /* temporary */true); + tx.commit(); } /// Used by MaterializePostgreSQL database engine. NameSet PostgreSQLReplicationHandler::fetchRequiredTables(pqxx::connection & connection_) { - postgres::Transaction tx(connection_); - bool publication_exists = isPublicationExist(tx.getRef()); + pqxx::work tx(connection_); + bool publication_exists = isPublicationExist(tx); + NameSet result_tables; if (tables_list.empty() && !publication_exists) { /// Fetch all tables list from database. Publication does not exist yet, which means /// that no replication took place. Publication will be created in /// startSynchronization method. - return fetchPostgreSQLTablesList(tx.getRef()); + result_tables = fetchPostgreSQLTablesList(tx); + } + else + { + if (!publication_exists) + createPublicationIfNeeded(tx, /* create_without_check = */ true); + + result_tables = fetchTablesFromPublication(tx); } - if (!publication_exists) - createPublicationIfNeeded(tx.getRef(), /* create_without_check = */ true); - - return fetchTablesFromPublication(tx.getRef()); + tx.commit(); + return result_tables; } @@ -445,8 +455,7 @@ PostgreSQLTableStructurePtr PostgreSQLReplicationHandler::fetchTableStructure( if (!is_materialize_postgresql_database) return nullptr; - auto use_nulls = context->getSettingsRef().external_databases_use_nulls; - return std::make_unique(fetchPostgreSQLTableStructure(tx, table_name, use_nulls, true, true)); + return std::make_unique(fetchPostgreSQLTableStructure(tx, table_name, true, true, true)); } @@ -457,12 +466,12 @@ void PostgreSQLReplicationHandler::reloadFromSnapshot(const std::vector tx(replication_connection.getRef()); + pqxx::nontransaction tx(replication_connection.getRef()); String snapshot_name, start_lsn; - if (isReplicationSlotExist(tx.getRef(), start_lsn, /* temporary */true)) - dropReplicationSlot(tx.getRef(), /* temporary */true); - createReplicationSlot(tx.getRef(), start_lsn, snapshot_name, /* temporary */true); + if (isReplicationSlotExist(tx, start_lsn, /* temporary */true)) + dropReplicationSlot(tx, /* temporary */true); + createReplicationSlot(tx, start_lsn, snapshot_name, /* temporary */true); for (const auto & [relation_id, table_name] : relation_data) { @@ -519,7 +528,8 @@ void PostgreSQLReplicationHandler::reloadFromSnapshot(const std::vector Date: Wed, 12 May 2021 22:47:41 +0000 Subject: [PATCH 122/931] Add replication identifier, allow proper access to _version and _sign columns from nested --- .../DatabaseMaterializePostgreSQL.cpp | 1 + .../PostgreSQLReplicationHandler.cpp | 24 ++++-- .../PostgreSQL/PostgreSQLReplicationHandler.h | 1 + .../StorageMaterializePostgreSQL.cpp | 17 ++++- .../test_storage_postgresql_replica/test.py | 75 
+++++++++++++------ 5 files changed, 86 insertions(+), 32 deletions(-) diff --git a/src/Databases/PostgreSQL/DatabaseMaterializePostgreSQL.cpp b/src/Databases/PostgreSQL/DatabaseMaterializePostgreSQL.cpp index 080069c3f44..cbedc98fc3d 100644 --- a/src/Databases/PostgreSQL/DatabaseMaterializePostgreSQL.cpp +++ b/src/Databases/PostgreSQL/DatabaseMaterializePostgreSQL.cpp @@ -52,6 +52,7 @@ DatabaseMaterializePostgreSQL::DatabaseMaterializePostgreSQL( void DatabaseMaterializePostgreSQL::startSynchronization() { replication_handler = std::make_unique( + /* replication_identifier */database_name, remote_database_name, database_name, connection_info, diff --git a/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp b/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp index 412a38755e3..e42e70c50ef 100644 --- a/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp +++ b/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp @@ -22,6 +22,7 @@ namespace ErrorCodes } PostgreSQLReplicationHandler::PostgreSQLReplicationHandler( + const String & replication_identifier, const String & remote_database_name_, const String & current_database_name_, const postgres::ConnectionInfo & connection_info_, @@ -41,8 +42,8 @@ PostgreSQLReplicationHandler::PostgreSQLReplicationHandler( , tables_list(tables_list_) , connection(std::make_shared(connection_info_)) { - replication_slot = fmt::format("{}_ch_replication_slot", current_database_name); - publication_name = fmt::format("{}_ch_publication", current_database_name); + replication_slot = fmt::format("{}_ch_replication_slot", replication_identifier); + publication_name = fmt::format("{}_ch_publication", replication_identifier); startup_task = context->getSchedulePool().createTask("PostgreSQLReplicaStartup", [this]{ waitConnectionAndStart(); }); consumer_task = context->getSchedulePool().createTask("PostgreSQLReplicaStartup", [this]{ consumerFunc(); }); @@ -402,11 +403,20 @@ void PostgreSQLReplicationHandler::shutdownFinal() pqxx::nontransaction tx(connection->getRef()); dropPublication(tx); String last_committed_lsn; - if (isReplicationSlotExist(tx, last_committed_lsn, /* temporary */false)) - dropReplicationSlot(tx, /* temporary */false); - if (isReplicationSlotExist(tx, last_committed_lsn, /* temporary */true)) - dropReplicationSlot(tx, /* temporary */true); - tx.commit(); + try + { + if (isReplicationSlotExist(tx, last_committed_lsn, /* temporary */false)) + dropReplicationSlot(tx, /* temporary */false); + if (isReplicationSlotExist(tx, last_committed_lsn, /* temporary */true)) + dropReplicationSlot(tx, /* temporary */true); + tx.commit(); + } + catch (Exception & e) + { + e.addMessage("while dropping replication slot: {}", replication_slot); + LOG_ERROR(log, "Failed to drop replication slot: {}. 
It must be dropped manually.", replication_slot); + throw; + } } diff --git a/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.h b/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.h index 61132d0c0fc..1f8d25ab32d 100644 --- a/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.h +++ b/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.h @@ -19,6 +19,7 @@ class PostgreSQLReplicationHandler { public: PostgreSQLReplicationHandler( + const String & replication_identifier, const String & remote_database_name_, const String & current_database_name_, const postgres::ConnectionInfo & connection_info_, diff --git a/src/Storages/PostgreSQL/StorageMaterializePostgreSQL.cpp b/src/Storages/PostgreSQL/StorageMaterializePostgreSQL.cpp index 4a7c9655149..806b51bafba 100644 --- a/src/Storages/PostgreSQL/StorageMaterializePostgreSQL.cpp +++ b/src/Storages/PostgreSQL/StorageMaterializePostgreSQL.cpp @@ -63,7 +63,9 @@ StorageMaterializePostgreSQL::StorageMaterializePostgreSQL( setInMemoryMetadata(storage_metadata); + String replication_identifier = remote_database_name + "_" + remote_table_name_; replication_handler = std::make_unique( + replication_identifier, remote_database_name, table_id_.database_name, connection_info, @@ -351,11 +353,7 @@ ASTPtr StorageMaterializePostgreSQL::getCreateNestedTableQuery(PostgreSQLTableSt "No columns returned for table {}.{}", table_id.database_name, table_id.table_name); } - StorageInMemoryMetadata storage_metadata; - ordinary_columns_and_types = *table_structure->columns; - storage_metadata.setColumns(ColumnsDescription(ordinary_columns_and_types)); - setInMemoryMetadata(storage_metadata); if (!table_structure->primary_key_columns && !table_structure->replica_identity_columns) { @@ -406,6 +404,17 @@ ASTPtr StorageMaterializePostgreSQL::getCreateNestedTableQuery(PostgreSQLTableSt create_table_query->set(create_table_query->storage, storage); + /// Add columns _sign and _version, so that they can be accessed from nested ReplacingMergeTree table if needed. + /// TODO: add test for case of database engine, test same case after table reload. 
+ ordinary_columns_and_types.push_back({"_sign", std::make_shared()}); + ordinary_columns_and_types.push_back({"_version", std::make_shared()}); + + StorageInMemoryMetadata metadata; + metadata.setColumns(ColumnsDescription(ordinary_columns_and_types)); + metadata.setConstraints(metadata_snapshot->getConstraints()); + + setInMemoryMetadata(metadata); + return create_table_query; } diff --git a/tests/integration/test_storage_postgresql_replica/test.py b/tests/integration/test_storage_postgresql_replica/test.py index 20d21008629..53eedbc8b7d 100644 --- a/tests/integration/test_storage_postgresql_replica/test.py +++ b/tests/integration/test_storage_postgresql_replica/test.py @@ -77,8 +77,9 @@ def test_initial_load_from_snapshot(started_cluster): create_postgres_table(cursor, 'postgresql_replica'); instance.query("INSERT INTO postgres_database.postgresql_replica SELECT number, number from numbers(50)") + instance.query('DROP TABLE IF EXISTS test.postgresql_replica') instance.query(''' - CREATE TABLE test.postgresql_replica (key UInt64, value UInt64, _sign Int8 MATERIALIZED 1, _version UInt64 MATERIALIZED 1) + CREATE TABLE test.postgresql_replica (key UInt64, value UInt64) ENGINE = MaterializePostgreSQL( 'postgres1:5432', 'postgres_database', 'postgresql_replica', 'postgres', 'mysecretpassword') PRIMARY KEY key; @@ -100,8 +101,9 @@ def test_no_connection_at_startup(started_cluster): create_postgres_table(cursor, 'postgresql_replica'); instance.query("INSERT INTO postgres_database.postgresql_replica SELECT number, number from numbers(50)") + instance.query('DROP TABLE IF EXISTS test.postgresql_replica') instance.query(''' - CREATE TABLE test.postgresql_replica (key UInt64, value UInt64, _sign Int8 MATERIALIZED 1, _version UInt64 MATERIALIZED 1) + CREATE TABLE test.postgresql_replica (key UInt64, value UInt64) ENGINE = MaterializePostgreSQL( 'postgres1:5432', 'postgres_database', 'postgresql_replica', 'postgres', 'mysecretpassword') PRIMARY KEY key; @@ -132,8 +134,9 @@ def test_detach_attach_is_ok(started_cluster): create_postgres_table(cursor, 'postgresql_replica'); instance.query("INSERT INTO postgres_database.postgresql_replica SELECT number, number from numbers(50)") + instance.query('DROP TABLE IF EXISTS test.postgresql_replica') instance.query(''' - CREATE TABLE test.postgresql_replica (key UInt64, value UInt64, _sign Int8 MATERIALIZED 1, _version UInt64 MATERIALIZED 1) + CREATE TABLE test.postgresql_replica (key UInt64, value UInt64) ENGINE = MaterializePostgreSQL( 'postgres1:5432', 'postgres_database', 'postgresql_replica', 'postgres', 'mysecretpassword') PRIMARY KEY key; @@ -167,8 +170,9 @@ def test_replicating_insert_queries(started_cluster): instance.query("INSERT INTO postgres_database.postgresql_replica SELECT number, number from numbers(10)") + instance.query('DROP TABLE IF EXISTS test.postgresql_replica') instance.query(''' - CREATE TABLE test.postgresql_replica (key UInt64, value UInt64, _sign Int8 MATERIALIZED 1, _version UInt64 MATERIALIZED 1) + CREATE TABLE test.postgresql_replica (key UInt64, value UInt64) ENGINE = MaterializePostgreSQL( 'postgres1:5432', 'postgres_database', 'postgresql_replica', 'postgres', 'mysecretpassword') PRIMARY KEY key; @@ -208,8 +212,9 @@ def test_replicating_delete_queries(started_cluster): instance.query("INSERT INTO postgres_database.postgresql_replica SELECT number, number from numbers(50)") + instance.query('DROP TABLE IF EXISTS test.postgresql_replica') instance.query(''' - CREATE TABLE test.postgresql_replica (key UInt64, value UInt64, _sign 
Int8 MATERIALIZED 1, _version UInt64 MATERIALIZED 1) + CREATE TABLE test.postgresql_replica (key UInt64, value UInt64) ENGINE = MaterializePostgreSQL( 'postgres1:5432', 'postgres_database', 'postgresql_replica', 'postgres', 'mysecretpassword') PRIMARY KEY key; @@ -246,8 +251,9 @@ def test_replicating_update_queries(started_cluster): instance.query("INSERT INTO postgres_database.postgresql_replica SELECT number, number + 10 from numbers(50)") + instance.query('DROP TABLE IF EXISTS test.postgresql_replica') instance.query(''' - CREATE TABLE test.postgresql_replica (key UInt64, value UInt64, _sign Int8 MATERIALIZED 1, _version UInt64 MATERIALIZED 1) + CREATE TABLE test.postgresql_replica (key UInt64, value UInt64) ENGINE = MaterializePostgreSQL( 'postgres1:5432', 'postgres_database', 'postgresql_replica', 'postgres', 'mysecretpassword') PRIMARY KEY key; @@ -276,8 +282,9 @@ def test_resume_from_written_version(started_cluster): create_postgres_table(cursor, 'postgresql_replica'); instance.query("INSERT INTO postgres_database.postgresql_replica SELECT number, number + 10 from numbers(50)") + instance.query('DROP TABLE IF EXISTS test.postgresql_replica') instance.query(''' - CREATE TABLE test.postgresql_replica (key UInt64, value UInt64, _sign Int8 MATERIALIZED 1, _version UInt64 MATERIALIZED 1) + CREATE TABLE test.postgresql_replica (key UInt64, value UInt64) ENGINE = MaterializePostgreSQL( 'postgres1:5432', 'postgres_database', 'postgresql_replica', 'postgres', 'mysecretpassword') PRIMARY KEY key; @@ -318,12 +325,9 @@ def test_many_replication_messages(started_cluster): create_postgres_table(cursor, 'postgresql_replica'); instance.query("INSERT INTO postgres_database.postgresql_replica SELECT number, number from numbers(100000)") + instance.query('DROP TABLE IF EXISTS test.postgresql_replica') instance.query(''' - CREATE TABLE test.postgresql_replica ( - key UInt64, value UInt64, - _sign Int8 MATERIALIZED 1, - _version UInt64 MATERIALIZED 1, - PRIMARY KEY(key)) + CREATE TABLE test.postgresql_replica (key UInt64, value UInt64, PRIMARY KEY(key)) ENGINE = MaterializePostgreSQL( 'postgres1:5432', 'postgres_database', 'postgresql_replica', 'postgres', 'mysecretpassword') SETTINGS materialize_postgresql_max_block_size = 50000; @@ -376,8 +380,9 @@ def test_connection_loss(started_cluster): create_postgres_table(cursor, 'postgresql_replica'); instance.query("INSERT INTO postgres_database.postgresql_replica SELECT number, number from numbers(50)") + instance.query('DROP TABLE IF EXISTS test.postgresql_replica') instance.query(''' - CREATE TABLE test.postgresql_replica (key UInt64, value UInt64, _sign Int8 MATERIALIZED 1, _version UInt64 MATERIALIZED 1) + CREATE TABLE test.postgresql_replica (key UInt64, value UInt64) ENGINE = MaterializePostgreSQL( 'postgres1:5432', 'postgres_database', 'postgresql_replica', 'postgres', 'mysecretpassword') PRIMARY KEY key; @@ -412,8 +417,9 @@ def test_clickhouse_restart(started_cluster): create_postgres_table(cursor, 'postgresql_replica'); instance.query("INSERT INTO postgres_database.postgresql_replica SELECT number, number from numbers(50)") + instance.query('DROP TABLE IF EXISTS test.postgresql_replica') instance.query(''' - CREATE TABLE test.postgresql_replica (key UInt64, value UInt64, _sign Int8 MATERIALIZED 1, _version UInt64 MATERIALIZED 1) + CREATE TABLE test.postgresql_replica (key UInt64, value UInt64) ENGINE = MaterializePostgreSQL( 'postgres1:5432', 'postgres_database', 'postgresql_replica', 'postgres', 'mysecretpassword') PRIMARY KEY key; ''') @@ -439,32 
+445,59 @@ def test_rename_table(started_cluster): conn = get_postgres_conn(True) cursor = conn.cursor() create_postgres_table(cursor, 'postgresql_replica'); - instance.query("INSERT INTO postgres_database.postgresql_replica SELECT number, number from numbers(50)") + instance.query('DROP TABLE IF EXISTS test.postgresql_replica') instance.query(''' - CREATE TABLE test.postgresql_replica (key UInt64, value UInt64, _sign Int8 MATERIALIZED 1, _version UInt64 MATERIALIZED 1) + CREATE TABLE test.postgresql_replica (key UInt64, value UInt64) ENGINE = MaterializePostgreSQL( 'postgres1:5432', 'postgres_database', 'postgresql_replica', 'postgres', 'mysecretpassword') PRIMARY KEY key; ''') + instance.query("INSERT INTO postgres_database.postgresql_replica SELECT number, number from numbers(25)") + result = instance.query('SELECT count() FROM test.postgresql_replica;') - while int(result) != 50: + while int(result) != 25: time.sleep(0.5) result = instance.query('SELECT count() FROM test.postgresql_replica;') instance.query('RENAME TABLE test.postgresql_replica TO test.postgresql_replica_renamed') + assert(int(instance.query('SELECT count() FROM test.postgresql_replica_renamed;')) == 25) + + instance.query("INSERT INTO postgres_database.postgresql_replica SELECT number, number from numbers(25, 25)") result = instance.query('SELECT count() FROM test.postgresql_replica_renamed;') while int(result) != 50: time.sleep(0.5) result = instance.query('SELECT count() FROM test.postgresql_replica_renamed;') - instance.query("INSERT INTO postgres_database.postgresql_replica SELECT number, number from numbers(50, 50)") + result = instance.query('SELECT * FROM test.postgresql_replica_renamed ORDER BY key;') + postgresql_replica_check_result(result, True) + cursor.execute('DROP TABLE postgresql_replica;') + instance.query('DROP TABLE IF EXISTS test.postgresql_replica_renamed') - result = instance.query('SELECT count() FROM test.postgresql_replica_renamed;') - while int(result) != 100: + +def test_virtual_columns(started_cluster): + conn = get_postgres_conn(True) + cursor = conn.cursor() + create_postgres_table(cursor, 'postgresql_replica'); + + instance.query('DROP TABLE IF EXISTS test.postgresql_replica') + instance.query(''' + CREATE TABLE test.postgresql_replica (key UInt64, value UInt64) + ENGINE = MaterializePostgreSQL( + 'postgres1:5432', 'postgres_database', 'postgresql_replica', 'postgres', 'mysecretpassword') + PRIMARY KEY key; ''') + + instance.query("INSERT INTO postgres_database.postgresql_replica SELECT number, number from numbers(10)") + result = instance.query('SELECT count() FROM test.postgresql_replica;') + while int(result) != 10: time.sleep(0.5) - result = instance.query('SELECT count() FROM test.postgresql_replica_renamed;') + result = instance.query('SELECT count() FROM test.postgresql_replica;') + + # just check that it works, no check with `expected` because _version is taken as LSN, which will be different each time. 
+ result = instance.query('SELECT key, value, _sign, _version FROM test.postgresql_replica;') + print(result) + cursor.execute('DROP TABLE postgresql_replica;') if __name__ == '__main__': From 85bbfd54e818b21ddf19556770b11dbbe5fe781d Mon Sep 17 00:00:00 2001 From: kssenii Date: Thu, 13 May 2021 07:36:40 +0000 Subject: [PATCH 123/931] Fix bug --- .../integrations/materialize-postgresql.md | 13 ++++--- .../MaterializePostgreSQLConsumer.cpp | 5 ++- .../MaterializePostgreSQLConsumer.h | 3 ++ .../PostgreSQLReplicationHandler.cpp | 11 +++++- .../StorageMaterializePostgreSQL.cpp | 1 - .../test.py | 35 ++++++++++++++++++- 6 files changed, 60 insertions(+), 8 deletions(-) diff --git a/docs/en/engines/table-engines/integrations/materialize-postgresql.md b/docs/en/engines/table-engines/integrations/materialize-postgresql.md index c40ea6b72db..aba1a370792 100644 --- a/docs/en/engines/table-engines/integrations/materialize-postgresql.md +++ b/docs/en/engines/table-engines/integrations/materialize-postgresql.md @@ -8,7 +8,7 @@ toc_title: MateriaziePostgreSQL ## Creating a Table {#creating-a-table} ``` sql -CREATE TABLE test.postgresql_replica (key UInt64, value UInt64, _sign Int8 MATERIALIZED 1, _version UInt64 MATERIALIZED 1) +CREATE TABLE test.postgresql_replica (key UInt64, value UInt64) ENGINE = MaterializePostgreSQL('postgres1:5432', 'postgres_database', 'postgresql_replica', 'postgres_user', 'postgres_password') PRIMARY KEY key; ``` @@ -25,12 +25,17 @@ PRIMARY KEY key; ## Virtual columns {#creating-a-table} -- `_version` +- `_version` (`UInt64`) -- `_sign` +- `_sign` (`Int8`) + +These columns do not need to be added when the table is created. They are always accessible in a `SELECT` query. +The `_version` column equals the `LSN` position in the `WAL`, so it can be used to check how up-to-date the replication is. ``` sql -CREATE TABLE test.postgresql_replica (key UInt64, value UInt64, _sign Int8 MATERIALIZED 1, _version UInt64 MATERIALIZED 1) +CREATE TABLE test.postgresql_replica (key UInt64, value UInt64) ENGINE = MaterializePostgreSQL('postgres1:5432', 'postgres_database', 'postgresql_replica', 'postgres_user', 'postgres_password') PRIMARY KEY key; + +SELECT key, value, _version FROM test.postgresql_replica; ``` diff --git a/src/Storages/PostgreSQL/MaterializePostgreSQLConsumer.cpp b/src/Storages/PostgreSQL/MaterializePostgreSQLConsumer.cpp index 5e41e20550e..5c4fc27a334 100644 --- a/src/Storages/PostgreSQL/MaterializePostgreSQLConsumer.cpp +++ b/src/Storages/PostgreSQL/MaterializePostgreSQLConsumer.cpp @@ -51,6 +51,9 @@ void MaterializePostgreSQLConsumer::Buffer::createEmptyBuffer(StoragePtr storage { const auto storage_metadata = storage->getInMemoryMetadataPtr(); const Block sample_block = storage_metadata->getSampleBlock(); + + /// Need to clear the types, because description.init() appends to them (emplace_back) + description.types.clear(); description.init(sample_block); columns = description.sample_block.cloneEmptyColumns(); @@ -560,7 +563,7 @@ void MaterializePostgreSQLConsumer::markTableAsSkipped(Int32 relation_id, const if (allow_automatic_update) LOG_TRACE(log, "Table {} (relation_id: {}) is skipped temporarily. 
It will be reloaded in the background", relation_name, relation_id); else - LOG_WARNING(log, "Table {} (relation_id: {}) is skipped, because table schema has changed", relation_name); + LOG_WARNING(log, "Table {} (relation_id: {}) is skipped, because table schema has changed", relation_name, relation_id); } diff --git a/src/Storages/PostgreSQL/MaterializePostgreSQLConsumer.h b/src/Storages/PostgreSQL/MaterializePostgreSQLConsumer.h index 43ac919f119..00523ff0ea9 100644 --- a/src/Storages/PostgreSQL/MaterializePostgreSQLConsumer.h +++ b/src/Storages/PostgreSQL/MaterializePostgreSQLConsumer.h @@ -51,7 +51,10 @@ private: { ExternalResultDescription description; MutableColumns columns; + + /// Needed to pass the columns list to the insert query in syncTables(). std::shared_ptr columnsAST; + /// Needed by the insertPostgreSQLValue() method to parse arrays std::unordered_map array_info; diff --git a/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp b/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp index e42e70c50ef..5f1a62d8086 100644 --- a/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp +++ b/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp @@ -523,7 +523,16 @@ void PostgreSQLReplicationHandler::reloadFromSnapshot(const std::vectorsetNestedStorageID(nested_table_id); nested_storage = materialized_storage->prepare(); - LOG_TRACE(log, "Updated table {}.{} ({})", nested_table_id.database_name, nested_table_id.table_name, toString(nested_table_id.uuid)); + + auto nested_storage_metadata = nested_storage->getInMemoryMetadataPtr(); + auto nested_sample_block = nested_storage_metadata->getSampleBlock(); + LOG_TRACE(log, "Updated table {}.{} ({}). New structure: {}", + nested_table_id.database_name, nested_table_id.table_name, toString(nested_table_id.uuid), nested_sample_block.dumpStructure()); + + auto materialized_storage_metadata = materialized_storage->getInMemoryMetadataPtr(); + auto materialized_sample_block = materialized_storage_metadata->getSampleBlock(); + + assertBlocksHaveEqualStructure(nested_sample_block, materialized_sample_block, "while reloading table in the background"); /// Pass pointer to new nested table into replication consumer, remove current table from skip list and set start lsn position. consumer->updateNested(table_name, nested_storage, relation_id, start_lsn); diff --git a/src/Storages/PostgreSQL/StorageMaterializePostgreSQL.cpp b/src/Storages/PostgreSQL/StorageMaterializePostgreSQL.cpp index 806b51bafba..7c20b49897f 100644 --- a/src/Storages/PostgreSQL/StorageMaterializePostgreSQL.cpp +++ b/src/Storages/PostgreSQL/StorageMaterializePostgreSQL.cpp @@ -405,7 +405,6 @@ ASTPtr StorageMaterializePostgreSQL::getCreateNestedTableQuery(PostgreSQLTableSt create_table_query->set(create_table_query->storage, storage); /// Add columns _sign and _version, so that they can be accessed from nested ReplacingMergeTree table if needed. 
diff --git a/tests/integration/test_postgresql_replica_database_engine/test.py b/tests/integration/test_postgresql_replica_database_engine/test.py
index f16e0125efd..3526cac57e7 100644
--- a/tests/integration/test_postgresql_replica_database_engine/test.py
+++ b/tests/integration/test_postgresql_replica_database_engine/test.py
@@ -555,7 +555,40 @@ def test_single_transaction(started_cluster):
         assert(int(result) == 0)

     conn.commit()
-    check_tables_are_synchronized('postgresql_replica_{}'.format(0));
+    check_tables_are_synchronized('postgresql_replica_0');
+    instance.query("DROP DATABASE test_database")
+
+
+def test_virtual_columns(started_cluster):
+    conn = get_postgres_conn(True)
+    cursor = conn.cursor()
+    create_postgres_table(cursor, 'postgresql_replica_0');
+
+    instance.query(
+            """CREATE DATABASE test_database
+            ENGINE = MaterializePostgreSQL('postgres1:5432', 'postgres_database', 'postgres', 'mysecretpassword')
+            SETTINGS materialize_postgresql_allow_automatic_update = 1; """)
+    assert_nested_table_is_created('postgresql_replica_0')
+    instance.query("INSERT INTO postgres_database.postgresql_replica_0 SELECT number, number from numbers(10)")
+    check_tables_are_synchronized('postgresql_replica_0');
+
+    # just check that it works, no check with `expected` because _version is taken as LSN, which will be different each time.
+    result = instance.query('SELECT key, value, _sign, _version FROM test_database.postgresql_replica_0;')
+    print(result)
+
+    cursor.execute("ALTER TABLE postgresql_replica_0 ADD COLUMN value2 integer")
+    instance.query("INSERT INTO postgres_database.postgresql_replica_0 SELECT number, number, number from numbers(10, 10)")
+    check_tables_are_synchronized('postgresql_replica_0');
+
+    result = instance.query('SELECT key, value, value2, _sign, _version FROM test_database.postgresql_replica_0;')
+    print(result)
+
+    instance.query("INSERT INTO postgres_database.postgresql_replica_0 SELECT number, number, number from numbers(20, 10)")
+    check_tables_are_synchronized('postgresql_replica_0');
+
+    result = instance.query('SELECT key, value, value2, _sign, _version FROM test_database.postgresql_replica_0;')
+    print(result)

     instance.query("DROP DATABASE test_database")

From 701ff6d17dd2ccd0f80dd3253e5d4cc23e6710a6 Mon Sep 17 00:00:00 2001
From: vdimir
Date: Thu, 13 May 2021 17:21:55 +0300
Subject: [PATCH 124/931] Save join settings for view

---
 src/Storages/StorageView.cpp                       | 11 ++++++++---
 src/Storages/StorageView.h                         |  5 ++++-
 src/TableFunctions/TableFunctionView.cpp           |  2 +-
 .../01866_view_persist_settings.reference          |  8 ++++++++
 .../0_stateless/01866_view_persist_settings.sql    | 16 ++++++++++++++++
 5 files changed, 37 insertions(+), 5 deletions(-)
 create mode 100644 tests/queries/0_stateless/01866_view_persist_settings.reference
 create mode 100644 tests/queries/0_stateless/01866_view_persist_settings.sql

diff --git a/src/Storages/StorageView.cpp b/src/Storages/StorageView.cpp
index 75bd4b2967f..113cd0b6233 100644
--- a/src/Storages/StorageView.cpp
+++ b/src/Storages/StorageView.cpp
@@ -33,8 +33,10 @@ namespace ErrorCodes
 StorageView::StorageView(
     const StorageID & table_id_,
     const ASTCreateQuery & query,
-    const ColumnsDescription & columns_)
+    const ColumnsDescription & columns_,
+    const Settings & settings)
     : IStorage(table_id_)
+    , settings_changes{{"join_use_nulls", Field(settings.join_use_nulls)}}
 {
     StorageInMemoryMetadata 
storage_metadata; storage_metadata.setColumns(columns_); @@ -85,7 +87,10 @@ void StorageView::read( current_inner_query = query_info.view_query->clone(); } - InterpreterSelectWithUnionQuery interpreter(current_inner_query, context, {}, column_names); + auto modified_context = Context::createCopy(context); + modified_context->applySettingsChanges(settings_changes); + + InterpreterSelectWithUnionQuery interpreter(current_inner_query, modified_context, {}, column_names); interpreter.buildQueryPlan(query_plan); /// It's expected that the columns read from storage are not constant. @@ -173,7 +178,7 @@ void registerStorageView(StorageFactory & factory) if (args.query.storage) throw Exception("Specifying ENGINE is not allowed for a View", ErrorCodes::INCORRECT_QUERY); - return StorageView::create(args.table_id, args.query, args.columns); + return StorageView::create(args.table_id, args.query, args.columns, args.getLocalContext()->getSettingsRef()); }); } diff --git a/src/Storages/StorageView.h b/src/Storages/StorageView.h index fa11472218d..b2a22544559 100644 --- a/src/Storages/StorageView.h +++ b/src/Storages/StorageView.h @@ -52,7 +52,10 @@ protected: StorageView( const StorageID & table_id_, const ASTCreateQuery & query, - const ColumnsDescription & columns_); + const ColumnsDescription & columns_, + const Settings & settings); + + SettingsChanges settings_changes; }; } diff --git a/src/TableFunctions/TableFunctionView.cpp b/src/TableFunctions/TableFunctionView.cpp index 3f51e0bbc95..d31592832bd 100644 --- a/src/TableFunctions/TableFunctionView.cpp +++ b/src/TableFunctions/TableFunctionView.cpp @@ -42,7 +42,7 @@ StoragePtr TableFunctionView::executeImpl( const ASTPtr & /*ast_function*/, ContextPtr context, const std::string & table_name, ColumnsDescription /*cached_columns*/) const { auto columns = getActualTableStructure(context); - auto res = StorageView::create(StorageID(getDatabaseName(), table_name), create, columns); + auto res = StorageView::create(StorageID(getDatabaseName(), table_name), create, columns, context->getSettingsRef()); res->startup(); return res; } diff --git a/tests/queries/0_stateless/01866_view_persist_settings.reference b/tests/queries/0_stateless/01866_view_persist_settings.reference new file mode 100644 index 00000000000..a51c6e2617d --- /dev/null +++ b/tests/queries/0_stateless/01866_view_persist_settings.reference @@ -0,0 +1,8 @@ +1 11 0 +1 12 0 +2 11 22 +2 11 23 +2 12 22 +2 12 23 +3 0 22 +3 0 23 diff --git a/tests/queries/0_stateless/01866_view_persist_settings.sql b/tests/queries/0_stateless/01866_view_persist_settings.sql new file mode 100644 index 00000000000..3e2dfb65dd2 --- /dev/null +++ b/tests/queries/0_stateless/01866_view_persist_settings.sql @@ -0,0 +1,16 @@ +DROP TABLE IF EXISTS some_test_view; + +SET join_use_nulls = 0; + +CREATE OR REPLACE VIEW some_test_view +AS +SELECT * FROM ( SELECT arrayJoin([1, 2]) AS a, arrayJoin([11, 12]) AS b ) AS t1 +FULL JOIN ( SELECT arrayJoin([2, 3]) AS a, arrayJoin([22, 23]) AS c ) AS t2 +USING a +ORDER BY a; + +SET join_use_nulls = 1; + +SELECT * from some_test_view; + +DROP TABLE some_test_view; From 6a24e9599f10877962f92ab5e38ae36cc9896e86 Mon Sep 17 00:00:00 2001 From: vdimir Date: Thu, 13 May 2021 17:22:53 +0300 Subject: [PATCH 125/931] Upd comment for makeConvertingActions --- src/Interpreters/ActionsDAG.h | 1 + 1 file changed, 1 insertion(+) diff --git a/src/Interpreters/ActionsDAG.h b/src/Interpreters/ActionsDAG.h index 9862cb8708e..708d7650dca 100644 --- a/src/Interpreters/ActionsDAG.h +++ 
b/src/Interpreters/ActionsDAG.h @@ -206,6 +206,7 @@ public: /// Conversion should be possible with only usage of CAST function and renames. /// @param ignore_constant_values - Do not check that constants are same. Use value from result_header. /// @param add_casted_columns - Create new columns with converted values instead of replacing original. + /// @param new_names - Output parameter for new column names when add_casted_columns is used. static ActionsDAGPtr makeConvertingActions( const ColumnsWithTypeAndName & source, const ColumnsWithTypeAndName & result, From 3193e5358f8eaa68e7dd37e273a0b5a03a7b9a7d Mon Sep 17 00:00:00 2001 From: vdimir Date: Fri, 14 May 2021 13:22:56 +0300 Subject: [PATCH 126/931] Save all changed settings in StorageView --- src/Storages/StorageView.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Storages/StorageView.cpp b/src/Storages/StorageView.cpp index 113cd0b6233..077d78606aa 100644 --- a/src/Storages/StorageView.cpp +++ b/src/Storages/StorageView.cpp @@ -36,7 +36,7 @@ StorageView::StorageView( const ColumnsDescription & columns_, const Settings & settings) : IStorage(table_id_) - , settings_changes{{"join_use_nulls", Field(settings.join_use_nulls)}} + , settings_changes(settings.changes()) { StorageInMemoryMetadata storage_metadata; storage_metadata.setColumns(columns_); From 4a48c483ccabc6b2d5199271764cdc5658ffd0f3 Mon Sep 17 00:00:00 2001 From: vdimir Date: Fri, 14 May 2021 13:43:17 +0300 Subject: [PATCH 127/931] Add cases to test view_persist_settings --- .../01866_view_persist_settings.reference | 26 ++++++++-- .../01866_view_persist_settings.sql | 51 +++++++++++++++---- 2 files changed, 63 insertions(+), 14 deletions(-) diff --git a/tests/queries/0_stateless/01866_view_persist_settings.reference b/tests/queries/0_stateless/01866_view_persist_settings.reference index a51c6e2617d..986b34a5e13 100644 --- a/tests/queries/0_stateless/01866_view_persist_settings.reference +++ b/tests/queries/0_stateless/01866_view_persist_settings.reference @@ -1,8 +1,24 @@ 1 11 0 -1 12 0 -2 11 22 -2 11 23 2 12 22 -2 12 23 -3 0 22 3 0 23 +1 11 0 +2 12 22 +3 0 23 +1 11 \N +2 12 22 +3 \N 23 +1 11 \N +2 12 22 +3 \N 23 +1 11 0 +2 12 22 +3 0 23 +1 11 0 +2 12 22 +3 0 23 +1 11 \N +2 12 22 +3 \N 23 +1 11 \N +2 12 22 +3 \N 23 diff --git a/tests/queries/0_stateless/01866_view_persist_settings.sql b/tests/queries/0_stateless/01866_view_persist_settings.sql index 3e2dfb65dd2..b2158d5d179 100644 --- a/tests/queries/0_stateless/01866_view_persist_settings.sql +++ b/tests/queries/0_stateless/01866_view_persist_settings.sql @@ -1,16 +1,49 @@ -DROP TABLE IF EXISTS some_test_view; +DROP TABLE IF EXISTS view_no_nulls; +DROP TABLE IF EXISTS view_no_nulls_set; +DROP TABLE IF EXISTS view_nulls_set; +DROP TABLE IF EXISTS view_nulls; SET join_use_nulls = 0; -CREATE OR REPLACE VIEW some_test_view -AS -SELECT * FROM ( SELECT arrayJoin([1, 2]) AS a, arrayJoin([11, 12]) AS b ) AS t1 -FULL JOIN ( SELECT arrayJoin([2, 3]) AS a, arrayJoin([22, 23]) AS c ) AS t2 -USING a -ORDER BY a; +CREATE OR REPLACE VIEW view_no_nulls AS +SELECT * FROM ( SELECT number + 1 AS a, number + 11 AS b FROM numbers(2) ) AS t1 +FULL JOIN ( SELECT number + 2 AS a, number + 22 AS c FROM numbers(2) ) AS t2 +USING a ORDER BY a; + +CREATE OR REPLACE VIEW view_nulls_set AS +SELECT * FROM ( SELECT number + 1 AS a, number + 11 AS b FROM numbers(2) ) AS t1 +FULL JOIN ( SELECT number + 2 AS a, number + 22 AS c FROM numbers(2) ) AS t2 +USING a ORDER BY a +SETTINGS join_use_nulls = 1; SET join_use_nulls = 1; -SELECT * 
from some_test_view; +CREATE OR REPLACE VIEW view_nulls AS +SELECT * FROM ( SELECT number + 1 AS a, number + 11 AS b FROM numbers(2) ) AS t1 +FULL JOIN ( SELECT number + 2 AS a, number + 22 AS c FROM numbers(2) ) AS t2 +USING a ORDER BY a; -DROP TABLE some_test_view; +CREATE OR REPLACE VIEW view_no_nulls_set AS +SELECT * FROM ( SELECT number + 1 AS a, number + 11 AS b FROM numbers(2) ) AS t1 +FULL JOIN ( SELECT number + 2 AS a, number + 22 AS c FROM numbers(2) ) AS t2 +USING a ORDER BY a +SETTINGS join_use_nulls = 0; + +SET join_use_nulls = 1; + +SELECT * from view_no_nulls; +SELECT * from view_no_nulls_set; +SELECT * from view_nulls_set; +SELECT * from view_nulls; + +SET join_use_nulls = 0; + +SELECT * from view_no_nulls; +SELECT * from view_no_nulls_set; +SELECT * from view_nulls_set; +SELECT * from view_nulls; + +DROP TABLE IF EXISTS view_no_nulls; +DROP TABLE IF EXISTS view_no_nulls_set; +DROP TABLE IF EXISTS view_nulls_set; +DROP TABLE IF EXISTS view_nulls; From 2d6d05386a6d403f7ea720168252e53545d2b180 Mon Sep 17 00:00:00 2001 From: "d.v.semenov" Date: Wed, 24 Mar 2021 22:47:28 +0300 Subject: [PATCH 128/931] Just Works Just works (remastered) First steps First steps fixed First steps first fails Research first steps Tokenizer created Sprint to the moon Rename Rename 2.0 Rename 3.0 Work in progress Update Oops Oops x2 Try this Now surely works Maybe now? Now? Cmake first try Restore to previous Cmake second try Make this work Correct mistakes Third try cmake Exclude simd Better Try Add std::cerr More std::cerr More and more std::cerr Maybe fix? A B C D E F G H I J K L M N O P AA AB AC AD AE AF AAA AAB AAC AAD AAF AAE AAF AAG AAH AAI AAJ AAK AAAA AAAB AAAC AAAD AAAE AAAF AAAG AAAH AAAAA AAAAB First try v2 First try v2.1 First try v2.2 First try v2.3 First try v2.4 First try v2.5 First try v2.6 First try v2.7 First try v2.8 First try v2.9 First try v2.10 First try v2.11 First try v2.12 First try v2.13 First try v2.14 First try v2.15 First try v2.16 First try v2.16 First try v2.17 First try v2.18 First try v2.19 First try v2.20 First try v2.21 First try v2.22 First try v2.23 First try v2.24 First try v2.25 First try v2.26 First try v2.27 First try v2.28 First try v2.29 First try v2.30 First try v2.31 First try v2.32 First try v2.33 First try v2.34 First try v2.35 First try v2.36 First try v2.37 Second try v2.00 Second try v2.01 Second try v2.02 Second try v2.03 Second try v2.04 Second try v2.05 Second try v2.06 Second try v2.07 Second try v2.08 Second try v2.09 Second try v2.10 Second try v2.11 Second try v2.12 Second try v2.13 Second try v2.14 Second try v2.15 Second try v2.16 Second try v2.17 Cleanup Link SQLJSON only in simdjson build Fix? Fix?1.1 Fix Revert "Fix" This reverts commit 9df7aa977c880ec130062bceece7e215190b4837. Revert "Fix?1.1" This reverts commit 37429ecc9003fd73c106344186e39ff6603dde6c. Revert "Fix?" This reverts commit c1236fb8f4b5a799a5564aecf81136301f226e33. Revert "Link SQLJSON only in simdjson build" This reverts commit 8795cd8b143f3cfd312ddbf1b98e10d0d6fcaf51. Revert "Cleanup" This reverts commit e100dbc545f54421276be2e5d44f99f52fe1d87c. 
Third try v2.0 Third try v2.1 Third try v2.2 Third try v2.3 Third try v2.4 Third try v2.5 Third try v2.6 Third try v2.7 Third try v2.8 Third try v2.9 Third try v2.10 Third try v2.11 Third try v2.12 Third try v2.13 Third try v2.14 Third try v2.15 Pre-intermediate touches v1.0 Pre-intermediate touches v1.1 Pre-intermediate touches v1.2 Pre-intermediate touches v1.3 Last changes --- src/Functions/CMakeLists.txt | 2 + src/Functions/FunctionSQLJSON.cpp | 19 ++ src/Functions/FunctionSQLJSON.h | 277 ++++++++++++++++++ src/Functions/FunctionsJSON.h | 2 - src/Functions/JSONPath/ASTs/ASTJSONPath.h | 26 ++ .../JSONPath/ASTs/ASTJSONPathMemberAccess.h | 25 ++ .../JSONPath/ASTs/ASTJSONPathQuery.h | 23 ++ src/Functions/JSONPath/ASTs/CMakeLists.txt | 8 + src/Functions/JSONPath/CMakeLists.txt | 8 + .../JSONPath/Generators/CMakeLists.txt | 8 + .../JSONPath/Generators/GeneratorJSONPath.h | 98 +++++++ .../JSONPath/Generators/IGenerator.h | 30 ++ .../JSONPath/Generators/IGenerator_fwd.h | 16 + src/Functions/JSONPath/Generators/IVisitor.h | 40 +++ .../Generators/VisitorJSONPathMemberAccess.h | 39 +++ .../JSONPath/Generators/VisitorStatus.h | 11 + src/Functions/JSONPath/Parsers/CMakeLists.txt | 8 + .../JSONPath/Parsers/ParserJSONPath.cpp | 34 +++ .../JSONPath/Parsers/ParserJSONPath.h | 22 ++ .../Parsers/ParserJSONPathMemberAccess.cpp | 42 +++ .../Parsers/ParserJSONPathMemberAccess.h | 12 + .../JSONPath/Parsers/ParserJSONPathQuery.cpp | 39 +++ .../JSONPath/Parsers/ParserJSONPathQuery.h | 17 ++ src/Functions/RapidJSONParser.h | 3 +- src/Functions/SimdJSONParser.h | 64 +++- src/Functions/registerFunctions.cpp | 2 + src/Parsers/Lexer.cpp | 3 + src/Parsers/Lexer.h | 1 + 28 files changed, 860 insertions(+), 19 deletions(-) create mode 100644 src/Functions/FunctionSQLJSON.cpp create mode 100644 src/Functions/FunctionSQLJSON.h create mode 100644 src/Functions/JSONPath/ASTs/ASTJSONPath.h create mode 100644 src/Functions/JSONPath/ASTs/ASTJSONPathMemberAccess.h create mode 100644 src/Functions/JSONPath/ASTs/ASTJSONPathQuery.h create mode 100644 src/Functions/JSONPath/ASTs/CMakeLists.txt create mode 100644 src/Functions/JSONPath/CMakeLists.txt create mode 100644 src/Functions/JSONPath/Generators/CMakeLists.txt create mode 100644 src/Functions/JSONPath/Generators/GeneratorJSONPath.h create mode 100644 src/Functions/JSONPath/Generators/IGenerator.h create mode 100644 src/Functions/JSONPath/Generators/IGenerator_fwd.h create mode 100644 src/Functions/JSONPath/Generators/IVisitor.h create mode 100644 src/Functions/JSONPath/Generators/VisitorJSONPathMemberAccess.h create mode 100644 src/Functions/JSONPath/Generators/VisitorStatus.h create mode 100644 src/Functions/JSONPath/Parsers/CMakeLists.txt create mode 100644 src/Functions/JSONPath/Parsers/ParserJSONPath.cpp create mode 100644 src/Functions/JSONPath/Parsers/ParserJSONPath.h create mode 100644 src/Functions/JSONPath/Parsers/ParserJSONPathMemberAccess.cpp create mode 100644 src/Functions/JSONPath/Parsers/ParserJSONPathMemberAccess.h create mode 100644 src/Functions/JSONPath/Parsers/ParserJSONPathQuery.cpp create mode 100644 src/Functions/JSONPath/Parsers/ParserJSONPathQuery.h diff --git a/src/Functions/CMakeLists.txt b/src/Functions/CMakeLists.txt index 1c3beb2e47d..24add0f4f0a 100644 --- a/src/Functions/CMakeLists.txt +++ b/src/Functions/CMakeLists.txt @@ -114,6 +114,8 @@ target_link_libraries(clickhouse_functions PRIVATE clickhouse_functions_url) add_subdirectory(array) target_link_libraries(clickhouse_functions PRIVATE clickhouse_functions_array) 
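Before the new sources below, a usage sketch of what this patch wires up. Both functions registered here are temporary test functions (their own comments say they will be removed in the final PR); the argument order is the JSONPath first, then the JSON document, and the sketch assumes a simdjson build, since the RapidJSON branch simply throws:

```sql
-- Hypothetical session against the two functions this patch registers.
SELECT SQLJSONTest('$', '{}');                            -- prints the test string 'I am working:-)'
SELECT SQLJSONMemberAccess('$.a.b', '{"a": {"b": 42}}');  -- walks the member accesses and returns 42
```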
+add_subdirectory(JSONPath) + if (USE_STATS) target_link_libraries(clickhouse_functions PRIVATE stats) endif() diff --git a/src/Functions/FunctionSQLJSON.cpp b/src/Functions/FunctionSQLJSON.cpp new file mode 100644 index 00000000000..ddcca12835f --- /dev/null +++ b/src/Functions/FunctionSQLJSON.cpp @@ -0,0 +1,19 @@ +#include +#include + + +namespace DB +{ +namespace ErrorCodes +{ +extern const int ILLEGAL_TYPE_OF_ARGUMENT; +} + + +void registerFunctionsSQLJSON(FunctionFactory & factory) +{ + factory.registerFunction>(); + factory.registerFunction>(); +} + +} diff --git a/src/Functions/FunctionSQLJSON.h b/src/Functions/FunctionSQLJSON.h new file mode 100644 index 00000000000..24749099e57 --- /dev/null +++ b/src/Functions/FunctionSQLJSON.h @@ -0,0 +1,277 @@ +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +//#include +#include + +#if !defined(ARCADIA_BUILD) +# include "config_functions.h" +#endif + +namespace DB +{ +namespace ErrorCodes +{ + extern const int ILLEGAL_COLUMN; + extern const int ILLEGAL_TYPE_OF_ARGUMENT; + extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH; + extern const int TOO_FEW_ARGUMENTS_FOR_FUNCTION; + extern const int BAD_ARGUMENTS; +} + +class FunctionSQLJSONHelpers +{ +public: + template typename Impl, class JSONParser> + class Executor + { + public: + static ColumnPtr run(const ColumnsWithTypeAndName & arguments, const DataTypePtr & result_type, size_t input_rows_count) + { + MutableColumnPtr to{result_type->createColumn()}; + to->reserve(input_rows_count); + + if (arguments.size() < 2) + { + throw Exception{"JSONPath functions require at least 2 arguments", ErrorCodes::TOO_FEW_ARGUMENTS_FOR_FUNCTION}; + } + + /// Check 1 argument: must be of type String (JSONPath) + const auto & first_column = arguments[0]; + if (!isString(first_column.type)) + { + throw Exception{ + "JSONPath functions require 1 argument to be JSONPath of type string, illegal type: " + first_column.type->getName(), + ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT}; + } + + /// Check 2 argument: must be of type String (JSON) + const auto & second_column = arguments[1]; + if (!isString(second_column.type)) + { + throw Exception{ + "JSONPath functions require 2 argument to be JSON of string, illegal type: " + second_column.type->getName(), + ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT}; + } + + /// If argument is successfully cast to (ColumnConst *) then it is quoted string + /// Example: + /// SomeFunction('some string argument') + /// + /// Otherwise it is a column + /// Example: + /// SomeFunction(database.table.column) + + /// Check 1 argument: must be const String (JSONPath) + const ColumnPtr & arg_jsonpath = first_column.column; + const auto * arg_jsonpath_const = typeid_cast(arg_jsonpath.get()); + if (!arg_jsonpath_const) + { + throw Exception{"JSONPath argument must be of type const String", ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT}; + } + /// Retrieve data from 1 argument + const auto * arg_jsonpath_string = typeid_cast(arg_jsonpath_const->getDataColumnPtr().get()); + if (!arg_jsonpath_string) + { + throw Exception{"Illegal column " + arg_jsonpath->getName(), ErrorCodes::ILLEGAL_COLUMN}; + } + + /// Check 2 argument: must be const or non-const String (JSON) + const ColumnPtr & arg_json = second_column.column; + const auto * col_json_const = typeid_cast(arg_json.get()); + const auto * 
col_json_string + = typeid_cast(col_json_const ? col_json_const->getDataColumnPtr().get() : arg_json.get()); + + /// Get data and offsets for 1 argument (JSONPath) + const ColumnString::Chars & chars_path = arg_jsonpath_string->getChars(); + const ColumnString::Offsets & offsets_path = arg_jsonpath_string->getOffsets(); + + /// Get data and offsets for 1 argument (JSON) + const char * query_begin = reinterpret_cast(&chars_path[0]); + const char * query_end = query_begin + offsets_path[0] - 1; + + /// Tokenize query + Tokens tokens(query_begin, query_end); + /// Max depth 0 indicates that depth is not limited + IParser::Pos token_iterator(tokens, 0); + + /// Parse query and create AST tree + Expected expected; + ASTPtr res; + ParserJSONPath parser; + const bool parse_res = parser.parse(token_iterator, res, expected); + if (!parse_res) + { + throw Exception{"Unable to parse JSONPath", ErrorCodes::BAD_ARGUMENTS}; + } + + /// Get data and offsets for 1 argument (JSON) + const ColumnString::Chars & chars_json = col_json_string->getChars(); + const ColumnString::Offsets & offsets_json = col_json_string->getOffsets(); + + JSONParser json_parser; + using Element = typename JSONParser::Element; + Element document; + bool document_ok = false; + + /// Parse JSON for every row + Impl impl; + for (const auto i : ext::range(0, input_rows_count)) + { + std::string_view json{ + reinterpret_cast(&chars_json[offsets_json[i - 1]]), offsets_json[i] - offsets_json[i - 1] - 1}; + document_ok = json_parser.parse(json, document); + + bool added_to_column = false; + if (document_ok) + { + added_to_column = impl.insertResultToColumn(*to, document, res); + } + if (!added_to_column) + { + to->insertDefault(); + } + } + return to; + } + }; + +private: +}; + +template typename Impl> +class FunctionSQLJSON : public IFunction +{ +public: + static FunctionPtr create(const Context & context_) { return std::make_shared(context_); } + FunctionSQLJSON(const Context & context_) : context(context_) { } + + static constexpr auto name = Name::name; + String getName() const override { return Name::name; } + bool isVariadic() const override { return true; } + size_t getNumberOfArguments() const override { return 0; } + bool useDefaultImplementationForConstants() const override { return true; } + + DataTypePtr getReturnTypeImpl(const ColumnsWithTypeAndName & arguments) const override + { + return Impl::getReturnType(Name::name, arguments); + } + + ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr & result_type, size_t input_rows_count) const override + { + /// Choose JSONParser. + /// 1. Lexer(path) -> Tokens + /// 2. Create ASTPtr + /// 3. Parser(Tokens, ASTPtr) -> complete AST + /// 4. 
Execute functions, call interpreter for each json (in function) +#if USE_SIMDJSON + if (context.getSettingsRef().allow_simdjson) + return FunctionSQLJSONHelpers::Executor::run(arguments, result_type, input_rows_count); +#endif + +#if USE_RAPIDJSON + throw Exception{"RapidJSON is not supported :(", ErrorCodes::BAD_ARGUMENTS}; +#else + return FunctionSQLJSONHelpers::Executor::run(arguments, result_type, input_rows_count); +#endif + } + +private: + const Context & context; +}; + +struct NameSQLJSONTest +{ + static constexpr auto name{"SQLJSONTest"}; +}; + +struct NameSQLJSONMemberAccess +{ + static constexpr auto name{"SQLJSONMemberAccess"}; +}; + +/** + * Function to test logic before function calling, will be removed in final PR + * @tparam JSONParser parser + */ +template +class SQLJSONTestImpl +{ +public: + using Element = typename JSONParser::Element; + + static DataTypePtr getReturnType(const char *, const ColumnsWithTypeAndName &) { return std::make_shared(); } + + static size_t getNumberOfIndexArguments(const ColumnsWithTypeAndName & arguments) { return arguments.size() - 1; } + + static bool insertResultToColumn(IColumn & dest, const Element &, ASTPtr &) + { + String str = "I am working:-)"; + ColumnString & col_str = assert_cast(dest); + col_str.insertData(str.data(), str.size()); + return true; + } +}; + +/** + * Function to test jsonpath member access, will be removed in final PR + * @tparam JSONParser parser + */ +template +class SQLJSONMemberAccessImpl +{ +public: + using Element = typename JSONParser::Element; + + static DataTypePtr getReturnType(const char *, const ColumnsWithTypeAndName &) { return std::make_shared(); } + + static size_t getNumberOfIndexArguments(const ColumnsWithTypeAndName & arguments) { return arguments.size() - 1; } + + static bool insertResultToColumn(IColumn & dest, const Element & root, ASTPtr & query_ptr) + { + GeneratorJSONPath generator_json_path(query_ptr); + Element current_element = root; + VisitorStatus status; + while ((status = generator_json_path.getNextItem(current_element)) == VisitorStatus::Ok) + { + /// No-op + } + if (status == VisitorStatus::Error) + { + return false; + } + ColumnString & col_str = assert_cast(dest); + std::stringstream ostr; // STYLE_CHECK_ALLOW_STD_STRING_STREAM + ostr << current_element.getElement(); + auto output_str = ostr.str(); + col_str.insertData(output_str.data(), output_str.size()); + return true; + } +}; + +} diff --git a/src/Functions/FunctionsJSON.h b/src/Functions/FunctionsJSON.h index f066bb1029a..581cc2015aa 100644 --- a/src/Functions/FunctionsJSON.h +++ b/src/Functions/FunctionsJSON.h @@ -80,8 +80,6 @@ public: const ColumnString::Chars & chars = col_json_string->getChars(); const ColumnString::Offsets & offsets = col_json_string->getOffsets(); - size_t num_index_arguments = Impl::getNumberOfIndexArguments(arguments); - std::vector moves = prepareMoves(Name::name, arguments, 1, num_index_arguments); /// Preallocate memory in parser if necessary. 
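Condensing steps 1-3 of the pipeline listed in `executeImpl` above into a stand-alone fragment; every call is one the executor itself makes, and only the path literal `"$.a.b"` is an invented example:

```cpp
/// Sketch of the query-side flow in FunctionSQLJSONHelpers::Executor::run.
const std::string query_text = "$.a.b";

/// 1. Lexer(path) -> Tokens
Tokens tokens(query_text.data(), query_text.data() + query_text.size());
IParser::Pos token_iterator(tokens, 0);   /// max depth 0: depth is not limited

/// 2-3. Parser(Tokens) -> complete AST
Expected expected;
ASTPtr res;
ParserJSONPath parser;
if (!parser.parse(token_iterator, res, expected))
    throw Exception{"Unable to parse JSONPath", ErrorCodes::BAD_ARGUMENTS};

/// res is now an ASTJSONPath holding an ASTJSONPathQuery whose children are
/// the member accesses "a" and "b"; step 4 walks it with GeneratorJSONPath.
```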
JSONParser parser; diff --git a/src/Functions/JSONPath/ASTs/ASTJSONPath.h b/src/Functions/JSONPath/ASTs/ASTJSONPath.h new file mode 100644 index 00000000000..cd73cd14257 --- /dev/null +++ b/src/Functions/JSONPath/ASTs/ASTJSONPath.h @@ -0,0 +1,26 @@ +#pragma once + +#include +#include + +namespace DB +{ +class ASTJSONPath : public IAST +{ +public: + String getID(char) const override + { + std::cerr << "in ASTJSONPath: getID\n"; + return "ASTJSONPath"; + } + + ASTPtr clone() const override + { + std::cerr << "in " << "ASTJSONPath" << ": clone\n"; + return std::make_shared(*this); + } + + ASTJSONPathQuery * jsonpath_query; +}; + +} diff --git a/src/Functions/JSONPath/ASTs/ASTJSONPathMemberAccess.h b/src/Functions/JSONPath/ASTs/ASTJSONPathMemberAccess.h new file mode 100644 index 00000000000..663859f566f --- /dev/null +++ b/src/Functions/JSONPath/ASTs/ASTJSONPathMemberAccess.h @@ -0,0 +1,25 @@ +#pragma once + +#include + +namespace DB +{ +class ASTJSONPathMemberAccess : public IAST +{ +public: + String getID(char) const override + { + return "ASTJSONPathMemberAccess"; + } + + ASTPtr clone() const override + { + return std::make_shared(*this); + } + +public: + /// Member name to lookup in json document (in path: $.some_key.another_key. ...) + String member_name; +}; + +} diff --git a/src/Functions/JSONPath/ASTs/ASTJSONPathQuery.h b/src/Functions/JSONPath/ASTs/ASTJSONPathQuery.h new file mode 100644 index 00000000000..6b952c2519d --- /dev/null +++ b/src/Functions/JSONPath/ASTs/ASTJSONPathQuery.h @@ -0,0 +1,23 @@ +#pragma once + +#include + +namespace DB +{ +class ASTJSONPathQuery : public IAST +{ +public: + String getID(char) const override + { + std::cerr << "in ASTJSONPathQuery: getID\n"; + return "ASTJSONPathQuery"; + } + + ASTPtr clone() const override + { + std::cerr << "in " << getID(' ') << ": clone\n"; + return std::make_shared(*this); + } +}; + +} diff --git a/src/Functions/JSONPath/ASTs/CMakeLists.txt b/src/Functions/JSONPath/ASTs/CMakeLists.txt new file mode 100644 index 00000000000..c671dbbc001 --- /dev/null +++ b/src/Functions/JSONPath/ASTs/CMakeLists.txt @@ -0,0 +1,8 @@ +include("${ClickHouse_SOURCE_DIR}/cmake/dbms_glob_sources.cmake") +add_headers_and_sources(clickhouse_functions_jsonpath_asts .) 
+add_library(clickhouse_functions_jsonpath_asts ${clickhouse_functions_jsonpath_asts_sources} ${clickhouse_functions_jsonpath_asts_headers}) +target_link_libraries(clickhouse_functions_jsonpath_asts PRIVATE dbms) + +if (STRIP_DEBUG_SYMBOLS_FUNCTIONS) + target_compile_options(clickhouse_functions_jsonpath_asts PRIVATE "-g0") +endif() \ No newline at end of file diff --git a/src/Functions/JSONPath/CMakeLists.txt b/src/Functions/JSONPath/CMakeLists.txt new file mode 100644 index 00000000000..8a46909f555 --- /dev/null +++ b/src/Functions/JSONPath/CMakeLists.txt @@ -0,0 +1,8 @@ +add_subdirectory(ASTs) +target_link_libraries(clickhouse_functions PRIVATE clickhouse_functions_jsonpath_asts) + +add_subdirectory(Generators) +target_link_libraries(clickhouse_functions PRIVATE clickhouse_functions_jsonpath_generators) + +add_subdirectory(Parsers) +target_link_libraries(clickhouse_functions PRIVATE clickhouse_functions_jsonpath_parsers) \ No newline at end of file diff --git a/src/Functions/JSONPath/Generators/CMakeLists.txt b/src/Functions/JSONPath/Generators/CMakeLists.txt new file mode 100644 index 00000000000..0d1a289e8b4 --- /dev/null +++ b/src/Functions/JSONPath/Generators/CMakeLists.txt @@ -0,0 +1,8 @@ +include("${ClickHouse_SOURCE_DIR}/cmake/dbms_glob_sources.cmake") +add_headers_and_sources(clickhouse_functions_jsonpath_generators .) +add_library(clickhouse_functions_jsonpath_generators ${clickhouse_functions_jsonpath_generators_sources} ${clickhouse_functions_jsonpath_generators_headers}) +target_link_libraries(clickhouse_functions_jsonpath_generators PRIVATE dbms) + +if (STRIP_DEBUG_SYMBOLS_FUNCTIONS) + target_compile_options(clickhouse_functions_jsonpath_generators PRIVATE "-g0") +endif() \ No newline at end of file diff --git a/src/Functions/JSONPath/Generators/GeneratorJSONPath.h b/src/Functions/JSONPath/Generators/GeneratorJSONPath.h new file mode 100644 index 00000000000..dd4354a4613 --- /dev/null +++ b/src/Functions/JSONPath/Generators/GeneratorJSONPath.h @@ -0,0 +1,98 @@ +#include +#include +#include + +#include + +#include + + +namespace DB +{ +namespace ErrorCodes +{ + extern const int LOGICAL_ERROR; +} + +template +class GeneratorJSONPath : public IGenerator +{ +public: + GeneratorJSONPath(ASTPtr query_ptr_) + { + query_ptr = query_ptr_; + const auto * path = query_ptr->as(); + if (!path) { + throw Exception("Invalid path", ErrorCodes::LOGICAL_ERROR); + } + const auto * query = path->jsonpath_query; + if (!path || !query) + { + throw Exception("Something went terribly wrong", ErrorCodes::LOGICAL_ERROR); + } + + for (auto child_ast : query->children) + { + if (child_ast->getID() == "ASTJSONPathMemberAccess") + { + auto member_access_generator = std::make_shared>(child_ast); + if (member_access_generator) { + visitors.push_back(member_access_generator); + } else { + throw Exception("member_access_generator could not be nullptr", ErrorCodes::LOGICAL_ERROR); + } + } + } + } + + const char * getName() const override { return "GeneratorJSONPath"; } + + /** + * The only generator which is called from JSONPath functions. 
+ * @param element root of JSON document + * @return is the generator exhausted + */ + VisitorStatus getNextItem(typename JSONParser::Element & element) override + { + if (visitors[current_visitor]->isExhausted()) { + if (!backtrace()) { + return VisitorStatus::Exhausted; + } + } + + /// Apply all non-exhausted visitors + for (int i = 0; i < current_visitor; ++i) { + VisitorStatus status = visitors[i]->apply(element); + /// on fail return immediately + if (status == VisitorStatus::Error) { + return status; + } + } + + /// Visit newly initialized (for the first time or through reinitialize) visitors + for (size_t i = current_visitor; i < visitors.size(); ++i) { + VisitorStatus status = visitors[i]->visit(element); + current_visitor = i; + /// on fail return immediately + if (status == VisitorStatus::Error) { + return status; + } + } + return VisitorStatus::Ok; + } + +private: + bool backtrace() { + while (current_visitor >= 0 && visitors[current_visitor]->isExhausted()) { + visitors[current_visitor]->reinitialize(); + current_visitor--; + } + return current_visitor >= 0; + } + + int current_visitor = 0; + ASTPtr query_ptr; + VisitorList visitors; +}; + +} // namespace DB diff --git a/src/Functions/JSONPath/Generators/IGenerator.h b/src/Functions/JSONPath/Generators/IGenerator.h new file mode 100644 index 00000000000..31d9e167f24 --- /dev/null +++ b/src/Functions/JSONPath/Generators/IGenerator.h @@ -0,0 +1,30 @@ +#pragma once + +#include +#include +#include + +namespace DB +{ + +template +class IGenerator +{ +public: + IGenerator() = default; + + virtual const char * getName() const = 0; + + /** + * Used to yield next element in JSONPath query. Does so by recursively calling getNextItem + * on its children Generators one by one. + * + * @param element to be extracted into + * @return true if generator is not exhausted + */ + virtual VisitorStatus getNextItem(typename JSONParser::Element & element) = 0; + + virtual ~IGenerator() = default; +}; + +} // namespace DB diff --git a/src/Functions/JSONPath/Generators/IGenerator_fwd.h b/src/Functions/JSONPath/Generators/IGenerator_fwd.h new file mode 100644 index 00000000000..27c3976b95b --- /dev/null +++ b/src/Functions/JSONPath/Generators/IGenerator_fwd.h @@ -0,0 +1,16 @@ +#pragma once + +#include + +namespace DB { + +template +class IGenerator; + +template +using IVisitorPtr = std::shared_ptr>; + +template +using VisitorList = std::vector>; + +} // namespace DB diff --git a/src/Functions/JSONPath/Generators/IVisitor.h b/src/Functions/JSONPath/Generators/IVisitor.h new file mode 100644 index 00000000000..fdd254478a5 --- /dev/null +++ b/src/Functions/JSONPath/Generators/IVisitor.h @@ -0,0 +1,40 @@ +#pragma once + +#include + +namespace DB { + +template +class IVisitor { +public: + /** + * Applies this visitor to document and mutates its state + * @param element simdjson element + */ + virtual VisitorStatus visit(typename JSONParser::Element & element) = 0; + + /** + * Applies this visitor to document, but does not mutate state + * @param element simdjson element + */ + virtual VisitorStatus apply(typename JSONParser::Element & element) const = 0; + + /** + * Restores visitor's initial state for later use + */ + virtual void reinitialize() = 0; + + bool isExhausted() { + return is_exhausted; + } + + void setExhausted(bool exhausted) { + is_exhausted = exhausted; + } + + virtual ~IVisitor() = default; +private: + bool is_exhausted = false; +}; + +} // namespace DB diff --git a/src/Functions/JSONPath/Generators/VisitorJSONPathMemberAccess.h 
b/src/Functions/JSONPath/Generators/VisitorJSONPathMemberAccess.h new file mode 100644 index 00000000000..50b814eeaeb --- /dev/null +++ b/src/Functions/JSONPath/Generators/VisitorJSONPathMemberAccess.h @@ -0,0 +1,39 @@ +#include +#include +#include + +namespace DB +{ +template +class VisitorJSONPathMemberAccess : public IVisitor +{ +public: + VisitorJSONPathMemberAccess(ASTPtr member_access_ptr_) : member_access_ptr(member_access_ptr_) { } + + VisitorStatus apply(typename JSONParser::Element & element) const override { + const auto * member_access = member_access_ptr->as(); + typename JSONParser::Element result; + bool result_ok = element.getObject().find(std::string_view(member_access->member_name), result); + if (result_ok) + { + element = result; + return VisitorStatus::Ok; + } + return VisitorStatus::Error; + } + + VisitorStatus visit(typename JSONParser::Element & element) override + { + this->setExhausted(true); + return apply(element); + } + + void reinitialize() override { + this->setExhausted(false); + } + +private: + ASTPtr member_access_ptr; +}; + +} // namespace DB diff --git a/src/Functions/JSONPath/Generators/VisitorStatus.h b/src/Functions/JSONPath/Generators/VisitorStatus.h new file mode 100644 index 00000000000..51d795efbf7 --- /dev/null +++ b/src/Functions/JSONPath/Generators/VisitorStatus.h @@ -0,0 +1,11 @@ +#pragma once + +namespace DB { + +enum VisitorStatus { + Ok, + Exhausted, + Error +}; + +} diff --git a/src/Functions/JSONPath/Parsers/CMakeLists.txt b/src/Functions/JSONPath/Parsers/CMakeLists.txt new file mode 100644 index 00000000000..f2f94298576 --- /dev/null +++ b/src/Functions/JSONPath/Parsers/CMakeLists.txt @@ -0,0 +1,8 @@ +include("${ClickHouse_SOURCE_DIR}/cmake/dbms_glob_sources.cmake") +add_headers_and_sources(clickhouse_functions_jsonpath_parsers .) 
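To make the control flow of `GeneratorJSONPath::getNextItem` and the `IVisitor` exhausted/reinitialize contract above easier to follow, here is a compilable toy model; it drops the templates and replaces the JSON element with an `int`, so it sketches only the iteration and backtracking logic, not the real classes:

```cpp
#include <iostream>
#include <vector>

enum VisitorStatus { Ok, Exhausted, Error };

/// Single-shot visitor, standing in for one member access.
struct Visitor
{
    bool exhausted = false;
    VisitorStatus apply(int & value) const { ++value; return Ok; }  /// re-apply without mutating state
    VisitorStatus visit(int & value) { exhausted = true; return apply(value); }
    void reinitialize() { exhausted = false; }
};

struct Generator
{
    std::vector<Visitor> visitors;
    int current_visitor = 0;

    /// Roll back exhausted visitors, as in GeneratorJSONPath::backtrace().
    bool backtrace()
    {
        while (current_visitor >= 0 && visitors[current_visitor].exhausted)
        {
            visitors[current_visitor].reinitialize();
            --current_visitor;
        }
        return current_visitor >= 0;
    }

    VisitorStatus getNextItem(int & value)
    {
        if (visitors[current_visitor].exhausted && !backtrace())
            return Exhausted;
        for (int i = 0; i < current_visitor; ++i)          /// re-apply settled visitors
            if (visitors[i].apply(value) == Error)
                return Error;
        for (size_t i = current_visitor; i < visitors.size(); ++i)  /// advance the rest
        {
            current_visitor = static_cast<int>(i);
            if (visitors[i].visit(value) == Error)
                return Error;
        }
        return Ok;
    }
};

int main()
{
    Generator g{{Visitor{}, Visitor{}}, 0};   /// "$.a.b": two member accesses
    int doc = 0;
    while (g.getNextItem(doc) == Ok)
        std::cout << "item: " << doc << '\n'; /// printed once: single-shot visitors yield one item
}
```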
+add_library(clickhouse_functions_jsonpath_parsers ${clickhouse_functions_jsonpath_parsers_sources} ${clickhouse_functions_jsonpath_parsers_headers}) +target_link_libraries(clickhouse_functions_jsonpath_parsers PRIVATE dbms) + +if (STRIP_DEBUG_SYMBOLS_FUNCTIONS) + target_compile_options(clickhouse_functions_jsonpath_parsers PRIVATE "-g0") +endif() \ No newline at end of file diff --git a/src/Functions/JSONPath/Parsers/ParserJSONPath.cpp b/src/Functions/JSONPath/Parsers/ParserJSONPath.cpp new file mode 100644 index 00000000000..bf62f44fade --- /dev/null +++ b/src/Functions/JSONPath/Parsers/ParserJSONPath.cpp @@ -0,0 +1,34 @@ +#include + +#include + +#include + +#include + +namespace DB +{ + +/** + * Entry parser for JSONPath + */ +bool ParserJSONPath::parseImpl(Pos & pos, ASTPtr & node, Expected & expected) +{ + auto ast_jsonpath = std::make_shared(); + ParserJSONPathQuery parser_jsonpath_query; + + /// Push back dot AST and brackets AST to query->children + ASTPtr query; + + bool res = parser_jsonpath_query.parse(pos, query, expected); + + if (res) { + /// Set ASTJSONPathQuery of ASTJSONPath + ast_jsonpath->set(ast_jsonpath->jsonpath_query, query); + } + + node = ast_jsonpath; + return res; +} + +} // namespace DB diff --git a/src/Functions/JSONPath/Parsers/ParserJSONPath.h b/src/Functions/JSONPath/Parsers/ParserJSONPath.h new file mode 100644 index 00000000000..5defc76b515 --- /dev/null +++ b/src/Functions/JSONPath/Parsers/ParserJSONPath.h @@ -0,0 +1,22 @@ +#pragma once + +#include + + +namespace DB +{ + +/** + * Entry parser for JSONPath + */ +class ParserJSONPath : public IParserBase +{ +private: + const char * getName() const override { return "ParserJSONPath"; } + bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected) override; + +public: + explicit ParserJSONPath() = default; +}; + +} diff --git a/src/Functions/JSONPath/Parsers/ParserJSONPathMemberAccess.cpp b/src/Functions/JSONPath/Parsers/ParserJSONPathMemberAccess.cpp new file mode 100644 index 00000000000..10ae128616b --- /dev/null +++ b/src/Functions/JSONPath/Parsers/ParserJSONPathMemberAccess.cpp @@ -0,0 +1,42 @@ +#include +#include + +#include +#include +#include + +namespace DB +{ + +/** + * + * @param pos token iterator + * @param node node of ASTJSONPathMemberAccess + * @param expected stuff for logging + * @return was parse successful + */ +bool ParserJSONPathMemberAccess::parseImpl(Pos & pos, ASTPtr & node, Expected & expected) +{ + auto member_access = std::make_shared(); + node = member_access; + if (pos->type != TokenType::Dot) { + return false; + } + ++pos; + + if (pos->type != TokenType::BareWord) { + return false; + } + ParserIdentifier name_p; + ASTPtr member_name; + if (!name_p.parse(pos, member_name, expected)) { + return false; + } + + if (!tryGetIdentifierNameInto(member_name, member_access->member_name)) { + return false; + } + return true; +} + +} diff --git a/src/Functions/JSONPath/Parsers/ParserJSONPathMemberAccess.h b/src/Functions/JSONPath/Parsers/ParserJSONPathMemberAccess.h new file mode 100644 index 00000000000..49fda6f1ac8 --- /dev/null +++ b/src/Functions/JSONPath/Parsers/ParserJSONPathMemberAccess.h @@ -0,0 +1,12 @@ +#include + +namespace DB +{ +class ParserJSONPathMemberAccess : public IParserBase +{ + const char * getName() const override {return "ParserJSONPathMemberAccess";} + + bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected) override; +}; + +} diff --git a/src/Functions/JSONPath/Parsers/ParserJSONPathQuery.cpp b/src/Functions/JSONPath/Parsers/ParserJSONPathQuery.cpp new 
file mode 100644 index 00000000000..c0831780fc4 --- /dev/null +++ b/src/Functions/JSONPath/Parsers/ParserJSONPathQuery.cpp @@ -0,0 +1,39 @@ +#include + +#include + +#include + +namespace DB + +{ +/** + * + * @param pos token iterator + * @param query node of ASTJSONPathQuery + * @param expected stuff for logging + * @return was parse successful + */ +bool ParserJSONPathQuery::parseImpl(Pos & pos, ASTPtr & query, Expected & expected) +{ + query = std::make_shared(); + ParserJSONPathMemberAccess parser_jsonpath_member_access; + + if (pos->type != TokenType::DollarSign) { + return false; + } + ++pos; + + bool res = false; + ASTPtr member_access; + while (parser_jsonpath_member_access.parse(pos, member_access, expected)) + { + query->children.push_back(member_access); + member_access = nullptr; + res = true; + } + /// true in case of at least one success + return res; +} + +} diff --git a/src/Functions/JSONPath/Parsers/ParserJSONPathQuery.h b/src/Functions/JSONPath/Parsers/ParserJSONPathQuery.h new file mode 100644 index 00000000000..cffec125c70 --- /dev/null +++ b/src/Functions/JSONPath/Parsers/ParserJSONPathQuery.h @@ -0,0 +1,17 @@ +#pragma once + +#include + + +namespace DB +{ +class ParserJSONPathQuery : public IParserBase +{ +protected: + const char * getName() const override { return "ParserJSONPathQuery"; } + bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected) override; + +private: + /// backlog: strict or lax mode +}; +} diff --git a/src/Functions/RapidJSONParser.h b/src/Functions/RapidJSONParser.h index 992480d64f7..5604a8c9fe0 100644 --- a/src/Functions/RapidJSONParser.h +++ b/src/Functions/RapidJSONParser.h @@ -12,7 +12,6 @@ namespace DB { - /// This class can be used as an argument for the template class FunctionJSON. /// It provides ability to parse JSONs using rapidjson library. 
struct RapidJSONParser @@ -45,6 +44,8 @@ struct RapidJSONParser Array getArray() const; Object getObject() const; + ALWAYS_INLINE rapidjson::Value * getDom() const { return nullptr; } + private: const rapidjson::Value * ptr = nullptr; }; diff --git a/src/Functions/SimdJSONParser.h b/src/Functions/SimdJSONParser.h index a9adfa27e2c..2dd952d920f 100644 --- a/src/Functions/SimdJSONParser.h +++ b/src/Functions/SimdJSONParser.h @@ -5,10 +5,10 @@ #endif #if USE_SIMDJSON -# include +# include # include # include -# include +# include namespace DB @@ -30,8 +30,8 @@ struct SimdJSONParser class Element { public: - ALWAYS_INLINE Element() {} - ALWAYS_INLINE Element(const simdjson::dom::element & element_) : element(element_) {} + ALWAYS_INLINE Element() { } + ALWAYS_INLINE Element(const simdjson::dom::element & element_) : element(element_) { } ALWAYS_INLINE bool isInt64() const { return element.type() == simdjson::dom::element_type::INT64; } ALWAYS_INLINE bool isUInt64() const { return element.type() == simdjson::dom::element_type::UINT64; } @@ -50,6 +50,8 @@ struct SimdJSONParser ALWAYS_INLINE Array getArray() const; ALWAYS_INLINE Object getObject() const; + ALWAYS_INLINE simdjson::dom::element getElement() const { return element; } + private: simdjson::dom::element element; }; @@ -61,21 +63,35 @@ struct SimdJSONParser class Iterator { public: - ALWAYS_INLINE Iterator(const simdjson::dom::array::iterator & it_) : it(it_) {} + ALWAYS_INLINE Iterator(const simdjson::dom::array::iterator & it_) : it(it_) { } ALWAYS_INLINE Element operator*() const { return *it; } - ALWAYS_INLINE Iterator & operator++() { ++it; return *this; } - ALWAYS_INLINE Iterator operator++(int) { auto res = *this; ++it; return res; } + ALWAYS_INLINE Iterator & operator++() + { + ++it; + return *this; + } + ALWAYS_INLINE Iterator operator++(int) + { + auto res = *this; + ++it; + return res; + } ALWAYS_INLINE friend bool operator!=(const Iterator & left, const Iterator & right) { return left.it != right.it; } ALWAYS_INLINE friend bool operator==(const Iterator & left, const Iterator & right) { return !(left != right); } + private: simdjson::dom::array::iterator it; }; - ALWAYS_INLINE Array(const simdjson::dom::array & array_) : array(array_) {} + ALWAYS_INLINE Array(const simdjson::dom::array & array_) : array(array_) { } ALWAYS_INLINE Iterator begin() const { return array.begin(); } ALWAYS_INLINE Iterator end() const { return array.end(); } ALWAYS_INLINE size_t size() const { return array.size(); } - ALWAYS_INLINE Element operator[](size_t index) const { assert(index < size()); return array.at(index).first; } + ALWAYS_INLINE Element operator[](size_t index) const + { + assert(index < size()); + return array.at(index).first; + } private: simdjson::dom::array array; @@ -90,17 +106,31 @@ struct SimdJSONParser class Iterator { public: - ALWAYS_INLINE Iterator(const simdjson::dom::object::iterator & it_) : it(it_) {} - ALWAYS_INLINE KeyValuePair operator*() const { const auto & res = *it; return {res.key, res.value}; } - ALWAYS_INLINE Iterator & operator++() { ++it; return *this; } - ALWAYS_INLINE Iterator operator++(int) { auto res = *this; ++it; return res; } + ALWAYS_INLINE Iterator(const simdjson::dom::object::iterator & it_) : it(it_) { } + ALWAYS_INLINE KeyValuePair operator*() const + { + const auto & res = *it; + return {res.key, res.value}; + } + ALWAYS_INLINE Iterator & operator++() + { + ++it; + return *this; + } + ALWAYS_INLINE Iterator operator++(int) + { + auto res = *this; + ++it; + return res; + } ALWAYS_INLINE friend 
bool operator!=(const Iterator & left, const Iterator & right) { return left.it != right.it; } ALWAYS_INLINE friend bool operator==(const Iterator & left, const Iterator & right) { return !(left != right); } + private: simdjson::dom::object::iterator it; }; - ALWAYS_INLINE Object(const simdjson::dom::object & object_) : object(object_) {} + ALWAYS_INLINE Object(const simdjson::dom::object & object_) : object(object_) { } ALWAYS_INLINE Iterator begin() const { return object.begin(); } ALWAYS_INLINE Iterator end() const { return object.end(); } ALWAYS_INLINE size_t size() const { return object.size(); } @@ -126,6 +156,8 @@ struct SimdJSONParser return {res.key, res.value}; } + ALWAYS_INLINE simdjson::dom::object getDom() const { return object; } + private: simdjson::dom::object object; }; @@ -145,8 +177,8 @@ struct SimdJSONParser void reserve(size_t max_size) { if (parser.allocate(max_size) != simdjson::error_code::SUCCESS) - throw Exception{"Couldn't allocate " + std::to_string(max_size) + " bytes when parsing JSON", - ErrorCodes::CANNOT_ALLOCATE_MEMORY}; + throw Exception{ + "Couldn't allocate " + std::to_string(max_size) + " bytes when parsing JSON", ErrorCodes::CANNOT_ALLOCATE_MEMORY}; } private: diff --git a/src/Functions/registerFunctions.cpp b/src/Functions/registerFunctions.cpp index d827cc40a86..116a2ae0324 100644 --- a/src/Functions/registerFunctions.cpp +++ b/src/Functions/registerFunctions.cpp @@ -40,6 +40,7 @@ void registerFunctionsGeo(FunctionFactory &); void registerFunctionsIntrospection(FunctionFactory &); void registerFunctionsNull(FunctionFactory &); void registerFunctionsJSON(FunctionFactory &); +void registerFunctionsSQLJSON(FunctionFactory &); void registerFunctionsConsistentHashing(FunctionFactory & factory); void registerFunctionsUnixTimestamp64(FunctionFactory & factory); void registerFunctionBitHammingDistance(FunctionFactory & factory); @@ -97,6 +98,7 @@ void registerFunctions() registerFunctionsGeo(factory); registerFunctionsNull(factory); registerFunctionsJSON(factory); + registerFunctionsSQLJSON(factory); registerFunctionsIntrospection(factory); registerFunctionsConsistentHashing(factory); registerFunctionsUnixTimestamp64(factory); diff --git a/src/Parsers/Lexer.cpp b/src/Parsers/Lexer.cpp index ffa8250a3f3..33cecab208f 100644 --- a/src/Parsers/Lexer.cpp +++ b/src/Parsers/Lexer.cpp @@ -240,6 +240,9 @@ Token Lexer::nextTokenImpl() case '*': ++pos; return Token(TokenType::Asterisk, token_begin, pos); + case '$': + ++pos; + return Token(TokenType::DollarSign, token_begin, pos); case '/': /// division (/) or start of comment (//, /*) { ++pos; diff --git a/src/Parsers/Lexer.h b/src/Parsers/Lexer.h index dc1c9824b6b..e9885d8df83 100644 --- a/src/Parsers/Lexer.h +++ b/src/Parsers/Lexer.h @@ -33,6 +33,7 @@ namespace DB \ M(Asterisk) /** Could be used as multiplication operator or on it's own: "SELECT *" */ \ \ + M(DollarSign) \ M(Plus) \ M(Minus) \ M(Slash) \ From 1ee77eae8982f7859c07c899423309cad155fd65 Mon Sep 17 00:00:00 2001 From: kssenii Date: Sun, 16 May 2021 21:35:37 +0000 Subject: [PATCH 129/931] Better way to distinguish between user query and replication thread query --- .../PostgreSQL/DatabaseMaterializePostgreSQL.cpp | 13 ++++--------- src/Interpreters/Context.h | 6 ++++++ .../PostgreSQL/MaterializePostgreSQLConsumer.cpp | 3 +-- .../PostgreSQL/StorageMaterializePostgreSQL.cpp | 4 +--- 4 files changed, 12 insertions(+), 14 deletions(-) diff --git a/src/Databases/PostgreSQL/DatabaseMaterializePostgreSQL.cpp 
b/src/Databases/PostgreSQL/DatabaseMaterializePostgreSQL.cpp
index cbedc98fc3d..3cb5b7d7c55 100644
--- a/src/Databases/PostgreSQL/DatabaseMaterializePostgreSQL.cpp
+++ b/src/Databases/PostgreSQL/DatabaseMaterializePostgreSQL.cpp
@@ -118,8 +118,7 @@ StoragePtr DatabaseMaterializePostgreSQL::tryGetTable(const String & name, Conte
     /// to its nested ReplacingMergeTree table (in all other cases), the context of a query os modified.
     /// Also if materialzied_tables set is empty - it means all access is done to ReplacingMergeTree tables - it is a case after
     /// replication_handler was shutdown.
-    if ((local_context->hasQueryContext() && local_context->getQueryContext()->getQueryFactoriesInfo().storages.count("ReplacingMergeTree"))
-        || materialized_tables.empty())
+    if (local_context->isInternalQuery() || materialized_tables.empty())
     {
         return DatabaseAtomic::tryGetTable(name, local_context);
     }
@@ -143,14 +142,10 @@ StoragePtr DatabaseMaterializePostgreSQL::tryGetTable(const String & name, Conte
 void DatabaseMaterializePostgreSQL::createTable(ContextPtr local_context, const String & table_name, const StoragePtr & table, const ASTPtr & query)
 {
     /// Create table query can only be called from replication thread.
-    if (local_context->hasQueryContext())
+    if (local_context->isInternalQuery())
     {
-        auto storage_set = local_context->getQueryContext()->getQueryFactoriesInfo().storages;
-        if (storage_set.find("ReplacingMergeTree") != storage_set.end())
-        {
-            DatabaseAtomic::createTable(local_context, table_name, table, query);
-            return;
-        }
+        DatabaseAtomic::createTable(local_context, table_name, table, query);
+        return;
     }

     throw Exception(ErrorCodes::NOT_IMPLEMENTED,
diff --git a/src/Interpreters/Context.h b/src/Interpreters/Context.h
index a8fd0cf1700..de1d9b94ca7 100644
--- a/src/Interpreters/Context.h
+++ b/src/Interpreters/Context.h
@@ -259,6 +259,9 @@ private:
     /// XXX: move this stuff to shared part instead.
     ContextPtr buffer_context;  /// Buffer context. Could be equal to this.

+    /// A flag, used to distinguish between a user query and an internal query to a database engine (MaterializePostgreSQL).
+    bool is_internal_query = false;
+
 public:
     // Top-level OpenTelemetry trace context for the query. Makes sense only for a query context. 
OpenTelemetryTraceContext query_trace_context; @@ -728,6 +731,9 @@ public: void shutdown(); + bool isInternalQuery() const { return is_internal_query; } + void setInternalQuery(bool internal) { is_internal_query = internal; } + ActionLocksManagerPtr getActionLocksManager(); enum class ApplicationType diff --git a/src/Storages/PostgreSQL/MaterializePostgreSQLConsumer.cpp b/src/Storages/PostgreSQL/MaterializePostgreSQLConsumer.cpp index 5c4fc27a334..418a95ae078 100644 --- a/src/Storages/PostgreSQL/MaterializePostgreSQLConsumer.cpp +++ b/src/Storages/PostgreSQL/MaterializePostgreSQLConsumer.cpp @@ -475,8 +475,7 @@ void MaterializePostgreSQLConsumer::syncTables(std::shared_ptrcolumns = buffer.columnsAST; auto insert_context = Context::createCopy(context); - insert_context->makeQueryContext(); - insert_context->addQueryFactoriesInfo(Context::QueryLogFactories::Storage, "ReplacingMergeTree"); + insert_context->setInternalQuery(true); InterpreterInsertQuery interpreter(insert, insert_context, true); auto block_io = interpreter.execute(); diff --git a/src/Storages/PostgreSQL/StorageMaterializePostgreSQL.cpp b/src/Storages/PostgreSQL/StorageMaterializePostgreSQL.cpp index 7c20b49897f..1b99b2eabfb 100644 --- a/src/Storages/PostgreSQL/StorageMaterializePostgreSQL.cpp +++ b/src/Storages/PostgreSQL/StorageMaterializePostgreSQL.cpp @@ -173,9 +173,7 @@ void StorageMaterializePostgreSQL::createNestedIfNeeded(PostgreSQLTableStructure std::shared_ptr StorageMaterializePostgreSQL::makeNestedTableContext(ContextPtr from_context) { auto new_context = Context::createCopy(from_context); - new_context->makeQueryContext(); - new_context->addQueryFactoriesInfo(Context::QueryLogFactories::Storage, "ReplacingMergeTree"); - + new_context->setInternalQuery(true); return new_context; } From d3a9d6633fdcec1174287a8439ad73de5cb237c9 Mon Sep 17 00:00:00 2001 From: Kruglov Pavel <48961922+Avogar@users.noreply.github.com> Date: Tue, 18 May 2021 11:34:13 +0300 Subject: [PATCH 130/931] Rename uniqThetaSketch to uniqTheta --- tests/performance/uniq.xml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/performance/uniq.xml b/tests/performance/uniq.xml index f688f1d5a9d..b373bccd938 100644 --- a/tests/performance/uniq.xml +++ b/tests/performance/uniq.xml @@ -46,7 +46,7 @@ uniqUpTo(10) uniqUpTo(25) uniqUpTo(100) - uniqThetaSketch + uniqTheta From 4b945321368ac2749b825ab8559b0c5a476938b5 Mon Sep 17 00:00:00 2001 From: Pavel Kruglov Date: Tue, 18 May 2021 17:34:11 +0300 Subject: [PATCH 131/931] Adjust query with SearchPhrase --- tests/performance/uniq.xml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/performance/uniq.xml b/tests/performance/uniq.xml index b373bccd938..52dfefb902b 100644 --- a/tests/performance/uniq.xml +++ b/tests/performance/uniq.xml @@ -23,7 +23,6 @@ --> SearchEngineID RegionID - SearchPhrase ClientIP @@ -52,4 +51,5 @@ SELECT {key} AS k, {func}(UserID) FROM hits_100m_single GROUP BY k FORMAT Null + SELECT SearchPhrase AS k, uniqTheta(UserID) FROM (SELECT SearchPhrase, UserID FROM hits_100m_single LIMIT 20000000) GROUP BY k From b93d59e9310939ef1a7cbc2e9ed821f5015467ac Mon Sep 17 00:00:00 2001 From: Kruglov Pavel <48961922+Avogar@users.noreply.github.com> Date: Thu, 20 May 2021 11:13:27 +0300 Subject: [PATCH 132/931] Try to limit all queries to see the changes --- tests/performance/uniq.xml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/performance/uniq.xml b/tests/performance/uniq.xml index 52dfefb902b..378a7ee5193 100644 --- 
a/tests/performance/uniq.xml
+++ b/tests/performance/uniq.xml
@@ -24,6 +24,7 @@
         SearchEngineID
         RegionID
         ClientIP
+        SearchPhrase

@@ -50,6 +51,5 @@

-    SELECT {key} AS k, {func}(UserID) FROM hits_100m_single GROUP BY k FORMAT Null
-    SELECT SearchPhrase AS k, uniqTheta(UserID) FROM (SELECT SearchPhrase, UserID FROM hits_100m_single LIMIT 20000000) GROUP BY k
+    SELECT {key} AS k, {func}(UserID) FROM (SELECT {key}, UserID FROM hits_100m_single LIMIT 20000000) GROUP BY k

From e832296768da5eff3bf0a7916d1a9a17f78a9603 Mon Sep 17 00:00:00 2001
From: Kruglov Pavel <48961922+Avogar@users.noreply.github.com>
Date: Thu, 20 May 2021 11:14:24 +0300
Subject: [PATCH 133/931] Reorder values

---
 tests/performance/uniq.xml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/tests/performance/uniq.xml b/tests/performance/uniq.xml
index 378a7ee5193..766742f43cd 100644
--- a/tests/performance/uniq.xml
+++ b/tests/performance/uniq.xml
@@ -23,8 +23,8 @@
     -->
         SearchEngineID
         RegionID
-        ClientIP
         SearchPhrase
+        ClientIP

From 346dc65140bf2d17648bde9b59ffe204730b621a Mon Sep 17 00:00:00 2001
From: vdimir
Date: Thu, 20 May 2021 16:47:09 +0300
Subject: [PATCH 134/931] Save join_use_nulls in SETTINGS section for storage View

---
 src/Storages/StorageView.cpp                       | 58 +++++++++++++++++--
 src/Storages/StorageView.h                         |  2 -
 .../01866_view_persist_settings.reference          | 24 ++++++++
 .../01866_view_persist_settings.sql                |  8 ++-
 4 files changed, 83 insertions(+), 9 deletions(-)

diff --git a/src/Storages/StorageView.cpp b/src/Storages/StorageView.cpp
index 6c74bfdcdd7..cf9d5ab7b3d 100644
--- a/src/Storages/StorageView.cpp
+++ b/src/Storages/StorageView.cpp
@@ -29,6 +29,57 @@ namespace ErrorCodes
     extern const int LOGICAL_ERROR;
 }

+namespace
+{
+
+void addSettingsChanges(ASTPtr ast, const Settings & settings)
+{
+    auto * settings_ast = ast->as();
+    if (!settings_ast)
+        throw DB::Exception(DB::ErrorCodes::LOGICAL_ERROR, "ASTSetQuery expected");
+
+    settings_ast->is_standalone = false;
+    if (settings_ast->changes.tryGet("join_use_nulls") == nullptr)
+        settings_ast->changes.emplace_back("join_use_nulls", Field(settings.join_use_nulls));
+}
+
+/// Save to the AST the settings from the context that affect view behaviour.
+void saveSettingsToAst(ASTSelectWithUnionQuery * select, const Settings & settings)
+{
+    /// Check the SETTINGS section on the top level
+    if (select->settings_ast)
+    {
+        addSettingsChanges(select->settings_ast, settings);
+        return;
+    }
+
+    /// We cannot add SETTINGS on the top level because it would clash with the section from the inner SELECT
+    /// and we would get a query like: SELECT ... SETTINGS ... SETTINGS ...
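+    /// (A hypothetical illustration: for `CREATE VIEW v AS SELECT 1 SETTINGS max_block_size = 666`
+    ///  a naive top-level rewrite would produce
+    ///  `SELECT 1 SETTINGS max_block_size = 666 SETTINGS join_use_nulls = 0`,
+    ///  i.e. two SETTINGS sections in one SELECT, which would not round-trip through the parser.)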
+
+    /// Process every select in ast and add SETTINGS section to each
+    for (const auto & child : select->list_of_selects->children)
+    {
+        auto * child_select = child->as();
+        if (!child_select)
+            continue;
+
+        ASTPtr ast_set_query = child_select->settings();
+        if (ast_set_query)
+        {
+            /// Modify existing SETTINGS section
+            addSettingsChanges(ast_set_query, settings);
+        }
+        else
+        {
+            /// Add SETTINGS section to query
+            ast_set_query = std::make_shared();
+            addSettingsChanges(ast_set_query, settings);
+            child_select->setExpression(ASTSelectQuery::Expression::SETTINGS, std::move(ast_set_query));
+        }
+    }
+}
+
+}
 
 StorageView::StorageView(
     const StorageID & table_id_,
@@ -37,7 +88,6 @@ StorageView::StorageView(
     const String & comment,
     const Settings & settings)
     : IStorage(table_id_)
-    , settings_changes(settings.changes())
 {
     StorageInMemoryMetadata storage_metadata;
     storage_metadata.setColumns(columns_);
@@ -46,6 +96,7 @@ StorageView::StorageView(
     if (!query.select)
         throw Exception("SELECT query is not specified for " + getName(), ErrorCodes::INCORRECT_QUERY);
 
+    saveSettingsToAst(query.select, settings);
     SelectQueryDescription description;
 
     description.inner_query = query.select->ptr();
@@ -89,10 +140,7 @@ void StorageView::read(
         current_inner_query = query_info.view_query->clone();
     }
 
-    auto modified_context = Context::createCopy(context);
-    modified_context->applySettingsChanges(settings_changes);
-
-    InterpreterSelectWithUnionQuery interpreter(current_inner_query, modified_context, {}, column_names);
+    InterpreterSelectWithUnionQuery interpreter(current_inner_query, context, {}, column_names);
     interpreter.buildQueryPlan(query_plan);
 
     /// It's expected that the columns read from storage are not constant.
diff --git a/src/Storages/StorageView.h b/src/Storages/StorageView.h
index 337fb358905..318d9708bab 100644
--- a/src/Storages/StorageView.h
+++ b/src/Storages/StorageView.h
@@ -55,8 +55,6 @@ protected:
         const ColumnsDescription & columns_,
         const String & comment,
         const Settings & settings);
-
-    SettingsChanges settings_changes;
 };
 
 }
diff --git a/tests/queries/0_stateless/01866_view_persist_settings.reference b/tests/queries/0_stateless/01866_view_persist_settings.reference
index 986b34a5e13..158c5d0a0a9 100644
--- a/tests/queries/0_stateless/01866_view_persist_settings.reference
+++ b/tests/queries/0_stateless/01866_view_persist_settings.reference
@@ -1,3 +1,27 @@
+SELECT
+    a,
+    b,
+    c
+FROM 
+(
+    SELECT *
+    FROM 
+    (
+        SELECT
+            number + 1 AS a,
+            number + 11 AS b
+        FROM numbers(2)
+    ) AS t1
+    FULL OUTER JOIN 
+    (
+        SELECT
+            number + 2 AS a,
+            number + 22 AS c
+        FROM numbers(2)
+    ) AS t2 USING (a)
+    ORDER BY a ASC
+    SETTINGS max_block_size = 666, join_use_nulls = 0
+) AS view_no_nulls
 1 11 0
 2 12 22
 3 0 23
diff --git a/tests/queries/0_stateless/01866_view_persist_settings.sql b/tests/queries/0_stateless/01866_view_persist_settings.sql
index b2158d5d179..93dcb725179 100644
--- a/tests/queries/0_stateless/01866_view_persist_settings.sql
+++ b/tests/queries/0_stateless/01866_view_persist_settings.sql
@@ -8,13 +8,17 @@ SET join_use_nulls = 0;
 CREATE OR REPLACE VIEW view_no_nulls AS
 SELECT * FROM ( SELECT number + 1 AS a, number + 11 AS b FROM numbers(2) ) AS t1
 FULL JOIN ( SELECT number + 2 AS a, number + 22 AS c FROM numbers(2) ) AS t2
-USING a ORDER BY a;
+USING a ORDER BY a
+SETTINGS max_block_size = 666;
+
+-- check that max_block_size is not rewritten
+EXPLAIN SYNTAX SELECT * FROM view_no_nulls;
 
 CREATE OR REPLACE VIEW view_nulls_set AS
 SELECT * FROM ( SELECT number + 1 AS a, number + 11 AS b FROM
numbers(2) ) AS t1 FULL JOIN ( SELECT number + 2 AS a, number + 22 AS c FROM numbers(2) ) AS t2 USING a ORDER BY a -SETTINGS join_use_nulls = 1; +SETTINGS join_use_nulls = 1, max_block_size = 666; SET join_use_nulls = 1; From aeab58fe32eae938a56122214b1d81172023a68a Mon Sep 17 00:00:00 2001 From: vdimir Date: Fri, 21 May 2021 13:48:30 +0300 Subject: [PATCH 135/931] Add SETTINGS join_use_nulls to storage view tests --- .../00599_create_view_with_subquery.reference | 2 +- .../0_stateless/00916_create_or_replace_view.reference | 4 ++-- .../01076_predicate_optimizer_with_view.reference | 4 ++++ .../0_stateless/01602_show_create_view.reference | 10 +++++----- 4 files changed, 12 insertions(+), 8 deletions(-) diff --git a/tests/queries/0_stateless/00599_create_view_with_subquery.reference b/tests/queries/0_stateless/00599_create_view_with_subquery.reference index d83d2837a18..ea2e00fa19a 100644 --- a/tests/queries/0_stateless/00599_create_view_with_subquery.reference +++ b/tests/queries/0_stateless/00599_create_view_with_subquery.reference @@ -1 +1 @@ -CREATE VIEW default.test_view_00599\n(\n `id` UInt64\n) AS\nSELECT *\nFROM default.test_00599\nWHERE id = \n(\n SELECT 1\n) +CREATE VIEW default.test_view_00599\n(\n `id` UInt64\n) AS\nSELECT *\nFROM default.test_00599\nWHERE id = \n(\n SELECT 1\n)\nSETTINGS join_use_nulls = 0 diff --git a/tests/queries/0_stateless/00916_create_or_replace_view.reference b/tests/queries/0_stateless/00916_create_or_replace_view.reference index 50323e47556..31b08b602f8 100644 --- a/tests/queries/0_stateless/00916_create_or_replace_view.reference +++ b/tests/queries/0_stateless/00916_create_or_replace_view.reference @@ -1,2 +1,2 @@ -CREATE VIEW default.t\n(\n `number` UInt64\n) AS\nSELECT number\nFROM system.numbers -CREATE VIEW default.t\n(\n `next_number` UInt64\n) AS\nSELECT number + 1 AS next_number\nFROM system.numbers +CREATE VIEW default.t\n(\n `number` UInt64\n) AS\nSELECT number\nFROM system.numbers\nSETTINGS join_use_nulls = 0 +CREATE VIEW default.t\n(\n `next_number` UInt64\n) AS\nSELECT number + 1 AS next_number\nFROM system.numbers\nSETTINGS join_use_nulls = 0 diff --git a/tests/queries/0_stateless/01076_predicate_optimizer_with_view.reference b/tests/queries/0_stateless/01076_predicate_optimizer_with_view.reference index dfab41b5e4c..e855fa66225 100644 --- a/tests/queries/0_stateless/01076_predicate_optimizer_with_view.reference +++ b/tests/queries/0_stateless/01076_predicate_optimizer_with_view.reference @@ -8,6 +8,7 @@ FROM SELECT * FROM default.test HAVING id = 1 + SETTINGS join_use_nulls = 0 ) AS test_view WHERE id = 1 SELECT @@ -20,6 +21,7 @@ FROM SELECT * FROM default.test HAVING id = 2 + SETTINGS join_use_nulls = 0 ) AS test_view WHERE id = 2 SELECT id @@ -28,6 +30,7 @@ FROM SELECT * FROM default.test HAVING id = 1 + SETTINGS join_use_nulls = 0 ) AS test_view WHERE id = 1 SELECT id @@ -36,5 +39,6 @@ FROM SELECT * FROM default.test HAVING id = 1 + SETTINGS join_use_nulls = 0 ) AS s WHERE id = 1 diff --git a/tests/queries/0_stateless/01602_show_create_view.reference b/tests/queries/0_stateless/01602_show_create_view.reference index 5d4bd2cd972..2130834910c 100644 --- a/tests/queries/0_stateless/01602_show_create_view.reference +++ b/tests/queries/0_stateless/01602_show_create_view.reference @@ -1,7 +1,7 @@ -CREATE VIEW test_1602.v\n(\n `EventDate` DateTime,\n `CounterID` UInt32,\n `UserID` UInt32\n) AS\nSELECT *\nFROM test_1602.tbl +CREATE VIEW test_1602.v\n(\n `EventDate` DateTime,\n `CounterID` UInt32,\n `UserID` UInt32\n) AS\nSELECT *\nFROM 
test_1602.tbl\nSETTINGS join_use_nulls = 0
 CREATE MATERIALIZED VIEW test_1602.vv\n(\n    `EventDate` DateTime,\n    `CounterID` UInt32,\n    `UserID` UInt32\n)\nENGINE = MergeTree\nPARTITION BY toYYYYMM(EventDate)\nORDER BY (CounterID, EventDate, intHash32(UserID))\nSETTINGS index_granularity = 8192 AS\nSELECT *\nFROM test_1602.tbl
 CREATE LIVE VIEW test_1602.vvv\n(\n    `EventDate` DateTime,\n    `CounterID` UInt32,\n    `UserID` UInt32\n) AS\nSELECT *\nFROM test_1602.tbl
-CREATE VIEW test_1602.VIEW\n(\n    `EventDate` DateTime,\n    `CounterID` UInt32,\n    `UserID` UInt32\n) AS\nSELECT *\nFROM test_1602.tbl
-CREATE VIEW test_1602.DATABASE\n(\n    `EventDate` DateTime,\n    `CounterID` UInt32,\n    `UserID` UInt32\n) AS\nSELECT *\nFROM test_1602.tbl
-CREATE VIEW test_1602.DICTIONARY\n(\n    `EventDate` DateTime,\n    `CounterID` UInt32,\n    `UserID` UInt32\n) AS\nSELECT *\nFROM test_1602.tbl
-CREATE VIEW test_1602.TABLE\n(\n    `EventDate` DateTime,\n    `CounterID` UInt32,\n    `UserID` UInt32\n) AS\nSELECT *\nFROM test_1602.tbl
+CREATE VIEW test_1602.VIEW\n(\n    `EventDate` DateTime,\n    `CounterID` UInt32,\n    `UserID` UInt32\n) AS\nSELECT *\nFROM test_1602.tbl\nSETTINGS join_use_nulls = 0
+CREATE VIEW test_1602.DATABASE\n(\n    `EventDate` DateTime,\n    `CounterID` UInt32,\n    `UserID` UInt32\n) AS\nSELECT *\nFROM test_1602.tbl\nSETTINGS join_use_nulls = 0
+CREATE VIEW test_1602.DICTIONARY\n(\n    `EventDate` DateTime,\n    `CounterID` UInt32,\n    `UserID` UInt32\n) AS\nSELECT *\nFROM test_1602.tbl\nSETTINGS join_use_nulls = 0
+CREATE VIEW test_1602.TABLE\n(\n    `EventDate` DateTime,\n    `CounterID` UInt32,\n    `UserID` UInt32\n) AS\nSELECT *\nFROM test_1602.tbl\nSETTINGS join_use_nulls = 0

From 9758e46c3c8978ed25b8d296e59905b665699811 Mon Sep 17 00:00:00 2001
From: Kseniia Sumarokova <54203879+kssenii@users.noreply.github.com>
Date: Fri, 21 May 2021 17:46:28 +0300
Subject: [PATCH 136/931] Add TODO

---
 src/Storages/PostgreSQL/PostgreSQLReplicationHandler.h | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.h b/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.h
index 1f8d25ab32d..db29a2b4aaa 100644
--- a/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.h
+++ b/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.h
@@ -5,6 +5,10 @@
 #include
 
+/// TODO: Add test for multiple databases to be replicated. Add test to make sure unneeded tables do not get into replication slot.
+/// Test behavior of publication.
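+/// (e.g. the slot and publication contents on the PostgreSQL side can be inspected
+/// via the pg_replication_slots and pg_publication_tables system catalogs).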
+ + namespace DB { From d8f8fb2ae573da122d1bf54ba5024eac25a5d01f Mon Sep 17 00:00:00 2001 From: elevankoff Date: Sat, 22 May 2021 09:54:03 +0000 Subject: [PATCH 137/931] Some decorative changes --- src/Common/DiskStatisticsOS.cpp | 25 +++++----- src/Common/DiskStatisticsOS.h | 7 +-- src/Common/MemoryInfoOS.cpp | 39 ++++++++------- src/Common/MemoryInfoOS.h | 7 +-- src/Common/ProcessorStatisticsOS.cpp | 74 ++++++++++++++-------------- src/Common/ProcessorStatisticsOS.h | 7 +-- 6 files changed, 75 insertions(+), 84 deletions(-) diff --git a/src/Common/DiskStatisticsOS.cpp b/src/Common/DiskStatisticsOS.cpp index 40ba15ac6b8..3654f843c3a 100644 --- a/src/Common/DiskStatisticsOS.cpp +++ b/src/Common/DiskStatisticsOS.cpp @@ -17,31 +17,32 @@ namespace ErrorCodes extern const int CANNOT_STATVFS; } +namespace +{ + void readStringUntilWhitespaceAndSkipWhitespaceIfAny(String & s, ReadBuffer & buf) + { + readStringUntilWhitespace(s, buf); + skipWhitespaceIfAny(buf); + } +} + static constexpr auto mounts_filename = "/proc/mounts"; static constexpr std::size_t READ_BUFFER_BUF_SIZE = (64 << 10); -void readStringUntilWhitespaceAndSkipWhitespaceIfAny(String & s, ReadBuffer & buf) -{ - readStringUntilWhitespace(s, buf); - skipWhitespaceIfAny(buf); -} - -DiskStatisticsOS::DiskStatisticsOS() - : mounts_in(mounts_filename, READ_BUFFER_BUF_SIZE, O_RDONLY | O_CLOEXEC) -{} +DiskStatisticsOS::DiskStatisticsOS() {} DiskStatisticsOS::~DiskStatisticsOS() {} DiskStatisticsOS::Data DiskStatisticsOS::get() { - mounts_in.seek(0, SEEK_SET); + ReadBufferFromFile mounts_in(mounts_filename, READ_BUFFER_BUF_SIZE, O_RDONLY | O_CLOEXEC); DiskStatisticsOS::Data data = {0, 0}; while (!mounts_in.eof()) { - String filesystem = readNextFilesystem(); + String filesystem = readNextFilesystem(mounts_in); struct statvfs stat; @@ -60,7 +61,7 @@ DiskStatisticsOS::Data DiskStatisticsOS::get() return data; } -String DiskStatisticsOS::readNextFilesystem() +String DiskStatisticsOS::readNextFilesystem(ReadBuffer& mounts_in) { String filesystem, unused; diff --git a/src/Common/DiskStatisticsOS.h b/src/Common/DiskStatisticsOS.h index a1c260f24c3..d14cf273ccd 100644 --- a/src/Common/DiskStatisticsOS.h +++ b/src/Common/DiskStatisticsOS.h @@ -9,7 +9,7 @@ namespace DB { -/** Opens file /proc/mounts. Keeps it open, reads all mounted filesytems and +/** Opens file /proc/mounts, reads all mounted filesytems and * calculates disk usage. 
*/ class DiskStatisticsOS @@ -27,10 +27,7 @@ public: Data get(); private: - String readNextFilesystem(); - -private: - ReadBufferFromFile mounts_in; + String readNextFilesystem(ReadBuffer& mounts_in); }; } diff --git a/src/Common/MemoryInfoOS.cpp b/src/Common/MemoryInfoOS.cpp index 5eb2501e322..b8641809ae9 100644 --- a/src/Common/MemoryInfoOS.cpp +++ b/src/Common/MemoryInfoOS.cpp @@ -14,32 +14,33 @@ namespace DB { +namespace +{ + template + void readIntTextAndSkipWhitespaceIfAny(T & x, ReadBuffer & buf) + { + readIntText(x, buf); + skipWhitespaceIfAny(buf); + } + + void readStringUntilWhitespaceAndSkipWhitespaceIfAny(String & s, ReadBuffer & buf) + { + readStringUntilWhitespace(s, buf); + skipWhitespaceIfAny(buf); + } +} + static constexpr auto meminfo_filename = "/proc/meminfo"; static constexpr size_t READ_BUFFER_BUF_SIZE = (64 << 10); -void readStringUntilWhitespaceAndSkipWhitespaceIfAny(String & s, ReadBuffer & buf) -{ - readStringUntilWhitespace(s, buf); - skipWhitespaceIfAny(buf); -} - -template -void readIntTextAndSkipWhitespaceIfAny(T & x, ReadBuffer & buf) -{ - readIntText(x, buf); - skipWhitespaceIfAny(buf); -} - -MemoryInfoOS::MemoryInfoOS() - : meminfo_in(meminfo_filename, READ_BUFFER_BUF_SIZE, O_RDONLY | O_CLOEXEC) -{} +MemoryInfoOS::MemoryInfoOS() {} MemoryInfoOS::~MemoryInfoOS() {} MemoryInfoOS::Data MemoryInfoOS::get() { - meminfo_in.seek(0, SEEK_SET); + ReadBufferFromFile meminfo_in(meminfo_filename, READ_BUFFER_BUF_SIZE, O_RDONLY | O_CLOEXEC); MemoryInfoOS::Data data; String field_name; @@ -47,7 +48,7 @@ MemoryInfoOS::Data MemoryInfoOS::get() std::unordered_map meminfo; while (!meminfo_in.eof()) - meminfo.insert(readField()); + meminfo.insert(readField(meminfo_in)); data.total = meminfo["MemTotal"]; data.free = meminfo["MemFree"]; @@ -62,7 +63,7 @@ MemoryInfoOS::Data MemoryInfoOS::get() return data; } -std::pair MemoryInfoOS::readField() +std::pair MemoryInfoOS::readField(ReadBuffer& meminfo_in) { String key; uint64_t val; diff --git a/src/Common/MemoryInfoOS.h b/src/Common/MemoryInfoOS.h index e1bf1dcfde4..a868d4bc23d 100644 --- a/src/Common/MemoryInfoOS.h +++ b/src/Common/MemoryInfoOS.h @@ -12,11 +12,10 @@ namespace DB { -/** Opens file /proc/meminfo. Keeps it open and reads statistics about memory usage. +/** Opens file /proc/meminfo and reads statistics about memory usage. * This is Linux specific. 
* See: man procfs */ - class MemoryInfoOS { public: @@ -39,9 +38,7 @@ public: Data get(); private: - ReadBufferFromFile meminfo_in; - - std::pair readField(); + std::pair readField(ReadBuffer& meminfo_in); }; } diff --git a/src/Common/ProcessorStatisticsOS.cpp b/src/Common/ProcessorStatisticsOS.cpp index d7d308916b7..78353cfeeab 100644 --- a/src/Common/ProcessorStatisticsOS.cpp +++ b/src/Common/ProcessorStatisticsOS.cpp @@ -29,6 +29,40 @@ namespace ErrorCodes extern const int CANNOT_CLOSE_FILE; } +namespace +{ + template + void readIntTextAndSkipWhitespaceIfAny(T & x, ReadBuffer & buf) + { + readIntText(x, buf); + skipWhitespaceIfAny(buf); + } + + void readStringAndSkipWhitespaceIfAny(String & s, ReadBuffer & buf) + { + readString(s, buf); + skipWhitespaceIfAny(buf); + } + + void readStringUntilWhitespaceAndSkipWhitespaceIfAny(String & s, ReadBuffer & buf) + { + readStringUntilWhitespace(s, buf); + skipWhitespaceIfAny(buf); + } + + void readCharAndSkipWhitespaceIfAny(char & c, ReadBuffer & buf) + { + readChar(c, buf); + skipWhitespaceIfAny(buf); + } + + void readFloatAndSkipWhitespaceIfAny(float & f, ReadBuffer & buf) + { + readFloatText(f, buf); + skipWhitespaceIfAny(buf); + } +} + static constexpr auto loadavg_filename = "/proc/loadavg"; static constexpr auto procst_filename = "/proc/stat"; static constexpr auto cpuinfo_filename = "/proc/cpuinfo"; @@ -37,41 +71,7 @@ static const uint64_t USER_HZ = static_cast(sysconf(_SC_CLK_TCK)); static constexpr size_t READ_BUFFER_BUF_SIZE = (64 << 10); -template -void readIntTextAndSkipWhitespaceIfAny(T & x, ReadBuffer & buf) -{ - readIntText(x, buf); - skipWhitespaceIfAny(buf); -} - -void readStringAndSkipWhitespaceIfAny(String & s, ReadBuffer & buf) -{ - readString(s, buf); - skipWhitespaceIfAny(buf); -} - -void readStringUntilWhitespaceAndSkipWhitespaceIfAny(String & s, ReadBuffer & buf) -{ - readStringUntilWhitespace(s, buf); - skipWhitespaceIfAny(buf); -} - -void readCharAndSkipWhitespaceIfAny(char & c, ReadBuffer & buf) -{ - readChar(c, buf); - skipWhitespaceIfAny(buf); -} - -void readFloatAndSkipWhitespaceIfAny(float & f, ReadBuffer & buf) -{ - readFloatText(f, buf); - skipWhitespaceIfAny(buf); -} - ProcessorStatisticsOS::ProcessorStatisticsOS() - : loadavg_in(loadavg_filename, READ_BUFFER_BUF_SIZE, O_RDONLY | O_CLOEXEC) - , procst_in(procst_filename, READ_BUFFER_BUF_SIZE, O_RDONLY | O_CLOEXEC) - , cpuinfo_in(cpuinfo_filename, READ_BUFFER_BUF_SIZE, O_RDONLY | O_CLOEXEC) { ProcStLoad unused; calcStLoad(unused); @@ -90,7 +90,7 @@ ProcessorStatisticsOS::Data ProcessorStatisticsOS::ProcessorStatisticsOS::get() void ProcessorStatisticsOS::readLoadavg(ProcLoadavg& loadavg) { - loadavg_in.seek(0, SEEK_SET); + ReadBufferFromFile loadavg_in(loadavg_filename, READ_BUFFER_BUF_SIZE, O_RDONLY | O_CLOEXEC); readFloatAndSkipWhitespaceIfAny(loadavg.avg1, loadavg_in); readFloatAndSkipWhitespaceIfAny(loadavg.avg5, loadavg_in); @@ -128,7 +128,7 @@ void ProcessorStatisticsOS::calcStLoad(ProcStLoad & stload) void ProcessorStatisticsOS::readProcTimeAndProcesses(ProcTime & proc_time, ProcStLoad& stload) { - procst_in.seek(0, SEEK_SET); + ReadBufferFromFile procst_in(procst_filename, READ_BUFFER_BUF_SIZE, O_RDONLY | O_CLOEXEC); String field_name, field_val; uint64_t unused; @@ -173,7 +173,7 @@ void ProcessorStatisticsOS::readProcTimeAndProcesses(ProcTime & proc_time, ProcS void ProcessorStatisticsOS::readFreq(ProcFreq & freq) { - cpuinfo_in.seek(0, SEEK_SET); + ReadBufferFromFile cpuinfo_in(cpuinfo_filename, READ_BUFFER_BUF_SIZE, O_RDONLY | O_CLOEXEC); String 
field_name, field_val;
    char unused;
diff --git a/src/Common/ProcessorStatisticsOS.h b/src/Common/ProcessorStatisticsOS.h
index 20ba680b6dd..ba95b006e9a 100644
--- a/src/Common/ProcessorStatisticsOS.h
+++ b/src/Common/ProcessorStatisticsOS.h
@@ -11,11 +11,10 @@
 namespace DB
 {
 
-/** Opens files: /proc/loadavg, /proc/stat, /proc/cpuinfo. Keeps it open and reads processor statistics.
+/** Opens files: /proc/loadavg, /proc/stat, /proc/cpuinfo and reads processor statistics in get() method.
  * This is Linux specific.
  * See: man procfs
  */
-
 class ProcessorStatisticsOS
 {
 public:
@@ -78,10 +77,6 @@ private:
     void readProcTimeAndProcesses(ProcTime & proc_time, ProcStLoad& stload);
 
 private:
-    ReadBufferFromFile loadavg_in;
-    ReadBufferFromFile procst_in;
-    ReadBufferFromFile cpuinfo_in;
-
     std::time_t last_stload_call_time;
    ProcTime last_proc_time;
 };

From ee3223b9440f9d4ed9c4feed029ccda6e19721cd Mon Sep 17 00:00:00 2001
From: elevankoff
Date: Sat, 22 May 2021 09:57:51 +0000
Subject: [PATCH 138/931] Add ProcessorStatisticsOS, MemoryInfoOS and
 DiskStatisticsOS

---
 src/Interpreters/AsynchronousMetrics.cpp | 54 ++++++++++++++++++++++++
 src/Interpreters/AsynchronousMetrics.h   |  7 ++++++-
 2 files changed, 60 insertions(+), 1 deletion(-)

diff --git a/src/Interpreters/AsynchronousMetrics.cpp b/src/Interpreters/AsynchronousMetrics.cpp
index 6eb143d17df..28c7be9ea2a 100644
--- a/src/Interpreters/AsynchronousMetrics.cpp
+++ b/src/Interpreters/AsynchronousMetrics.cpp
@@ -236,6 +236,60 @@ void AsynchronousMetrics::update()
     }
 #endif
 
+    /// Process memory information according to OS
+#if defined(OS_LINUX)
+    {
+        MemoryInfoOS::Data data = memory_info.get();
+
+        new_values["MemoryTotal"] = data.total;
+        new_values["MemoryFree"] = data.free;
+        new_values["MemoryBuffers"] = data.buffers;
+        new_values["MemoryCached"] = data.cached;
+        new_values["MemoryFreeAndCached"] = data.free_and_cached;
+        new_values["MemorySwapTotal"] = data.swap_total;
+        new_values["MemorySwapFree"] = data.swap_free;
+        new_values["MemorySwapCached"] = data.swap_cached;
+    }
+#endif
+
+    /// Process processor usage according to OS
+#if defined(OS_LINUX)
+    {
+        ProcessorStatisticsOS::Data data = proc_stat.get();
+
+        new_values["LoadAvg1"] = data.loadavg.avg1;
+        new_values["LoadAvg5"] = data.loadavg.avg5;
+        new_values["LoadAvg15"] = data.loadavg.avg15;
+
+        new_values["FreqMin"] = data.freq.min;
+        new_values["FreqMax"] = data.freq.max;
+        new_values["FreqAvg"] = data.freq.avg;
+
+        new_values["TimeLoadUser"] = data.stload.user_time;
+        new_values["TimeLoadNice"] = data.stload.nice_time;
+        new_values["TimeLoadSystem"] = data.stload.system_time;
+        new_values["TimeLoadIDLE"] = data.stload.idle_time;
+        new_values["TimeLoadIowait"] = data.stload.iowait_time;
+        new_values["TimeLoadSteal"] = data.stload.steal_time;
+        new_values["TimeLoadGuest"] = data.stload.guest_time;
+        new_values["TimeLoadGuestNice"] = data.stload.guest_nice_time;
+
+        new_values["Processes"] = data.stload.processes;
+        new_values["ProcessesRunning"] = data.stload.procs_running;
+        new_values["ProcessesBlocked"] = data.stload.procs_blocked;
+    }
+#endif
+
+    /// Process disk usage according to OS
+#if defined(OS_LINUX)
+    {
+        DiskStatisticsOS::Data data = disk_stat.get();
+
+        new_values["DiskTotal"] = data.total;
+        new_values["DiskUsed"] = data.used;
+    }
+#endif
+
     {
         auto databases = DatabaseCatalog::instance().getDatabases();
 
diff --git a/src/Interpreters/AsynchronousMetrics.h b/src/Interpreters/AsynchronousMetrics.h
index 38875c21edd..36e0fabd8a9 100644
--- a/src/Interpreters/AsynchronousMetrics.h
+++
b/src/Interpreters/AsynchronousMetrics.h @@ -2,6 +2,9 @@ #include #include +#include +#include +#include #include #include @@ -10,7 +13,6 @@ #include #include - namespace DB { @@ -80,6 +82,9 @@ private: #if defined(OS_LINUX) MemoryStatisticsOS memory_stat; + MemoryInfoOS memory_info; + ProcessorStatisticsOS proc_stat; + DiskStatisticsOS disk_stat; #endif std::unique_ptr thread; From f61d685ffac8e42419aaedfda1a82d979bfd9e97 Mon Sep 17 00:00:00 2001 From: vdimir Date: Sat, 22 May 2021 19:23:56 +0300 Subject: [PATCH 139/931] Fix whitespaces in 01866_view_persist_settings.reference --- .../0_stateless/01866_view_persist_settings.reference | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tests/queries/0_stateless/01866_view_persist_settings.reference b/tests/queries/0_stateless/01866_view_persist_settings.reference index 158c5d0a0a9..529b62a4024 100644 --- a/tests/queries/0_stateless/01866_view_persist_settings.reference +++ b/tests/queries/0_stateless/01866_view_persist_settings.reference @@ -2,17 +2,17 @@ SELECT a, b, c -FROM +FROM ( SELECT * - FROM + FROM ( SELECT number + 1 AS a, number + 11 AS b FROM numbers(2) ) AS t1 - FULL OUTER JOIN + FULL OUTER JOIN ( SELECT number + 2 AS a, From da32f661228ea9bf516ee418b7270f4c4fedfd05 Mon Sep 17 00:00:00 2001 From: kssenii Date: Sun, 23 May 2021 12:09:20 +0000 Subject: [PATCH 140/931] More tests --- src/Core/PostgreSQL/insertPostgreSQLValue.cpp | 2 +- .../PostgreSQLReplicationHandler.cpp | 18 +- .../PostgreSQL/PostgreSQLReplicationHandler.h | 4 - .../test.py | 213 +++++++++++++----- 4 files changed, 161 insertions(+), 76 deletions(-) diff --git a/src/Core/PostgreSQL/insertPostgreSQLValue.cpp b/src/Core/PostgreSQL/insertPostgreSQLValue.cpp index 07f2404fdc3..26138bafb92 100644 --- a/src/Core/PostgreSQL/insertPostgreSQLValue.cpp +++ b/src/Core/PostgreSQL/insertPostgreSQLValue.cpp @@ -114,7 +114,7 @@ void insertPostgreSQLValue( size_t dimension = 0, max_dimension = 0, expected_dimensions = array_info[idx].num_dimensions; const auto parse_value = array_info[idx].pqxx_parser; - std::vector> dimensions(expected_dimensions + 1); + std::vector dimensions(expected_dimensions + 1); while (parsed.first != pqxx::array_parser::juncture::done) { diff --git a/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp b/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp index 5f1a62d8086..ac9ff2edbe1 100644 --- a/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp +++ b/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp @@ -165,21 +165,6 @@ void PostgreSQLReplicationHandler::startSynchronization(bool throw_on_error) } catch (Exception & e) { - if (e.code() == ErrorCodes::UNKNOWN_TABLE) - { - try - { - /// If nested table does not exist, try load it once again. - loadFromSnapshot(snapshot_name, table_name, storage->as ()); - nested_storages[table_name] = materialized_storage->prepare(); - continue; - } - catch (...) - { - e.addMessage("Table load failed for the second time"); - } - } - e.addMessage("while loading table {}.{}", remote_database_name, table_name); tryLogCurrentException(__PRETTY_FUNCTION__); @@ -309,6 +294,9 @@ void PostgreSQLReplicationHandler::createPublicationIfNeeded(pqxx::work & tx, bo } } + if (tables_list.empty()) + throw Exception(ErrorCodes::LOGICAL_ERROR, "No table found to be replicated"); + /// 'ONLY' means just a table, without descendants. 
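    /// For example, with publication_name = "pub" and tables_list = "table1, table2"
    /// the statement built below becomes (names are illustrative):
    ///     CREATE PUBLICATION pub FOR TABLE ONLY table1, table2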
std::string query_str = fmt::format("CREATE PUBLICATION {} FOR TABLE ONLY {}", publication_name, tables_list); try diff --git a/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.h b/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.h index db29a2b4aaa..1f8d25ab32d 100644 --- a/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.h +++ b/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.h @@ -5,10 +5,6 @@ #include -/// TODO: Add test for multiple databases to be replicated. Add test to make sure unneeded tables fo not get into replication slot. -/// Test behavior of publication. - - namespace DB { diff --git a/tests/integration/test_postgresql_replica_database_engine/test.py b/tests/integration/test_postgresql_replica_database_engine/test.py index 3526cac57e7..c98e4ee14d8 100644 --- a/tests/integration/test_postgresql_replica_database_engine/test.py +++ b/tests/integration/test_postgresql_replica_database_engine/test.py @@ -31,9 +31,9 @@ postgres_table_template_3 = """ key1 Integer NOT NULL, value1 Integer, key2 Integer NOT NULL, value2 Integer NOT NULL) """ -def get_postgres_conn(database=False, auto_commit=True): +def get_postgres_conn(database=False, auto_commit=True, database_name='postgres_database'): if database == True: - conn_string = "host='localhost' dbname='postgres_database' user='postgres' password='mysecretpassword'" + conn_string = "host='localhost' dbname='{}' user='postgres' password='mysecretpassword'".format(database_name) else: conn_string = "host='localhost' user='postgres' password='mysecretpassword'" conn = psycopg2.connect(conn_string) @@ -43,9 +43,27 @@ def get_postgres_conn(database=False, auto_commit=True): return conn -def create_postgres_db(cursor, name): +def create_postgres_db(cursor, name='postgres_database'): cursor.execute("CREATE DATABASE {}".format(name)) +def drop_postgres_db(cursor, name='postgres_database'): + cursor.execute("DROP DATABASE IF EXISTS {}".format(name)) + +def create_clickhouse_postgres_db(name='postgres_database'): + instance.query(''' + CREATE DATABASE {} + ENGINE = PostgreSQL('postgres1:5432', '{}', 'postgres', 'mysecretpassword')'''.format(name, name)) + +def drop_clickhouse_postgres_db(name='postgres_database'): + instance.query('DROP DATABASE IF EXISTS {}'.format(name)) + +def create_materialized_db(materialized_database='test_database', postgres_database='postgres_database'): + instance.query("CREATE DATABASE {} ENGINE = MaterializePostgreSQL('postgres1:5432', '{}', 'postgres', 'mysecretpassword')".format(materialized_database, postgres_database)) + assert materialized_database in instance.query('SHOW DATABASES') + +def drop_materialized_db(materialized_database='test_database'): + instance.query('DROP DATABASE IF EXISTS {}'.format(materialized_database)) + assert materialized_database not in instance.query('SHOW DATABASES') def create_postgres_table(cursor, table_name, replica_identity_full=False, template=postgres_table_template): cursor.execute("DROP TABLE IF EXISTS {}".format(table_name)) @@ -53,26 +71,47 @@ def create_postgres_table(cursor, table_name, replica_identity_full=False, templ if replica_identity_full: cursor.execute('ALTER TABLE {} REPLICA IDENTITY FULL;'.format(table_name)) +queries = [ + 'INSERT INTO postgresql_replica_{} select i, i from generate_series(0, 10000) as t(i);', + 'DELETE FROM postgresql_replica_{} WHERE (value*value) % 3 = 0;', + 'UPDATE postgresql_replica_{} SET value = value - 125 WHERE key % 2 = 0;', + "UPDATE postgresql_replica_{} SET key=key+20000 WHERE key%2=0", + 'INSERT INTO 
postgresql_replica_{} select i, i from generate_series(40000, 50000) as t(i);', + 'DELETE FROM postgresql_replica_{} WHERE key % 10 = 0;', + 'UPDATE postgresql_replica_{} SET value = value + 101 WHERE key % 2 = 1;', + "UPDATE postgresql_replica_{} SET key=key+80000 WHERE key%2=1", + 'DELETE FROM postgresql_replica_{} WHERE value % 2 = 0;', + 'UPDATE postgresql_replica_{} SET value = value + 2000 WHERE key % 5 = 0;', + 'INSERT INTO postgresql_replica_{} select i, i from generate_series(200000, 250000) as t(i);', + 'DELETE FROM postgresql_replica_{} WHERE value % 3 = 0;', + 'UPDATE postgresql_replica_{} SET value = value * 2 WHERE key % 3 = 0;', + "UPDATE postgresql_replica_{} SET key=key+500000 WHERE key%2=1", + 'INSERT INTO postgresql_replica_{} select i, i from generate_series(1000000, 1050000) as t(i);', + 'DELETE FROM postgresql_replica_{} WHERE value % 9 = 2;', + "UPDATE postgresql_replica_{} SET key=key+10000000", + 'UPDATE postgresql_replica_{} SET value = value + 2 WHERE key % 3 = 1;', + 'DELETE FROM postgresql_replica_{} WHERE value%5 = 0;'] + @pytest.mark.timeout(30) -def assert_nested_table_is_created(table_name): - database_tables = instance.query('SHOW TABLES FROM test_database') +def assert_nested_table_is_created(table_name, materialized_database='test_database'): + database_tables = instance.query('SHOW TABLES FROM {}'.format(materialized_database)) while table_name not in database_tables: time.sleep(0.2) - database_tables = instance.query('SHOW TABLES FROM test_database') + database_tables = instance.query('SHOW TABLES FROM {}'.format(materialized_database)) assert(table_name in database_tables) @pytest.mark.timeout(30) -def check_tables_are_synchronized(table_name, order_by='key'): - assert_nested_table_is_created(table_name) +def check_tables_are_synchronized(table_name, order_by='key', postgres_database='postgres_database', materialized_database='test_database'): + assert_nested_table_is_created(table_name, materialized_database) - expected = instance.query('select * from postgres_database.{} order by {};'.format(table_name, order_by)) - result = instance.query('select * from test_database.{} order by {};'.format(table_name, order_by)) + expected = instance.query('select * from {}.{} order by {};'.format(postgres_database, table_name, order_by)) + result = instance.query('select * from {}.{} order by {};'.format(materialized_database, table_name, order_by)) while result != expected: time.sleep(0.5) - result = instance.query('select * from test_database.{} order by {};'.format(table_name, order_by)) + result = instance.query('select * from {}.{} order by {};'.format(materialized_database, table_name, order_by)) assert(result == expected) @@ -84,10 +123,9 @@ def started_cluster(): conn = get_postgres_conn() cursor = conn.cursor() create_postgres_db(cursor, 'postgres_database') + create_clickhouse_postgres_db() + instance.query("DROP DATABASE IF EXISTS test_database") - instance.query(''' - CREATE DATABASE postgres_database - ENGINE = PostgreSQL('postgres1:5432', 'postgres_database', 'postgres', 'mysecretpassword')''') yield cluster finally: @@ -432,7 +470,7 @@ def test_table_schema_changes(started_cluster): @pytest.mark.timeout(120) -def test_random_queries(started_cluster): +def test_many_concurrent_queries(started_cluster): instance.query("DROP DATABASE IF EXISTS test_database") conn = get_postgres_conn(True) cursor = conn.cursor() @@ -443,7 +481,7 @@ def test_random_queries(started_cluster): instance.query('INSERT INTO postgres_database.postgresql_replica_{} SELECT 
number, number from numbers(10000)'.format(i)) n = [10000] - query = ['DELETE FROM postgresql_replica_{} WHERE (value*value) % 3 = 0;', + query_pool = ['DELETE FROM postgresql_replica_{} WHERE (value*value) % 3 = 0;', 'UPDATE postgresql_replica_{} SET value = value - 125 WHERE key % 2 = 0;', 'DELETE FROM postgresql_replica_{} WHERE key % 10 = 0;', 'UPDATE postgresql_replica_{} SET value = value*5 WHERE key % 2 = 1;', @@ -458,12 +496,12 @@ def test_random_queries(started_cluster): def attack(thread_id): print('thread {}'.format(thread_id)) k = 10000 - for i in range(10): - query_id = random.randrange(0, len(query)-1) + for i in range(20): + query_id = random.randrange(0, len(query_pool)-1) table_id = random.randrange(0, 5) # num tables # random update / delete query - cursor.execute(query[query_id].format(table_id)) + cursor.execute(query_pool[query_id].format(table_id)) print("table {} query {} ok".format(table_id, query_id)) # allow some thread to do inserts (not to violate key constraints) @@ -481,16 +519,15 @@ def test_random_queries(started_cluster): threads = [] threads_num = 16 - for i in range(threads_num): threads.append(threading.Thread(target=attack, args=(i,))) + + create_materialized_db() + for thread in threads: time.sleep(random.uniform(0, 1)) thread.start() - instance.query( - "CREATE DATABASE test_database ENGINE = MaterializePostgreSQL('postgres1:5432', 'postgres_database', 'postgres', 'mysecretpassword')") - n[0] = 50000 for table_id in range(NUM_TABLES): n[0] += 1 @@ -502,14 +539,11 @@ def test_random_queries(started_cluster): for i in range(NUM_TABLES): check_tables_are_synchronized('postgresql_replica_{}'.format(i)); - count = instance.query('SELECT count() FROM test_database.postgresql_replica_{}'.format(i)) - print(count) - - for i in range(NUM_TABLES): - cursor.execute('drop table postgresql_replica_{};'.format(i)) - - instance.query("DROP DATABASE test_database") - assert 'test_database' not in instance.query('SHOW DATABASES') + count1 = instance.query('SELECT count() FROM postgres_database.postgresql_replica_{}'.format(i)) + count2 = instance.query('SELECT count() FROM (SELECT * FROM test_database.postgresql_replica_{})'.format(i)) + assert(int(count1) == int(count2)) + print(count1, count2) + drop_materialized_db() @pytest.mark.timeout(120) @@ -520,30 +554,9 @@ def test_single_transaction(started_cluster): create_postgres_table(cursor, 'postgresql_replica_0'); conn.commit() - instance.query( - "CREATE DATABASE test_database ENGINE = MaterializePostgreSQL('postgres1:5432', 'postgres_database', 'postgres', 'mysecretpassword')") - assert_nested_table_is_created('postgresql_replica_0') - queries = [ - 'INSERT INTO postgresql_replica_{} select i, i from generate_series(0, 10000) as t(i);', - 'DELETE FROM postgresql_replica_{} WHERE (value*value) % 3 = 0;', - 'UPDATE postgresql_replica_{} SET value = value - 125 WHERE key % 2 = 0;', - "UPDATE postgresql_replica_{} SET key=key+20000 WHERE key%2=0", - 'INSERT INTO postgresql_replica_{} select i, i from generate_series(40000, 50000) as t(i);', - 'DELETE FROM postgresql_replica_{} WHERE key % 10 = 0;', - 'UPDATE postgresql_replica_{} SET value = value + 101 WHERE key % 2 = 1;', - "UPDATE postgresql_replica_{} SET key=key+80000 WHERE key%2=1", - 'DELETE FROM postgresql_replica_{} WHERE value % 2 = 0;', - 'UPDATE postgresql_replica_{} SET value = value + 2000 WHERE key % 5 = 0;', - 'INSERT INTO postgresql_replica_{} select i, i from generate_series(200000, 250000) as t(i);', - 'DELETE FROM postgresql_replica_{} WHERE value % 
3 = 0;', - 'UPDATE postgresql_replica_{} SET value = value * 2 WHERE key % 3 = 0;', - "UPDATE postgresql_replica_{} SET key=key+500000 WHERE key%2=1", - 'INSERT INTO postgresql_replica_{} select i, i from generate_series(1000000, 1050000) as t(i);', - 'DELETE FROM postgresql_replica_{} WHERE value % 9 = 2;', - "UPDATE postgresql_replica_{} SET key=key+10000000", - 'UPDATE postgresql_replica_{} SET value = value + 2 WHERE key % 3 = 1;', - 'DELETE FROM postgresql_replica_{} WHERE value%5 = 0;'] + create_materialized_db() + assert_nested_table_is_created('postgresql_replica_0') for query in queries: print('query {}'.format(query)) @@ -556,7 +569,7 @@ def test_single_transaction(started_cluster): conn.commit() check_tables_are_synchronized('postgresql_replica_0'); - instance.query("DROP DATABASE test_database") + drop_materialized_db() def test_virtual_columns(started_cluster): @@ -588,8 +601,96 @@ def test_virtual_columns(started_cluster): result = instance.query('SELECT key, value, value2, _sign, _version FROM test_database.postgresql_replica_0;') print(result) + drop_materialized_db() - instance.query("DROP DATABASE test_database") + +def test_multiple_databases(started_cluster): + instance.query("DROP DATABASE IF EXISTS test_database_1") + instance.query("DROP DATABASE IF EXISTS test_database_2") + NUM_TABLES = 5 + + conn = get_postgres_conn() + cursor = conn.cursor() + create_postgres_db(cursor, 'postgres_database_1') + create_postgres_db(cursor, 'postgres_database_2') + + conn1 = get_postgres_conn(True, True, 'postgres_database_1') + conn2 = get_postgres_conn(True, True, 'postgres_database_2') + + cursor1 = conn1.cursor() + cursor2 = conn2.cursor() + + create_clickhouse_postgres_db('postgres_database_1') + create_clickhouse_postgres_db('postgres_database_2') + + cursors = [cursor1, cursor2] + for cursor_id in range(len(cursors)): + for i in range(NUM_TABLES): + table_name = 'postgresql_replica_{}'.format(i) + create_postgres_table(cursors[cursor_id], table_name); + instance.query("INSERT INTO postgres_database_{}.{} SELECT number, number from numbers(50)".format(cursor_id + 1, table_name)) + print('database 1 tables: ', instance.query('''SELECT name FROM system.tables WHERE database = 'postgres_database_1';''')) + print('database 2 tables: ', instance.query('''SELECT name FROM system.tables WHERE database = 'postgres_database_2';''')) + + create_materialized_db('test_database_1', 'postgres_database_1') + create_materialized_db('test_database_2', 'postgres_database_2') + + cursors = [cursor1, cursor2] + for cursor_id in range(len(cursors)): + for i in range(NUM_TABLES): + table_name = 'postgresql_replica_{}'.format(i) + instance.query("INSERT INTO postgres_database_{}.{} SELECT 50 + number, number from numbers(50)".format(cursor_id + 1, table_name)) + + for cursor_id in range(len(cursors)): + for i in range(NUM_TABLES): + table_name = 'postgresql_replica_{}'.format(i) + check_tables_are_synchronized( + table_name, 'key', 'postgres_database_{}'.format(cursor_id + 1), 'test_database_{}'.format(cursor_id + 1)); + + drop_clickhouse_postgres_db('postgres_database_1') + drop_clickhouse_postgres_db('postgres_database_2') + drop_materialized_db('test_database_1') + drop_materialized_db('test_database_2') + + +@pytest.mark.timeout(320) +def test_concurrent_transactions(started_cluster): + instance.query("DROP DATABASE IF EXISTS test_database") + conn = get_postgres_conn(True) + cursor = conn.cursor() + NUM_TABLES = 6 + + for i in range(NUM_TABLES): + create_postgres_table(cursor, 
'postgresql_replica_{}'.format(i));
+
+    def transaction(thread_id):
+        conn_ = get_postgres_conn(True, auto_commit=False)
+        cursor_ = conn_.cursor()
+        for query in queries:
+            cursor_.execute(query.format(thread_id))
+            print('thread {}, query {}'.format(thread_id, query))
+        conn_.commit()
+
+    threads = []
+    threads_num = 6
+    for i in range(threads_num):
+        threads.append(threading.Thread(target=transaction, args=(i,)))
+
+    create_materialized_db()
+
+    for thread in threads:
+        time.sleep(random.uniform(0, 0.5))
+        thread.start()
+    for thread in threads:
+        thread.join()
+
+    for i in range(NUM_TABLES):
+        check_tables_are_synchronized('postgresql_replica_{}'.format(i));
+        count1 = instance.query('SELECT count() FROM postgres_database.postgresql_replica_{}'.format(i))
+        count2 = instance.query('SELECT count() FROM (SELECT * FROM test_database.postgresql_replica_{})'.format(i))
+        print(int(count1), int(count2), sep=' ')
+        assert(int(count1) == int(count2))
+    drop_materialized_db()
 
 
 if __name__ == '__main__':

From 4c3882f2f9a4374648638880b7ea94795eab1c02 Mon Sep 17 00:00:00 2001
From: Nicolae Vartolomei
Date: Fri, 21 May 2021 15:51:24 +0100
Subject: [PATCH 141/931] Config in-place include

---
 src/Common/Config/ConfigProcessor.cpp | 57 ++++++++++++-------
 .../configs/config_zk.xml             |  3 +
 .../test_config_substitutions/test.py |  7 +++
 3 files changed, 46 insertions(+), 21 deletions(-)

diff --git a/src/Common/Config/ConfigProcessor.cpp b/src/Common/Config/ConfigProcessor.cpp
index fa9e9b72087..851d8d9d250 100644
--- a/src/Common/Config/ConfigProcessor.cpp
+++ b/src/Common/Config/ConfigProcessor.cpp
@@ -322,33 +322,48 @@ void ConfigProcessor::doIncludesRecursive(
         }
         else
         {
-            Element & element = dynamic_cast(*node);
-
-            for (const auto & attr_name : SUBSTITUTION_ATTRS)
-                element.removeAttribute(attr_name);
-
-            if (replace)
+            /// Replace the whole node, not just its contents.
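+            /// For an <include> element the children of the included node are spliced
+            /// into the parent in place of the <include> node itself, e.g. (illustrative)
+            ///     <users><include from_zk="/users-in-zookeeper" /></users>
+            /// ends up with <users> containing the referenced subtree directly.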
+ if (node->nodeName() == "include") { - while (Node * child = node->firstChild()) - node->removeChild(child); + const NodeListPtr children = node_to_include->childNodes(); + for (size_t i = 0, size = children->length(); i < size; ++i) + { + NodePtr new_node = config->importNode(children->item(i), true); + node->parentNode()->insertBefore(new_node, node); + } - element.removeAttribute("replace"); + node->parentNode()->removeChild(node); } - - const NodeListPtr children = node_to_include->childNodes(); - for (size_t i = 0, size = children->length(); i < size; ++i) + else { - NodePtr new_node = config->importNode(children->item(i), true); - node->appendChild(new_node); - } + Element & element = dynamic_cast(*node); - const NamedNodeMapPtr from_attrs = node_to_include->attributes(); - for (size_t i = 0, size = from_attrs->length(); i < size; ++i) - { - element.setAttributeNode(dynamic_cast(config->importNode(from_attrs->item(i), true))); - } + for (const auto & attr_name : SUBSTITUTION_ATTRS) + element.removeAttribute(attr_name); - included_something = true; + if (replace) + { + while (Node * child = node->firstChild()) + node->removeChild(child); + + element.removeAttribute("replace"); + } + + const NodeListPtr children = node_to_include->childNodes(); + for (size_t i = 0, size = children->length(); i < size; ++i) + { + NodePtr new_node = config->importNode(children->item(i), true); + node->appendChild(new_node); + } + + const NamedNodeMapPtr from_attrs = node_to_include->attributes(); + for (size_t i = 0, size = from_attrs->length(); i < size; ++i) + { + element.setAttributeNode(dynamic_cast(config->importNode(from_attrs->item(i), true))); + } + + included_something = true; + } } }; diff --git a/tests/integration/test_config_substitutions/configs/config_zk.xml b/tests/integration/test_config_substitutions/configs/config_zk.xml index aa589e9f9d3..9fad5658445 100644 --- a/tests/integration/test_config_substitutions/configs/config_zk.xml +++ b/tests/integration/test_config_substitutions/configs/config_zk.xml @@ -10,5 +10,8 @@ default default + + + diff --git a/tests/integration/test_config_substitutions/test.py b/tests/integration/test_config_substitutions/test.py index 565cd1c0e97..47154efec36 100644 --- a/tests/integration/test_config_substitutions/test.py +++ b/tests/integration/test_config_substitutions/test.py @@ -20,6 +20,8 @@ def start_cluster(): try: def create_zk_roots(zk): zk.create(path="/setting/max_query_size", value=b"77777", makepath=True) + zk.create(path="/users_from_zk_1", value=b"default", makepath=True) + zk.create(path="/users_from_zk_2", value=b"default", makepath=True) cluster.add_zookeeper_startup_command(create_zk_roots) @@ -37,6 +39,11 @@ def test_config(start_cluster): assert node6.query("select value from system.settings where name = 'max_query_size'") == "99999\n" +def test_include_config(start_cluster): + assert node3.query("select 1", user="user_1") + assert node3.query("select 1", user="user_2") + + def test_allow_databases(start_cluster): node5.query("CREATE DATABASE db1") node5.query( From 700850a9705d814e3799d87d7ce6978b169eb0c9 Mon Sep 17 00:00:00 2001 From: Nicolae Vartolomei Date: Mon, 24 May 2021 13:34:39 +0100 Subject: [PATCH 142/931] Add more tests and handle correctly missing include --- src/Common/Config/ConfigProcessor.cpp | 5 +++++ .../configs/config_env.xml | 3 +++ .../configs/config_incl.xml | 5 ++++- .../configs/config_include_from_env.xml | 2 ++ .../configs/include_from_source.xml | 17 +++++++++++++++++ .../configs/max_query_size.xml | 3 --- 
.../test_config_substitutions/test.py | 13 ++++++++++--- 7 files changed, 41 insertions(+), 7 deletions(-) create mode 100644 tests/integration/test_config_substitutions/configs/include_from_source.xml delete mode 100644 tests/integration/test_config_substitutions/configs/max_query_size.xml diff --git a/src/Common/Config/ConfigProcessor.cpp b/src/Common/Config/ConfigProcessor.cpp index 851d8d9d250..5af13d8a9df 100644 --- a/src/Common/Config/ConfigProcessor.cpp +++ b/src/Common/Config/ConfigProcessor.cpp @@ -318,7 +318,12 @@ void ConfigProcessor::doIncludesRecursive( else if (throw_on_bad_incl) throw Poco::Exception(error_msg + name); else + { + if (node->nodeName() == "include") + node->parentNode()->removeChild(node); + LOG_WARNING(log, "{}{}", error_msg, name); + } } else { diff --git a/tests/integration/test_config_substitutions/configs/config_env.xml b/tests/integration/test_config_substitutions/configs/config_env.xml index 712855c47c0..2d63b9c688d 100644 --- a/tests/integration/test_config_substitutions/configs/config_env.xml +++ b/tests/integration/test_config_substitutions/configs/config_env.xml @@ -10,5 +10,8 @@ default default + + + diff --git a/tests/integration/test_config_substitutions/configs/config_incl.xml b/tests/integration/test_config_substitutions/configs/config_incl.xml index 383a23af1ff..43ec78ff8ef 100644 --- a/tests/integration/test_config_substitutions/configs/config_incl.xml +++ b/tests/integration/test_config_substitutions/configs/config_incl.xml @@ -1,5 +1,5 @@ - /etc/clickhouse-server/config.d/max_query_size.xml + /etc/clickhouse-server/config.d/include_from_source.xml @@ -11,5 +11,8 @@ default default + + + diff --git a/tests/integration/test_config_substitutions/configs/config_include_from_env.xml b/tests/integration/test_config_substitutions/configs/config_include_from_env.xml index 71e11235749..79b650f3d9e 100644 --- a/tests/integration/test_config_substitutions/configs/config_include_from_env.xml +++ b/tests/integration/test_config_substitutions/configs/config_include_from_env.xml @@ -11,5 +11,7 @@ default default + + diff --git a/tests/integration/test_config_substitutions/configs/include_from_source.xml b/tests/integration/test_config_substitutions/configs/include_from_source.xml new file mode 100644 index 00000000000..6095180bb59 --- /dev/null +++ b/tests/integration/test_config_substitutions/configs/include_from_source.xml @@ -0,0 +1,17 @@ + + 99999 + + + + + default + + + + + + + default + + + diff --git a/tests/integration/test_config_substitutions/configs/max_query_size.xml b/tests/integration/test_config_substitutions/configs/max_query_size.xml deleted file mode 100644 index 9ec61368be9..00000000000 --- a/tests/integration/test_config_substitutions/configs/max_query_size.xml +++ /dev/null @@ -1,3 +0,0 @@ - - 99999 - diff --git a/tests/integration/test_config_substitutions/test.py b/tests/integration/test_config_substitutions/test.py index 47154efec36..aec3f1d3635 100644 --- a/tests/integration/test_config_substitutions/test.py +++ b/tests/integration/test_config_substitutions/test.py @@ -8,11 +8,11 @@ node2 = cluster.add_instance('node2', user_configs=['configs/config_env.xml'], env_variables={"MAX_QUERY_SIZE": "55555"}) node3 = cluster.add_instance('node3', user_configs=['configs/config_zk.xml'], with_zookeeper=True) node4 = cluster.add_instance('node4', user_configs=['configs/config_incl.xml'], - main_configs=['configs/max_query_size.xml']) # include value 77777 + main_configs=['configs/include_from_source.xml']) # include value 77777 node5 = 
cluster.add_instance('node5', user_configs=['configs/config_allow_databases.xml'])
 node6 = cluster.add_instance('node6', user_configs=['configs/config_include_from_env.xml'],
-                             env_variables={"INCLUDE_FROM_ENV": "/etc/clickhouse-server/config.d/max_query_size.xml"},
-                             main_configs=['configs/max_query_size.xml'])
+                             env_variables={"INCLUDE_FROM_ENV": "/etc/clickhouse-server/config.d/include_from_source.xml"},
+                             main_configs=['configs/include_from_source.xml'])
@@ -40,6 +40,13 @@ def test_config(start_cluster):
 
 
 def test_include_config(start_cluster):
+    #
+    assert node4.query("select 1")
+    assert node4.query("select 1", user="user_1")
+    assert node4.query("select 1", user="user_2")
+
+    #

Date: Mon, 24 May 2021 13:53:02 +0100
Subject: [PATCH 143/931] Document new include element

---
 docs/en/operations/configuration-files.md | 19 +++++++++++++++++++
 1 file changed, 19 insertions(+)

diff --git a/docs/en/operations/configuration-files.md b/docs/en/operations/configuration-files.md
index a52d82f21d0..c5114ae19bd 100644
--- a/docs/en/operations/configuration-files.md
+++ b/docs/en/operations/configuration-files.md
@@ -22,6 +22,23 @@ Some settings specified in the main configuration file can be overridden in othe
 
 The config can also define “substitutions”. If an element has the `incl` attribute, the corresponding substitution from the file will be used as the value. By default, the path to the file with substitutions is `/etc/metrika.xml`. This can be changed in the [include_from](../operations/server-configuration-parameters/settings.md#server_configuration_parameters-include_from) element in the server config. The substitution values are specified in `/yandex/substitution_name` elements in this file. If a substitution specified in `incl` does not exist, it is recorded in the log. To prevent ClickHouse from logging missing substitutions, specify the `optional="true"` attribute (for example, settings for [macros](../operations/server-configuration-parameters/settings.md)).
 
+If you want to replace an entire element with a substitution, use `include` as the element name.
+
+XML substitution example:
+
+```xml
+<yandex>
+    <!-- Appends XML subtree found at `/profiles-in-zookeeper` ZK path to `<profiles>` element. -->
+    <profiles from_zk="/profiles-in-zookeeper" />
+
+    <users>
+        <!-- Replaces `include` element with the subtree found at `/users-in-zookeeper` ZK path. -->
+        <include from_zk="/users-in-zookeeper" />
+        <include from_zk="/other-users-in-zookeeper" />
+    </users>
+</yandex>
+```
+
 Substitutions can also be performed from ZooKeeper. To do this, specify the attribute `from_zk = "/path/to/node"`. The element value is replaced with the contents of the node at `/path/to/node` in ZooKeeper. You can also put an entire XML subtree on the ZooKeeper node and it will be fully inserted into the source element.
 
 ## User Settings {#user-settings}
@@ -32,6 +49,8 @@ Users configuration can be splitted into separate files similar to `config.xml`
 
 Directory name is defined as `users_config` setting without `.xml` postfix concatenated with `.d`. Directory `users.d` is used by default, as `users_config` defaults to `users.xml`.
 
+Note that configuration files are first merged taking into account [Override](#override) settings, and includes are processed after that.
+
 ## XML example {#example}
 
 For example, you can have separate config file for each user like this:

From 399e998ecb535ace36c253a31f9e3240235fdb9b Mon Sep 17 00:00:00 2001
From: Nicolae Vartolomei
Date: Mon, 24 May 2021 14:07:21 +0100
Subject: [PATCH 144/931] Count non-empty substitutions, easier to interpret

---
 src/Common/Config/ConfigProcessor.cpp | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/src/Common/Config/ConfigProcessor.cpp b/src/Common/Config/ConfigProcessor.cpp
index 5af13d8a9df..44f5ffc5536 100644
--- a/src/Common/Config/ConfigProcessor.cpp
+++ b/src/Common/Config/ConfigProcessor.cpp
@@ -296,10 +296,10 @@ void ConfigProcessor::doIncludesRecursive(
         {
             const auto * subst = attributes->getNamedItem(attr_name);
             attr_nodes[attr_name] = subst;
-            substs_count += static_cast(subst == nullptr);
+            substs_count += static_cast(subst != nullptr);
         }
 
-        if (substs_count < SUBSTITUTION_ATTRS.size() - 1) /// only one substitution is allowed
+        if (substs_count > 1) /// only one substitution is allowed
             throw Poco::Exception("several substitutions attributes set for element <" + node->nodeName() + ">");
 
         /// Replace the original contents, not add to it.

From 027fc70acef44a41ca9ec565f499246f04d3dbe5 Mon Sep 17 00:00:00 2001
From: Nicolae Vartolomei
Date: Mon, 24 May 2021 14:07:38 +0100
Subject: [PATCH 145/931] Validate that include element is used properly

---
 src/Common/Config/ConfigProcessor.cpp | 8 ++++++++
 1 file changed, 8 insertions(+)

diff --git a/src/Common/Config/ConfigProcessor.cpp b/src/Common/Config/ConfigProcessor.cpp
index 44f5ffc5536..2b6e3287461 100644
--- a/src/Common/Config/ConfigProcessor.cpp
+++ b/src/Common/Config/ConfigProcessor.cpp
@@ -302,6 +302,14 @@ void ConfigProcessor::doIncludesRecursive(
         if (substs_count > 1) /// only one substitution is allowed
             throw Poco::Exception("several substitutions attributes set for element <" + node->nodeName() + ">");
 
+        if (node->nodeName() == "include")
+        {
+            if (node->hasChildNodes())
+                throw Poco::Exception("<include> element must have no children");
+            if (substs_count == 0)
+                throw Poco::Exception("no substitution attributes set for element <include>, must have one");
+        }
+
         /// Replace the original contents, not add to it.
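        /// When the element also carries the "replace" attribute, its existing children
        /// are dropped before the included nodes are appended (see the code below), so
        /// the substituted content fully overwrites the element instead of merging into it.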
bool replace = attributes->getNamedItem("replace"); From 067ec0855cfeb16392e5bef121328abb5b6e7957 Mon Sep 17 00:00:00 2001 From: elevankoff Date: Mon, 24 May 2021 17:16:15 +0000 Subject: [PATCH 146/931] Decorative fixes --- src/Common/DiskStatisticsOS.cpp | 28 ++--- src/Common/DiskStatisticsOS.h | 12 ++- src/Common/MemoryInfoOS.cpp | 46 ++++---- src/Common/MemoryInfoOS.h | 7 +- src/Common/ProcessorStatisticsOS.cpp | 152 ++++++++++++--------------- src/Common/ProcessorStatisticsOS.h | 21 ++-- 6 files changed, 127 insertions(+), 139 deletions(-) diff --git a/src/Common/DiskStatisticsOS.cpp b/src/Common/DiskStatisticsOS.cpp index 3654f843c3a..0485d129ecc 100644 --- a/src/Common/DiskStatisticsOS.cpp +++ b/src/Common/DiskStatisticsOS.cpp @@ -9,15 +9,15 @@ #include #include -namespace DB +namespace DB { -namespace ErrorCodes +namespace ErrorCodes { extern const int CANNOT_STATVFS; } -namespace +namespace { void readStringUntilWhitespaceAndSkipWhitespaceIfAny(String & s, ReadBuffer & buf) { @@ -34,35 +34,35 @@ DiskStatisticsOS::DiskStatisticsOS() {} DiskStatisticsOS::~DiskStatisticsOS() {} -DiskStatisticsOS::Data DiskStatisticsOS::get() +DiskStatisticsOS::Data DiskStatisticsOS::get() { ReadBufferFromFile mounts_in(mounts_filename, READ_BUFFER_BUF_SIZE, O_RDONLY | O_CLOEXEC); DiskStatisticsOS::Data data = {0, 0}; - while (!mounts_in.eof()) + while (!mounts_in.eof()) { String filesystem = readNextFilesystem(mounts_in); - + struct statvfs stat; - if (statvfs(filesystem.c_str(), &stat)) + if (statvfs(filesystem.c_str(), &stat)) throwFromErrno("Cannot statvfs", ErrorCodes::CANNOT_STATVFS); - + uint64_t total_blocks = static_cast(stat.f_blocks); - uint64_t free_blocks = static_cast(stat.f_bfree); - uint64_t used_blocks = total_blocks - free_blocks; - uint64_t block_size = static_cast(stat.f_bsize); + uint64_t free_blocks = static_cast(stat.f_bfree); + uint64_t used_blocks = total_blocks - free_blocks; + uint64_t block_size = static_cast(stat.f_bsize); data.total += total_blocks * block_size; - data.used += used_blocks * block_size; + data.used += used_blocks * block_size; } return data; } -String DiskStatisticsOS::readNextFilesystem(ReadBuffer& mounts_in) -{ +String DiskStatisticsOS::readNextFilesystem(ReadBuffer& mounts_in) +{ String filesystem, unused; readStringUntilWhitespaceAndSkipWhitespaceIfAny(unused, mounts_in); diff --git a/src/Common/DiskStatisticsOS.h b/src/Common/DiskStatisticsOS.h index d14cf273ccd..05f53a421d2 100644 --- a/src/Common/DiskStatisticsOS.h +++ b/src/Common/DiskStatisticsOS.h @@ -1,3 +1,4 @@ +#pragma once #if defined (OS_LINUX) #include @@ -6,17 +7,18 @@ #include -namespace DB +namespace DB { -/** Opens file /proc/mounts, reads all mounted filesytems and +/** Opens file /proc/mounts, reads all mounted filesystems and * calculates disk usage. 
- */ -class DiskStatisticsOS + */ +class DiskStatisticsOS { public: // In bytes - struct Data { + struct Data + { uint64_t total; uint64_t used; }; diff --git a/src/Common/MemoryInfoOS.cpp b/src/Common/MemoryInfoOS.cpp index b8641809ae9..17036d115e8 100644 --- a/src/Common/MemoryInfoOS.cpp +++ b/src/Common/MemoryInfoOS.cpp @@ -11,23 +11,23 @@ #include #include -namespace DB +namespace DB { -namespace +namespace { - template - void readIntTextAndSkipWhitespaceIfAny(T & x, ReadBuffer & buf) - { - readIntText(x, buf); - skipWhitespaceIfAny(buf); - } +template +void readIntTextAndSkipWhitespaceIfAny(T & x, ReadBuffer & buf) +{ + readIntText(x, buf); + skipWhitespaceIfAny(buf); +} - void readStringUntilWhitespaceAndSkipWhitespaceIfAny(String & s, ReadBuffer & buf) - { - readStringUntilWhitespace(s, buf); - skipWhitespaceIfAny(buf); - } +void readStringUntilWhitespaceAndSkipWhitespaceIfAny(String & s, ReadBuffer & buf) +{ + readStringUntilWhitespace(s, buf); + skipWhitespaceIfAny(buf); +} } static constexpr auto meminfo_filename = "/proc/meminfo"; @@ -38,10 +38,10 @@ MemoryInfoOS::MemoryInfoOS() {} MemoryInfoOS::~MemoryInfoOS() {} -MemoryInfoOS::Data MemoryInfoOS::get() +MemoryInfoOS::Data MemoryInfoOS::get() { ReadBufferFromFile meminfo_in(meminfo_filename, READ_BUFFER_BUF_SIZE, O_RDONLY | O_CLOEXEC); - + MemoryInfoOS::Data data; String field_name; @@ -49,14 +49,14 @@ MemoryInfoOS::Data MemoryInfoOS::get() while (!meminfo_in.eof()) meminfo.insert(readField(meminfo_in)); - - data.total = meminfo["MemTotal"]; - data.free = meminfo["MemFree"]; - data.buffers = meminfo["Buffers"]; - data.cached = meminfo["Cached"]; - data.swap_total = meminfo["SwapTotal"]; + + data.total = meminfo["MemTotal"]; + data.free = meminfo["MemFree"]; + data.buffers = meminfo["Buffers"]; + data.cached = meminfo["Cached"]; + data.swap_total = meminfo["SwapTotal"]; data.swap_cached = meminfo["SwapCached"]; - data.swap_free = meminfo["SwapFree"]; + data.swap_free = meminfo["SwapFree"]; data.free_and_cached = data.free + data.cached; @@ -67,7 +67,7 @@ std::pair MemoryInfoOS::readField(ReadBuffer& meminfo_in) { String key; uint64_t val; - + readStringUntilWhitespaceAndSkipWhitespaceIfAny(key, meminfo_in); readIntTextAndSkipWhitespaceIfAny(val, meminfo_in); skipToNextLineOrEOF(meminfo_in); diff --git a/src/Common/MemoryInfoOS.h b/src/Common/MemoryInfoOS.h index a868d4bc23d..ae630e4ee70 100644 --- a/src/Common/MemoryInfoOS.h +++ b/src/Common/MemoryInfoOS.h @@ -9,18 +9,19 @@ #include -namespace DB +namespace DB { /** Opens file /proc/meminfo and reads statistics about memory usage. * This is Linux specific. 
* See: man procfs */ -class MemoryInfoOS +class MemoryInfoOS { public: // In kB - struct Data { + struct Data + { uint64_t total; uint64_t free; uint64_t buffers; diff --git a/src/Common/ProcessorStatisticsOS.cpp b/src/Common/ProcessorStatisticsOS.cpp index 78353cfeeab..0deea56e7fc 100644 --- a/src/Common/ProcessorStatisticsOS.cpp +++ b/src/Common/ProcessorStatisticsOS.cpp @@ -21,46 +21,38 @@ namespace DB { -namespace ErrorCodes +namespace { - extern const int FILE_DOESNT_EXIST; - extern const int CANNOT_OPEN_FILE; - extern const int CANNOT_READ_FROM_FILE_DESCRIPTOR; - extern const int CANNOT_CLOSE_FILE; +template +void readIntTextAndSkipWhitespaceIfAny(T & x, ReadBuffer & buf) +{ + readIntText(x, buf); + skipWhitespaceIfAny(buf); } -namespace +void readStringAndSkipWhitespaceIfAny(String & s, ReadBuffer & buf) { - template - void readIntTextAndSkipWhitespaceIfAny(T & x, ReadBuffer & buf) - { - readIntText(x, buf); - skipWhitespaceIfAny(buf); - } + readString(s, buf); + skipWhitespaceIfAny(buf); +} - void readStringAndSkipWhitespaceIfAny(String & s, ReadBuffer & buf) - { - readString(s, buf); - skipWhitespaceIfAny(buf); - } +void readStringUntilWhitespaceAndSkipWhitespaceIfAny(String & s, ReadBuffer & buf) +{ + readStringUntilWhitespace(s, buf); + skipWhitespaceIfAny(buf); +} - void readStringUntilWhitespaceAndSkipWhitespaceIfAny(String & s, ReadBuffer & buf) - { - readStringUntilWhitespace(s, buf); - skipWhitespaceIfAny(buf); - } +void readCharAndSkipWhitespaceIfAny(char & c, ReadBuffer & buf) +{ + readChar(c, buf); + skipWhitespaceIfAny(buf); +} - void readCharAndSkipWhitespaceIfAny(char & c, ReadBuffer & buf) - { - readChar(c, buf); - skipWhitespaceIfAny(buf); - } - - void readFloatAndSkipWhitespaceIfAny(float & f, ReadBuffer & buf) - { - readFloatText(f, buf); - skipWhitespaceIfAny(buf); - } +void readFloatAndSkipWhitespaceIfAny(float & f, ReadBuffer & buf) +{ + readFloatText(f, buf); + skipWhitespaceIfAny(buf); +} } static constexpr auto loadavg_filename = "/proc/loadavg"; @@ -84,20 +76,20 @@ ProcessorStatisticsOS::Data ProcessorStatisticsOS::ProcessorStatisticsOS::get() Data data; readLoadavg(data.loadavg); calcStLoad(data.stload); - readFreq(data.freq); + readFreq(data.freq); return data; } void ProcessorStatisticsOS::readLoadavg(ProcLoadavg& loadavg) { ReadBufferFromFile loadavg_in(loadavg_filename, READ_BUFFER_BUF_SIZE, O_RDONLY | O_CLOEXEC); - + readFloatAndSkipWhitespaceIfAny(loadavg.avg1, loadavg_in); readFloatAndSkipWhitespaceIfAny(loadavg.avg5, loadavg_in); readFloatAndSkipWhitespaceIfAny(loadavg.avg15, loadavg_in); } -void ProcessorStatisticsOS::calcStLoad(ProcStLoad & stload) +void ProcessorStatisticsOS::calcStLoad(ProcStLoad & stload) { ProcTime cur_proc_time; readProcTimeAndProcesses(cur_proc_time, stload); @@ -105,23 +97,15 @@ void ProcessorStatisticsOS::calcStLoad(ProcStLoad & stload) std::time_t cur_time = std::time(nullptr); float time_dif = static_cast(cur_time - last_stload_call_time); - stload.user_time = - (cur_proc_time.user - last_proc_time.user) / time_dif; - stload.nice_time = - (cur_proc_time.nice - last_proc_time.nice) / time_dif; - stload.system_time = - (cur_proc_time.system - last_proc_time.system) / time_dif; - stload.idle_time = - (cur_proc_time.idle - last_proc_time.idle) / time_dif; - stload.iowait_time = - (cur_proc_time.iowait - last_proc_time.iowait) / time_dif; - stload.steal_time = - (cur_proc_time.steal - last_proc_time.steal) / time_dif; - stload.guest_time = - (cur_proc_time.guest - last_proc_time.guest) / time_dif; - stload.guest_nice_time = 
- (cur_proc_time.guest_nice - last_proc_time.guest_nice) / time_dif; - + stload.user_time = (cur_proc_time.user - last_proc_time.user) / time_dif; + stload.nice_time = (cur_proc_time.nice - last_proc_time.nice) / time_dif; + stload.system_time = (cur_proc_time.system - last_proc_time.system) / time_dif; + stload.idle_time = (cur_proc_time.idle - last_proc_time.idle) / time_dif; + stload.iowait_time = (cur_proc_time.iowait - last_proc_time.iowait) / time_dif; + stload.steal_time = (cur_proc_time.steal - last_proc_time.steal) / time_dif; + stload.guest_time = (cur_proc_time.guest - last_proc_time.guest) / time_dif; + stload.guest_nice_time = (cur_proc_time.guest_nice - last_proc_time.guest_nice) / time_dif; + last_stload_call_time = cur_time; last_proc_time = cur_proc_time; } @@ -131,76 +115,72 @@ void ProcessorStatisticsOS::readProcTimeAndProcesses(ProcTime & proc_time, ProcS ReadBufferFromFile procst_in(procst_filename, READ_BUFFER_BUF_SIZE, O_RDONLY | O_CLOEXEC); String field_name, field_val; - uint64_t unused; - + uint64_t unused; + readStringUntilWhitespaceAndSkipWhitespaceIfAny(field_name, procst_in); - readIntTextAndSkipWhitespaceIfAny(proc_time.user, procst_in); - readIntTextAndSkipWhitespaceIfAny(proc_time.nice, procst_in); + readIntTextAndSkipWhitespaceIfAny(proc_time.user, procst_in); + readIntTextAndSkipWhitespaceIfAny(proc_time.nice, procst_in); readIntTextAndSkipWhitespaceIfAny(proc_time.system, procst_in); - readIntTextAndSkipWhitespaceIfAny(proc_time.idle, procst_in); + readIntTextAndSkipWhitespaceIfAny(proc_time.idle, procst_in); readIntTextAndSkipWhitespaceIfAny(proc_time.iowait, procst_in); - proc_time.user /= USER_HZ; - proc_time.nice /= USER_HZ; + proc_time.user /= USER_HZ; + proc_time.nice /= USER_HZ; proc_time.system /= USER_HZ; - proc_time.idle /= USER_HZ; + proc_time.idle /= USER_HZ; proc_time.iowait /= USER_HZ; - + readIntTextAndSkipWhitespaceIfAny(unused, procst_in); readIntTextAndSkipWhitespaceIfAny(unused, procst_in); - - readIntTextAndSkipWhitespaceIfAny(proc_time.steal, procst_in); - readIntTextAndSkipWhitespaceIfAny(proc_time.guest, procst_in); + + readIntTextAndSkipWhitespaceIfAny(proc_time.steal, procst_in); + readIntTextAndSkipWhitespaceIfAny(proc_time.guest, procst_in); readIntTextAndSkipWhitespaceIfAny(proc_time.guest_nice, procst_in); - proc_time.steal /= USER_HZ; - proc_time.guest /= USER_HZ; + proc_time.steal /= USER_HZ; + proc_time.guest /= USER_HZ; proc_time.guest_nice /= USER_HZ; - do - { + do { readStringUntilWhitespaceAndSkipWhitespaceIfAny(field_name, procst_in); readStringAndSkipWhitespaceIfAny(field_val, procst_in); } while (field_name != String("processes")); - + stload.processes = static_cast(std::stoul(field_val)); - + readStringUntilWhitespaceAndSkipWhitespaceIfAny(field_name, procst_in); readIntTextAndSkipWhitespaceIfAny(stload.procs_running, procst_in); - + readStringUntilWhitespaceAndSkipWhitespaceIfAny(field_name, procst_in); readIntTextAndSkipWhitespaceIfAny(stload.procs_blocked, procst_in); } void ProcessorStatisticsOS::readFreq(ProcFreq & freq) -{ +{ ReadBufferFromFile cpuinfo_in(cpuinfo_filename, READ_BUFFER_BUF_SIZE, O_RDONLY | O_CLOEXEC); - + String field_name, field_val; char unused; int cpu_count = 0; + freq.max = freq.min = freq.avg = 0; - do - { - do - { + do { + do { readStringAndSkipWhitespaceIfAny(field_name, cpuinfo_in); } while (!cpuinfo_in.eof() && field_name != String("cpu MHz")); - - if (cpuinfo_in.eof()) + + if (cpuinfo_in.eof()) break; readCharAndSkipWhitespaceIfAny(unused, cpuinfo_in); - 
readStringUntilWhitespaceAndSkipWhitespaceIfAny(field_val, cpuinfo_in); + readStringUntilWhitespaceAndSkipWhitespaceIfAny(field_val, cpuinfo_in); cpu_count++; - + float cur_cpu_freq = stof(field_val); freq.avg += cur_cpu_freq; - freq.max = (cpu_count == 1 ? cur_cpu_freq : - std::max(freq.max, cur_cpu_freq)); - freq.min = (cpu_count == 1 ? cur_cpu_freq : - std::min(freq.min, cur_cpu_freq)); + freq.max = (cpu_count == 1 ? cur_cpu_freq : std::max(freq.max, cur_cpu_freq)); + freq.min = (cpu_count == 1 ? cur_cpu_freq : std::min(freq.min, cur_cpu_freq)); } while (true); freq.avg /= static_cast(cpu_count); diff --git a/src/Common/ProcessorStatisticsOS.h b/src/Common/ProcessorStatisticsOS.h index ba95b006e9a..f29e5156bfe 100644 --- a/src/Common/ProcessorStatisticsOS.h +++ b/src/Common/ProcessorStatisticsOS.h @@ -8,23 +8,26 @@ #include -namespace DB +namespace DB { /** Opens files: /proc/loadavg, /proc/stat, /proc/cpuinfo and reads processor statistics in get() method. * This is Linux specific. * See: man procfs */ -class ProcessorStatisticsOS +class ProcessorStatisticsOS { public: - struct ProcLoadavg { + + struct ProcLoadavg + { float avg1; float avg5; float avg15; }; - struct ProcStLoad { + struct ProcStLoad + { float user_time; float nice_time; float system_time; @@ -39,7 +42,8 @@ public: uint32_t procs_blocked; }; - struct ProcFreq { + struct ProcFreq + { float max; float min; float avg; @@ -54,11 +58,12 @@ public: ProcessorStatisticsOS(); ~ProcessorStatisticsOS(); - + Data get(); private: - struct ProcTime { + struct ProcTime + { // The amount of time, measured in seconds uint64_t user; uint64_t nice; @@ -73,7 +78,7 @@ private: void readLoadavg(ProcLoadavg & loadavg); void calcStLoad(ProcStLoad & stload); void readFreq(ProcFreq & freq); - + void readProcTimeAndProcesses(ProcTime & proc_time, ProcStLoad& stload); private: From 6bf0840562dd140d01699c0f22120876bf3a132a Mon Sep 17 00:00:00 2001 From: elevankoff Date: Mon, 24 May 2021 17:24:29 +0000 Subject: [PATCH 147/931] More decorative fixes --- src/Common/MemoryInfoOS.cpp | 22 +++++----- src/Common/ProcessorStatisticsOS.cpp | 61 +++++++++++++++------------- src/Common/ProcessorStatisticsOS.h | 1 - 3 files changed, 43 insertions(+), 41 deletions(-) diff --git a/src/Common/MemoryInfoOS.cpp b/src/Common/MemoryInfoOS.cpp index 17036d115e8..8cf2a0b44f4 100644 --- a/src/Common/MemoryInfoOS.cpp +++ b/src/Common/MemoryInfoOS.cpp @@ -16,18 +16,18 @@ namespace DB namespace { -template -void readIntTextAndSkipWhitespaceIfAny(T & x, ReadBuffer & buf) -{ - readIntText(x, buf); - skipWhitespaceIfAny(buf); -} + template + void readIntTextAndSkipWhitespaceIfAny(T & x, ReadBuffer & buf) + { + readIntText(x, buf); + skipWhitespaceIfAny(buf); + } -void readStringUntilWhitespaceAndSkipWhitespaceIfAny(String & s, ReadBuffer & buf) -{ - readStringUntilWhitespace(s, buf); - skipWhitespaceIfAny(buf); -} + void readStringUntilWhitespaceAndSkipWhitespaceIfAny(String & s, ReadBuffer & buf) + { + readStringUntilWhitespace(s, buf); + skipWhitespaceIfAny(buf); + } } static constexpr auto meminfo_filename = "/proc/meminfo"; diff --git a/src/Common/ProcessorStatisticsOS.cpp b/src/Common/ProcessorStatisticsOS.cpp index 0deea56e7fc..69bce5f5b51 100644 --- a/src/Common/ProcessorStatisticsOS.cpp +++ b/src/Common/ProcessorStatisticsOS.cpp @@ -23,36 +23,36 @@ namespace DB namespace { -template -void readIntTextAndSkipWhitespaceIfAny(T & x, ReadBuffer & buf) -{ - readIntText(x, buf); - skipWhitespaceIfAny(buf); -} + template + void readIntTextAndSkipWhitespaceIfAny(T & x, 
ReadBuffer & buf) + { + readIntText(x, buf); + skipWhitespaceIfAny(buf); + } -void readStringAndSkipWhitespaceIfAny(String & s, ReadBuffer & buf) -{ - readString(s, buf); - skipWhitespaceIfAny(buf); -} + void readStringAndSkipWhitespaceIfAny(String & s, ReadBuffer & buf) + { + readString(s, buf); + skipWhitespaceIfAny(buf); + } -void readStringUntilWhitespaceAndSkipWhitespaceIfAny(String & s, ReadBuffer & buf) -{ - readStringUntilWhitespace(s, buf); - skipWhitespaceIfAny(buf); -} + void readStringUntilWhitespaceAndSkipWhitespaceIfAny(String & s, ReadBuffer & buf) + { + readStringUntilWhitespace(s, buf); + skipWhitespaceIfAny(buf); + } -void readCharAndSkipWhitespaceIfAny(char & c, ReadBuffer & buf) -{ - readChar(c, buf); - skipWhitespaceIfAny(buf); -} + void readCharAndSkipWhitespaceIfAny(char & c, ReadBuffer & buf) + { + readChar(c, buf); + skipWhitespaceIfAny(buf); + } -void readFloatAndSkipWhitespaceIfAny(float & f, ReadBuffer & buf) -{ - readFloatText(f, buf); - skipWhitespaceIfAny(buf); -} + void readFloatAndSkipWhitespaceIfAny(float & f, ReadBuffer & buf) + { + readFloatText(f, buf); + skipWhitespaceIfAny(buf); + } } static constexpr auto loadavg_filename = "/proc/loadavg"; @@ -140,7 +140,8 @@ void ProcessorStatisticsOS::readProcTimeAndProcesses(ProcTime & proc_time, ProcS proc_time.guest /= USER_HZ; proc_time.guest_nice /= USER_HZ; - do { + do + { readStringUntilWhitespaceAndSkipWhitespaceIfAny(field_name, procst_in); readStringAndSkipWhitespaceIfAny(field_val, procst_in); } while (field_name != String("processes")); @@ -163,8 +164,10 @@ void ProcessorStatisticsOS::readFreq(ProcFreq & freq) int cpu_count = 0; freq.max = freq.min = freq.avg = 0; - do { - do { + do + { + do + { readStringAndSkipWhitespaceIfAny(field_name, cpuinfo_in); } while (!cpuinfo_in.eof() && field_name != String("cpu MHz")); diff --git a/src/Common/ProcessorStatisticsOS.h b/src/Common/ProcessorStatisticsOS.h index f29e5156bfe..70edfceb2ca 100644 --- a/src/Common/ProcessorStatisticsOS.h +++ b/src/Common/ProcessorStatisticsOS.h @@ -18,7 +18,6 @@ namespace DB class ProcessorStatisticsOS { public: - struct ProcLoadavg { float avg1; From 7d1524561e8e588c63b38db02012a92a4c667a67 Mon Sep 17 00:00:00 2001 From: elevankoff Date: Mon, 24 May 2021 17:35:38 +0000 Subject: [PATCH 148/931] Delete extra whitespaces --- src/Interpreters/AsynchronousMetrics.cpp | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/Interpreters/AsynchronousMetrics.cpp b/src/Interpreters/AsynchronousMetrics.cpp index 28c7be9ea2a..92ff4931481 100644 --- a/src/Interpreters/AsynchronousMetrics.cpp +++ b/src/Interpreters/AsynchronousMetrics.cpp @@ -236,7 +236,7 @@ void AsynchronousMetrics::update() } #endif - /// Process memory information according to OS + /// Process memory information according to OS #if defined(OS_LINUX) { MemoryInfoOS::Data data = memory_info.get(); @@ -252,7 +252,7 @@ void AsynchronousMetrics::update() } #endif - /// Process processor usage according to OS + /// Process processor usage according to OS #if defined(OS_LINUX) { ProcessorStatisticsOS::Data data = proc_stat.get(); @@ -280,7 +280,7 @@ void AsynchronousMetrics::update() } #endif - /// Process disk usage according to OS + /// Process disk usage according to OS #if defined(OS_LINUX) { DiskStatisticsOS::Data data = disk_stat.get(); From 9d3c24c9c0413faaf0148ed94c68e40cacd2a4a0 Mon Sep 17 00:00:00 2001 From: elevankoff Date: Mon, 24 May 2021 19:48:29 +0000 Subject: [PATCH 149/931] Fix typo --- src/Interpreters/AsynchronousMetrics.cpp | 2 +- 1 file 
changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Interpreters/AsynchronousMetrics.cpp b/src/Interpreters/AsynchronousMetrics.cpp index 92ff4931481..2b6d552b179 100644 --- a/src/Interpreters/AsynchronousMetrics.cpp +++ b/src/Interpreters/AsynchronousMetrics.cpp @@ -274,7 +274,7 @@ void AsynchronousMetrics::update() new_values["TimeLoadGuest"] = data.stload.guest_time; new_values["TimeLoadGuestNice"] = data.stload.guest_nice_time; - new_values["Processess"] = data.stload.processes; + new_values["Processes"] = data.stload.processes; new_values["ProcessesRunning"] = data.stload.procs_running; new_values["ProcessesBlocked"] = data.stload.procs_blocked; } From 625377f553d8aca133a512ed0000a5bba589a0e9 Mon Sep 17 00:00:00 2001 From: kssenii Date: Wed, 26 May 2021 23:38:53 +0000 Subject: [PATCH 150/931] Add backoff for reading replication messages, fix some checks --- src/Interpreters/Context.h | 2 +- .../PostgreSQLReplicationHandler.cpp | 19 ++++++++++++++----- .../PostgreSQL/PostgreSQLReplicationHandler.h | 4 +++- .../StorageMaterializePostgreSQL.cpp | 8 ++++---- 4 files changed, 22 insertions(+), 11 deletions(-) diff --git a/src/Interpreters/Context.h b/src/Interpreters/Context.h index b12d4c82ce8..5e9a84516ef 100644 --- a/src/Interpreters/Context.h +++ b/src/Interpreters/Context.h @@ -259,7 +259,7 @@ private: /// XXX: move this stuff to shared part instead. ContextPtr buffer_context; /// Buffer context. Could be equal to this. - /// A flag, used to distinquish between user query and internal query to a database engine (MaterializePostgreSQL). + /// A flag, used to distinguish between user query and internal query to a database engine (MaterializePostgreSQL). bool is_internal_query = false; public: diff --git a/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp b/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp index ac9ff2edbe1..33d5c49ec09 100644 --- a/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp +++ b/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp @@ -14,11 +14,12 @@ namespace DB { -static const auto reschedule_ms = 500; +static const auto RESCHEDULE_MS = 500; +static const auto BACKOFF_TRESHOLD = 32000; namespace ErrorCodes { - extern const int UNKNOWN_TABLE; + extern const int LOGICAL_ERROR; } PostgreSQLReplicationHandler::PostgreSQLReplicationHandler( @@ -41,6 +42,7 @@ PostgreSQLReplicationHandler::PostgreSQLReplicationHandler( , is_materialize_postgresql_database(is_materialize_postgresql_database_) , tables_list(tables_list_) , connection(std::make_shared(connection_info_)) + , milliseconds_to_wait(RESCHEDULE_MS) { replication_slot = fmt::format("{}_ch_replication_slot", replication_identifier); publication_name = fmt::format("{}_ch_publication", replication_identifier); @@ -72,7 +74,7 @@ void PostgreSQLReplicationHandler::waitConnectionAndStart() catch (const pqxx::broken_connection & pqxx_error) { LOG_ERROR(log, "Unable to set up connection. Reconnection attempt will continue. Error message: {}", pqxx_error.what()); - startup_task->scheduleAfter(reschedule_ms); + startup_task->scheduleAfter(RESCHEDULE_MS); } catch (...) 
{ @@ -256,9 +258,16 @@ void PostgreSQLReplicationHandler::consumerFunc() return; if (schedule_now) + { consumer_task->schedule(); + milliseconds_to_wait = RESCHEDULE_MS; + } else - consumer_task->scheduleAfter(reschedule_ms); + { + consumer_task->scheduleAfter(milliseconds_to_wait); + if (milliseconds_to_wait < BACKOFF_TRESHOLD) + milliseconds_to_wait *= 2; + } } @@ -448,7 +457,7 @@ NameSet PostgreSQLReplicationHandler::fetchTablesFromPublication(pqxx::work & tx PostgreSQLTableStructurePtr PostgreSQLReplicationHandler::fetchTableStructure( - pqxx::ReplicationTransaction & tx, const std::string & table_name) + pqxx::ReplicationTransaction & tx, const std::string & table_name) const { if (!is_materialize_postgresql_database) return nullptr; diff --git a/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.h b/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.h index 1f8d25ab32d..6c919389392 100644 --- a/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.h +++ b/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.h @@ -78,7 +78,7 @@ private: void reloadFromSnapshot(const std::vector> & relation_data); - PostgreSQLTableStructurePtr fetchTableStructure(pqxx::ReplicationTransaction & tx, const String & table_name); + PostgreSQLTableStructurePtr fetchTableStructure(pqxx::ReplicationTransaction & tx, const String & table_name) const; Poco::Logger * log; ContextPtr context; @@ -120,6 +120,8 @@ private: /// MaterializePostgreSQL tables. Used for managing all operations with its internal nested tables. MaterializedStorages materialized_storages; + + UInt64 milliseconds_to_wait; }; } diff --git a/src/Storages/PostgreSQL/StorageMaterializePostgreSQL.cpp b/src/Storages/PostgreSQL/StorageMaterializePostgreSQL.cpp index 1b99b2eabfb..07d13ace7c2 100644 --- a/src/Storages/PostgreSQL/StorageMaterializePostgreSQL.cpp +++ b/src/Storages/PostgreSQL/StorageMaterializePostgreSQL.cpp @@ -406,11 +406,11 @@ ASTPtr StorageMaterializePostgreSQL::getCreateNestedTableQuery(PostgreSQLTableSt ordinary_columns_and_types.push_back({"_sign", std::make_shared()}); ordinary_columns_and_types.push_back({"_version", std::make_shared()}); - StorageInMemoryMetadata metadata; - metadata.setColumns(ColumnsDescription(ordinary_columns_and_types)); - metadata.setConstraints(metadata_snapshot->getConstraints()); + StorageInMemoryMetadata storage_metadata; + storage_metadata.setColumns(ColumnsDescription(ordinary_columns_and_types)); + storage_metadata.setConstraints(metadata_snapshot->getConstraints()); - setInMemoryMetadata(metadata); + setInMemoryMetadata(storage_metadata); return create_table_query; } From 3d01028d192ba7534d8f7f5801bcb4c07751c383 Mon Sep 17 00:00:00 2001 From: Kruglov Pavel <48961922+Avogar@users.noreply.github.com> Date: Fri, 28 May 2021 14:20:39 +0300 Subject: [PATCH 151/931] Use hits_10m_single instead of hits_100m_single --- tests/performance/uniq.xml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/performance/uniq.xml b/tests/performance/uniq.xml index acd84d75788..f6f7ac01c65 100644 --- a/tests/performance/uniq.xml +++ b/tests/performance/uniq.xml @@ -1,6 +1,6 @@ - hits_100m_single + hits_10m_single 30000000000 @@ -58,5 +58,5 @@ - SELECT {key} AS k, {func}(UserID) FROM (SELECT {key}, UserID FROM hits_100m_single LIMIT 20000000) GROUP BY k + SELECT {key} AS k, {func}(UserID) FROM hits_10m_single GROUP BY k FORMAT Null From 8199b45e95b169e86adea0ab806b464c120c941f Mon Sep 17 00:00:00 2001 From: Konstantin Rudenskii Date: Sat, 15 May 2021 13:10:19 +0300 Subject: [PATCH 152/931] 
Fix style --- src/Functions/DummyJSONParser.h | 2 + src/Functions/FunctionSQLJSON.cpp | 6 +- src/Functions/FunctionSQLJSON.h | 145 +++++++++++++----- src/Functions/FunctionsJSON.h | 2 + src/Functions/JSONPath/ASTs/ASTJSONPath.h | 2 - .../JSONPath/ASTs/ASTJSONPathQuery.h | 2 - .../JSONPath/ASTs/ASTJSONPathRange.h | 30 ++++ src/Functions/JSONPath/ASTs/CMakeLists.txt | 2 +- src/Functions/JSONPath/CMakeLists.txt | 2 +- .../JSONPath/Generators/CMakeLists.txt | 2 +- .../JSONPath/Generators/GeneratorJSONPath.h | 65 ++++---- .../JSONPath/Generators/IGenerator.h | 3 +- src/Functions/JSONPath/Generators/IVisitor.h | 5 + .../Generators/VisitorJSONPathMemberAccess.h | 27 +++- .../Generators/VisitorJSONPathRange.h | 93 +++++++++++ .../JSONPath/Generators/VisitorStatus.h | 3 +- src/Functions/JSONPath/Parsers/CMakeLists.txt | 2 +- .../JSONPath/Parsers/ParserJSONPath.cpp | 3 - .../JSONPath/Parsers/ParserJSONPathQuery.cpp | 20 ++- .../JSONPath/Parsers/ParserJSONPathRange.cpp | 98 ++++++++++++ .../JSONPath/Parsers/ParserJSONPathRange.h | 19 +++ src/Functions/RapidJSONParser.h | 3 +- src/Functions/SimdJSONParser.h | 62 ++------ 23 files changed, 445 insertions(+), 153 deletions(-) create mode 100644 src/Functions/JSONPath/ASTs/ASTJSONPathRange.h create mode 100644 src/Functions/JSONPath/Generators/VisitorJSONPathRange.h create mode 100644 src/Functions/JSONPath/Parsers/ParserJSONPathRange.cpp create mode 100644 src/Functions/JSONPath/Parsers/ParserJSONPathRange.h diff --git a/src/Functions/DummyJSONParser.h b/src/Functions/DummyJSONParser.h index a71c90e4a19..3010347e4c4 100644 --- a/src/Functions/DummyJSONParser.h +++ b/src/Functions/DummyJSONParser.h @@ -39,6
+39,8 @@ struct DummyJSONParser std::string_view getString() const { return {}; } Array getArray() const { return {}; } Object getObject() const { return {}; } + + Element getElement() {return {}; } }; /// References an array in a JSON document. diff --git a/src/Functions/FunctionSQLJSON.cpp b/src/Functions/FunctionSQLJSON.cpp index ddcca12835f..7d558dd0950 100644 --- a/src/Functions/FunctionSQLJSON.cpp +++ b/src/Functions/FunctionSQLJSON.cpp @@ -9,11 +9,11 @@ namespace ErrorCodes extern const int ILLEGAL_TYPE_OF_ARGUMENT; } - void registerFunctionsSQLJSON(FunctionFactory & factory) { - factory.registerFunction>(); - factory.registerFunction>(); + factory.registerFunction>(); + factory.registerFunction>(); + factory.registerFunction>(); } } diff --git a/src/Functions/FunctionSQLJSON.h b/src/Functions/FunctionSQLJSON.h index 24749099e57..1fc6986256d 100644 --- a/src/Functions/FunctionSQLJSON.h +++ b/src/Functions/FunctionSQLJSON.h @@ -1,16 +1,12 @@ #pragma once #include -#include -#include -#include -#include #include -#include -#include -#include +#include +#include #include #include +#include #include #include #include @@ -21,12 +17,7 @@ #include #include #include -#include -#include -#include -#include #include -//#include #include #if !defined(ARCADIA_BUILD) @@ -37,11 +28,11 @@ namespace DB { namespace ErrorCodes { - extern const int ILLEGAL_COLUMN; - extern const int ILLEGAL_TYPE_OF_ARGUMENT; - extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH; - extern const int TOO_FEW_ARGUMENTS_FOR_FUNCTION; - extern const int BAD_ARGUMENTS; +extern const int ILLEGAL_COLUMN; +extern const int ILLEGAL_TYPE_OF_ARGUMENT; +extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH; +extern const int TOO_FEW_ARGUMENTS_FOR_FUNCTION; +extern const int BAD_ARGUMENTS; } class FunctionSQLJSONHelpers @@ -141,6 +132,7 @@ public: /// Parse JSON for every row Impl impl; + for (const auto i : ext::range(0, input_rows_count)) { std::string_view json{ @@ -205,45 +197,58 @@ private: const Context & context; }; -struct NameSQLJSONTest +struct NameJSONExists { - static constexpr auto name{"SQLJSONTest"}; + static constexpr auto name{"JSON_EXISTS"}; }; -struct NameSQLJSONMemberAccess +struct NameJSONValue { - static constexpr auto name{"SQLJSONMemberAccess"}; + static constexpr auto name{"JSON_VALUE"}; +}; + +struct NameJSONQuery +{ + static constexpr auto name{"JSON_QUERY"}; }; -/** - * Function to test logic before function calling, will be removed in final PR - * @tparam JSONParser parser - */ template -class SQLJSONTestImpl +class JSONExistsImpl { public: using Element = typename JSONParser::Element; - static DataTypePtr getReturnType(const char *, const ColumnsWithTypeAndName &) { return std::make_shared(); } + static DataTypePtr getReturnType(const char *, const ColumnsWithTypeAndName &) { return std::make_shared(); } static size_t getNumberOfIndexArguments(const ColumnsWithTypeAndName & arguments) { return arguments.size() - 1; } - static bool insertResultToColumn(IColumn & dest, const Element &, ASTPtr &) + static bool insertResultToColumn(IColumn & dest, const Element & root, ASTPtr & query_ptr) { - String str = "I am working:-)"; - ColumnString & col_str = assert_cast(dest); - col_str.insertData(str.data(), str.size()); + GeneratorJSONPath generator_json_path(query_ptr); + Element current_element = root; + VisitorStatus status; + while ((status = generator_json_path.getNextItem(current_element)) != VisitorStatus::Exhausted) + { + if (status == VisitorStatus::Ok) { + break; + } + current_element = root; + } + + /// 
insert result, status can be either Ok (if we found the item) + /// or Exhausted (if we never found the item) + ColumnUInt8 & col_bool = assert_cast(dest); + if (status == VisitorStatus::Ok) { + col_bool.insert(0); + } else { + col_bool.insert(1); + } return true; } }; -/** - * Function to test jsonpath member access, will be removed in final PR - * @tparam JSONParser parser - */ template -class SQLJSONMemberAccessImpl +class JSONValueImpl { public: using Element = typename JSONParser::Element; @@ -257,18 +262,74 @@ public: GeneratorJSONPath generator_json_path(query_ptr); Element current_element = root; VisitorStatus status; - while ((status = generator_json_path.getNextItem(current_element)) == VisitorStatus::Ok) + Element res; + while ((status = generator_json_path.getNextItem(current_element)) != VisitorStatus::Exhausted) { - /// No-op + if (status == VisitorStatus::Ok) { + if (!(current_element.isArray() || current_element.isObject())) { + break; + } + } else if (status == VisitorStatus::Error) { + /// ON ERROR + } + current_element = root; } - if (status == VisitorStatus::Error) + + if (status == VisitorStatus::Exhausted) { + return false; + } + + std::stringstream out; // STYLE_CHECK_ALLOW_STD_STRING_STREAM + out << current_element.getElement(); + auto output_str = out.str(); + ColumnString & col_str = assert_cast(dest); + col_str.insertData(output_str.data(), output_str.size()); + return true; + } +}; + +/** + * Function to test jsonpath member access, will be removed in final PR + * @tparam JSONParser parser + */ +template +class JSONQueryImpl +{ +public: + using Element = typename JSONParser::Element; + + static DataTypePtr getReturnType(const char *, const ColumnsWithTypeAndName &) { return std::make_shared(); } + + static size_t getNumberOfIndexArguments(const ColumnsWithTypeAndName & arguments) { return arguments.size() - 1; } + + static bool insertResultToColumn(IColumn & dest, const Element & root, ASTPtr & query_ptr) + { + GeneratorJSONPath generator_json_path(query_ptr); + Element current_element = root; + VisitorStatus status; + std::stringstream out; // STYLE_CHECK_ALLOW_STD_STRING_STREAM + /// Create json array of results: [res1, res2, ...] + out << "["; + bool success = false; + while ((status = generator_json_path.getNextItem(current_element)) != VisitorStatus::Exhausted) { + if (status == VisitorStatus::Ok) { + if (success) { + out << ", "; + } + success = true; + out << current_element.getElement(); + } else if (status == VisitorStatus::Error) { + /// ON ERROR + } + current_element = root; + } + out << "]"; + if (!success) { return false; } ColumnString & col_str = assert_cast(dest); - std::stringstream ostr; // STYLE_CHECK_ALLOW_STD_STRING_STREAM - ostr << current_element.getElement(); - auto output_str = ostr.str(); + auto output_str = out.str(); col_str.insertData(output_str.data(), output_str.size()); return true; } diff --git a/src/Functions/FunctionsJSON.h b/src/Functions/FunctionsJSON.h index 581cc2015aa..f066bb1029a 100644 --- a/src/Functions/FunctionsJSON.h +++ b/src/Functions/FunctionsJSON.h @@ -80,6 +80,8 @@ public: const ColumnString::Chars & chars = col_json_string->getChars(); const ColumnString::Offsets & offsets = col_json_string->getOffsets(); + size_t num_index_arguments = Impl::getNumberOfIndexArguments(arguments); + std::vector moves = prepareMoves(Name::name, arguments, 1, num_index_arguments); /// Preallocate memory in parser if necessary. 
JSONParser parser; diff --git a/src/Functions/JSONPath/ASTs/ASTJSONPath.h b/src/Functions/JSONPath/ASTs/ASTJSONPath.h index cd73cd14257..092fe16bd9e 100644 --- a/src/Functions/JSONPath/ASTs/ASTJSONPath.h +++ b/src/Functions/JSONPath/ASTs/ASTJSONPath.h @@ -10,13 +10,11 @@ class ASTJSONPath : public IAST public: String getID(char) const override { - std::cerr << "in ASTJSONPath: getID\n"; return "ASTJSONPath"; } ASTPtr clone() const override { - std::cerr << "in " << "ASTJSONPath" << ": clone\n"; return std::make_shared(*this); } diff --git a/src/Functions/JSONPath/ASTs/ASTJSONPathQuery.h b/src/Functions/JSONPath/ASTs/ASTJSONPathQuery.h index 6b952c2519d..8da8a7baafb 100644 --- a/src/Functions/JSONPath/ASTs/ASTJSONPathQuery.h +++ b/src/Functions/JSONPath/ASTs/ASTJSONPathQuery.h @@ -9,13 +9,11 @@ class ASTJSONPathQuery : public IAST public: String getID(char) const override { - std::cerr << "in ASTJSONPathQuery: getID\n"; return "ASTJSONPathQuery"; } ASTPtr clone() const override { - std::cerr << "in " << getID(' ') << ": clone\n"; return std::make_shared(*this); } }; diff --git a/src/Functions/JSONPath/ASTs/ASTJSONPathRange.h b/src/Functions/JSONPath/ASTs/ASTJSONPathRange.h new file mode 100644 index 00000000000..21af3cff363 --- /dev/null +++ b/src/Functions/JSONPath/ASTs/ASTJSONPathRange.h @@ -0,0 +1,30 @@ +#pragma once + +#include +#include + +namespace DB +{ + +class ASTJSONPathRange : public IAST +{ +public: + String getID(char) const override + { + return "ASTJSONPathRange"; + } + + ASTPtr clone() const override + { + return std::make_shared(*this); + } + +public: + /// Ranges to lookup in json array ($[0, 1, 2, 4 to 9]) + /// Range is represented as + /// Single index is represented as + std::vector> ranges; + bool is_star = false; +}; + +} diff --git a/src/Functions/JSONPath/ASTs/CMakeLists.txt b/src/Functions/JSONPath/ASTs/CMakeLists.txt index c671dbbc001..ef56e3b0072 100644 --- a/src/Functions/JSONPath/ASTs/CMakeLists.txt +++ b/src/Functions/JSONPath/ASTs/CMakeLists.txt @@ -5,4 +5,4 @@ target_link_libraries(clickhouse_functions_jsonpath_asts PRIVATE dbms) if (STRIP_DEBUG_SYMBOLS_FUNCTIONS) target_compile_options(clickhouse_functions_jsonpath_asts PRIVATE "-g0") -endif() \ No newline at end of file +endif() diff --git a/src/Functions/JSONPath/CMakeLists.txt b/src/Functions/JSONPath/CMakeLists.txt index 8a46909f555..8e65f7c8c6d 100644 --- a/src/Functions/JSONPath/CMakeLists.txt +++ b/src/Functions/JSONPath/CMakeLists.txt @@ -5,4 +5,4 @@ add_subdirectory(Generators) target_link_libraries(clickhouse_functions PRIVATE clickhouse_functions_jsonpath_generators) add_subdirectory(Parsers) -target_link_libraries(clickhouse_functions PRIVATE clickhouse_functions_jsonpath_parsers) \ No newline at end of file +target_link_libraries(clickhouse_functions PRIVATE clickhouse_functions_jsonpath_parsers) diff --git a/src/Functions/JSONPath/Generators/CMakeLists.txt b/src/Functions/JSONPath/Generators/CMakeLists.txt index 0d1a289e8b4..76a116132fd 100644 --- a/src/Functions/JSONPath/Generators/CMakeLists.txt +++ b/src/Functions/JSONPath/Generators/CMakeLists.txt @@ -5,4 +5,4 @@ target_link_libraries(clickhouse_functions_jsonpath_generators PRIVATE dbms) if (STRIP_DEBUG_SYMBOLS_FUNCTIONS) target_compile_options(clickhouse_functions_jsonpath_generators PRIVATE "-g0") -endif() \ No newline at end of file +endif() diff --git a/src/Functions/JSONPath/Generators/GeneratorJSONPath.h b/src/Functions/JSONPath/Generators/GeneratorJSONPath.h index dd4354a4613..68ea5a2a3c5 100644 --- 
a/src/Functions/JSONPath/Generators/GeneratorJSONPath.h +++ b/src/Functions/JSONPath/Generators/GeneratorJSONPath.h @@ -1,11 +1,10 @@ #include #include +#include #include #include -#include - namespace DB { @@ -26,20 +25,23 @@ public: throw Exception("Invalid path", ErrorCodes::LOGICAL_ERROR); } const auto * query = path->jsonpath_query; - if (!path || !query) - { - throw Exception("Something went terribly wrong", ErrorCodes::LOGICAL_ERROR); - } for (auto child_ast : query->children) { if (child_ast->getID() == "ASTJSONPathMemberAccess") { - auto member_access_generator = std::make_shared>(child_ast); - if (member_access_generator) { - visitors.push_back(member_access_generator); + auto member_access_visitor = std::make_shared>(child_ast); + if (member_access_visitor) { + visitors.push_back(member_access_visitor); } else { - throw Exception("member_access_generator could not be nullptr", ErrorCodes::LOGICAL_ERROR); + throw Exception("member_access_visitor could not be nullptr", ErrorCodes::LOGICAL_ERROR); + } + } else if (child_ast->getID() == "ASTJSONPathRange") { + auto range_visitor = std::make_shared>(child_ast); + if (range_visitor) { + visitors.push_back(range_visitor); + } else { + throw Exception("range_visitor could not be nullptr", ErrorCodes::LOGICAL_ERROR); } } } @@ -54,39 +56,42 @@ public: */ VisitorStatus getNextItem(typename JSONParser::Element & element) override { - if (visitors[current_visitor]->isExhausted()) { - if (!backtrace()) { + while (true) { + auto root = element; + if (current_visitor < 0) { return VisitorStatus::Exhausted; } - } - /// Apply all non-exhausted visitors - for (int i = 0; i < current_visitor; ++i) { - VisitorStatus status = visitors[i]->apply(element); - /// on fail return immediately - if (status == VisitorStatus::Error) { + for (int i = 0; i < current_visitor; ++i) { + visitors[i]->apply(root); + } + + VisitorStatus status = VisitorStatus::Error; + for (size_t i = current_visitor; i < visitors.size(); ++i) { + status = visitors[i]->visit(root); + current_visitor = i; + if (status == VisitorStatus::Error || status == VisitorStatus::Ignore) { + break; + } + } + updateVisitorsForNextRun(); + + if (status != VisitorStatus::Ignore) { + element = root; return status; } } - - /// Visit newly initialized (for the first time or through reinitialize) visitors - for (size_t i = current_visitor; i < visitors.size(); ++i) { - VisitorStatus status = visitors[i]->visit(element); - current_visitor = i; - /// on fail return immediately - if (status == VisitorStatus::Error) { - return status; - } - } - return VisitorStatus::Ok; } private: - bool backtrace() { + bool updateVisitorsForNextRun() { while (current_visitor >= 0 && visitors[current_visitor]->isExhausted()) { visitors[current_visitor]->reinitialize(); current_visitor--; } + if (current_visitor >= 0) { + visitors[current_visitor]->updateState(); + } return current_visitor >= 0; } diff --git a/src/Functions/JSONPath/Generators/IGenerator.h b/src/Functions/JSONPath/Generators/IGenerator.h index 31d9e167f24..18c0ac7da67 100644 --- a/src/Functions/JSONPath/Generators/IGenerator.h +++ b/src/Functions/JSONPath/Generators/IGenerator.h @@ -16,8 +16,7 @@ public: virtual const char * getName() const = 0; /** - * Used to yield next element in JSONPath query. Does so by recursively calling getNextItem - * on its children Generators one by one. + * Used to yield next non-ignored element describes by JSONPath query. 
* * @param element to be extracted into * @return true if generator is not exhausted diff --git a/src/Functions/JSONPath/Generators/IVisitor.h b/src/Functions/JSONPath/Generators/IVisitor.h index fdd254478a5..78c1efe64fc 100644 --- a/src/Functions/JSONPath/Generators/IVisitor.h +++ b/src/Functions/JSONPath/Generators/IVisitor.h @@ -7,6 +7,9 @@ namespace DB { template class IVisitor { public: + + virtual const char * getName() const = 0; + /** * Applies this visitor to document and mutates its state * @param element simdjson element @@ -24,6 +27,8 @@ public: */ virtual void reinitialize() = 0; + virtual void updateState() = 0; + bool isExhausted() { return is_exhausted; } diff --git a/src/Functions/JSONPath/Generators/VisitorJSONPathMemberAccess.h b/src/Functions/JSONPath/Generators/VisitorJSONPathMemberAccess.h index 50b814eeaeb..cad36e40e4d 100644 --- a/src/Functions/JSONPath/Generators/VisitorJSONPathMemberAccess.h +++ b/src/Functions/JSONPath/Generators/VisitorJSONPathMemberAccess.h @@ -10,28 +10,39 @@ class VisitorJSONPathMemberAccess : public IVisitor public: VisitorJSONPathMemberAccess(ASTPtr member_access_ptr_) : member_access_ptr(member_access_ptr_) { } + const char * getName() const override { return "VisitorJSONPathMemberAccess"; } + VisitorStatus apply(typename JSONParser::Element & element) const override { const auto * member_access = member_access_ptr->as(); typename JSONParser::Element result; - bool result_ok = element.getObject().find(std::string_view(member_access->member_name), result); - if (result_ok) - { - element = result; - return VisitorStatus::Ok; - } - return VisitorStatus::Error; + element.getObject().find(std::string_view(member_access->member_name), result); + element = result; + return VisitorStatus::Ok; } VisitorStatus visit(typename JSONParser::Element & element) override { + if (!element.isObject()) { + this->setExhausted(true); + return VisitorStatus::Error; + } + const auto * member_access = member_access_ptr->as(); + typename JSONParser::Element result; + if (!element.getObject().find(std::string_view(member_access->member_name), result)) { + this->setExhausted(true); + return VisitorStatus::Error; + } + apply(element); this->setExhausted(true); - return apply(element); + return VisitorStatus::Ok; } void reinitialize() override { this->setExhausted(false); } + void updateState() override {} + private: ASTPtr member_access_ptr; }; diff --git a/src/Functions/JSONPath/Generators/VisitorJSONPathRange.h b/src/Functions/JSONPath/Generators/VisitorJSONPathRange.h new file mode 100644 index 00000000000..0858d9b70da --- /dev/null +++ b/src/Functions/JSONPath/Generators/VisitorJSONPathRange.h @@ -0,0 +1,93 @@ +#include +#include +#include + +namespace DB { + +template +class VisitorJSONPathRange : public IVisitor +{ +public: + VisitorJSONPathRange(ASTPtr range_ptr_) : range_ptr(range_ptr_) { + const auto * range = range_ptr->as(); + current_range = 0; + if (range->is_star) { + current_index = 0; + } else { + current_index = range->ranges[current_range].first; + } + } + + const char * getName() const override { return "VisitorJSONPathRange"; } + + VisitorStatus apply(typename JSONParser::Element & element) const override { + typename JSONParser::Element result; + typename JSONParser::Array array = element.getArray(); + if (current_index >= array.size()) { + return VisitorStatus::Error; + } + result = array[current_index]; + element = result; + return VisitorStatus::Ok; + } + + VisitorStatus visit(typename JSONParser::Element & element) override + { + if 
(!element.isArray()) { + this->setExhausted(true); + return VisitorStatus::Error; + } + + const auto * range = range_ptr->as(); + VisitorStatus status; + if (current_index < element.getArray().size()) { + apply(element); + status = VisitorStatus::Ok; + } else if (!range->is_star) { + status = VisitorStatus::Ignore; + } else { + status = VisitorStatus::Ignore; + this->setExhausted(true); + } + + if (!range->is_star) { + if (current_index + 1 == range->ranges[current_range].second) { + if (current_range + 1 == range->ranges.size()) { + this->setExhausted(true); + } + } + } + + return status; + } + + void reinitialize() override { + const auto * range = range_ptr->as(); + current_range = 0; + if (range->is_star) { + current_index = 0; + } else { + current_index = range->ranges[current_range].first; + } + this->setExhausted(false); + } + + void updateState() override { + const auto * range = range_ptr->as(); + current_index++; + if (range->is_star) { + return; + } + if (current_index == range->ranges[current_range].second) { + current_range++; + current_index = range->ranges[current_range].first; + } + } + +private: + ASTPtr range_ptr; + size_t current_range; + UInt32 current_index; +}; + +} // namespace diff --git a/src/Functions/JSONPath/Generators/VisitorStatus.h b/src/Functions/JSONPath/Generators/VisitorStatus.h index 51d795efbf7..17b424a3bf6 100644 --- a/src/Functions/JSONPath/Generators/VisitorStatus.h +++ b/src/Functions/JSONPath/Generators/VisitorStatus.h @@ -5,7 +5,8 @@ namespace DB { enum VisitorStatus { Ok, Exhausted, - Error + Error, + Ignore }; } diff --git a/src/Functions/JSONPath/Parsers/CMakeLists.txt b/src/Functions/JSONPath/Parsers/CMakeLists.txt index f2f94298576..ecabe5cc13b 100644 --- a/src/Functions/JSONPath/Parsers/CMakeLists.txt +++ b/src/Functions/JSONPath/Parsers/CMakeLists.txt @@ -5,4 +5,4 @@ target_link_libraries(clickhouse_functions_jsonpath_parsers PRIVATE dbms) if (STRIP_DEBUG_SYMBOLS_FUNCTIONS) target_compile_options(clickhouse_functions_jsonpath_parsers PRIVATE "-g0") -endif() \ No newline at end of file +endif() diff --git a/src/Functions/JSONPath/Parsers/ParserJSONPath.cpp b/src/Functions/JSONPath/Parsers/ParserJSONPath.cpp index bf62f44fade..b65de621f9a 100644 --- a/src/Functions/JSONPath/Parsers/ParserJSONPath.cpp +++ b/src/Functions/JSONPath/Parsers/ParserJSONPath.cpp @@ -1,9 +1,6 @@ #include - #include - #include - #include namespace DB diff --git a/src/Functions/JSONPath/Parsers/ParserJSONPathQuery.cpp b/src/Functions/JSONPath/Parsers/ParserJSONPathQuery.cpp index c0831780fc4..10cf6f2915c 100644 --- a/src/Functions/JSONPath/Parsers/ParserJSONPathQuery.cpp +++ b/src/Functions/JSONPath/Parsers/ParserJSONPathQuery.cpp @@ -1,7 +1,6 @@ #include - #include - +#include #include namespace DB @@ -18,6 +17,7 @@ bool ParserJSONPathQuery::parseImpl(Pos & pos, ASTPtr & query, Expected & expect { query = std::make_shared(); ParserJSONPathMemberAccess parser_jsonpath_member_access; + ParserJSONPathRange parser_jsonpath_range; if (pos->type != TokenType::DollarSign) { return false; @@ -25,15 +25,19 @@ bool ParserJSONPathQuery::parseImpl(Pos & pos, ASTPtr & query, Expected & expect ++pos; bool res = false; - ASTPtr member_access; - while (parser_jsonpath_member_access.parse(pos, member_access, expected)) + ASTPtr subquery; + while (parser_jsonpath_member_access.parse(pos, subquery, expected) || + parser_jsonpath_range.parse(pos, subquery, expected)) { - query->children.push_back(member_access); - member_access = nullptr; + if (subquery) + { + 
query->children.push_back(subquery); + subquery = nullptr; + } res = true; } - /// true in case of at least one success - return res; + /// if we had at least one success and no fails + return res && pos->type == TokenType::EndOfStream; } } diff --git a/src/Functions/JSONPath/Parsers/ParserJSONPathRange.cpp b/src/Functions/JSONPath/Parsers/ParserJSONPathRange.cpp new file mode 100644 index 00000000000..4f4a87f15ce --- /dev/null +++ b/src/Functions/JSONPath/Parsers/ParserJSONPathRange.cpp @@ -0,0 +1,98 @@ +#include +#include +#include + +#include +#include +#include + +namespace DB +{ +namespace ErrorCodes +{ + extern const int ILLEGAL_COLUMN; + extern const int ILLEGAL_TYPE_OF_ARGUMENT; + extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH; + extern const int TOO_FEW_ARGUMENTS_FOR_FUNCTION; + extern const int BAD_ARGUMENTS; +} +/** + * + * @param pos token iterator + * @param node node of ASTJSONPathQuery + * @param expected stuff for logging + * @return was parse successful + */ +bool ParserJSONPathRange::parseImpl(Pos & pos, ASTPtr & node, Expected & expected) +{ + auto range = std::make_shared(); + node = range; + + if (pos->type != TokenType::OpeningSquareBracket) { + return false; + } + ++pos; + + while (pos->type != TokenType::ClosingSquareBracket) { + if (pos->type != TokenType::Number && pos->type != TokenType::Asterisk) + { + return false; + } + if (pos->type == TokenType::Asterisk) { + if (range->is_star) { + throw Exception{"Multiple asterisks in square array range are not allowed", ErrorCodes::BAD_ARGUMENTS}; + } + range->is_star = true; + ++pos; + continue; + } + + std::pair range_indices; + ParserNumber number_p; + ASTPtr number_ptr; + if (!number_p.parse(pos, number_ptr, expected)) + { + return false; + } + range_indices.first = number_ptr->as()->value.get(); + + if (pos->type == TokenType::Comma || pos->type == TokenType::ClosingSquareBracket) { + /// Single index case + range_indices.second = range_indices.first + 1; + } else if (pos->type == TokenType::BareWord) { + /// Range case + ParserIdentifier name_p; + ASTPtr word; + if (!name_p.parse(pos, word, expected)) { + return false; + } + String to_identifier; + if (!tryGetIdentifierNameInto(word, to_identifier) || to_identifier != "to") { + return false; + } + if (!number_p.parse(pos, number_ptr, expected)) + { + return false; + } + range_indices.second = number_ptr->as()->value.get(); + } else { + return false; + } + + if (range_indices.first >= range_indices.second) { + throw Exception{ErrorCodes::BAD_ARGUMENTS, "Start of range must be greater than end of range, however {} >= {}", + range_indices.first, range_indices.second}; + } + + range->ranges.push_back(std::move(range_indices)); + if (pos->type != TokenType::ClosingSquareBracket) { + ++pos; + } + } + ++pos; + + /// We cant have both ranges and star present, so parse was successful <=> exactly 1 of these conditions is true + return !range->ranges.empty() != range->is_star; +} + +} // namespace DB diff --git a/src/Functions/JSONPath/Parsers/ParserJSONPathRange.h b/src/Functions/JSONPath/Parsers/ParserJSONPathRange.h new file mode 100644 index 00000000000..95708e5e7b8 --- /dev/null +++ b/src/Functions/JSONPath/Parsers/ParserJSONPathRange.h @@ -0,0 +1,19 @@ +#pragma once + +#include + + +namespace DB +{ + +class ParserJSONPathRange : public IParserBase +{ +private: + const char * getName() const override { return "ParserJSONPathRange"; } + bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected) override; + +public: + explicit ParserJSONPathRange() = default; +}; + 
+} diff --git a/src/Functions/RapidJSONParser.h b/src/Functions/RapidJSONParser.h index 5604a8c9fe0..992480d64f7 100644 --- a/src/Functions/RapidJSONParser.h +++ b/src/Functions/RapidJSONParser.h @@ -12,6 +12,7 @@ namespace DB { + /// This class can be used as an argument for the template class FunctionJSON. /// It provides ability to parse JSONs using rapidjson library. struct RapidJSONParser @@ -44,8 +45,6 @@ struct RapidJSONParser Array getArray() const; Object getObject() const; - ALWAYS_INLINE rapidjson::Value * getDom() const { return nullptr; } - private: const rapidjson::Value * ptr = nullptr; }; diff --git a/src/Functions/SimdJSONParser.h b/src/Functions/SimdJSONParser.h index 2dd952d920f..b9df8b142e3 100644 --- a/src/Functions/SimdJSONParser.h +++ b/src/Functions/SimdJSONParser.h @@ -5,10 +5,10 @@ #endif #if USE_SIMDJSON -# include +# include # include # include -# include +# include namespace DB @@ -30,8 +30,8 @@ struct SimdJSONParser class Element { public: - ALWAYS_INLINE Element() { } - ALWAYS_INLINE Element(const simdjson::dom::element & element_) : element(element_) { } + ALWAYS_INLINE Element() {} + ALWAYS_INLINE Element(const simdjson::dom::element & element_) : element(element_) {} ALWAYS_INLINE bool isInt64() const { return element.type() == simdjson::dom::element_type::INT64; } ALWAYS_INLINE bool isUInt64() const { return element.type() == simdjson::dom::element_type::UINT64; } @@ -63,35 +63,21 @@ struct SimdJSONParser class Iterator { public: - ALWAYS_INLINE Iterator(const simdjson::dom::array::iterator & it_) : it(it_) { } + ALWAYS_INLINE Iterator(const simdjson::dom::array::iterator & it_) : it(it_) {} ALWAYS_INLINE Element operator*() const { return *it; } - ALWAYS_INLINE Iterator & operator++() - { - ++it; - return *this; - } - ALWAYS_INLINE Iterator operator++(int) - { - auto res = *this; - ++it; - return res; - } + ALWAYS_INLINE Iterator & operator++() { ++it; return *this; } + ALWAYS_INLINE Iterator operator++(int) { auto res = *this; ++it; return res; } ALWAYS_INLINE friend bool operator!=(const Iterator & left, const Iterator & right) { return left.it != right.it; } ALWAYS_INLINE friend bool operator==(const Iterator & left, const Iterator & right) { return !(left != right); } - private: simdjson::dom::array::iterator it; }; - ALWAYS_INLINE Array(const simdjson::dom::array & array_) : array(array_) { } + ALWAYS_INLINE Array(const simdjson::dom::array & array_) : array(array_) {} ALWAYS_INLINE Iterator begin() const { return array.begin(); } ALWAYS_INLINE Iterator end() const { return array.end(); } ALWAYS_INLINE size_t size() const { return array.size(); } - ALWAYS_INLINE Element operator[](size_t index) const - { - assert(index < size()); - return array.at(index).first; - } + ALWAYS_INLINE Element operator[](size_t index) const { assert(index < size()); return array.at(index).first; } private: simdjson::dom::array array; @@ -106,31 +92,17 @@ struct SimdJSONParser class Iterator { public: - ALWAYS_INLINE Iterator(const simdjson::dom::object::iterator & it_) : it(it_) { } - ALWAYS_INLINE KeyValuePair operator*() const - { - const auto & res = *it; - return {res.key, res.value}; - } - ALWAYS_INLINE Iterator & operator++() - { - ++it; - return *this; - } - ALWAYS_INLINE Iterator operator++(int) - { - auto res = *this; - ++it; - return res; - } + ALWAYS_INLINE Iterator(const simdjson::dom::object::iterator & it_) : it(it_) {} + ALWAYS_INLINE KeyValuePair operator*() const { const auto & res = *it; return {res.key, res.value}; } + ALWAYS_INLINE Iterator & 
operator++() { ++it; return *this; } + ALWAYS_INLINE Iterator operator++(int) { auto res = *this; ++it; return res; } ALWAYS_INLINE friend bool operator!=(const Iterator & left, const Iterator & right) { return left.it != right.it; } ALWAYS_INLINE friend bool operator==(const Iterator & left, const Iterator & right) { return !(left != right); } - private: simdjson::dom::object::iterator it; }; - ALWAYS_INLINE Object(const simdjson::dom::object & object_) : object(object_) { } + ALWAYS_INLINE Object(const simdjson::dom::object & object_) : object(object_) {} ALWAYS_INLINE Iterator begin() const { return object.begin(); } ALWAYS_INLINE Iterator end() const { return object.end(); } ALWAYS_INLINE size_t size() const { return object.size(); } @@ -156,8 +128,6 @@ struct SimdJSONParser return {res.key, res.value}; } - ALWAYS_INLINE simdjson::dom::object getDom() const { return object; } - private: simdjson::dom::object object; }; @@ -177,8 +147,8 @@ struct SimdJSONParser void reserve(size_t max_size) { if (parser.allocate(max_size) != simdjson::error_code::SUCCESS) - throw Exception{ - "Couldn't allocate " + std::to_string(max_size) + " bytes when parsing JSON", ErrorCodes::CANNOT_ALLOCATE_MEMORY}; + throw Exception{"Couldn't allocate " + std::to_string(max_size) + " bytes when parsing JSON", + ErrorCodes::CANNOT_ALLOCATE_MEMORY}; } private: From b9b28c3b7f2d8f0a4f01d8125c8405a9508ff324 Mon Sep 17 00:00:00 2001 From: Konstantin Rudenskii Date: Sat, 29 May 2021 15:34:39 +0300 Subject: [PATCH 153/931] Fix include --- src/Functions/FunctionSQLJSON.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Functions/FunctionSQLJSON.h b/src/Functions/FunctionSQLJSON.h index 1fc6986256d..b3f9db87ac9 100644 --- a/src/Functions/FunctionSQLJSON.h +++ b/src/Functions/FunctionSQLJSON.h @@ -3,7 +3,7 @@ #include #include #include -#include +#include #include #include #include From 2035c0cd6aa0338ecabb81fa8d5c3c5d260a62ea Mon Sep 17 00:00:00 2001 From: Konstantin Rudenskii Date: Sat, 29 May 2021 16:15:58 +0300 Subject: [PATCH 154/931] Change to IFunction --- src/Functions/FunctionsJSON.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Functions/FunctionsJSON.h b/src/Functions/FunctionsJSON.h index f066bb1029a..1032ab15ff9 100644 --- a/src/Functions/FunctionsJSON.h +++ b/src/Functions/FunctionsJSON.h @@ -1,6 +1,6 @@ #pragma once -#include +#include #include #include #include From 819ad748a4016b35ad682fae09599f1e6f8282b1 Mon Sep 17 00:00:00 2001 From: Konstantin Rudenskii Date: Sat, 29 May 2021 16:53:55 +0300 Subject: [PATCH 155/931] Fix IFunction --- src/Functions/FunctionSQLJSON.h | 2 +- src/Functions/FunctionsJSON.h | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/Functions/FunctionSQLJSON.h b/src/Functions/FunctionSQLJSON.h index b3f9db87ac9..cee7390d67d 100644 --- a/src/Functions/FunctionSQLJSON.h +++ b/src/Functions/FunctionSQLJSON.h @@ -1,5 +1,6 @@ #pragma once +#include #include #include #include @@ -8,7 +9,6 @@ #include #include #include -#include #include #include #include diff --git a/src/Functions/FunctionsJSON.h b/src/Functions/FunctionsJSON.h index 1032ab15ff9..f066bb1029a 100644 --- a/src/Functions/FunctionsJSON.h +++ b/src/Functions/FunctionsJSON.h @@ -1,6 +1,6 @@ #pragma once -#include +#include #include #include #include From 4b85c8e31f1be21f9b21e89c13ec0138d6ed6aab Mon Sep 17 00:00:00 2001 From: elevankoff Date: Wed, 2 Jun 2021 08:00:10 +0000 Subject: [PATCH 156/931] Small style changes --- src/Common/DiskStatisticsOS.cpp | 2 +- 
src/Common/DiskStatisticsOS.h | 2 +- src/Common/MemoryInfoOS.cpp | 2 +- src/Common/MemoryInfoOS.h | 2 +- src/Common/ProcessorStatisticsOS.cpp | 2 +- src/Common/ProcessorStatisticsOS.h | 6 +++--- 6 files changed, 8 insertions(+), 8 deletions(-) diff --git a/src/Common/DiskStatisticsOS.cpp b/src/Common/DiskStatisticsOS.cpp index 0485d129ecc..69f15b30a9e 100644 --- a/src/Common/DiskStatisticsOS.cpp +++ b/src/Common/DiskStatisticsOS.cpp @@ -61,7 +61,7 @@ DiskStatisticsOS::Data DiskStatisticsOS::get() return data; } -String DiskStatisticsOS::readNextFilesystem(ReadBuffer& mounts_in) +String DiskStatisticsOS::readNextFilesystem(ReadBuffer & mounts_in) { String filesystem, unused; diff --git a/src/Common/DiskStatisticsOS.h b/src/Common/DiskStatisticsOS.h index 05f53a421d2..d4ec2417924 100644 --- a/src/Common/DiskStatisticsOS.h +++ b/src/Common/DiskStatisticsOS.h @@ -29,7 +29,7 @@ public: Data get(); private: - String readNextFilesystem(ReadBuffer& mounts_in); + String readNextFilesystem(ReadBuffer & mounts_in); }; } diff --git a/src/Common/MemoryInfoOS.cpp b/src/Common/MemoryInfoOS.cpp index 8cf2a0b44f4..301fcb6ad15 100644 --- a/src/Common/MemoryInfoOS.cpp +++ b/src/Common/MemoryInfoOS.cpp @@ -63,7 +63,7 @@ MemoryInfoOS::Data MemoryInfoOS::get() return data; } -std::pair MemoryInfoOS::readField(ReadBuffer& meminfo_in) +std::pair MemoryInfoOS::readField(ReadBuffer & meminfo_in) { String key; uint64_t val; diff --git a/src/Common/MemoryInfoOS.h b/src/Common/MemoryInfoOS.h index ae630e4ee70..63cda5b5c37 100644 --- a/src/Common/MemoryInfoOS.h +++ b/src/Common/MemoryInfoOS.h @@ -39,7 +39,7 @@ public: Data get(); private: - std::pair readField(ReadBuffer& meminfo_in); + std::pair readField(ReadBuffer & meminfo_in); }; } diff --git a/src/Common/ProcessorStatisticsOS.cpp b/src/Common/ProcessorStatisticsOS.cpp index 69bce5f5b51..9b43fa428a9 100644 --- a/src/Common/ProcessorStatisticsOS.cpp +++ b/src/Common/ProcessorStatisticsOS.cpp @@ -110,7 +110,7 @@ void ProcessorStatisticsOS::calcStLoad(ProcStLoad & stload) last_proc_time = cur_proc_time; } -void ProcessorStatisticsOS::readProcTimeAndProcesses(ProcTime & proc_time, ProcStLoad& stload) +void ProcessorStatisticsOS::readProcTimeAndProcesses(ProcTime & proc_time, ProcStLoad & stload) { ReadBufferFromFile procst_in(procst_filename, READ_BUFFER_BUF_SIZE, O_RDONLY | O_CLOEXEC); diff --git a/src/Common/ProcessorStatisticsOS.h b/src/Common/ProcessorStatisticsOS.h index 70edfceb2ca..10b6d050b8c 100644 --- a/src/Common/ProcessorStatisticsOS.h +++ b/src/Common/ProcessorStatisticsOS.h @@ -75,10 +75,10 @@ private: }; void readLoadavg(ProcLoadavg & loadavg); - void calcStLoad(ProcStLoad & stload); - void readFreq(ProcFreq & freq); + void calcStLoad(ProcStLoad & stload); + void readFreq(ProcFreq & freq); - void readProcTimeAndProcesses(ProcTime & proc_time, ProcStLoad& stload); + void readProcTimeAndProcesses(ProcTime & proc_time, ProcStLoad & stload); private: std::time_t last_stload_call_time; From b00c3d8f5a911bd6ef3af9a1af50ae0794041734 Mon Sep 17 00:00:00 2001 From: Konstantin Rudenskii Date: Thu, 3 Jun 2021 20:47:53 +0300 Subject: [PATCH 157/931] Fix build (probably) --- src/Functions/DummyJSONParser.h | 3 ++- src/Functions/FunctionSQLJSON.h | 15 ++++++--------- 2 files changed, 8 insertions(+), 10 deletions(-) diff --git a/src/Functions/DummyJSONParser.h b/src/Functions/DummyJSONParser.h index 3010347e4c4..21b4ebef150 100644 --- a/src/Functions/DummyJSONParser.h +++ b/src/Functions/DummyJSONParser.h @@ -40,7 +40,8 @@ struct DummyJSONParser Array getArray() 
const { return {}; } Object getObject() const { return {}; } - Element getElement() {return {}; } + Element getElement() { return {}; } + std::ostream & operator<<(std::ostream & os) { return os; } }; /// References an array in a JSON document. diff --git a/src/Functions/FunctionSQLJSON.h b/src/Functions/FunctionSQLJSON.h index cee7390d67d..cf33cf804ce 100644 --- a/src/Functions/FunctionSQLJSON.h +++ b/src/Functions/FunctionSQLJSON.h @@ -157,11 +157,11 @@ private: }; template typename Impl> -class FunctionSQLJSON : public IFunction +class FunctionSQLJSON : public IFunction, WithConstContext { public: - static FunctionPtr create(const Context & context_) { return std::make_shared(context_); } - FunctionSQLJSON(const Context & context_) : context(context_) { } + static FunctionPtr create(ContextConstPtr context_) { return std::make_shared(context_); } + FunctionSQLJSON(ContextConstPtr context_) : WithConstContext(context_) {} static constexpr auto name = Name::name; String getName() const override { return Name::name; } @@ -182,7 +182,7 @@ public: /// 3. Parser(Tokens, ASTPtr) -> complete AST /// 4. Execute functions, call interpreter for each json (in function) #if USE_SIMDJSON - if (context.getSettingsRef().allow_simdjson) + if (getContext()->getSettingsRef().allow_simdjson) return FunctionSQLJSONHelpers::Executor::run(arguments, result_type, input_rows_count); #endif @@ -192,9 +192,6 @@ public: return FunctionSQLJSONHelpers::Executor::run(arguments, result_type, input_rows_count); #endif } - -private: - const Context & context; }; struct NameJSONExists @@ -239,9 +236,9 @@ public: /// or Exhausted (if we never found the item) ColumnUInt8 & col_bool = assert_cast(dest); if (status == VisitorStatus::Ok) { - col_bool.insert(0); - } else { col_bool.insert(1); + } else { + col_bool.insert(0); } return true; } From 04daa9b5ed79c358dee587afe009bf8229fe79ef Mon Sep 17 00:00:00 2001 From: Konstantin Rudenskii Date: Fri, 4 Jun 2021 00:17:51 +0300 Subject: [PATCH 158/931] Second try to fix build --- src/Functions/DummyJSONParser.h | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/src/Functions/DummyJSONParser.h b/src/Functions/DummyJSONParser.h index 21b4ebef150..e74ab57ded1 100644 --- a/src/Functions/DummyJSONParser.h +++ b/src/Functions/DummyJSONParser.h @@ -41,7 +41,7 @@ struct DummyJSONParser Object getObject() const { return {}; } Element getElement() { return {}; } - std::ostream & operator<<(std::ostream & os) { return os; } + std::ostream & operator<<(std::ostream & os) const { return os; } }; /// References an array in a JSON document. 
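// Note (editor, not part of the patch): the consecutive build fixes around this
// hunk circle two C++ rules that are easy to miss in diff form. A *member*
// operator<< on Element is matched as "element << stream", so the
// "out << current_element.getElement()" call in FunctionSQLJSON.h never finds
// it; only a namespace-scope overload with the stream on the left does. And a
// free function defined in a header needs `inline` linkage -- ALWAYS_INLINE
// expands to an optimization attribute and does not affect linkage -- otherwise
// every translation unit including the header emits its own definition and the
// link fails with duplicate symbols. A minimal standalone sketch (invented
// names, not the actual ClickHouse sources) of the shape the hunks below
// converge on:

#include <ostream>

struct DummyElement {};

inline std::ostream & operator<<(std::ostream & out, DummyElement)
{
    return out; /// a dummy parser has nothing to print
}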
@@ -100,4 +100,9 @@ struct DummyJSONParser
 #endif
 };
 
+ALWAYS_INLINE std::ostream& operator<<(std::ostream& out, DummyJSONParser::Element)
+{
+    return out;
+}
+
 }

From bef8b4c2c6b205d639b69571e039fe82f5e9b713 Mon Sep 17 00:00:00 2001
From: Konstantin Rudenskii
Date: Fri, 4 Jun 2021 00:56:06 +0300
Subject: [PATCH 159/931] Add 'inline' to DummyParser's << operator; don't
 throw exception on USE_RAPIDJSON

---
 src/Functions/DummyJSONParser.h | 3 +--
 src/Functions/FunctionSQLJSON.h | 6 ------
 2 files changed, 1 insertion(+), 8 deletions(-)

diff --git a/src/Functions/DummyJSONParser.h b/src/Functions/DummyJSONParser.h
index e74ab57ded1..01fdab1abb6 100644
--- a/src/Functions/DummyJSONParser.h
+++ b/src/Functions/DummyJSONParser.h
@@ -41,7 +41,6 @@ struct DummyJSONParser
     Object getObject() const { return {}; }
 
     Element getElement() { return {}; }
-    std::ostream & operator<<(std::ostream & os) const { return os; }
 };
 
 /// References an array in a JSON document.
@@ -100,7 +99,7 @@ struct DummyJSONParser
 #endif
 };
 
-ALWAYS_INLINE std::ostream& operator<<(std::ostream& out, DummyJSONParser::Element)
+inline ALWAYS_INLINE std::ostream& operator<<(std::ostream& out, DummyJSONParser::Element)
 {
     return out;
 }
diff --git a/src/Functions/FunctionSQLJSON.h b/src/Functions/FunctionSQLJSON.h
index cf33cf804ce..2d4f51e63c4 100644
--- a/src/Functions/FunctionSQLJSON.h
+++ b/src/Functions/FunctionSQLJSON.h
@@ -152,8 +152,6 @@ public:
             return to;
         }
     };
-
-private:
 };
 
 template typename Impl>
@@ -184,10 +182,6 @@ public:
 #if USE_SIMDJSON
         if (getContext()->getSettingsRef().allow_simdjson)
             return FunctionSQLJSONHelpers::Executor::run(arguments, result_type, input_rows_count);
-#endif
-
-#if USE_RAPIDJSON
-        throw Exception{"RapidJSON is not supported :(", ErrorCodes::BAD_ARGUMENTS};
 #else
         return FunctionSQLJSONHelpers::Executor::run(arguments, result_type, input_rows_count);
 #endif
     }

From 00e76ca372edccde2b6f7ac7430d3231878b19e8 Mon Sep 17 00:00:00 2001
From: kssenii
Date: Thu, 3 Jun 2021 19:45:27 +0000
Subject: [PATCH 160/931] Checking tests

---
 src/Storages/StorageMaterializeMySQL.cpp | 66 +++-
 .../test.py | 303 ++++++++++--------
 .../test_storage_postgresql_replica/test.py | 176 +++++-----
 3 files changed, 307 insertions(+), 238 deletions(-)

diff --git a/src/Storages/StorageMaterializeMySQL.cpp b/src/Storages/StorageMaterializeMySQL.cpp
index 6352b62d6f4..8e6f2e1ad63 100644
--- a/src/Storages/StorageMaterializeMySQL.cpp
+++ b/src/Storages/StorageMaterializeMySQL.cpp
@@ -23,7 +23,6 @@
 #include
 #include
 
-#include
 
 namespace DB
 {
@@ -38,7 +37,7 @@ StorageMaterializeMySQL::StorageMaterializeMySQL(const StoragePtr & nested_stora
 
 Pipe StorageMaterializeMySQL::read(
     const Names & column_names,
-    const StorageMetadataPtr & metadata_snapshot,
+    const StorageMetadataPtr & /*metadata_snapshot*/,
     SelectQueryInfo & query_info,
     ContextPtr context,
     QueryProcessingStage::Enum processed_stage,
@@ -48,15 +47,60 @@ Pipe StorageMaterializeMySQL::read(
     /// If the background synchronization thread has exception.
rethrowSyncExceptionIfNeed(database); - return readFinalFromNestedStorage( - nested_storage, - column_names, - metadata_snapshot, - query_info, - context, - processed_stage, - max_block_size, - num_streams); + NameSet column_names_set = NameSet(column_names.begin(), column_names.end()); + auto lock = nested_storage->lockForShare(context->getCurrentQueryId(), context->getSettingsRef().lock_acquire_timeout); + const StorageMetadataPtr & nested_metadata = nested_storage->getInMemoryMetadataPtr(); + + Block nested_header = nested_metadata->getSampleBlock(); + ColumnWithTypeAndName & sign_column = nested_header.getByPosition(nested_header.columns() - 2); + ColumnWithTypeAndName & version_column = nested_header.getByPosition(nested_header.columns() - 1); + + if (ASTSelectQuery * select_query = query_info.query->as(); select_query && !column_names_set.count(version_column.name)) + { + auto & tables_in_select_query = select_query->tables()->as(); + + if (!tables_in_select_query.children.empty()) + { + auto & tables_element = tables_in_select_query.children[0]->as(); + + if (tables_element.table_expression) + tables_element.table_expression->as().final = true; + } + } + + String filter_column_name; + Names require_columns_name = column_names; + ASTPtr expressions = std::make_shared(); + if (column_names_set.empty() || !column_names_set.count(sign_column.name)) + { + require_columns_name.emplace_back(sign_column.name); + + const auto & sign_column_name = std::make_shared(sign_column.name); + const auto & fetch_sign_value = std::make_shared(Field(Int8(1))); + + expressions->children.emplace_back(makeASTFunction("equals", sign_column_name, fetch_sign_value)); + filter_column_name = expressions->children.back()->getColumnName(); + + for (const auto & column_name : column_names) + expressions->children.emplace_back(std::make_shared(column_name)); + } + + Pipe pipe = nested_storage->read(require_columns_name, nested_metadata, query_info, context, processed_stage, max_block_size, num_streams); + pipe.addTableLock(lock); + + if (!expressions->children.empty() && !pipe.empty()) + { + Block pipe_header = pipe.getHeader(); + auto syntax = TreeRewriter(context).analyze(expressions, pipe_header.getNamesAndTypesList()); + ExpressionActionsPtr expression_actions = ExpressionAnalyzer(expressions, syntax, context).getActions(true /* add_aliases */, false /* project_result */); + + pipe.addSimpleTransform([&](const Block & header) + { + return std::make_shared(header, expression_actions, filter_column_name, false); + }); + } + + return pipe; } NamesAndTypesList StorageMaterializeMySQL::getVirtuals() const diff --git a/tests/integration/test_postgresql_replica_database_engine/test.py b/tests/integration/test_postgresql_replica_database_engine/test.py index c98e4ee14d8..f19a5cf2467 100644 --- a/tests/integration/test_postgresql_replica_database_engine/test.py +++ b/tests/integration/test_postgresql_replica_database_engine/test.py @@ -31,11 +31,12 @@ postgres_table_template_3 = """ key1 Integer NOT NULL, value1 Integer, key2 Integer NOT NULL, value2 Integer NOT NULL) """ -def get_postgres_conn(database=False, auto_commit=True, database_name='postgres_database'): +def get_postgres_conn(ip, port, database=False, auto_commit=True, database_name='postgres_database'): if database == True: - conn_string = "host='localhost' dbname='{}' user='postgres' password='mysecretpassword'".format(database_name) + conn_string = "host={} port={} dbname='{}' user='postgres' password='mysecretpassword'".format(ip, port, database_name) 
else: - conn_string = "host='localhost' user='postgres' password='mysecretpassword'" + conn_string = "host={} port={} user='postgres' password='mysecretpassword'".format(ip, port) + conn = psycopg2.connect(conn_string) if auto_commit: conn.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT) @@ -47,22 +48,32 @@ def create_postgres_db(cursor, name='postgres_database'): cursor.execute("CREATE DATABASE {}".format(name)) def drop_postgres_db(cursor, name='postgres_database'): - cursor.execute("DROP DATABASE IF EXISTS {}".format(name)) + cursor.execute("DROP DATABASE {}".format(name)) -def create_clickhouse_postgres_db(name='postgres_database'): +def create_clickhouse_postgres_db(ip, port, name='postgres_database'): instance.query(''' CREATE DATABASE {} - ENGINE = PostgreSQL('postgres1:5432', '{}', 'postgres', 'mysecretpassword')'''.format(name, name)) + ENGINE = PostgreSQL('{}:{}', '{}', 'postgres', 'mysecretpassword')'''.format(name, ip, port, name)) def drop_clickhouse_postgres_db(name='postgres_database'): - instance.query('DROP DATABASE IF EXISTS {}'.format(name)) + instance.query('DROP DATABASE {}'.format(name)) -def create_materialized_db(materialized_database='test_database', postgres_database='postgres_database'): - instance.query("CREATE DATABASE {} ENGINE = MaterializePostgreSQL('postgres1:5432', '{}', 'postgres', 'mysecretpassword')".format(materialized_database, postgres_database)) +def create_materialized_db(ip, port, + materialized_database='test_database', + postgres_database='postgres_database', + settings=[]): + create_query = "CREATE DATABASE {} ENGINE = MaterializePostgreSQL('{}:{}', '{}', 'postgres', 'mysecretpassword')".format(materialized_database, ip, port, postgres_database) + if len(settings) > 0: + create_query += " SETTINGS " + for i in range(len(settings)): + if i != 0: + create_query += ', ' + create_query += settings[i] + instance.query(create_query) assert materialized_database in instance.query('SHOW DATABASES') def drop_materialized_db(materialized_database='test_database'): - instance.query('DROP DATABASE IF EXISTS {}'.format(materialized_database)) + instance.query('DROP DATABASE {}'.format(materialized_database)) assert materialized_database not in instance.query('SHOW DATABASES') def create_postgres_table(cursor, table_name, replica_identity_full=False, template=postgres_table_template): @@ -120,10 +131,12 @@ def check_tables_are_synchronized(table_name, order_by='key', postgres_database= def started_cluster(): try: cluster.start() - conn = get_postgres_conn() + conn = get_postgres_conn(ip=cluster.postgres_ip, + port=cluster.postgres_port) cursor = conn.cursor() create_postgres_db(cursor, 'postgres_database') - create_clickhouse_postgres_db() + create_clickhouse_postgres_db(ip=cluster.postgres_ip, + port=cluster.postgres_port) instance.query("DROP DATABASE IF EXISTS test_database") yield cluster @@ -140,7 +153,9 @@ def postgresql_setup_teardown(): @pytest.mark.timeout(120) def test_load_and_sync_all_database_tables(started_cluster): instance.query("DROP DATABASE IF EXISTS test_database") - conn = get_postgres_conn(True) + conn = get_postgres_conn(ip=started_cluster.postgres_ip, + port=started_cluster.postgres_port, + database=True) cursor = conn.cursor() NUM_TABLES = 5 @@ -149,7 +164,8 @@ def test_load_and_sync_all_database_tables(started_cluster): create_postgres_table(cursor, table_name); instance.query("INSERT INTO postgres_database.{} SELECT number, number from numbers(50)".format(table_name)) - instance.query("CREATE DATABASE test_database ENGINE = 
MaterializePostgreSQL('postgres1:5432', 'postgres_database', 'postgres', 'mysecretpassword')") + create_materialized_db(ip=started_cluster.postgres_ip, + port=started_cluster.postgres_port) assert 'test_database' in instance.query('SHOW DATABASES') for i in range(NUM_TABLES): @@ -167,7 +183,9 @@ def test_load_and_sync_all_database_tables(started_cluster): @pytest.mark.timeout(120) def test_replicating_dml(started_cluster): instance.query("DROP DATABASE IF EXISTS test_database") - conn = get_postgres_conn(True) + conn = get_postgres_conn(ip=started_cluster.postgres_ip, + port=started_cluster.postgres_port, + database=True) cursor = conn.cursor() NUM_TABLES = 5 @@ -175,8 +193,8 @@ def test_replicating_dml(started_cluster): create_postgres_table(cursor, 'postgresql_replica_{}'.format(i)); instance.query("INSERT INTO postgres_database.postgresql_replica_{} SELECT number, {} from numbers(50)".format(i, i)) - instance.query( - "CREATE DATABASE test_database ENGINE = MaterializePostgreSQL('postgres1:5432', 'postgres_database', 'postgres', 'mysecretpassword')") + create_materialized_db(ip=started_cluster.postgres_ip, + port=started_cluster.postgres_port) for i in range(NUM_TABLES): instance.query("INSERT INTO postgres_database.postgresql_replica_{} SELECT 50 + number, {} from numbers(1000)".format(i, i)) @@ -210,7 +228,9 @@ def test_replicating_dml(started_cluster): @pytest.mark.timeout(120) def test_different_data_types(started_cluster): instance.query("DROP DATABASE IF EXISTS test_database") - conn = get_postgres_conn(True) + conn = get_postgres_conn(ip=started_cluster.postgres_ip, + port=started_cluster.postgres_port, + database=True) cursor = conn.cursor() cursor.execute('drop table if exists test_data_types;') cursor.execute('drop table if exists test_array_data_type;') @@ -236,8 +256,8 @@ def test_different_data_types(started_cluster): k Char(2)[] -- Nullable(String) )''') - instance.query( - "CREATE DATABASE test_database ENGINE = MaterializePostgreSQL('postgres1:5432', 'postgres_database', 'postgres', 'mysecretpassword')") + create_materialized_db(ip=started_cluster.postgres_ip, + port=started_cluster.postgres_port) for i in range(10): instance.query(''' @@ -294,7 +314,9 @@ def test_different_data_types(started_cluster): @pytest.mark.timeout(120) def test_load_and_sync_subset_of_database_tables(started_cluster): instance.query("DROP DATABASE IF EXISTS test_database") - conn = get_postgres_conn(True) + conn = get_postgres_conn(ip=started_cluster.postgres_ip, + port=started_cluster.postgres_port, + database=True) cursor = conn.cursor() NUM_TABLES = 10 @@ -309,11 +331,9 @@ def test_load_and_sync_subset_of_database_tables(started_cluster): publication_tables += ', ' publication_tables += table_name - instance.query(''' - CREATE DATABASE test_database - ENGINE = MaterializePostgreSQL('postgres1:5432', 'postgres_database', 'postgres', 'mysecretpassword') - SETTINGS materialize_postgresql_tables_list = '{}'; - '''.format(publication_tables)) + create_materialized_db(ip=started_cluster.postgres_ip, + port=started_cluster.postgres_port, + settings=["materialize_postgresql_tables_list = '{}'".format(publication_tables)]) assert 'test_database' in instance.query('SHOW DATABASES') time.sleep(1) @@ -347,13 +367,15 @@ def test_load_and_sync_subset_of_database_tables(started_cluster): @pytest.mark.timeout(120) def test_changing_replica_identity_value(started_cluster): instance.query("DROP DATABASE IF EXISTS test_database") - conn = get_postgres_conn(True) + conn = 
get_postgres_conn(ip=started_cluster.postgres_ip, + port=started_cluster.postgres_port, + database=True) cursor = conn.cursor() create_postgres_table(cursor, 'postgresql_replica'); instance.query("INSERT INTO postgres_database.postgresql_replica SELECT 50 + number, number from numbers(50)") - instance.query( - "CREATE DATABASE test_database ENGINE = MaterializePostgreSQL('postgres1:5432', 'postgres_database', 'postgres', 'mysecretpassword')") + create_materialized_db(ip=started_cluster.postgres_ip, + port=started_cluster.postgres_port) instance.query("INSERT INTO postgres_database.postgresql_replica SELECT 100 + number, number from numbers(50)") check_tables_are_synchronized('postgresql_replica'); @@ -364,7 +386,9 @@ def test_changing_replica_identity_value(started_cluster): @pytest.mark.timeout(320) def test_clickhouse_restart(started_cluster): instance.query("DROP DATABASE IF EXISTS test_database") - conn = get_postgres_conn(True) + conn = get_postgres_conn(ip=started_cluster.postgres_ip, + port=started_cluster.postgres_port, + database=True) cursor = conn.cursor() NUM_TABLES = 5 @@ -390,7 +414,9 @@ def test_clickhouse_restart(started_cluster): @pytest.mark.timeout(120) def test_replica_identity_index(started_cluster): instance.query("DROP DATABASE IF EXISTS test_database") - conn = get_postgres_conn(True) + conn = get_postgres_conn(ip=started_cluster.postgres_ip, + port=started_cluster.postgres_port, + database=True) cursor = conn.cursor() create_postgres_table(cursor, 'postgresql_replica', template=postgres_table_template_3); @@ -398,8 +424,8 @@ def test_replica_identity_index(started_cluster): cursor.execute("ALTER TABLE postgresql_replica REPLICA IDENTITY USING INDEX idx") instance.query("INSERT INTO postgres_database.postgresql_replica SELECT number, number, number, number from numbers(50, 10)") - instance.query( - "CREATE DATABASE test_database ENGINE = MaterializePostgreSQL('postgres1:5432', 'postgres_database', 'postgres', 'mysecretpassword')") + create_materialized_db(ip=started_cluster.postgres_ip, + port=started_cluster.postgres_port) instance.query("INSERT INTO postgres_database.postgresql_replica SELECT number, number, number, number from numbers(100, 10)") check_tables_are_synchronized('postgresql_replica', order_by='key1'); @@ -416,7 +442,9 @@ def test_replica_identity_index(started_cluster): @pytest.mark.timeout(320) def test_table_schema_changes(started_cluster): instance.query("DROP DATABASE IF EXISTS test_database") - conn = get_postgres_conn(True) + conn = get_postgres_conn(ip=started_cluster.postgres_ip, + port=started_cluster.postgres_port, + database=True) cursor = conn.cursor() NUM_TABLES = 5 @@ -424,11 +452,9 @@ def test_table_schema_changes(started_cluster): create_postgres_table(cursor, 'postgresql_replica_{}'.format(i), template=postgres_table_template_2); instance.query("INSERT INTO postgres_database.postgresql_replica_{} SELECT number, {}, {}, {} from numbers(25)".format(i, i, i, i)) - instance.query( - """CREATE DATABASE test_database - ENGINE = MaterializePostgreSQL('postgres1:5432', 'postgres_database', 'postgres', 'mysecretpassword') - SETTINGS materialize_postgresql_allow_automatic_update = 1; - """) + create_materialized_db(ip=started_cluster.postgres_ip, + port=started_cluster.postgres_port, + settings=["materialize_postgresql_allow_automatic_update = 1"]) for i in range(NUM_TABLES): instance.query("INSERT INTO postgres_database.postgresql_replica_{} SELECT 25 + number, {}, {}, {} from numbers(25)".format(i, i, i, i)) @@ -472,7 +498,9 @@ def 
test_table_schema_changes(started_cluster): @pytest.mark.timeout(120) def test_many_concurrent_queries(started_cluster): instance.query("DROP DATABASE IF EXISTS test_database") - conn = get_postgres_conn(True) + conn = get_postgres_conn(ip=started_cluster.postgres_ip, + port=started_cluster.postgres_port, + database=True) cursor = conn.cursor() NUM_TABLES = 5 @@ -522,7 +550,8 @@ def test_many_concurrent_queries(started_cluster): for i in range(threads_num): threads.append(threading.Thread(target=attack, args=(i,))) - create_materialized_db() + create_materialized_db(ip=started_cluster.postgres_ip, + port=started_cluster.postgres_port) for thread in threads: time.sleep(random.uniform(0, 1)) @@ -549,13 +578,16 @@ def test_many_concurrent_queries(started_cluster): @pytest.mark.timeout(120) def test_single_transaction(started_cluster): instance.query("DROP DATABASE IF EXISTS test_database") - conn = get_postgres_conn(database=True, auto_commit=False) + conn = get_postgres_conn(ip=started_cluster.postgres_ip, + port=started_cluster.postgres_port, + database=True, auto_commit=False) cursor = conn.cursor() create_postgres_table(cursor, 'postgresql_replica_0'); conn.commit() - create_materialized_db() + create_materialized_db(ip=started_cluster.postgres_ip, + port=started_cluster.postgres_port) assert_nested_table_is_created('postgresql_replica_0') for query in queries: @@ -573,14 +605,15 @@ def test_single_transaction(started_cluster): def test_virtual_columns(started_cluster): - conn = get_postgres_conn(True) + conn = get_postgres_conn(ip=started_cluster.postgres_ip, + port=started_cluster.postgres_port, + database=True) cursor = conn.cursor() create_postgres_table(cursor, 'postgresql_replica_0'); - instance.query( - """CREATE DATABASE test_database - ENGINE = MaterializePostgreSQL('postgres1:5432', 'postgres_database', 'postgres', 'mysecretpassword') - SETTINGS materialize_postgresql_allow_automatic_update = 1; """) + create_materialized_db(ip=started_cluster.postgres_ip, + port=started_cluster.postgres_port, + settings=["materialize_postgresql_allow_automatic_update = 1"]) assert_nested_table_is_created('postgresql_replica_0') instance.query("INSERT INTO postgres_database.postgresql_replica_0 SELECT number, number from numbers(10)") check_tables_are_synchronized('postgresql_replica_0'); @@ -604,93 +637,93 @@ def test_virtual_columns(started_cluster): drop_materialized_db() -def test_multiple_databases(started_cluster): - instance.query("DROP DATABASE IF EXISTS test_database_1") - instance.query("DROP DATABASE IF EXISTS test_database_2") - NUM_TABLES = 5 - - conn = get_postgres_conn() - cursor = conn.cursor() - create_postgres_db(cursor, 'postgres_database_1') - create_postgres_db(cursor, 'postgres_database_2') - - conn1 = get_postgres_conn(True, True, 'postgres_database_1') - conn2 = get_postgres_conn(True, True, 'postgres_database_2') - - cursor1 = conn1.cursor() - cursor2 = conn2.cursor() - - create_clickhouse_postgres_db('postgres_database_1') - create_clickhouse_postgres_db('postgres_database_2') - - cursors = [cursor1, cursor2] - for cursor_id in range(len(cursors)): - for i in range(NUM_TABLES): - table_name = 'postgresql_replica_{}'.format(i) - create_postgres_table(cursors[cursor_id], table_name); - instance.query("INSERT INTO postgres_database_{}.{} SELECT number, number from numbers(50)".format(cursor_id + 1, table_name)) - print('database 1 tables: ', instance.query('''SELECT name FROM system.tables WHERE database = 'postgres_database_1';''')) - print('database 2 tables: ', 
instance.query('''SELECT name FROM system.tables WHERE database = 'postgres_database_2';''')) - - create_materialized_db('test_database_1', 'postgres_database_1') - create_materialized_db('test_database_2', 'postgres_database_2') - - cursors = [cursor1, cursor2] - for cursor_id in range(len(cursors)): - for i in range(NUM_TABLES): - table_name = 'postgresql_replica_{}'.format(i) - instance.query("INSERT INTO postgres_database_{}.{} SELECT 50 + number, number from numbers(50)".format(cursor_id + 1, table_name)) - - for cursor_id in range(len(cursors)): - for i in range(NUM_TABLES): - table_name = 'postgresql_replica_{}'.format(i) - check_tables_are_synchronized( - table_name, 'key', 'postgres_database_{}'.format(cursor_id + 1), 'test_database_{}'.format(cursor_id + 1)); - - drop_clickhouse_postgres_db('postgres_database_1') - drop_clickhouse_postgres_db('postgres_database_2') - drop_materialized_db('test_database_1') - drop_materialized_db('test_database_2') - - -@pytest.mark.timeout(320) -def test_concurrent_transactions(started_cluster): - instance.query("DROP DATABASE IF EXISTS test_database") - conn = get_postgres_conn(True) - cursor = conn.cursor() - NUM_TABLES = 6 - - for i in range(NUM_TABLES): - create_postgres_table(cursor, 'postgresql_replica_{}'.format(i)); - - def transaction(thread_id): - conn_ = get_postgres_conn(True, auto_commit=False) - cursor_ = conn.cursor() - for query in queries: - cursor_.execute(query.format(thread_id)) - print('thread {}, query {}'.format(thread_id, query)) - conn_.commit() - - threads = [] - threads_num = 6 - for i in range(threads_num): - threads.append(threading.Thread(target=transaction, args=(i,))) - - create_materialized_db() - - for thread in threads: - time.sleep(random.uniform(0, 0.5)) - thread.start() - for thread in threads: - thread.join() - - for i in range(NUM_TABLES): - check_tables_are_synchronized('postgresql_replica_{}'.format(i)); - count1 = instance.query('SELECT count() FROM postgres_database.postgresql_replica_{}'.format(i)) - count2 = instance.query('SELECT count() FROM (SELECT * FROM test_database.postgresql_replica_{})'.format(i)) - print(int(count1), int(count2), sep=' ') - assert(int(count1) == int(count2)) - drop_materialized_db() +#def test_multiple_databases(started_cluster): +# instance.query("DROP DATABASE IF EXISTS test_database_1") +# instance.query("DROP DATABASE IF EXISTS test_database_2") +# NUM_TABLES = 5 +# +# conn = get_postgres_conn() +# cursor = conn.cursor() +# create_postgres_db(cursor, 'postgres_database_1') +# create_postgres_db(cursor, 'postgres_database_2') +# +# conn1 = get_postgres_conn(True, True, 'postgres_database_1') +# conn2 = get_postgres_conn(True, True, 'postgres_database_2') +# +# cursor1 = conn1.cursor() +# cursor2 = conn2.cursor() +# +# create_clickhouse_postgres_db('postgres_database_1') +# create_clickhouse_postgres_db('postgres_database_2') +# +# cursors = [cursor1, cursor2] +# for cursor_id in range(len(cursors)): +# for i in range(NUM_TABLES): +# table_name = 'postgresql_replica_{}'.format(i) +# create_postgres_table(cursors[cursor_id], table_name); +# instance.query("INSERT INTO postgres_database_{}.{} SELECT number, number from numbers(50)".format(cursor_id + 1, table_name)) +# print('database 1 tables: ', instance.query('''SELECT name FROM system.tables WHERE database = 'postgres_database_1';''')) +# print('database 2 tables: ', instance.query('''SELECT name FROM system.tables WHERE database = 'postgres_database_2';''')) +# +# create_materialized_db('test_database_1', 
'postgres_database_1') +# create_materialized_db('test_database_2', 'postgres_database_2') +# +# cursors = [cursor1, cursor2] +# for cursor_id in range(len(cursors)): +# for i in range(NUM_TABLES): +# table_name = 'postgresql_replica_{}'.format(i) +# instance.query("INSERT INTO postgres_database_{}.{} SELECT 50 + number, number from numbers(50)".format(cursor_id + 1, table_name)) +# +# for cursor_id in range(len(cursors)): +# for i in range(NUM_TABLES): +# table_name = 'postgresql_replica_{}'.format(i) +# check_tables_are_synchronized( +# table_name, 'key', 'postgres_database_{}'.format(cursor_id + 1), 'test_database_{}'.format(cursor_id + 1)); +# +# drop_clickhouse_postgres_db('postgres_database_1') +# drop_clickhouse_postgres_db('postgres_database_2') +# drop_materialized_db('test_database_1') +# drop_materialized_db('test_database_2') +# +# +#@pytest.mark.timeout(320) +#def test_concurrent_transactions(started_cluster): +# instance.query("DROP DATABASE IF EXISTS test_database") +# conn = get_postgres_conn(True) +# cursor = conn.cursor() +# NUM_TABLES = 6 +# +# for i in range(NUM_TABLES): +# create_postgres_table(cursor, 'postgresql_replica_{}'.format(i)); +# +# def transaction(thread_id): +# conn_ = get_postgres_conn(True, auto_commit=False) +# cursor_ = conn.cursor() +# for query in queries: +# cursor_.execute(query.format(thread_id)) +# print('thread {}, query {}'.format(thread_id, query)) +# conn_.commit() +# +# threads = [] +# threads_num = 6 +# for i in range(threads_num): +# threads.append(threading.Thread(target=transaction, args=(i,))) +# +# create_materialized_db() +# +# for thread in threads: +# time.sleep(random.uniform(0, 0.5)) +# thread.start() +# for thread in threads: +# thread.join() +# +# for i in range(NUM_TABLES): +# check_tables_are_synchronized('postgresql_replica_{}'.format(i)); +# count1 = instance.query('SELECT count() FROM postgres_database.postgresql_replica_{}'.format(i)) +# count2 = instance.query('SELECT count() FROM (SELECT * FROM test_database.postgresql_replica_{})'.format(i)) +# print(int(count1), int(count2), sep=' ') +# assert(int(count1) == int(count2)) +# drop_materialized_db() if __name__ == '__main__': diff --git a/tests/integration/test_storage_postgresql_replica/test.py b/tests/integration/test_storage_postgresql_replica/test.py index 53eedbc8b7d..e448cfc8e99 100644 --- a/tests/integration/test_storage_postgresql_replica/test.py +++ b/tests/integration/test_storage_postgresql_replica/test.py @@ -16,21 +16,33 @@ postgres_table_template = """ key Integer NOT NULL, value Integer, PRIMARY KEY(key)) """ - -def get_postgres_conn(database=False): +def get_postgres_conn(ip, port, database=False, auto_commit=True, database_name='postgres_database'): if database == True: - conn_string = "host='localhost' dbname='postgres_database' user='postgres' password='mysecretpassword'" + conn_string = "host={} port={} dbname='{}' user='postgres' password='mysecretpassword'".format(ip, port, database_name) else: - conn_string = "host='localhost' user='postgres' password='mysecretpassword'" + conn_string = "host={} port={} user='postgres' password='mysecretpassword'".format(ip, port) + conn = psycopg2.connect(conn_string) - conn.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT) - conn.autocommit = True + if auto_commit: + conn.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT) + conn.autocommit = True return conn def create_postgres_db(cursor, name): cursor.execute("CREATE DATABASE {}".format(name)) +def create_clickhouse_postgres_db(ip, port, name='postgres_database'): + 
instance.query(''' + CREATE DATABASE {} + ENGINE = PostgreSQL('{}:{}', '{}', 'postgres', 'mysecretpassword')'''.format(name, ip, port, name)) + +def create_materialized_table(ip, port): + instance.query(''' + CREATE TABLE test.postgresql_replica (key UInt64, value UInt64) + ENGINE = MaterializePostgreSQL( + '{}:{}', 'postgres_database', 'postgresql_replica', 'postgres', 'mysecretpassword') + PRIMARY KEY key; '''.format(ip, port)) def create_postgres_table(cursor, table_name, replica_identity_full=False): cursor.execute("DROP TABLE IF EXISTS {}".format(table_name)) @@ -52,12 +64,13 @@ def postgresql_replica_check_result(result, check=False, ref_file='test_postgres def started_cluster(): try: cluster.start() - conn = get_postgres_conn() + conn = get_postgres_conn(ip=cluster.postgres_ip, + port=cluster.postgres_port) cursor = conn.cursor() create_postgres_db(cursor, 'postgres_database') - instance.query(''' - CREATE DATABASE postgres_database - ENGINE = PostgreSQL('postgres1:5432', 'postgres_database', 'postgres', 'mysecretpassword')''') + create_clickhouse_postgres_db(ip=cluster.postgres_ip, + port=cluster.postgres_port) + instance.query('CREATE DATABASE test') yield cluster @@ -65,25 +78,23 @@ def started_cluster(): cluster.shutdown() @pytest.fixture(autouse=True) -def rabbitmq_setup_teardown(): +def postgresql_setup_teardown(): yield # run test instance.query('DROP TABLE IF EXISTS test.postgresql_replica') @pytest.mark.timeout(320) def test_initial_load_from_snapshot(started_cluster): - conn = get_postgres_conn(True) + conn = get_postgres_conn(ip=started_cluster.postgres_ip, + port=started_cluster.postgres_port, + database=True) cursor = conn.cursor() create_postgres_table(cursor, 'postgresql_replica'); instance.query("INSERT INTO postgres_database.postgresql_replica SELECT number, number from numbers(50)") instance.query('DROP TABLE IF EXISTS test.postgresql_replica') - instance.query(''' - CREATE TABLE test.postgresql_replica (key UInt64, value UInt64) - ENGINE = MaterializePostgreSQL( - 'postgres1:5432', 'postgres_database', 'postgresql_replica', 'postgres', 'mysecretpassword') - PRIMARY KEY key; - ''') + create_materialized_table(ip=started_cluster.postgres_ip, + port=started_cluster.postgres_port) result = instance.query('SELECT * FROM test.postgresql_replica ORDER BY key;') while postgresql_replica_check_result(result) == False: @@ -96,18 +107,16 @@ def test_initial_load_from_snapshot(started_cluster): @pytest.mark.timeout(320) def test_no_connection_at_startup(started_cluster): - conn = get_postgres_conn(True) + conn = get_postgres_conn(ip=started_cluster.postgres_ip, + port=started_cluster.postgres_port, + database=True) cursor = conn.cursor() create_postgres_table(cursor, 'postgresql_replica'); instance.query("INSERT INTO postgres_database.postgresql_replica SELECT number, number from numbers(50)") instance.query('DROP TABLE IF EXISTS test.postgresql_replica') - instance.query(''' - CREATE TABLE test.postgresql_replica (key UInt64, value UInt64) - ENGINE = MaterializePostgreSQL( - 'postgres1:5432', 'postgres_database', 'postgresql_replica', 'postgres', 'mysecretpassword') - PRIMARY KEY key; - ''') + create_materialized_table(ip=started_cluster.postgres_ip, + port=started_cluster.postgres_port) time.sleep(3) instance.query('DETACH TABLE test.postgresql_replica') @@ -129,18 +138,16 @@ def test_no_connection_at_startup(started_cluster): @pytest.mark.timeout(320) def test_detach_attach_is_ok(started_cluster): - conn = get_postgres_conn(True) + conn = 
get_postgres_conn(ip=started_cluster.postgres_ip, + port=started_cluster.postgres_port, + database=True) cursor = conn.cursor() create_postgres_table(cursor, 'postgresql_replica'); instance.query("INSERT INTO postgres_database.postgresql_replica SELECT number, number from numbers(50)") instance.query('DROP TABLE IF EXISTS test.postgresql_replica') - instance.query(''' - CREATE TABLE test.postgresql_replica (key UInt64, value UInt64) - ENGINE = MaterializePostgreSQL( - 'postgres1:5432', 'postgres_database', 'postgresql_replica', 'postgres', 'mysecretpassword') - PRIMARY KEY key; - ''') + create_materialized_table(ip=started_cluster.postgres_ip, + port=started_cluster.postgres_port) result = instance.query('SELECT count() FROM test.postgresql_replica;') while (int(result) == 0): @@ -164,19 +171,17 @@ def test_detach_attach_is_ok(started_cluster): @pytest.mark.timeout(320) def test_replicating_insert_queries(started_cluster): - conn = get_postgres_conn(True) + conn = get_postgres_conn(ip=started_cluster.postgres_ip, + port=started_cluster.postgres_port, + database=True) cursor = conn.cursor() create_postgres_table(cursor, 'postgresql_replica'); instance.query("INSERT INTO postgres_database.postgresql_replica SELECT number, number from numbers(10)") instance.query('DROP TABLE IF EXISTS test.postgresql_replica') - instance.query(''' - CREATE TABLE test.postgresql_replica (key UInt64, value UInt64) - ENGINE = MaterializePostgreSQL( - 'postgres1:5432', 'postgres_database', 'postgresql_replica', 'postgres', 'mysecretpassword') - PRIMARY KEY key; - ''') + create_materialized_table(ip=started_cluster.postgres_ip, + port=started_cluster.postgres_port) result = instance.query('SELECT count() FROM test.postgresql_replica;') while (int(result) != 10): @@ -206,19 +211,17 @@ def test_replicating_insert_queries(started_cluster): @pytest.mark.timeout(320) def test_replicating_delete_queries(started_cluster): - conn = get_postgres_conn(True) + conn = get_postgres_conn(ip=started_cluster.postgres_ip, + port=started_cluster.postgres_port, + database=True) cursor = conn.cursor() create_postgres_table(cursor, 'postgresql_replica'); instance.query("INSERT INTO postgres_database.postgresql_replica SELECT number, number from numbers(50)") instance.query('DROP TABLE IF EXISTS test.postgresql_replica') - instance.query(''' - CREATE TABLE test.postgresql_replica (key UInt64, value UInt64) - ENGINE = MaterializePostgreSQL( - 'postgres1:5432', 'postgres_database', 'postgresql_replica', 'postgres', 'mysecretpassword') - PRIMARY KEY key; - ''') + create_materialized_table(ip=started_cluster.postgres_ip, + port=started_cluster.postgres_port) result = instance.query('SELECT * FROM test.postgresql_replica ORDER BY key;') while postgresql_replica_check_result(result) == False: @@ -245,19 +248,17 @@ def test_replicating_delete_queries(started_cluster): @pytest.mark.timeout(320) def test_replicating_update_queries(started_cluster): - conn = get_postgres_conn(True) + conn = get_postgres_conn(ip=started_cluster.postgres_ip, + port=started_cluster.postgres_port, + database=True) cursor = conn.cursor() create_postgres_table(cursor, 'postgresql_replica'); instance.query("INSERT INTO postgres_database.postgresql_replica SELECT number, number + 10 from numbers(50)") instance.query('DROP TABLE IF EXISTS test.postgresql_replica') - instance.query(''' - CREATE TABLE test.postgresql_replica (key UInt64, value UInt64) - ENGINE = MaterializePostgreSQL( - 'postgres1:5432', 'postgres_database', 'postgresql_replica', 'postgres', 
'mysecretpassword') - PRIMARY KEY key; - ''') + create_materialized_table(ip=started_cluster.postgres_ip, + port=started_cluster.postgres_port) result = instance.query('SELECT count() FROM test.postgresql_replica;') while (int(result) != 50): @@ -277,18 +278,16 @@ def test_replicating_update_queries(started_cluster): @pytest.mark.timeout(320) def test_resume_from_written_version(started_cluster): - conn = get_postgres_conn(True) + conn = get_postgres_conn(ip=started_cluster.postgres_ip, + port=started_cluster.postgres_port, + database=True) cursor = conn.cursor() create_postgres_table(cursor, 'postgresql_replica'); instance.query("INSERT INTO postgres_database.postgresql_replica SELECT number, number + 10 from numbers(50)") instance.query('DROP TABLE IF EXISTS test.postgresql_replica') - instance.query(''' - CREATE TABLE test.postgresql_replica (key UInt64, value UInt64) - ENGINE = MaterializePostgreSQL( - 'postgres1:5432', 'postgres_database', 'postgresql_replica', 'postgres', 'mysecretpassword') - PRIMARY KEY key; - ''') + create_materialized_table(ip=started_cluster.postgres_ip, + port=started_cluster.postgres_port) result = instance.query('SELECT count() FROM test.postgresql_replica;') while (int(result) != 50): @@ -320,18 +319,16 @@ def test_resume_from_written_version(started_cluster): @pytest.mark.timeout(320) def test_many_replication_messages(started_cluster): - conn = get_postgres_conn(True) + conn = get_postgres_conn(ip=started_cluster.postgres_ip, + port=started_cluster.postgres_port, + database=True) cursor = conn.cursor() create_postgres_table(cursor, 'postgresql_replica'); instance.query("INSERT INTO postgres_database.postgresql_replica SELECT number, number from numbers(100000)") instance.query('DROP TABLE IF EXISTS test.postgresql_replica') - instance.query(''' - CREATE TABLE test.postgresql_replica (key UInt64, value UInt64, PRIMARY KEY(key)) - ENGINE = MaterializePostgreSQL( - 'postgres1:5432', 'postgres_database', 'postgresql_replica', 'postgres', 'mysecretpassword') - SETTINGS materialize_postgresql_max_block_size = 50000; - ''') + create_materialized_table(ip=started_cluster.postgres_ip, + port=started_cluster.postgres_port) result = instance.query('SELECT count() FROM test.postgresql_replica;') while (int(result) != 100000): @@ -375,18 +372,16 @@ def test_many_replication_messages(started_cluster): @pytest.mark.timeout(320) def test_connection_loss(started_cluster): - conn = get_postgres_conn(True) + conn = get_postgres_conn(ip=started_cluster.postgres_ip, + port=started_cluster.postgres_port, + database=True) cursor = conn.cursor() create_postgres_table(cursor, 'postgresql_replica'); instance.query("INSERT INTO postgres_database.postgresql_replica SELECT number, number from numbers(50)") instance.query('DROP TABLE IF EXISTS test.postgresql_replica') - instance.query(''' - CREATE TABLE test.postgresql_replica (key UInt64, value UInt64) - ENGINE = MaterializePostgreSQL( - 'postgres1:5432', 'postgres_database', 'postgresql_replica', 'postgres', 'mysecretpassword') - PRIMARY KEY key; - ''') + create_materialized_table(ip=started_cluster.postgres_ip, + port=started_cluster.postgres_port) i = 50 while i < 100000: @@ -412,17 +407,16 @@ def test_connection_loss(started_cluster): @pytest.mark.timeout(320) def test_clickhouse_restart(started_cluster): - conn = get_postgres_conn(True) + conn = get_postgres_conn(ip=started_cluster.postgres_ip, + port=started_cluster.postgres_port, + database=True) cursor = conn.cursor() create_postgres_table(cursor, 'postgresql_replica'); 
instance.query("INSERT INTO postgres_database.postgresql_replica SELECT number, number from numbers(50)") instance.query('DROP TABLE IF EXISTS test.postgresql_replica') - instance.query(''' - CREATE TABLE test.postgresql_replica (key UInt64, value UInt64) - ENGINE = MaterializePostgreSQL( - 'postgres1:5432', 'postgres_database', 'postgresql_replica', 'postgres', 'mysecretpassword') - PRIMARY KEY key; ''') + create_materialized_table(ip=started_cluster.postgres_ip, + port=started_cluster.postgres_port) i = 50 while i < 100000: @@ -442,16 +436,15 @@ def test_clickhouse_restart(started_cluster): def test_rename_table(started_cluster): - conn = get_postgres_conn(True) + conn = get_postgres_conn(ip=started_cluster.postgres_ip, + port=started_cluster.postgres_port, + database=True) cursor = conn.cursor() create_postgres_table(cursor, 'postgresql_replica'); instance.query('DROP TABLE IF EXISTS test.postgresql_replica') - instance.query(''' - CREATE TABLE test.postgresql_replica (key UInt64, value UInt64) - ENGINE = MaterializePostgreSQL( - 'postgres1:5432', 'postgres_database', 'postgresql_replica', 'postgres', 'mysecretpassword') - PRIMARY KEY key; ''') + create_materialized_table(ip=started_cluster.postgres_ip, + port=started_cluster.postgres_port) instance.query("INSERT INTO postgres_database.postgresql_replica SELECT number, number from numbers(25)") @@ -477,16 +470,15 @@ def test_rename_table(started_cluster): def test_virtual_columns(started_cluster): - conn = get_postgres_conn(True) + conn = get_postgres_conn(ip=started_cluster.postgres_ip, + port=started_cluster.postgres_port, + database=True) cursor = conn.cursor() create_postgres_table(cursor, 'postgresql_replica'); instance.query('DROP TABLE IF EXISTS test.postgresql_replica') - instance.query(''' - CREATE TABLE test.postgresql_replica (key UInt64, value UInt64) - ENGINE = MaterializePostgreSQL( - 'postgres1:5432', 'postgres_database', 'postgresql_replica', 'postgres', 'mysecretpassword') - PRIMARY KEY key; ''') + create_materialized_table(ip=started_cluster.postgres_ip, + port=started_cluster.postgres_port) instance.query("INSERT INTO postgres_database.postgresql_replica SELECT number, number from numbers(10)") result = instance.query('SELECT count() FROM test.postgresql_replica;') From 32c45d14ab662da808dc9666ac0f00b748e3b3d7 Mon Sep 17 00:00:00 2001 From: feng lv Date: Fri, 4 Jun 2021 14:48:48 +0000 Subject: [PATCH 161/931] a little --- src/Storages/StorageMerge.cpp | 17 +++++++---------- src/Storages/StorageMerge.h | 9 ++++----- src/TableFunctions/TableFunctionMerge.cpp | 19 ++++++++++--------- src/TableFunctions/TableFunctionMerge.h | 6 +++--- 4 files changed, 24 insertions(+), 27 deletions(-) diff --git a/src/Storages/StorageMerge.cpp b/src/Storages/StorageMerge.cpp index 15d520c13aa..7e093fe162a 100644 --- a/src/Storages/StorageMerge.cpp +++ b/src/Storages/StorageMerge.cpp @@ -103,13 +103,9 @@ StorageMerge::StorageMerge( const StorageID & table_id_, const ColumnsDescription & columns_, const String & comment, - const String & source_database_, - const Strings & source_tables_, + const std::unordered_map> & source_databases_and_tables_, ContextPtr context_) - : IStorage(table_id_) - , WithContext(context_->getGlobalContext()) - , source_database(source_database_) - , source_tables(std::in_place, source_tables_.begin(), source_tables_.end()) + : IStorage(table_id_), WithContext(context_->getGlobalContext()), source_databases_and_tables(source_databases_and_tables_) { StorageInMemoryMetadata storage_metadata; 
storage_metadata.setColumns(columns_); @@ -121,12 +117,12 @@ StorageMerge::StorageMerge( const StorageID & table_id_, const ColumnsDescription & columns_, const String & comment, - const String & source_database_, + const String & source_database_regexp_, const String & source_table_regexp_, ContextPtr context_) : IStorage(table_id_) , WithContext(context_->getGlobalContext()) - , source_database(source_database_) + , source_database_regexp(source_database_regexp_) , source_table_regexp(source_table_regexp_) { StorageInMemoryMetadata storage_metadata; @@ -624,10 +620,11 @@ void registerStorageMerge(StorageFactory & factory) engine_args[0] = evaluateConstantExpressionForDatabaseName(engine_args[0], args.getLocalContext()); engine_args[1] = evaluateConstantExpressionAsLiteral(engine_args[1], args.getLocalContext()); - String source_database = engine_args[0]->as().value.safeGet(); + String source_database_regexp = engine_args[0]->as().value.safeGet(); String table_name_regexp = engine_args[1]->as().value.safeGet(); - return StorageMerge::create(args.table_id, args.columns, args.comment, source_database, table_name_regexp, args.getContext()); + return StorageMerge::create( + args.table_id, args.columns, args.comment, source_database_regexp, table_name_regexp, args.getContext()); }); } diff --git a/src/Storages/StorageMerge.h b/src/Storages/StorageMerge.h index b9d44bfa27e..cbefa550204 100644 --- a/src/Storages/StorageMerge.h +++ b/src/Storages/StorageMerge.h @@ -49,9 +49,9 @@ public: const ASTPtr & left_in_operand, ContextPtr query_context, const StorageMetadataPtr & metadata_snapshot) const override; private: - String source_database; - std::optional> source_tables; + std::optional source_database_regexp; std::optional source_table_regexp; + std::optional>> source_databases_and_tables; using StorageWithLockAndName = std::tuple; using StorageListWithLocks = std::list; @@ -72,15 +72,14 @@ protected: const StorageID & table_id_, const ColumnsDescription & columns_, const String & comment, - const String & source_database_, - const Strings & source_tables_, + const std::unordered_map> & source_databases_and_tables_, ContextPtr context_); StorageMerge( const StorageID & table_id_, const ColumnsDescription & columns_, const String & comment, - const String & source_database_, + const String & source_database_regexp_, const String & source_table_regexp_, ContextPtr context_); diff --git a/src/TableFunctions/TableFunctionMerge.cpp b/src/TableFunctions/TableFunctionMerge.cpp index 051aa38398f..e1629035180 100644 --- a/src/TableFunctions/TableFunctionMerge.cpp +++ b/src/TableFunctions/TableFunctionMerge.cpp @@ -52,20 +52,22 @@ void TableFunctionMerge::parseArguments(const ASTPtr & ast_function, ContextPtr args[0] = evaluateConstantExpressionForDatabaseName(args[0], context); args[1] = evaluateConstantExpressionAsLiteral(args[1], context); - source_database = args[0]->as().value.safeGet(); + source_database_regexp = args[0]->as().value.safeGet(); source_table_regexp = args[1]->as().value.safeGet(); } -const Strings & TableFunctionMerge::getSourceTables(ContextPtr context) const +const std::unordered_map> & TableFunctionMerge::getSourceDatabasesAndTables(ContextPtr context) const { - if (source_tables) - return *source_tables; + if (source_databases_and_tables) + return *source_databases_and_tables; - auto database = DatabaseCatalog::instance().getDatabase(source_database); + // auto database = DatabaseCatalog::instance().getDatabase(source_database); - OptimizedRegularExpression re(source_table_regexp); 
- auto table_name_match = [&](const String & table_name_) { return re.match(table_name_); }; + OptimizedRegularExpression database_re(source_database_regexp); + OptimizedRegularExpression table_re(source_table_regexp); + auto database_name_match = [&](const String & database_name_) { return database_re.match(database_name_); }; + auto table_name_match = [&](const String & table_name_) { return table_re.match(table_name_); }; auto access = context->getAccess(); bool granted_show_on_all_tables = access->isGranted(AccessType::SHOW_TABLES, source_database); @@ -110,8 +112,7 @@ StoragePtr TableFunctionMerge::executeImpl(const ASTPtr & /*ast_function*/, Cont StorageID(getDatabaseName(), table_name), getActualTableStructure(context), String{}, - source_database, - getSourceTables(context), + getSourceDatabasesAndTables(context), context); res->startup(); diff --git a/src/TableFunctions/TableFunctionMerge.h b/src/TableFunctions/TableFunctionMerge.h index 04027b9d76a..b971a00d4b6 100644 --- a/src/TableFunctions/TableFunctionMerge.h +++ b/src/TableFunctions/TableFunctionMerge.h @@ -19,13 +19,13 @@ private: StoragePtr executeImpl(const ASTPtr & ast_function, ContextPtr context, const std::string & table_name, ColumnsDescription cached_columns) const override; const char * getStorageTypeName() const override { return "Merge"; } - const Strings & getSourceTables(ContextPtr context) const; + const std::unordered_map> & getSourceDatabasesAndTables(ContextPtr context) const; ColumnsDescription getActualTableStructure(ContextPtr context) const override; void parseArguments(const ASTPtr & ast_function, ContextPtr context) override; - String source_database; + String source_database_regexp; String source_table_regexp; - mutable std::optional source_tables; + mutable std::optional>> source_databases_and_tables; }; From 14687eccf7fb29efd8cf92b1354f6bca58c075c6 Mon Sep 17 00:00:00 2001 From: Konstantin Rudenskii Date: Sat, 5 Jun 2021 00:04:53 +0300 Subject: [PATCH 162/931] Fix dollar token for lexer --- src/Functions/ya.make | 5 +++++ src/Parsers/Lexer.cpp | 6 +++--- 2 files changed, 8 insertions(+), 3 deletions(-) diff --git a/src/Functions/ya.make b/src/Functions/ya.make index 2a541369ff4..76fa893791e 100644 --- a/src/Functions/ya.make +++ b/src/Functions/ya.make @@ -42,6 +42,7 @@ SRCS( FunctionFile.cpp FunctionHelpers.cpp FunctionJoinGet.cpp + FunctionSQLJSON.cpp FunctionsAES.cpp FunctionsCoding.cpp FunctionsConversion.cpp @@ -74,6 +75,10 @@ SRCS( GatherUtils/sliceFromRightConstantOffsetUnbounded.cpp GeoHash.cpp IFunction.cpp + JSONPath/Parsers/ParserJSONPath.cpp + JSONPath/Parsers/ParserJSONPathMemberAccess.cpp + JSONPath/Parsers/ParserJSONPathQuery.cpp + JSONPath/Parsers/ParserJSONPathRange.cpp TargetSpecific.cpp URL/URLHierarchy.cpp URL/URLPathHierarchy.cpp diff --git a/src/Parsers/Lexer.cpp b/src/Parsers/Lexer.cpp index c3b3cf98a2e..3a6e7a26700 100644 --- a/src/Parsers/Lexer.cpp +++ b/src/Parsers/Lexer.cpp @@ -240,9 +240,6 @@ Token Lexer::nextTokenImpl() case '*': ++pos; return Token(TokenType::Asterisk, token_begin, pos); - case '$': - ++pos; - return Token(TokenType::DollarSign, token_begin, pos); case '/': /// division (/) or start of comment (//, /*) { ++pos; @@ -341,6 +338,9 @@ Token Lexer::nextTokenImpl() } default: + if (*pos == '$' && pos + 1 < end && !isWordCharASCII(pos[1])) { + return Token(TokenType::DollarSign, token_begin, ++pos); + } if (isWordCharASCII(*pos) || *pos == '$') { ++pos; From cdd13b5ab42a53e65fea32235943749f73a5ef8d Mon Sep 17 00:00:00 2001 From: Konstantin Rudenskii Date: 
Sat, 5 Jun 2021 01:31:55 +0300 Subject: [PATCH 163/931] Style --- src/Functions/FunctionSQLJSON.h | 54 ++++++++++------ src/Functions/JSONPath/ASTs/ASTJSONPath.h | 12 +--- .../JSONPath/ASTs/ASTJSONPathMemberAccess.h | 10 +-- .../JSONPath/ASTs/ASTJSONPathQuery.h | 10 +-- .../JSONPath/ASTs/ASTJSONPathRange.h | 13 +--- .../JSONPath/Generators/GeneratorJSONPath.h | 2 +- .../JSONPath/Generators/IGenerator.h | 2 +- .../JSONPath/Generators/IGenerator_fwd.h | 6 +- src/Functions/JSONPath/Generators/IVisitor.h | 19 +++--- .../Generators/VisitorJSONPathMemberAccess.h | 17 ++--- .../Generators/VisitorJSONPathRange.h | 62 +++++++++++++------ .../JSONPath/Generators/VisitorStatus.h | 7 ++- .../JSONPath/Parsers/ParserJSONPath.cpp | 10 +-- .../JSONPath/Parsers/ParserJSONPath.h | 1 - .../Parsers/ParserJSONPathMemberAccess.h | 2 +- .../JSONPath/Parsers/ParserJSONPathQuery.cpp | 13 ++-- .../JSONPath/Parsers/ParserJSONPathRange.cpp | 44 ++++++++----- .../JSONPath/Parsers/ParserJSONPathRange.h | 1 - 18 files changed, 155 insertions(+), 130 deletions(-) diff --git a/src/Functions/FunctionSQLJSON.h b/src/Functions/FunctionSQLJSON.h index 2d4f51e63c4..5f478cebad8 100644 --- a/src/Functions/FunctionSQLJSON.h +++ b/src/Functions/FunctionSQLJSON.h @@ -1,14 +1,15 @@ #pragma once -#include +#include #include -#include #include +#include #include #include #include #include #include +#include #include #include #include @@ -18,7 +19,6 @@ #include #include #include -#include #if !defined(ARCADIA_BUILD) # include "config_functions.h" @@ -28,11 +28,11 @@ namespace DB { namespace ErrorCodes { -extern const int ILLEGAL_COLUMN; -extern const int ILLEGAL_TYPE_OF_ARGUMENT; -extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH; -extern const int TOO_FEW_ARGUMENTS_FOR_FUNCTION; -extern const int BAD_ARGUMENTS; + extern const int ILLEGAL_COLUMN; + extern const int ILLEGAL_TYPE_OF_ARGUMENT; + extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH; + extern const int TOO_FEW_ARGUMENTS_FOR_FUNCTION; + extern const int BAD_ARGUMENTS; } class FunctionSQLJSONHelpers @@ -159,7 +159,7 @@ class FunctionSQLJSON : public IFunction, WithConstContext { public: static FunctionPtr create(ContextConstPtr context_) { return std::make_shared(context_); } - FunctionSQLJSON(ContextConstPtr context_) : WithConstContext(context_) {} + FunctionSQLJSON(ContextConstPtr context_) : WithConstContext(context_) { } static constexpr auto name = Name::name; String getName() const override { return Name::name; } @@ -220,7 +220,8 @@ public: VisitorStatus status; while ((status = generator_json_path.getNextItem(current_element)) != VisitorStatus::Exhausted) { - if (status == VisitorStatus::Ok) { + if (status == VisitorStatus::Ok) + { break; } current_element = root; @@ -229,9 +230,12 @@ public: /// insert result, status can be either Ok (if we found the item) /// or Exhausted (if we never found the item) ColumnUInt8 & col_bool = assert_cast(dest); - if (status == VisitorStatus::Ok) { + if (status == VisitorStatus::Ok) + { col_bool.insert(1); - } else { + } + else + { col_bool.insert(0); } return true; @@ -256,17 +260,22 @@ public: Element res; while ((status = generator_json_path.getNextItem(current_element)) != VisitorStatus::Exhausted) { - if (status == VisitorStatus::Ok) { - if (!(current_element.isArray() || current_element.isObject())) { + if (status == VisitorStatus::Ok) + { + if (!(current_element.isArray() || current_element.isObject())) + { break; } - } else if (status == VisitorStatus::Error) { + } + else if (status == VisitorStatus::Error) + { /// ON ERROR } 
current_element = root; } - if (status == VisitorStatus::Exhausted) { + if (status == VisitorStatus::Exhausted) + { return false; } @@ -304,19 +313,24 @@ public: bool success = false; while ((status = generator_json_path.getNextItem(current_element)) != VisitorStatus::Exhausted) { - if (status == VisitorStatus::Ok) { - if (success) { + if (status == VisitorStatus::Ok) + { + if (success) + { out << ", "; } success = true; out << current_element.getElement(); - } else if (status == VisitorStatus::Error) { + } + else if (status == VisitorStatus::Error) + { /// ON ERROR } current_element = root; } out << "]"; - if (!success) { + if (!success) + { return false; } ColumnString & col_str = assert_cast(dest); diff --git a/src/Functions/JSONPath/ASTs/ASTJSONPath.h b/src/Functions/JSONPath/ASTs/ASTJSONPath.h index 092fe16bd9e..dfc117db846 100644 --- a/src/Functions/JSONPath/ASTs/ASTJSONPath.h +++ b/src/Functions/JSONPath/ASTs/ASTJSONPath.h @@ -1,22 +1,16 @@ #pragma once -#include #include +#include namespace DB { class ASTJSONPath : public IAST { public: - String getID(char) const override - { - return "ASTJSONPath"; - } + String getID(char) const override { return "ASTJSONPath"; } - ASTPtr clone() const override - { - return std::make_shared(*this); - } + ASTPtr clone() const override { return std::make_shared(*this); } ASTJSONPathQuery * jsonpath_query; }; diff --git a/src/Functions/JSONPath/ASTs/ASTJSONPathMemberAccess.h b/src/Functions/JSONPath/ASTs/ASTJSONPathMemberAccess.h index 663859f566f..2c9482b665e 100644 --- a/src/Functions/JSONPath/ASTs/ASTJSONPathMemberAccess.h +++ b/src/Functions/JSONPath/ASTs/ASTJSONPathMemberAccess.h @@ -7,15 +7,9 @@ namespace DB class ASTJSONPathMemberAccess : public IAST { public: - String getID(char) const override - { - return "ASTJSONPathMemberAccess"; - } + String getID(char) const override { return "ASTJSONPathMemberAccess"; } - ASTPtr clone() const override - { - return std::make_shared(*this); - } + ASTPtr clone() const override { return std::make_shared(*this); } public: /// Member name to lookup in json document (in path: $.some_key.another_key. ...) 
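// Note (editor, not part of the patch): each JSONPath AST node touched by this
// style pass repeats the same two-method IAST contract -- a debug name plus a
// deep copy -- and the template arguments eaten from this text make it hard to
// read: every clone() above is really std::make_shared<NodeType>(*this) for
// the node's own type. A standalone sketch of the idiom, with invented names:

#include <memory>
#include <string>

struct INode
{
    virtual std::string getID() const = 0;
    virtual std::shared_ptr<INode> clone() const = 0; /// deep copy, so AST rewrites never alias
    virtual ~INode() = default;
};

struct MemberAccessNode : INode
{
    std::string member_name; /// key to look up, as in $.some_key

    std::string getID() const override { return "MemberAccessNode"; }
    std::shared_ptr<INode> clone() const override { return std::make_shared<MemberAccessNode>(*this); }
};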
diff --git a/src/Functions/JSONPath/ASTs/ASTJSONPathQuery.h b/src/Functions/JSONPath/ASTs/ASTJSONPathQuery.h index 8da8a7baafb..ed2992777b2 100644 --- a/src/Functions/JSONPath/ASTs/ASTJSONPathQuery.h +++ b/src/Functions/JSONPath/ASTs/ASTJSONPathQuery.h @@ -7,15 +7,9 @@ namespace DB class ASTJSONPathQuery : public IAST { public: - String getID(char) const override - { - return "ASTJSONPathQuery"; - } + String getID(char) const override { return "ASTJSONPathQuery"; } - ASTPtr clone() const override - { - return std::make_shared(*this); - } + ASTPtr clone() const override { return std::make_shared(*this); } }; } diff --git a/src/Functions/JSONPath/ASTs/ASTJSONPathRange.h b/src/Functions/JSONPath/ASTs/ASTJSONPathRange.h index 21af3cff363..8a963d7fc6b 100644 --- a/src/Functions/JSONPath/ASTs/ASTJSONPathRange.h +++ b/src/Functions/JSONPath/ASTs/ASTJSONPathRange.h @@ -1,23 +1,16 @@ #pragma once -#include #include +#include namespace DB { - class ASTJSONPathRange : public IAST { public: - String getID(char) const override - { - return "ASTJSONPathRange"; - } + String getID(char) const override { return "ASTJSONPathRange"; } - ASTPtr clone() const override - { - return std::make_shared(*this); - } + ASTPtr clone() const override { return std::make_shared(*this); } public: /// Ranges to lookup in json array ($[0, 1, 2, 4 to 9]) diff --git a/src/Functions/JSONPath/Generators/GeneratorJSONPath.h b/src/Functions/JSONPath/Generators/GeneratorJSONPath.h index 68ea5a2a3c5..6eea19cb516 100644 --- a/src/Functions/JSONPath/Generators/GeneratorJSONPath.h +++ b/src/Functions/JSONPath/Generators/GeneratorJSONPath.h @@ -100,4 +100,4 @@ private: VisitorList visitors; }; -} // namespace DB +} diff --git a/src/Functions/JSONPath/Generators/IGenerator.h b/src/Functions/JSONPath/Generators/IGenerator.h index 18c0ac7da67..d2cef9fe27b 100644 --- a/src/Functions/JSONPath/Generators/IGenerator.h +++ b/src/Functions/JSONPath/Generators/IGenerator.h @@ -26,4 +26,4 @@ public: virtual ~IGenerator() = default; }; -} // namespace DB +} diff --git a/src/Functions/JSONPath/Generators/IGenerator_fwd.h b/src/Functions/JSONPath/Generators/IGenerator_fwd.h index 27c3976b95b..57ed04d0f6f 100644 --- a/src/Functions/JSONPath/Generators/IGenerator_fwd.h +++ b/src/Functions/JSONPath/Generators/IGenerator_fwd.h @@ -2,8 +2,8 @@ #include -namespace DB { - +namespace DB +{ template class IGenerator; @@ -13,4 +13,4 @@ using IVisitorPtr = std::shared_ptr>; template using VisitorList = std::vector>; -} // namespace DB +} diff --git a/src/Functions/JSONPath/Generators/IVisitor.h b/src/Functions/JSONPath/Generators/IVisitor.h index 78c1efe64fc..d9917087cb0 100644 --- a/src/Functions/JSONPath/Generators/IVisitor.h +++ b/src/Functions/JSONPath/Generators/IVisitor.h @@ -2,12 +2,12 @@ #include -namespace DB { - +namespace DB +{ template -class IVisitor { +class IVisitor +{ public: - virtual const char * getName() const = 0; /** @@ -29,17 +29,14 @@ public: virtual void updateState() = 0; - bool isExhausted() { - return is_exhausted; - } + bool isExhausted() { return is_exhausted; } - void setExhausted(bool exhausted) { - is_exhausted = exhausted; - } + void setExhausted(bool exhausted) { is_exhausted = exhausted; } virtual ~IVisitor() = default; + private: bool is_exhausted = false; }; -} // namespace DB +} diff --git a/src/Functions/JSONPath/Generators/VisitorJSONPathMemberAccess.h b/src/Functions/JSONPath/Generators/VisitorJSONPathMemberAccess.h index cad36e40e4d..10ee2a0c5d6 100644 --- 
a/src/Functions/JSONPath/Generators/VisitorJSONPathMemberAccess.h +++ b/src/Functions/JSONPath/Generators/VisitorJSONPathMemberAccess.h @@ -12,7 +12,8 @@ public: const char * getName() const override { return "VisitorJSONPathMemberAccess"; } - VisitorStatus apply(typename JSONParser::Element & element) const override { + VisitorStatus apply(typename JSONParser::Element & element) const override + { const auto * member_access = member_access_ptr->as(); typename JSONParser::Element result; element.getObject().find(std::string_view(member_access->member_name), result); @@ -22,13 +23,15 @@ public: VisitorStatus visit(typename JSONParser::Element & element) override { - if (!element.isObject()) { + if (!element.isObject()) + { this->setExhausted(true); return VisitorStatus::Error; } const auto * member_access = member_access_ptr->as(); typename JSONParser::Element result; - if (!element.getObject().find(std::string_view(member_access->member_name), result)) { + if (!element.getObject().find(std::string_view(member_access->member_name), result)) + { this->setExhausted(true); return VisitorStatus::Error; } @@ -37,14 +40,12 @@ public: return VisitorStatus::Ok; } - void reinitialize() override { - this->setExhausted(false); - } + void reinitialize() override { this->setExhausted(false); } - void updateState() override {} + void updateState() override { } private: ASTPtr member_access_ptr; }; -} // namespace DB +} diff --git a/src/Functions/JSONPath/Generators/VisitorJSONPathRange.h b/src/Functions/JSONPath/Generators/VisitorJSONPathRange.h index 0858d9b70da..d3313331593 100644 --- a/src/Functions/JSONPath/Generators/VisitorJSONPathRange.h +++ b/src/Functions/JSONPath/Generators/VisitorJSONPathRange.h @@ -2,28 +2,34 @@ #include #include -namespace DB { - +namespace DB +{ template class VisitorJSONPathRange : public IVisitor { public: - VisitorJSONPathRange(ASTPtr range_ptr_) : range_ptr(range_ptr_) { + VisitorJSONPathRange(ASTPtr range_ptr_) : range_ptr(range_ptr_) + { const auto * range = range_ptr->as(); current_range = 0; - if (range->is_star) { + if (range->is_star) + { current_index = 0; - } else { + } + else + { current_index = range->ranges[current_range].first; } } const char * getName() const override { return "VisitorJSONPathRange"; } - VisitorStatus apply(typename JSONParser::Element & element) const override { + VisitorStatus apply(typename JSONParser::Element & element) const override + { typename JSONParser::Element result; typename JSONParser::Array array = element.getArray(); - if (current_index >= array.size()) { + if (current_index >= array.size()) + { return VisitorStatus::Error; } result = array[current_index]; @@ -33,26 +39,35 @@ public: VisitorStatus visit(typename JSONParser::Element & element) override { - if (!element.isArray()) { + if (!element.isArray()) + { this->setExhausted(true); return VisitorStatus::Error; } const auto * range = range_ptr->as(); VisitorStatus status; - if (current_index < element.getArray().size()) { + if (current_index < element.getArray().size()) + { apply(element); status = VisitorStatus::Ok; - } else if (!range->is_star) { + } + else if (!range->is_star) + { status = VisitorStatus::Ignore; - } else { + } + else + { status = VisitorStatus::Ignore; this->setExhausted(true); } - if (!range->is_star) { - if (current_index + 1 == range->ranges[current_range].second) { - if (current_range + 1 == range->ranges.size()) { + if (!range->is_star) + { + if (current_index + 1 == range->ranges[current_range].second) + { + if (current_range + 1 == 
range->ranges.size()) + { this->setExhausted(true); } } @@ -61,24 +76,31 @@ public: return status; } - void reinitialize() override { + void reinitialize() override + { const auto * range = range_ptr->as(); current_range = 0; - if (range->is_star) { + if (range->is_star) + { current_index = 0; - } else { + } + else + { current_index = range->ranges[current_range].first; } this->setExhausted(false); } - void updateState() override { + void updateState() override + { const auto * range = range_ptr->as(); current_index++; - if (range->is_star) { + if (range->is_star) + { return; } - if (current_index == range->ranges[current_range].second) { + if (current_index == range->ranges[current_range].second) + { current_range++; current_index = range->ranges[current_range].first; } diff --git a/src/Functions/JSONPath/Generators/VisitorStatus.h b/src/Functions/JSONPath/Generators/VisitorStatus.h index 17b424a3bf6..96b2ea72f18 100644 --- a/src/Functions/JSONPath/Generators/VisitorStatus.h +++ b/src/Functions/JSONPath/Generators/VisitorStatus.h @@ -1,8 +1,9 @@ #pragma once -namespace DB { - -enum VisitorStatus { +namespace DB +{ +enum VisitorStatus +{ Ok, Exhausted, Error, diff --git a/src/Functions/JSONPath/Parsers/ParserJSONPath.cpp b/src/Functions/JSONPath/Parsers/ParserJSONPath.cpp index b65de621f9a..003e97af38b 100644 --- a/src/Functions/JSONPath/Parsers/ParserJSONPath.cpp +++ b/src/Functions/JSONPath/Parsers/ParserJSONPath.cpp @@ -1,11 +1,10 @@ +#include +#include #include #include -#include -#include namespace DB { - /** * Entry parser for JSONPath */ @@ -19,7 +18,8 @@ bool ParserJSONPath::parseImpl(Pos & pos, ASTPtr & node, Expected & expected) bool res = parser_jsonpath_query.parse(pos, query, expected); - if (res) { + if (res) + { /// Set ASTJSONPathQuery of ASTJSONPath ast_jsonpath->set(ast_jsonpath->jsonpath_query, query); } @@ -28,4 +28,4 @@ bool ParserJSONPath::parseImpl(Pos & pos, ASTPtr & node, Expected & expected) return res; } -} // namespace DB +} diff --git a/src/Functions/JSONPath/Parsers/ParserJSONPath.h b/src/Functions/JSONPath/Parsers/ParserJSONPath.h index 5defc76b515..7d2c2ad642c 100644 --- a/src/Functions/JSONPath/Parsers/ParserJSONPath.h +++ b/src/Functions/JSONPath/Parsers/ParserJSONPath.h @@ -5,7 +5,6 @@ namespace DB { - /** * Entry parser for JSONPath */ diff --git a/src/Functions/JSONPath/Parsers/ParserJSONPathMemberAccess.h b/src/Functions/JSONPath/Parsers/ParserJSONPathMemberAccess.h index 49fda6f1ac8..000f20c8551 100644 --- a/src/Functions/JSONPath/Parsers/ParserJSONPathMemberAccess.h +++ b/src/Functions/JSONPath/Parsers/ParserJSONPathMemberAccess.h @@ -4,7 +4,7 @@ namespace DB { class ParserJSONPathMemberAccess : public IParserBase { - const char * getName() const override {return "ParserJSONPathMemberAccess";} + const char * getName() const override { return "ParserJSONPathMemberAccess"; } bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected) override; }; diff --git a/src/Functions/JSONPath/Parsers/ParserJSONPathQuery.cpp b/src/Functions/JSONPath/Parsers/ParserJSONPathQuery.cpp index 10cf6f2915c..0f171c0a82c 100644 --- a/src/Functions/JSONPath/Parsers/ParserJSONPathQuery.cpp +++ b/src/Functions/JSONPath/Parsers/ParserJSONPathQuery.cpp @@ -1,7 +1,7 @@ -#include -#include -#include #include +#include +#include +#include namespace DB @@ -19,15 +19,16 @@ bool ParserJSONPathQuery::parseImpl(Pos & pos, ASTPtr & query, Expected & expect ParserJSONPathMemberAccess parser_jsonpath_member_access; ParserJSONPathRange parser_jsonpath_range; - if (pos->type != 
TokenType::DollarSign) { + if (pos->type != TokenType::DollarSign) + { return false; } ++pos; bool res = false; ASTPtr subquery; - while (parser_jsonpath_member_access.parse(pos, subquery, expected) || - parser_jsonpath_range.parse(pos, subquery, expected)) + while (parser_jsonpath_member_access.parse(pos, subquery, expected) + || parser_jsonpath_range.parse(pos, subquery, expected)) { if (subquery) { diff --git a/src/Functions/JSONPath/Parsers/ParserJSONPathRange.cpp b/src/Functions/JSONPath/Parsers/ParserJSONPathRange.cpp index 4f4a87f15ce..4027ec67ecb 100644 --- a/src/Functions/JSONPath/Parsers/ParserJSONPathRange.cpp +++ b/src/Functions/JSONPath/Parsers/ParserJSONPathRange.cpp @@ -28,18 +28,22 @@ bool ParserJSONPathRange::parseImpl(Pos & pos, ASTPtr & node, Expected & expecte auto range = std::make_shared(); node = range; - if (pos->type != TokenType::OpeningSquareBracket) { + if (pos->type != TokenType::OpeningSquareBracket) + { return false; } ++pos; - while (pos->type != TokenType::ClosingSquareBracket) { + while (pos->type != TokenType::ClosingSquareBracket) + { if (pos->type != TokenType::Number && pos->type != TokenType::Asterisk) { return false; } - if (pos->type == TokenType::Asterisk) { - if (range->is_star) { + if (pos->type == TokenType::Asterisk) + { + if (range->is_star) + { throw Exception{"Multiple asterisks in square array range are not allowed", ErrorCodes::BAD_ARGUMENTS}; } range->is_star = true; @@ -56,18 +60,23 @@ bool ParserJSONPathRange::parseImpl(Pos & pos, ASTPtr & node, Expected & expecte } range_indices.first = number_ptr->as()->value.get(); - if (pos->type == TokenType::Comma || pos->type == TokenType::ClosingSquareBracket) { + if (pos->type == TokenType::Comma || pos->type == TokenType::ClosingSquareBracket) + { /// Single index case range_indices.second = range_indices.first + 1; - } else if (pos->type == TokenType::BareWord) { + } + else if (pos->type == TokenType::BareWord) + { /// Range case ParserIdentifier name_p; ASTPtr word; - if (!name_p.parse(pos, word, expected)) { + if (!name_p.parse(pos, word, expected)) + { return false; } String to_identifier; - if (!tryGetIdentifierNameInto(word, to_identifier) || to_identifier != "to") { + if (!tryGetIdentifierNameInto(word, to_identifier) || to_identifier != "to") + { return false; } if (!number_p.parse(pos, number_ptr, expected)) @@ -75,17 +84,24 @@ bool ParserJSONPathRange::parseImpl(Pos & pos, ASTPtr & node, Expected & expecte return false; } range_indices.second = number_ptr->as()->value.get(); - } else { + } + else + { return false; } - if (range_indices.first >= range_indices.second) { - throw Exception{ErrorCodes::BAD_ARGUMENTS, "Start of range must be greater than end of range, however {} >= {}", - range_indices.first, range_indices.second}; + if (range_indices.first >= range_indices.second) + { + throw Exception{ + ErrorCodes::BAD_ARGUMENTS, + "Start of range must be greater than end of range, however {} >= {}", + range_indices.first, + range_indices.second}; } range->ranges.push_back(std::move(range_indices)); - if (pos->type != TokenType::ClosingSquareBracket) { + if (pos->type != TokenType::ClosingSquareBracket) + { ++pos; } } @@ -95,4 +111,4 @@ bool ParserJSONPathRange::parseImpl(Pos & pos, ASTPtr & node, Expected & expecte return !range->ranges.empty() != range->is_star; } -} // namespace DB +} diff --git a/src/Functions/JSONPath/Parsers/ParserJSONPathRange.h b/src/Functions/JSONPath/Parsers/ParserJSONPathRange.h index 95708e5e7b8..94db29577ab 100644 --- 
a/src/Functions/JSONPath/Parsers/ParserJSONPathRange.h +++ b/src/Functions/JSONPath/Parsers/ParserJSONPathRange.h @@ -5,7 +5,6 @@ namespace DB { - class ParserJSONPathRange : public IParserBase { private: From 3f469fe0cde1f843286e7aaaa1db62da29098a9c Mon Sep 17 00:00:00 2001 From: Konstantin Rudenskii Date: Sat, 5 Jun 2021 15:13:46 +0300 Subject: [PATCH 164/931] Style --- src/Functions/FunctionSQLJSON.h | 8 +-- .../JSONPath/Generators/GeneratorJSONPath.h | 51 ++++++++++--------- .../Parsers/ParserJSONPathMemberAccess.cpp | 5 +- .../JSONPath/Parsers/ParserJSONPathRange.cpp | 9 ++-- src/Parsers/Lexer.cpp | 3 +- 5 files changed, 41 insertions(+), 35 deletions(-) diff --git a/src/Functions/FunctionSQLJSON.h b/src/Functions/FunctionSQLJSON.h index 5f478cebad8..67380a11235 100644 --- a/src/Functions/FunctionSQLJSON.h +++ b/src/Functions/FunctionSQLJSON.h @@ -56,18 +56,18 @@ public: const auto & first_column = arguments[0]; if (!isString(first_column.type)) { - throw Exception{ + throw Exception( "JSONPath functions require 1 argument to be JSONPath of type string, illegal type: " + first_column.type->getName(), - ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT}; + ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT); } /// Check 2 argument: must be of type String (JSON) const auto & second_column = arguments[1]; if (!isString(second_column.type)) { - throw Exception{ + throw Exception( "JSONPath functions require 2 argument to be JSON of string, illegal type: " + second_column.type->getName(), - ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT}; + ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT); } /// If argument is successfully cast to (ColumnConst *) then it is quoted string diff --git a/src/Functions/JSONPath/Generators/GeneratorJSONPath.h b/src/Functions/JSONPath/Generators/GeneratorJSONPath.h index 6eea19cb516..450a4f31dae 100644 --- a/src/Functions/JSONPath/Generators/GeneratorJSONPath.h +++ b/src/Functions/JSONPath/Generators/GeneratorJSONPath.h @@ -21,28 +21,22 @@ public: { query_ptr = query_ptr_; const auto * path = query_ptr->as(); - if (!path) { + if (!path) + { throw Exception("Invalid path", ErrorCodes::LOGICAL_ERROR); } const auto * query = path->jsonpath_query; for (auto child_ast : query->children) { - if (child_ast->getID() == "ASTJSONPathMemberAccess") + if (typeid_cast(child_ast.get())) { - auto member_access_visitor = std::make_shared>(child_ast); - if (member_access_visitor) { - visitors.push_back(member_access_visitor); - } else { - throw Exception("member_access_visitor could not be nullptr", ErrorCodes::LOGICAL_ERROR); - } - } else if (child_ast->getID() == "ASTJSONPathRange") { - auto range_visitor = std::make_shared>(child_ast); - if (range_visitor) { - visitors.push_back(range_visitor); - } else { - throw Exception("range_visitor could not be nullptr", ErrorCodes::LOGICAL_ERROR); - } + visitors.push_back(std::make_shared>(child_ast)); + } + else if (child_ast->getID() == "ASTJSONPathRange") + { + + visitors.push_back(std::make_shared>(child_ast)); } } } @@ -56,27 +50,33 @@ public: */ VisitorStatus getNextItem(typename JSONParser::Element & element) override { - while (true) { + while (true) + { auto root = element; - if (current_visitor < 0) { + if (current_visitor < 0) + { return VisitorStatus::Exhausted; } - for (int i = 0; i < current_visitor; ++i) { + for (int i = 0; i < current_visitor; ++i) + { visitors[i]->apply(root); } VisitorStatus status = VisitorStatus::Error; - for (size_t i = current_visitor; i < visitors.size(); ++i) { + for (size_t i = current_visitor; i < visitors.size(); ++i) + { status = 
visitors[i]->visit(root); current_visitor = i; - if (status == VisitorStatus::Error || status == VisitorStatus::Ignore) { + if (status == VisitorStatus::Error || status == VisitorStatus::Ignore) + { break; } } updateVisitorsForNextRun(); - if (status != VisitorStatus::Ignore) { + if (status != VisitorStatus::Ignore) + { element = root; return status; } @@ -84,12 +84,15 @@ public: } private: - bool updateVisitorsForNextRun() { - while (current_visitor >= 0 && visitors[current_visitor]->isExhausted()) { + bool updateVisitorsForNextRun() + { + while (current_visitor >= 0 && visitors[current_visitor]->isExhausted()) + { visitors[current_visitor]->reinitialize(); current_visitor--; } - if (current_visitor >= 0) { + if (current_visitor >= 0) + { visitors[current_visitor]->updateState(); } return current_visitor >= 0; diff --git a/src/Functions/JSONPath/Parsers/ParserJSONPathMemberAccess.cpp b/src/Functions/JSONPath/Parsers/ParserJSONPathMemberAccess.cpp index 10ae128616b..0acb800dc1b 100644 --- a/src/Functions/JSONPath/Parsers/ParserJSONPathMemberAccess.cpp +++ b/src/Functions/JSONPath/Parsers/ParserJSONPathMemberAccess.cpp @@ -17,8 +17,6 @@ namespace DB */ bool ParserJSONPathMemberAccess::parseImpl(Pos & pos, ASTPtr & node, Expected & expected) { - auto member_access = std::make_shared(); - node = member_access; if (pos->type != TokenType::Dot) { return false; } @@ -27,12 +25,15 @@ bool ParserJSONPathMemberAccess::parseImpl(Pos & pos, ASTPtr & node, Expected & if (pos->type != TokenType::BareWord) { return false; } + ParserIdentifier name_p; ASTPtr member_name; if (!name_p.parse(pos, member_name, expected)) { return false; } + auto member_access = std::make_shared(); + node = member_access; if (!tryGetIdentifierNameInto(member_name, member_access->member_name)) { return false; } diff --git a/src/Functions/JSONPath/Parsers/ParserJSONPathRange.cpp b/src/Functions/JSONPath/Parsers/ParserJSONPathRange.cpp index 4027ec67ecb..25c5c76dd40 100644 --- a/src/Functions/JSONPath/Parsers/ParserJSONPathRange.cpp +++ b/src/Functions/JSONPath/Parsers/ParserJSONPathRange.cpp @@ -25,8 +25,6 @@ namespace ErrorCodes */ bool ParserJSONPathRange::parseImpl(Pos & pos, ASTPtr & node, Expected & expected) { - auto range = std::make_shared(); - node = range; if (pos->type != TokenType::OpeningSquareBracket) { @@ -34,6 +32,9 @@ bool ParserJSONPathRange::parseImpl(Pos & pos, ASTPtr & node, Expected & expecte } ++pos; + auto range = std::make_shared(); + node = range; + while (pos->type != TokenType::ClosingSquareBracket) { if (pos->type != TokenType::Number && pos->type != TokenType::Asterisk) @@ -107,8 +108,8 @@ bool ParserJSONPathRange::parseImpl(Pos & pos, ASTPtr & node, Expected & expecte } ++pos; - /// We cant have both ranges and star present, so parse was successful <=> exactly 1 of these conditions is true - return !range->ranges.empty() != range->is_star; + /// We can't have both ranges and star present, so parse was successful <=> exactly 1 of these conditions is true + return !range->ranges.empty() ^ range->is_star; } } diff --git a/src/Parsers/Lexer.cpp b/src/Parsers/Lexer.cpp index 3a6e7a26700..4de72ebc2fd 100644 --- a/src/Parsers/Lexer.cpp +++ b/src/Parsers/Lexer.cpp @@ -338,7 +338,8 @@ Token Lexer::nextTokenImpl() } default: - if (*pos == '$' && pos + 1 < end && !isWordCharASCII(pos[1])) { + if (*pos == '$' && pos + 1 < end && !isWordCharASCII(pos[1])) + { return Token(TokenType::DollarSign, token_begin, ++pos); } if (isWordCharASCII(*pos) || *pos == '$') From 582cc3daa91ecccf8c2337ac5e1c306897c839ea Mon 
Sep 17 00:00:00 2001 From: Konstantin Rudenskii Date: Sat, 5 Jun 2021 15:27:37 +0300 Subject: [PATCH 165/931] Make 1st arg always be const, fix style --- src/Functions/FunctionSQLJSON.cpp | 4 ---- src/Functions/FunctionSQLJSON.h | 24 +++++++++---------- .../JSONPath/Generators/GeneratorJSONPath.h | 2 ++ .../Generators/VisitorJSONPathRange.h | 4 +++- .../Parsers/ParserJSONPathMemberAccess.cpp | 23 +++++++++++------- .../JSONPath/Parsers/ParserJSONPathRange.cpp | 6 +---- 6 files changed, 32 insertions(+), 31 deletions(-) diff --git a/src/Functions/FunctionSQLJSON.cpp b/src/Functions/FunctionSQLJSON.cpp index 7d558dd0950..a316d9de7ab 100644 --- a/src/Functions/FunctionSQLJSON.cpp +++ b/src/Functions/FunctionSQLJSON.cpp @@ -4,10 +4,6 @@ namespace DB { -namespace ErrorCodes -{ -extern const int ILLEGAL_TYPE_OF_ARGUMENT; -} void registerFunctionsSQLJSON(FunctionFactory & factory) { diff --git a/src/Functions/FunctionSQLJSON.h b/src/Functions/FunctionSQLJSON.h index 67380a11235..b13b03cb089 100644 --- a/src/Functions/FunctionSQLJSON.h +++ b/src/Functions/FunctionSQLJSON.h @@ -30,7 +30,6 @@ namespace ErrorCodes { extern const int ILLEGAL_COLUMN; extern const int ILLEGAL_TYPE_OF_ARGUMENT; - extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH; extern const int TOO_FEW_ARGUMENTS_FOR_FUNCTION; extern const int BAD_ARGUMENTS; } @@ -52,17 +51,24 @@ public: throw Exception{"JSONPath functions require at least 2 arguments", ErrorCodes::TOO_FEW_ARGUMENTS_FOR_FUNCTION}; } - /// Check 1 argument: must be of type String (JSONPath) const auto & first_column = arguments[0]; + + /// Check 1 argument: must be of type String (JSONPath) if (!isString(first_column.type)) { throw Exception( "JSONPath functions require 1 argument to be JSONPath of type string, illegal type: " + first_column.type->getName(), ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT); } + /// Check 1 argument: must be const (JSONPath) + if (!isColumnConst(*first_column.column)) + { + throw Exception("1 argument (JSONPath) must be const", ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT); + } + + const auto & second_column = arguments[1]; /// Check 2 argument: must be of type String (JSON) - const auto & second_column = arguments[1]; if (!isString(second_column.type)) { throw Exception( @@ -78,21 +84,15 @@ public: /// Example: /// SomeFunction(database.table.column) - /// Check 1 argument: must be const String (JSONPath) const ColumnPtr & arg_jsonpath = first_column.column; const auto * arg_jsonpath_const = typeid_cast(arg_jsonpath.get()); - if (!arg_jsonpath_const) - { - throw Exception{"JSONPath argument must be of type const String", ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT}; - } - /// Retrieve data from 1 argument const auto * arg_jsonpath_string = typeid_cast(arg_jsonpath_const->getDataColumnPtr().get()); + if (!arg_jsonpath_string) { throw Exception{"Illegal column " + arg_jsonpath->getName(), ErrorCodes::ILLEGAL_COLUMN}; } - /// Check 2 argument: must be const or non-const String (JSON) const ColumnPtr & arg_json = second_column.column; const auto * col_json_const = typeid_cast(arg_json.get()); const auto * col_json_string @@ -166,6 +166,7 @@ public: bool isVariadic() const override { return true; } size_t getNumberOfArguments() const override { return 0; } bool useDefaultImplementationForConstants() const override { return true; } + ColumnNumbers getArgumentsThatAreAlwaysConstant() const override { return {0}; } DataTypePtr getReturnTypeImpl(const ColumnsWithTypeAndName & arguments) const override { @@ -182,9 +183,8 @@ public: #if USE_SIMDJSON if 
(getContext()->getSettingsRef().allow_simdjson) return FunctionSQLJSONHelpers::Executor::run(arguments, result_type, input_rows_count); -#else - return FunctionSQLJSONHelpers::Executor::run(arguments, result_type, input_rows_count); #endif + return FunctionSQLJSONHelpers::Executor::run(arguments, result_type, input_rows_count); } }; diff --git a/src/Functions/JSONPath/Generators/GeneratorJSONPath.h b/src/Functions/JSONPath/Generators/GeneratorJSONPath.h index 450a4f31dae..39f385fafc2 100644 --- a/src/Functions/JSONPath/Generators/GeneratorJSONPath.h +++ b/src/Functions/JSONPath/Generators/GeneratorJSONPath.h @@ -1,3 +1,5 @@ +#pragma once + #include #include #include diff --git a/src/Functions/JSONPath/Generators/VisitorJSONPathRange.h b/src/Functions/JSONPath/Generators/VisitorJSONPathRange.h index d3313331593..601c5ea80b9 100644 --- a/src/Functions/JSONPath/Generators/VisitorJSONPathRange.h +++ b/src/Functions/JSONPath/Generators/VisitorJSONPathRange.h @@ -1,3 +1,5 @@ +#pragma once + #include #include #include @@ -112,4 +114,4 @@ private: UInt32 current_index; }; -} // namespace +} diff --git a/src/Functions/JSONPath/Parsers/ParserJSONPathMemberAccess.cpp b/src/Functions/JSONPath/Parsers/ParserJSONPathMemberAccess.cpp index 0acb800dc1b..841e3fc4adc 100644 --- a/src/Functions/JSONPath/Parsers/ParserJSONPathMemberAccess.cpp +++ b/src/Functions/JSONPath/Parsers/ParserJSONPathMemberAccess.cpp @@ -1,13 +1,14 @@ -#include -#include +#pragma once + +#include +#include -#include -#include #include +#include +#include namespace DB { - /** * * @param pos token iterator @@ -17,24 +18,28 @@ namespace DB */ bool ParserJSONPathMemberAccess::parseImpl(Pos & pos, ASTPtr & node, Expected & expected) { - if (pos->type != TokenType::Dot) { + if (pos->type != TokenType::Dot) + { return false; } ++pos; - if (pos->type != TokenType::BareWord) { + if (pos->type != TokenType::BareWord) + { return false; } ParserIdentifier name_p; ASTPtr member_name; - if (!name_p.parse(pos, member_name, expected)) { + if (!name_p.parse(pos, member_name, expected)) + { return false; } auto member_access = std::make_shared(); node = member_access; - if (!tryGetIdentifierNameInto(member_name, member_access->member_name)) { + if (!tryGetIdentifierNameInto(member_name, member_access->member_name)) + { return false; } return true; diff --git a/src/Functions/JSONPath/Parsers/ParserJSONPathRange.cpp b/src/Functions/JSONPath/Parsers/ParserJSONPathRange.cpp index 25c5c76dd40..f0d1af47b15 100644 --- a/src/Functions/JSONPath/Parsers/ParserJSONPathRange.cpp +++ b/src/Functions/JSONPath/Parsers/ParserJSONPathRange.cpp @@ -10,10 +10,6 @@ namespace DB { namespace ErrorCodes { - extern const int ILLEGAL_COLUMN; - extern const int ILLEGAL_TYPE_OF_ARGUMENT; - extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH; - extern const int TOO_FEW_ARGUMENTS_FOR_FUNCTION; extern const int BAD_ARGUMENTS; } /** @@ -45,7 +41,7 @@ bool ParserJSONPathRange::parseImpl(Pos & pos, ASTPtr & node, Expected & expecte { if (range->is_star) { - throw Exception{"Multiple asterisks in square array range are not allowed", ErrorCodes::BAD_ARGUMENTS}; + throw Exception("Multiple asterisks in square array range are not allowed", ErrorCodes::BAD_ARGUMENTS); } range->is_star = true; ++pos; From dfba5a479b5eaa1e0f972be7977b463ee1347549 Mon Sep 17 00:00:00 2001 From: Konstantin Rudenskii Date: Sun, 6 Jun 2021 14:50:10 +0300 Subject: [PATCH 166/931] Remove pragma once --- src/Functions/JSONPath/Parsers/ParserJSONPathMemberAccess.cpp | 2 -- 1 file changed, 2 deletions(-) diff --git 
a/src/Functions/JSONPath/Parsers/ParserJSONPathMemberAccess.cpp b/src/Functions/JSONPath/Parsers/ParserJSONPathMemberAccess.cpp index 841e3fc4adc..85b43217867 100644 --- a/src/Functions/JSONPath/Parsers/ParserJSONPathMemberAccess.cpp +++ b/src/Functions/JSONPath/Parsers/ParserJSONPathMemberAccess.cpp @@ -1,5 +1,3 @@ -#pragma once - #include #include From bad7d56aaa69feb74982159b343483e233a61629 Mon Sep 17 00:00:00 2001 From: Konstantin Rudenskii Date: Sun, 6 Jun 2021 16:00:46 +0300 Subject: [PATCH 167/931] =?UTF-8?q?Style=20again=20=F0=9F=98=92?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../JSONPath/Generators/VisitorJSONPathMemberAccess.h | 2 ++ src/Functions/JSONPath/Parsers/ParserJSONPathMemberAccess.h | 2 ++ src/Functions/JSONPath/Parsers/ParserJSONPathRange.cpp | 4 ++-- 3 files changed, 6 insertions(+), 2 deletions(-) diff --git a/src/Functions/JSONPath/Generators/VisitorJSONPathMemberAccess.h b/src/Functions/JSONPath/Generators/VisitorJSONPathMemberAccess.h index 10ee2a0c5d6..fd83c478227 100644 --- a/src/Functions/JSONPath/Generators/VisitorJSONPathMemberAccess.h +++ b/src/Functions/JSONPath/Generators/VisitorJSONPathMemberAccess.h @@ -1,3 +1,5 @@ +#pragma once + #include #include #include diff --git a/src/Functions/JSONPath/Parsers/ParserJSONPathMemberAccess.h b/src/Functions/JSONPath/Parsers/ParserJSONPathMemberAccess.h index 000f20c8551..b28bf37d5ef 100644 --- a/src/Functions/JSONPath/Parsers/ParserJSONPathMemberAccess.h +++ b/src/Functions/JSONPath/Parsers/ParserJSONPathMemberAccess.h @@ -1,3 +1,5 @@ +#pragma once + #include namespace DB diff --git a/src/Functions/JSONPath/Parsers/ParserJSONPathRange.cpp b/src/Functions/JSONPath/Parsers/ParserJSONPathRange.cpp index f0d1af47b15..3da0d508b27 100644 --- a/src/Functions/JSONPath/Parsers/ParserJSONPathRange.cpp +++ b/src/Functions/JSONPath/Parsers/ParserJSONPathRange.cpp @@ -89,11 +89,11 @@ bool ParserJSONPathRange::parseImpl(Pos & pos, ASTPtr & node, Expected & expecte if (range_indices.first >= range_indices.second) { - throw Exception{ + throw Exception( ErrorCodes::BAD_ARGUMENTS, "Start of range must be greater than end of range, however {} >= {}", range_indices.first, - range_indices.second}; + range_indices.second); } range->ranges.push_back(std::move(range_indices)); From b40bb00b8d1a50f126a012e120adefa605800068 Mon Sep 17 00:00:00 2001 From: feng lv Date: Mon, 7 Jun 2021 09:14:29 +0000 Subject: [PATCH 168/931] merge support repr database --- src/Storages/StorageMerge.cpp | 167 +++++++++++++++------- src/Storages/StorageMerge.h | 7 +- src/TableFunctions/TableFunctionMerge.cpp | 56 +++++--- 3 files changed, 152 insertions(+), 78 deletions(-) diff --git a/src/Storages/StorageMerge.cpp b/src/Storages/StorageMerge.cpp index 7e093fe162a..4c9bb192de6 100644 --- a/src/Storages/StorageMerge.cpp +++ b/src/Storages/StorageMerge.cpp @@ -134,15 +134,18 @@ StorageMerge::StorageMerge( template StoragePtr StorageMerge::getFirstTable(F && predicate) const { - auto iterator = getDatabaseIterator(getContext()); + auto database_table_iterators = getDatabaseIterators(getContext()); - while (iterator->isValid()) + for (auto & iterator : database_table_iterators) { - const auto & table = iterator->table(); - if (table.get() != this && predicate(table)) - return table; + while (iterator->isValid()) + { + const auto & table = iterator->table(); + if (table.get() != this && predicate(table)) + return table; - iterator->next(); + iterator->next(); + } } return {}; @@ -164,7 +167,7 @@ bool 
StorageMerge::mayBenefitFromIndexForIn(const ASTPtr & left_in_operand, Cont size_t i = 0; for (const auto & table : selected_tables) { - const auto & storage_ptr = std::get<0>(table); + const auto & storage_ptr = std::get<1>(table); auto metadata_snapshot = storage_ptr->getInMemoryMetadataPtr(); if (storage_ptr->mayBenefitFromIndexForIn(left_in_operand, query_context, metadata_snapshot)) return true; @@ -197,22 +200,25 @@ QueryProcessingStage::Enum StorageMerge::getQueryProcessingStage( auto stage_in_source_tables = QueryProcessingStage::FetchColumns; - DatabaseTablesIteratorPtr iterator = getDatabaseIterator(local_context); + DatabaseTablesIterators database_table_iterators = getDatabaseIterators(local_context); size_t selected_table_size = 0; - while (iterator->isValid()) + for (const auto & iterator : database_table_iterators) { - const auto & table = iterator->table(); - if (table && table.get() != this) + while (iterator->isValid()) { - ++selected_table_size; - stage_in_source_tables = std::max( - stage_in_source_tables, - table->getQueryProcessingStage(local_context, to_stage, table->getInMemoryMetadataPtr(), query_info)); - } + const auto & table = iterator->table(); + if (table && table.get() != this) + { + ++selected_table_size; + stage_in_source_tables = std::max( + stage_in_source_tables, + table->getQueryProcessingStage(local_context, to_stage, table->getInMemoryMetadataPtr(), query_info)); + } - iterator->next(); + iterator->next(); + } } return selected_table_size == 1 ? stage_in_source_tables : std::min(stage_in_source_tables, QueryProcessingStage::WithMergeableState); @@ -230,13 +236,16 @@ Pipe StorageMerge::read( { Pipes pipes; + bool has_database_virtual_column = false; bool has_table_virtual_column = false; Names real_column_names; real_column_names.reserve(column_names.size()); for (const auto & column_name : column_names) { - if (column_name == "_table" && isVirtualColumn(column_name, metadata_snapshot)) + if (column_name == "_database" && isVirtualColumn(column_name, metadata_snapshot)) + has_database_virtual_column = true; + else if (column_name == "_table" && isVirtualColumn(column_name, metadata_snapshot)) has_table_virtual_column = true; else real_column_names.push_back(column_name); @@ -259,7 +268,17 @@ Pipe StorageMerge::read( if (selected_tables.empty()) /// FIXME: do we support sampling in this case? return createSources( - {}, query_info, processed_stage, max_block_size, header, {}, real_column_names, modified_context, 0, has_table_virtual_column); + {}, + query_info, + processed_stage, + max_block_size, + header, + {}, + real_column_names, + modified_context, + 0, + has_database_virtual_column, + has_table_virtual_column); size_t tables_count = selected_tables.size(); Float64 num_streams_multiplier @@ -272,7 +291,7 @@ Pipe StorageMerge::read( { for (auto it = selected_tables.begin(); it != selected_tables.end(); ++it) { - auto storage_ptr = std::get<0>(*it); + auto storage_ptr = std::get<1>(*it); auto storage_metadata_snapshot = storage_ptr->getInMemoryMetadataPtr(); auto current_info = query_info.order_optimizer->getInputOrder(storage_metadata_snapshot, local_context); if (it == selected_tables.begin()) @@ -294,7 +313,7 @@ Pipe StorageMerge::read( remaining_streams -= current_streams; current_streams = std::max(size_t(1), current_streams); - const auto & storage = std::get<0>(table); + const auto & storage = std::get<1>(table); /// If sampling requested, then check that table supports it. 
if (query_info.query->as()->sampleSize() && !storage->supportsSampling()) @@ -303,9 +322,17 @@ Pipe StorageMerge::read( auto storage_metadata_snapshot = storage->getInMemoryMetadataPtr(); auto source_pipe = createSources( - storage_metadata_snapshot, query_info, processed_stage, - max_block_size, header, table, real_column_names, modified_context, - current_streams, has_table_virtual_column); + storage_metadata_snapshot, + query_info, + processed_stage, + max_block_size, + header, + table, + real_column_names, + modified_context, + current_streams, + has_database_virtual_column, + has_table_virtual_column); pipes.emplace_back(std::move(source_pipe)); } @@ -330,10 +357,11 @@ Pipe StorageMerge::createSources( Names & real_column_names, ContextMutablePtr modified_context, size_t streams_num, + bool has_database_virtual_column, bool has_table_virtual_column, bool concat_streams) { - const auto & [storage, struct_lock, table_name] = storage_with_lock; + const auto & [database_name, storage, struct_lock, table_name] = storage_with_lock; SelectQueryInfo modified_query_info = query_info; modified_query_info.query = query_info.query->clone(); @@ -343,6 +371,7 @@ Pipe StorageMerge::createSources( modified_query_info.syntax_analyzer_result = std::make_shared(std::move(new_analyzer_res)); VirtualColumnUtils::rewriteEntityInAst(modified_query_info.query, "_table", table_name); + VirtualColumnUtils::rewriteEntityInAst(modified_query_info.query, "_database", database_name); Pipe pipe; @@ -376,7 +405,7 @@ Pipe StorageMerge::createSources( } else if (processed_stage > storage_stage) { - modified_select.replaceDatabaseAndTable(source_database, table_name); + modified_select.replaceDatabaseAndTable(database_name, table_name); /// Maximum permissible parallelism is streams_num modified_context->setSetting("max_threads", streams_num); @@ -401,6 +430,24 @@ Pipe StorageMerge::createSources( // Using concat instead. 
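/// Assumed rationale (editorial note, not part of the original patch): a ConcatProcessor drains its
/// inputs one after another, so rows coming from each source table stay contiguous and keep their order,
/// whereas a resize transform would interleave rows from all inputs and discard the per-table ordering
/// that the input-order optimization above relies on.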
pipe.addTransform(std::make_shared(pipe.getHeader(), pipe.numOutputPorts())); + if (has_database_virtual_column) + { + ColumnWithTypeAndName column; + column.name = "_database"; + column.type = std::make_shared(); + column.column = column.type->createColumnConst(0, Field(database_name)); + + auto adding_column_dag = ActionsDAG::makeAddingColumnActions(std::move(column)); + auto adding_column_actions = std::make_shared( + std::move(adding_column_dag), + ExpressionActionsSettings::fromContext(modified_context, CompileExpressions::yes)); + + pipe.addSimpleTransform([&](const Block & stream_header) + { + return std::make_shared(stream_header, adding_column_actions); + }); + } + if (has_table_virtual_column) { ColumnWithTypeAndName column; @@ -440,30 +487,33 @@ StorageMerge::StorageListWithLocks StorageMerge::getSelectedTables( const Settings & settings = query_context->getSettingsRef(); StorageListWithLocks selected_tables; - DatabaseTablesIteratorPtr iterator = getDatabaseIterator(getContext()); + DatabaseTablesIterators database_table_iterators = getDatabaseIterators(getContext()); MutableColumnPtr table_name_virtual_column; if (filter_by_virtual_column) table_name_virtual_column = ColumnString::create(); - while (iterator->isValid()) + for (const auto & iterator : database_table_iterators) { - StoragePtr storage = iterator->table(); - if (!storage) - continue; - - if (query && query->as()->prewhere() && !storage->supportsPrewhere()) - throw Exception("Storage " + storage->getName() + " doesn't support PREWHERE.", ErrorCodes::ILLEGAL_PREWHERE); - - if (storage.get() != this) + while (iterator->isValid()) { - auto table_lock = storage->lockForShare(query_context->getCurrentQueryId(), settings.lock_acquire_timeout); - selected_tables.emplace_back(storage, std::move(table_lock), iterator->name()); - if (filter_by_virtual_column) - table_name_virtual_column->insert(iterator->name()); - } + StoragePtr storage = iterator->table(); + if (!storage) + continue; - iterator->next(); + if (query && query->as()->prewhere() && !storage->supportsPrewhere()) + throw Exception("Storage " + storage->getName() + " doesn't support PREWHERE.", ErrorCodes::ILLEGAL_PREWHERE); + + if (storage.get() != this) + { + auto table_lock = storage->lockForShare(query_context->getCurrentQueryId(), settings.lock_acquire_timeout); + selected_tables.emplace_back(iterator->databaseName(), storage, std::move(table_lock), iterator->name()); + if (filter_by_virtual_column) + table_name_virtual_column->insert(iterator->name()); + } + + iterator->next(); + } } if (filter_by_virtual_column) @@ -474,13 +524,13 @@ StorageMerge::StorageListWithLocks StorageMerge::getSelectedTables( auto values = VirtualColumnUtils::extractSingleValueFromBlock(virtual_columns_block, "_table"); /// Remove unused tables from the list - selected_tables.remove_if([&] (const auto & elem) { return values.find(std::get<2>(elem)) == values.end(); }); + selected_tables.remove_if([&](const auto & elem) { return values.find(std::get<3>(elem)) == values.end(); }); } return selected_tables; } -DatabaseTablesIteratorPtr StorageMerge::getDatabaseIterator(ContextPtr local_context) const +StorageMerge::DatabaseTablesIterators StorageMerge::getDatabaseIterators(ContextPtr local_context) const { try { @@ -492,17 +542,28 @@ DatabaseTablesIteratorPtr StorageMerge::getDatabaseIterator(ContextPtr local_con throw; } - auto database = DatabaseCatalog::instance().getDatabase(source_database); + DatabaseTablesIterators database_table_iterators; - auto table_name_match = 
[this](const String & table_name_) -> bool + auto databases = DatabaseCatalog::instance().getDatabases(); + + for (const auto & db : databases) { - if (source_tables) - return source_tables->count(table_name_); - else - return source_table_regexp->match(table_name_); - }; + if (source_database_regexp->match(db.first)) + { + auto table_name_match = [this, &db](const String & table_name_) -> bool { + if (source_databases_and_tables) + { + const auto & source_tables = (*source_databases_and_tables).at(db.first); + return source_tables.count(table_name_); + } + else + return source_table_regexp->match(table_name_); + }; + database_table_iterators.emplace_back(db.second->getTablesIterator(local_context, table_name_match)); + } + } - return database->getTablesIterator(local_context, table_name_match); + return database_table_iterators; } @@ -630,7 +691,7 @@ void registerStorageMerge(StorageFactory & factory) NamesAndTypesList StorageMerge::getVirtuals() const { - NamesAndTypesList virtuals{{"_table", std::make_shared()}}; + NamesAndTypesList virtuals{{"_database", std::make_shared()}, {"_table", std::make_shared()}}; auto first_table = getFirstTable([](auto && table) { return table; }); if (first_table) diff --git a/src/Storages/StorageMerge.h b/src/Storages/StorageMerge.h index cbefa550204..676a1c61681 100644 --- a/src/Storages/StorageMerge.h +++ b/src/Storages/StorageMerge.h @@ -53,8 +53,10 @@ private: std::optional source_table_regexp; std::optional>> source_databases_and_tables; - using StorageWithLockAndName = std::tuple; + /// (Database, Table, Lock, TableName) + using StorageWithLockAndName = std::tuple; using StorageListWithLocks = std::list; + using DatabaseTablesIterators = std::vector; StorageMerge::StorageListWithLocks getSelectedTables( ContextPtr query_context, const ASTPtr & query = nullptr, bool filter_by_virtual_column = false) const; @@ -62,7 +64,7 @@ private: template StoragePtr getFirstTable(F && predicate) const; - DatabaseTablesIteratorPtr getDatabaseIterator(ContextPtr context) const; + DatabaseTablesIterators getDatabaseIterators(ContextPtr context) const; NamesAndTypesList getVirtuals() const override; ColumnSizeByName getColumnSizes() const override; @@ -93,6 +95,7 @@ protected: Names & real_column_names, ContextMutablePtr modified_context, size_t streams_num, + bool has_database_virtual_column, bool has_table_virtual_column, bool concat_streams = false); diff --git a/src/TableFunctions/TableFunctionMerge.cpp b/src/TableFunctions/TableFunctionMerge.cpp index e1629035180..83dd2dcf020 100644 --- a/src/TableFunctions/TableFunctionMerge.cpp +++ b/src/TableFunctions/TableFunctionMerge.cpp @@ -23,11 +23,11 @@ namespace ErrorCodes namespace { - [[noreturn]] void throwNoTablesMatchRegexp(const String & source_database, const String & source_table_regexp) + [[noreturn]] void throwNoTablesMatchRegexp(const String & source_database_regexp, const String & source_table_regexp) { throw Exception( - "Error while executing table function merge. In database " + source_database - + " no one matches regular expression: " + source_table_regexp, + "Error while executing table function merge. 
Either no database matches regular expression " + source_database_regexp + " or no table in the matched databases matches regular expression: " + source_table_regexp, ErrorCodes::UNKNOWN_TABLE); } } @@ -62,47 +62,57 @@ const std::unordered_map> & TableFunctionMerg if (source_databases_and_tables) return *source_databases_and_tables; - // auto database = DatabaseCatalog::instance().getDatabase(source_database); OptimizedRegularExpression database_re(source_database_regexp); OptimizedRegularExpression table_re(source_table_regexp); auto database_name_match = [&](const String & database_name_) { return database_re.match(database_name_); }; auto table_name_match = [&](const String & table_name_) { return table_re.match(table_name_); }; auto access = context->getAccess(); - bool granted_show_on_all_tables = access->isGranted(AccessType::SHOW_TABLES, source_database); - bool granted_select_on_all_tables = access->isGranted(AccessType::SELECT, source_database); - source_tables.emplace(); - for (auto it = database->getTablesIterator(context, table_name_match); it->isValid(); it->next()) + + auto databases = DatabaseCatalog::instance().getDatabases(); + + for (const auto & db : databases) { - if (!it->table()) - continue; - bool granted_show = granted_show_on_all_tables || access->isGranted(AccessType::SHOW_TABLES, source_database, it->name()); - if (!granted_show) - continue; - if (!granted_select_on_all_tables) - access->checkAccess(AccessType::SELECT, source_database, it->name()); - source_tables->emplace_back(it->name()); + if (database_re.match(db.first)) + { + bool granted_show_on_all_tables = access->isGranted(AccessType::SHOW_TABLES, db.first); + bool granted_select_on_all_tables = access->isGranted(AccessType::SELECT, db.first); + std::unordered_set source_tables; + for (auto it = db.second->getTablesIterator(context, table_name_match); it->isValid(); it->next()) + { + if (!it->table()) + continue; + bool granted_show = granted_show_on_all_tables || access->isGranted(AccessType::SHOW_TABLES, db.first, it->name()); + if (!granted_show) + continue; + if (!granted_select_on_all_tables) + access->checkAccess(AccessType::SELECT, db.first, it->name()); + source_tables.insert(it->name()); + } + + if (!source_tables.empty()) + (*source_databases_and_tables)[db.first] = source_tables; + } } - if (source_tables->empty()) - throwNoTablesMatchRegexp(source_database, source_table_regexp); + if ((*source_databases_and_tables).empty()) + throwNoTablesMatchRegexp(source_database_regexp, source_table_regexp); - return *source_tables; + return *source_databases_and_tables; } ColumnsDescription TableFunctionMerge::getActualTableStructure(ContextPtr context) const { - for (const auto & table_name : getSourceTables(context)) + for (const auto & db_with_tables : getSourceDatabasesAndTables(context)) { - auto storage = DatabaseCatalog::instance().tryGetTable(StorageID{source_database, table_name}, context); + auto storage = DatabaseCatalog::instance().tryGetTable(StorageID{db_with_tables.first, *db_with_tables.second.begin()}, context); if (storage) return ColumnsDescription{storage->getInMemoryMetadataPtr()->getColumns().getAllPhysical()}; } - throwNoTablesMatchRegexp(source_database, source_table_regexp); + throwNoTablesMatchRegexp(source_database_regexp, source_table_regexp); } From 41b518012f36e34ec73e8bffc704b4dabefc409b Mon Sep 17 00:00:00 2001 From: feng lv Date: Tue, 8 Jun 2021 03:11:27 +0000 Subject: [PATCH 169/931] fix and add test --- 
src/Storages/StorageMerge.cpp | 10 +- src/Storages/StorageMerge.h | 1 + src/TableFunctions/TableFunctionMerge.cpp | 13 +- ...902_table_function_merge_db_repr.reference | 146 ++++++++++++++++++ .../01902_table_function_merge_db_repr.sql | 46 ++++++ 5 files changed, 211 insertions(+), 5 deletions(-) create mode 100644 tests/queries/0_stateless/01902_table_function_merge_db_repr.reference create mode 100644 tests/queries/0_stateless/01902_table_function_merge_db_repr.sql diff --git a/src/Storages/StorageMerge.cpp b/src/Storages/StorageMerge.cpp index 4c9bb192de6..ff59d7e50a6 100644 --- a/src/Storages/StorageMerge.cpp +++ b/src/Storages/StorageMerge.cpp @@ -103,9 +103,13 @@ StorageMerge::StorageMerge( const StorageID & table_id_, const ColumnsDescription & columns_, const String & comment, + const String & source_database_regexp_, const std::unordered_map> & source_databases_and_tables_, ContextPtr context_) - : IStorage(table_id_), WithContext(context_->getGlobalContext()), source_databases_and_tables(source_databases_and_tables_) + : IStorage(table_id_) + , WithContext(context_->getGlobalContext()) + , source_database_regexp(source_database_regexp_) + , source_databases_and_tables(source_databases_and_tables_) { StorageInMemoryMetadata storage_metadata; storage_metadata.setColumns(columns_); @@ -684,6 +688,10 @@ void registerStorageMerge(StorageFactory & factory) String source_database_regexp = engine_args[0]->as().value.safeGet(); String table_name_regexp = engine_args[1]->as().value.safeGet(); + /// If database argument is not String literal, we should not treat it as regexp + if (!engine_args[0]->as()) + source_database_regexp = "^" + source_database_regexp + "$"; + return StorageMerge::create( args.table_id, args.columns, args.comment, source_database_regexp, table_name_regexp, args.getContext()); }); diff --git a/src/Storages/StorageMerge.h b/src/Storages/StorageMerge.h index 676a1c61681..74ccb7b397b 100644 --- a/src/Storages/StorageMerge.h +++ b/src/Storages/StorageMerge.h @@ -74,6 +74,7 @@ protected: const StorageID & table_id_, const ColumnsDescription & columns_, const String & comment, + const String & source_database_regexp_, const std::unordered_map> & source_databases_and_tables_, ContextPtr context_); diff --git a/src/TableFunctions/TableFunctionMerge.cpp b/src/TableFunctions/TableFunctionMerge.cpp index 83dd2dcf020..5f487f3e8ba 100644 --- a/src/TableFunctions/TableFunctionMerge.cpp +++ b/src/TableFunctions/TableFunctionMerge.cpp @@ -49,11 +49,15 @@ void TableFunctionMerge::parseArguments(const ASTPtr & ast_function, ContextPtr " - name of source database and regexp for table names.", ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH); - args[0] = evaluateConstantExpressionForDatabaseName(args[0], context); - args[1] = evaluateConstantExpressionAsLiteral(args[1], context); + auto db_arg = evaluateConstantExpressionForDatabaseName(args[0], context); + auto table_arg = evaluateConstantExpressionAsLiteral(args[1], context); - source_database_regexp = args[0]->as().value.safeGet(); - source_table_regexp = args[1]->as().value.safeGet(); + source_database_regexp = db_arg->as().value.safeGet(); + source_table_regexp = table_arg->as().value.safeGet(); + + /// If database argument is not String literal, we should not treat it as regexp + if (!args[0]->as()) + source_database_regexp = "^" + source_database_regexp + "$"; } @@ -122,6 +126,7 @@ StoragePtr TableFunctionMerge::executeImpl(const ASTPtr & /*ast_function*/, Cont StorageID(getDatabaseName(), table_name), 
getActualTableStructure(context), String{}, + source_database_regexp, getSourceDatabasesAndTables(context), context); diff --git a/tests/queries/0_stateless/01902_table_function_merge_db_repr.reference b/tests/queries/0_stateless/01902_table_function_merge_db_repr.reference new file mode 100644 index 00000000000..54d168eba7c --- /dev/null +++ b/tests/queries/0_stateless/01902_table_function_merge_db_repr.reference @@ -0,0 +1,146 @@ +CREATE TABLE t_merge as db.t ENGINE=Merge(^db, ^t) +SELECT _database, _table, n FROM db.t_merge ORDER BY _database, _table, n +db t 0 +db t 1 +db t 2 +db t 3 +db t 4 +db t 5 +db t 6 +db t 7 +db t 8 +db t 9 +db1 t1 0 +db1 t1 1 +db1 t1 2 +db1 t1 3 +db1 t1 4 +db1 t1 5 +db1 t1 6 +db1 t1 7 +db1 t1 8 +db1 t1 9 +db2 t2 0 +db2 t2 1 +db2 t2 2 +db2 t2 3 +db2 t2 4 +db2 t2 5 +db2 t2 6 +db2 t2 7 +db2 t2 8 +db2 t2 9 +db3 t3 0 +db3 t3 1 +db3 t3 2 +db3 t3 3 +db3 t3 4 +db3 t3 5 +db3 t3 6 +db3 t3 7 +db3 t3 8 +db3 t3 9 +SELECT _database, _table, n FROM merge(^db, ^t) ORDER BY _database, _table, n +db t 0 +db t 1 +db t 2 +db t 3 +db t 4 +db t 5 +db t 6 +db t 7 +db t 8 +db t 9 +db t_merge 0 +db t_merge 0 +db t_merge 0 +db t_merge 0 +db t_merge 1 +db t_merge 1 +db t_merge 1 +db t_merge 1 +db t_merge 2 +db t_merge 2 +db t_merge 2 +db t_merge 2 +db t_merge 3 +db t_merge 3 +db t_merge 3 +db t_merge 3 +db t_merge 4 +db t_merge 4 +db t_merge 4 +db t_merge 4 +db t_merge 5 +db t_merge 5 +db t_merge 5 +db t_merge 5 +db t_merge 6 +db t_merge 6 +db t_merge 6 +db t_merge 6 +db t_merge 7 +db t_merge 7 +db t_merge 7 +db t_merge 7 +db t_merge 8 +db t_merge 8 +db t_merge 8 +db t_merge 8 +db t_merge 9 +db t_merge 9 +db t_merge 9 +db t_merge 9 +db1 t1 0 +db1 t1 1 +db1 t1 2 +db1 t1 3 +db1 t1 4 +db1 t1 5 +db1 t1 6 +db1 t1 7 +db1 t1 8 +db1 t1 9 +db2 t2 0 +db2 t2 1 +db2 t2 2 +db2 t2 3 +db2 t2 4 +db2 t2 5 +db2 t2 6 +db2 t2 7 +db2 t2 8 +db2 t2 9 +db3 t3 0 +db3 t3 1 +db3 t3 2 +db3 t3 3 +db3 t3 4 +db3 t3 5 +db3 t3 6 +db3 t3 7 +db3 t3 8 +db3 t3 9 +CREATE TABLE t_merge_1 as db.t ENGINE=Merge(currentDatabase(), ^t) +SELECT _database, _table, n FROM db.t_merge_1 ORDER BY _database, _table, n +db1 t1 0 +db1 t1 1 +db1 t1 2 +db1 t1 3 +db1 t1 4 +db1 t1 5 +db1 t1 6 +db1 t1 7 +db1 t1 8 +db1 t1 9 +SELECT _database, _table, n FROM merge(currentDatabase(), ^t) ORDER BY _database, _table, n +db1 t1 0 +db1 t1 1 +db1 t1 2 +db1 t1 3 +db1 t1 4 +db1 t1 5 +db1 t1 6 +db1 t1 7 +db1 t1 8 +db1 t1 9 diff --git a/tests/queries/0_stateless/01902_table_function_merge_db_repr.sql b/tests/queries/0_stateless/01902_table_function_merge_db_repr.sql new file mode 100644 index 00000000000..dc473fba250 --- /dev/null +++ b/tests/queries/0_stateless/01902_table_function_merge_db_repr.sql @@ -0,0 +1,46 @@ +DROP DATABASE IF EXISTS db; +DROP DATABASE IF EXISTS db1; +DROP DATABASE IF EXISTS db2; +DROP DATABASE IF EXISTS db3; + +CREATE DATABASE db; +CREATE DATABASE db1; +CREATE DATABASE db2; +CREATE DATABASE db3; + +CREATE TABLE db.t (n Int8) ENGINE=MergeTree ORDER BY n; +CREATE TABLE db1.t1 (n Int8) ENGINE=MergeTree ORDER BY n; +CREATE TABLE db2.t2 (n Int8) ENGINE=MergeTree ORDER BY n; +CREATE TABLE db3.t3 (n Int8) ENGINE=MergeTree ORDER BY n; + +INSERT INTO db.t SELECT * FROM numbers(10); +INSERT INTO db1.t1 SELECT * FROM numbers(10); +INSERT INTO db2.t2 SELECT * FROM numbers(10); +INSERT INTO db3.t3 SELECT * FROM numbers(10); + +SELECT 'CREATE TABLE t_merge as db.t ENGINE=Merge(^db, ^t)'; +CREATE TABLE db.t_merge as db.t ENGINE=Merge('^db', '^t'); + +SELECT 'SELECT _database, _table, n FROM db.t_merge ORDER BY _database, _table, n'; +SELECT 
_database, _table, n FROM db.t_merge ORDER BY _database, _table, n; + +SELECT 'SELECT _database, _table, n FROM merge(^db, ^t) ORDER BY _database, _table, n'; +SELECT _database, _table, n FROM merge('^db', '^t') ORDER BY _database, _table, n; + +USE db1; + +-- evaluated value of expression should not be treat as repr +SELECT 'CREATE TABLE t_merge_1 as db.t ENGINE=Merge(currentDatabase(), ^t)'; +CREATE TABLE db.t_merge_1 as db.t ENGINE=Merge(currentDatabase(), '^t'); + +SELECT 'SELECT _database, _table, n FROM db.t_merge_1 ORDER BY _database, _table, n'; +SELECT _database, _table, n FROM db.t_merge_1 ORDER BY _database, _table, n; + +-- evaluated value of expression should not be treat as repr +SELECT 'SELECT _database, _table, n FROM merge(currentDatabase(), ^t) ORDER BY _database, _table, n'; +SELECT _database, _table, n FROM merge(currentDatabase(), '^t') ORDER BY _database, _table, n; + +DROP DATABASE db; +DROP DATABASE db1; +DROP DATABASE db2; +DROP DATABASE db3; From ba37ebb1335a36bd5b708870de9c02e10e29e500 Mon Sep 17 00:00:00 2001 From: "d.v.semenov" Date: Mon, 7 Jun 2021 23:54:44 +0300 Subject: [PATCH 170/931] Added functional tests --- .../01889_sql_json_functions.reference | 33 +++++++++++++++++ .../0_stateless/01889_sql_json_functions.sql | 35 +++++++++++++++++++ 2 files changed, 68 insertions(+) create mode 100644 tests/queries/0_stateless/01889_sql_json_functions.reference create mode 100644 tests/queries/0_stateless/01889_sql_json_functions.sql diff --git a/tests/queries/0_stateless/01889_sql_json_functions.reference b/tests/queries/0_stateless/01889_sql_json_functions.reference new file mode 100644 index 00000000000..b8dc5d8e416 --- /dev/null +++ b/tests/queries/0_stateless/01889_sql_json_functions.reference @@ -0,0 +1,33 @@ +--JSON_VALUE-- +1 +1.2 +true +"world" +null + + + + +--JSON_QUERY-- +[1] +[1.2] +[true] +["world"] +[null] +[["world","world2"]] +[{"world":"!"}] + + +--JSON_EXISTS-- +1 +1 +0 +1 +0 +0 +1 +1 +0 +1 +0 +1 \ No newline at end of file diff --git a/tests/queries/0_stateless/01889_sql_json_functions.sql b/tests/queries/0_stateless/01889_sql_json_functions.sql new file mode 100644 index 00000000000..6d67b73f305 --- /dev/null +++ b/tests/queries/0_stateless/01889_sql_json_functions.sql @@ -0,0 +1,35 @@ +SELECT '--JSON_VALUE--'; +SELECT JSON_VALUE('$.hello', '{"hello":1}'); +SELECT JSON_VALUE('$.hello', '{"hello":1.2}'); +SELECT JSON_VALUE('$.hello', '{"hello":true}'); +SELECT JSON_VALUE('$.hello', '{"hello":"world"}'); +SELECT JSON_VALUE('$.hello', '{"hello":null}'); +SELECT JSON_VALUE('$.hello', '{"hello":["world","world2"]}'); +SELECT JSON_VALUE('$.hello', '{"hello":{"world":"!"}}'); +SELECT JSON_VALUE('$.hello', '{hello:world}'); -- invalid json => default value (empty string) +SELECT JSON_VALUE('$.hello', ''); + +SELECT '--JSON_QUERY--'; +SELECT JSON_QUERY('$.hello', '{"hello":1}'); +SELECT JSON_QUERY('$.hello', '{"hello":1.2}'); +SELECT JSON_QUERY('$.hello', '{"hello":true}'); +SELECT JSON_QUERY('$.hello', '{"hello":"world"}'); +SELECT JSON_QUERY('$.hello', '{"hello":null}'); +SELECT JSON_QUERY('$.hello', '{"hello":["world","world2"]}'); +SELECT JSON_QUERY('$.hello', '{"hello":{"world":"!"}}'); +SELECT JSON_QUERY('$.hello', '{hello:{"world":"!"}}}'); -- invalid json => default value (empty string) +SELECT JSON_QUERY('$.hello', ''); + +SELECT '--JSON_EXISTS--'; +SELECT JSON_EXISTS('$.hello', '{"hello":1}'); +SELECT JSON_EXISTS('$.world', '{"hello":1,"world":2}'); +SELECT JSON_EXISTS('$.world', '{"hello":{"world":1}}'); +SELECT JSON_EXISTS('$.hello.world', 
'{"hello":{"world":1}}'); +SELECT JSON_EXISTS('$.hello', '{hello:world}'); -- invalid json => default value (zero integer) +SELECT JSON_EXISTS('$.hello', ''); +SELECT JSON_EXISTS('$.hello[*]', '{"hello":["world"]}'); +SELECT JSON_EXISTS('$.hello[0]', '{"hello":["world"]}'); +SELECT JSON_EXISTS('$.hello[1]', '{"hello":["world"]}'); +SELECT JSON_EXISTS('$.a[*].b', '{"a":[{"b":1},{"c":2}]}'); +SELECT JSON_EXISTS('$.a[*].f', '{"a":[{"b":1},{"c":2}]}'); +SELECT JSON_EXISTS('$.a[*][0].h', '{"a":[[{"b":1}, {"g":1}],[{"h":1},{"y":1}]]}'); \ No newline at end of file From 00ce6f69dfc431442e106da62aae7c581bed324d Mon Sep 17 00:00:00 2001 From: "d.v.semenov" Date: Wed, 9 Jun 2021 12:46:17 +0300 Subject: [PATCH 171/931] Added newline --- tests/queries/0_stateless/01889_sql_json_functions.reference | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/queries/0_stateless/01889_sql_json_functions.reference b/tests/queries/0_stateless/01889_sql_json_functions.reference index b8dc5d8e416..7457aca18ed 100644 --- a/tests/queries/0_stateless/01889_sql_json_functions.reference +++ b/tests/queries/0_stateless/01889_sql_json_functions.reference @@ -30,4 +30,4 @@ null 0 1 0 -1 \ No newline at end of file +1 From dde9ce522338450df9e969eac40c177f37e6c95a Mon Sep 17 00:00:00 2001 From: Kruglov Pavel <48961922+Avogar@users.noreply.github.com> Date: Fri, 11 Jun 2021 15:22:35 +0300 Subject: [PATCH 172/931] Use hits_10m_single only for uniqTheta --- tests/performance/uniq.xml | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/tests/performance/uniq.xml b/tests/performance/uniq.xml index f6f7ac01c65..e8f3aed62fe 100644 --- a/tests/performance/uniq.xml +++ b/tests/performance/uniq.xml @@ -1,5 +1,6 @@ + hits_100m_single hits_10m_single 30000000000 @@ -53,10 +54,10 @@ uniqUpTo(10) uniqUpTo(25) uniqUpTo(100) - uniqTheta - SELECT {key} AS k, {func}(UserID) FROM hits_10m_single GROUP BY k FORMAT Null + SELECT {key} AS k, {func}(UserID) FROM hits_100m_single GROUP BY k FORMAT Null + SELECT {key} AS k, uniqTheta(UserID) FROM hits_10m_single GROUP BY k FORMAT Null From 0952c94abd8aea4cdd631e0f2118cc96fb8f5a90 Mon Sep 17 00:00:00 2001 From: meoww-bot <14239840+meoww-bot@users.noreply.github.com> Date: Sun, 13 Jun 2021 01:47:01 +0800 Subject: [PATCH 173/931] Update index.md --- .../engines/table-engines/integrations/index.md | 17 +++++++++++++++-- 1 file changed, 15 insertions(+), 2 deletions(-) diff --git a/docs/zh/engines/table-engines/integrations/index.md b/docs/zh/engines/table-engines/integrations/index.md index 17e9d204aa6..0c34ae078a0 100644 --- a/docs/zh/engines/table-engines/integrations/index.md +++ b/docs/zh/engines/table-engines/integrations/index.md @@ -1,8 +1,21 @@ --- -machine_translated: true -machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd toc_folder_title: "\u96C6\u6210" toc_priority: 30 --- +# 集成的表引擎 {#table-engines-for-integrations} +ClickHouse 提供了多种方式来与外部系统集成,包括表引擎。像所有其他的表引擎一样,使用`CREATE TABLE`或`ALTER TABLE`查询语句来完成配置。然后从用户的角度来看,配置的集成看起来像查询一个正常的表,但对它的查询是代理给外部系统的。这种透明的查询是这种方法相对于其他集成方法的主要优势之一,比如外部字典或表函数,它们需要在每次使用时使用自定义查询方法。 + +以下是支持的集成方式: + +- [ODBC](../../../engines/table-engines/integrations/odbc.md) +- [JDBC](../../../engines/table-engines/integrations/jdbc.md) +- [MySQL](../../../engines/table-engines/integrations/mysql.md) +- [MongoDB](../../../engines/table-engines/integrations/mongodb.md) +- [HDFS](../../../engines/table-engines/integrations/hdfs.md) +- [S3](../../../engines/table-engines/integrations/s3.md) +- 
[Kafka](../../../engines/table-engines/integrations/kafka.md) +- [EmbeddedRocksDB](../../../engines/table-engines/integrations/embedded-rocksdb.md) +- [RabbitMQ](../../../engines/table-engines/integrations/rabbitmq.md) +- [PostgreSQL](../../../engines/table-engines/integrations/postgresql.md) From 1ffd3809037c7c6cb584857a70fc84aedf3814cf Mon Sep 17 00:00:00 2001 From: feng lv Date: Sun, 13 Jun 2021 08:03:19 +0000 Subject: [PATCH 174/931] use full match for storageMerge --- src/Common/OptimizedRegularExpression.cpp | 18 ++ src/Common/OptimizedRegularExpression.h | 2 + src/Storages/StorageMerge.cpp | 12 +- src/TableFunctions/TableFunctionMerge.cpp | 17 +- ...902_table_function_merge_db_repr.reference | 290 +++++++++--------- .../01902_table_function_merge_db_repr.sql | 66 ++-- 6 files changed, 207 insertions(+), 198 deletions(-) diff --git a/src/Common/OptimizedRegularExpression.cpp b/src/Common/OptimizedRegularExpression.cpp index 1464923e6ab..ba1b82ee2fe 100644 --- a/src/Common/OptimizedRegularExpression.cpp +++ b/src/Common/OptimizedRegularExpression.cpp @@ -2,6 +2,8 @@ #include #include +#include + #define MIN_LENGTH_FOR_STRSTR 3 #define MAX_SUBPATTERNS 1024 @@ -342,6 +344,22 @@ OptimizedRegularExpressionImpl::OptimizedRegularExpressionImpl(cons } } +template +bool OptimizedRegularExpressionImpl::fullMatch(const std::string & subject) const +{ + if (is_trivial) + { + if (required_substring.empty()) + return subject.empty(); + + if (is_case_insensitive) + return Poco::toLower(subject) == Poco::toLower(required_substring); + else + return subject == required_substring; + } + + return RegexType::FullMatch(StringPieceType(subject.data(), subject.size()), *re2); +} template bool OptimizedRegularExpressionImpl::match(const char * subject, size_t subject_size) const diff --git a/src/Common/OptimizedRegularExpression.h b/src/Common/OptimizedRegularExpression.h index fddefe596c4..1dc136a0148 100644 --- a/src/Common/OptimizedRegularExpression.h +++ b/src/Common/OptimizedRegularExpression.h @@ -64,6 +64,8 @@ public: OptimizedRegularExpressionImpl(const std::string & regexp_, int options = 0); + bool fullMatch(const std::string & subject) const; + bool match(const std::string & subject) const { return match(subject.data(), subject.size()); diff --git a/src/Storages/StorageMerge.cpp b/src/Storages/StorageMerge.cpp index ff59d7e50a6..6c439ba0dc6 100644 --- a/src/Storages/StorageMerge.cpp +++ b/src/Storages/StorageMerge.cpp @@ -144,8 +144,8 @@ StoragePtr StorageMerge::getFirstTable(F && predicate) const { while (iterator->isValid()) { - const auto & table = iterator->table(); - if (table.get() != this && predicate(table)) + const auto & table = iterator->table(); + if (table.get() != this && predicate(table)) return table; iterator->next(); @@ -552,7 +552,7 @@ StorageMerge::DatabaseTablesIterators StorageMerge::getDatabaseIterators(Context for (const auto & db : databases) { - if (source_database_regexp->match(db.first)) + if (source_database_regexp->fullMatch(db.first)) { auto table_name_match = [this, &db](const String & table_name_) -> bool { if (source_databases_and_tables) @@ -561,7 +561,7 @@ StorageMerge::DatabaseTablesIterators StorageMerge::getDatabaseIterators(Context return source_tables.count(table_name_); } else - return source_table_regexp->match(table_name_); + return source_table_regexp->fullMatch(table_name_); }; database_table_iterators.emplace_back(db.second->getTablesIterator(local_context, table_name_match)); } @@ -688,10 +688,6 @@ void registerStorageMerge(StorageFactory & factory) 
String source_database_regexp = engine_args[0]->as().value.safeGet(); String table_name_regexp = engine_args[1]->as().value.safeGet(); - /// If database argument is not String literal, we should not treat it as regexp - if (!engine_args[0]->as()) - source_database_regexp = "^" + source_database_regexp + "$"; - return StorageMerge::create( args.table_id, args.columns, args.comment, source_database_regexp, table_name_regexp, args.getContext()); }); diff --git a/src/TableFunctions/TableFunctionMerge.cpp b/src/TableFunctions/TableFunctionMerge.cpp index 5f487f3e8ba..330f4f2c25f 100644 --- a/src/TableFunctions/TableFunctionMerge.cpp +++ b/src/TableFunctions/TableFunctionMerge.cpp @@ -49,15 +49,11 @@ void TableFunctionMerge::parseArguments(const ASTPtr & ast_function, ContextPtr " - name of source database and regexp for table names.", ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH); - auto db_arg = evaluateConstantExpressionForDatabaseName(args[0], context); - auto table_arg = evaluateConstantExpressionAsLiteral(args[1], context); + args[0] = evaluateConstantExpressionForDatabaseName(args[0], context); + args[1] = evaluateConstantExpressionAsLiteral(args[1], context); - source_database_regexp = db_arg->as().value.safeGet(); - source_table_regexp = table_arg->as().value.safeGet(); - - /// If database argument is not String literal, we should not treat it as regexp - if (!args[0]->as()) - source_database_regexp = "^" + source_database_regexp + "$"; + source_database_regexp = args[0]->as().value.safeGet(); + source_table_regexp = args[1]->as().value.safeGet(); } @@ -66,11 +62,10 @@ const std::unordered_map> & TableFunctionMerg if (source_databases_and_tables) return *source_databases_and_tables; - OptimizedRegularExpression database_re(source_database_regexp); OptimizedRegularExpression table_re(source_table_regexp); - auto table_name_match = [&](const String & table_name_) { return table_re.match(table_name_); }; + auto table_name_match = [&](const String & table_name_) { return table_re.fullMatch(table_name_); }; auto access = context->getAccess(); @@ -78,7 +73,7 @@ const std::unordered_map> & TableFunctionMerg for (const auto & db : databases) { - if (database_re.match(db.first)) + if (database_re.fullMatch(db.first)) { bool granted_show_on_all_tables = access->isGranted(AccessType::SHOW_TABLES, db.first); bool granted_select_on_all_tables = access->isGranted(AccessType::SELECT, db.first); diff --git a/tests/queries/0_stateless/01902_table_function_merge_db_repr.reference b/tests/queries/0_stateless/01902_table_function_merge_db_repr.reference index 54d168eba7c..20436d8a267 100644 --- a/tests/queries/0_stateless/01902_table_function_merge_db_repr.reference +++ b/tests/queries/0_stateless/01902_table_function_merge_db_repr.reference @@ -1,146 +1,146 @@ -CREATE TABLE t_merge as db.t ENGINE=Merge(^db, ^t) -SELECT _database, _table, n FROM db.t_merge ORDER BY _database, _table, n -db t 0 -db t 1 -db t 2 -db t 3 -db t 4 -db t 5 -db t 6 -db t 7 -db t 8 -db t 9 -db1 t1 0 -db1 t1 1 -db1 t1 2 -db1 t1 3 -db1 t1 4 -db1 t1 5 -db1 t1 6 -db1 t1 7 -db1 t1 8 -db1 t1 9 -db2 t2 0 -db2 t2 1 -db2 t2 2 -db2 t2 3 -db2 t2 4 -db2 t2 5 -db2 t2 6 -db2 t2 7 -db2 t2 8 -db2 t2 9 -db3 t3 0 -db3 t3 1 -db3 t3 2 -db3 t3 3 -db3 t3 4 -db3 t3 5 -db3 t3 6 -db3 t3 7 -db3 t3 8 -db3 t3 9 +CREATE TABLE t_merge as 01902_db.t ENGINE=Merge(^01902_db.*, ^t.*) +SELECT _database, _table, n FROM 01902_db.t_merge ORDER BY _database, _table, n +01902_db t 0 +01902_db t 1 +01902_db t 2 +01902_db t 3 +01902_db t 4 +01902_db t 5 +01902_db t 6 
+01902_db t 7 +01902_db t 8 +01902_db t 9 +01902_db1 t1 0 +01902_db1 t1 1 +01902_db1 t1 2 +01902_db1 t1 3 +01902_db1 t1 4 +01902_db1 t1 5 +01902_db1 t1 6 +01902_db1 t1 7 +01902_db1 t1 8 +01902_db1 t1 9 +01902_db2 t2 0 +01902_db2 t2 1 +01902_db2 t2 2 +01902_db2 t2 3 +01902_db2 t2 4 +01902_db2 t2 5 +01902_db2 t2 6 +01902_db2 t2 7 +01902_db2 t2 8 +01902_db2 t2 9 +01902_db3 t3 0 +01902_db3 t3 1 +01902_db3 t3 2 +01902_db3 t3 3 +01902_db3 t3 4 +01902_db3 t3 5 +01902_db3 t3 6 +01902_db3 t3 7 +01902_db3 t3 8 +01902_db3 t3 9 SELECT _database, _table, n FROM merge(^db, ^t) ORDER BY _database, _table, n -db t 0 -db t 1 -db t 2 -db t 3 -db t 4 -db t 5 -db t 6 -db t 7 -db t 8 -db t 9 -db t_merge 0 -db t_merge 0 -db t_merge 0 -db t_merge 0 -db t_merge 1 -db t_merge 1 -db t_merge 1 -db t_merge 1 -db t_merge 2 -db t_merge 2 -db t_merge 2 -db t_merge 2 -db t_merge 3 -db t_merge 3 -db t_merge 3 -db t_merge 3 -db t_merge 4 -db t_merge 4 -db t_merge 4 -db t_merge 4 -db t_merge 5 -db t_merge 5 -db t_merge 5 -db t_merge 5 -db t_merge 6 -db t_merge 6 -db t_merge 6 -db t_merge 6 -db t_merge 7 -db t_merge 7 -db t_merge 7 -db t_merge 7 -db t_merge 8 -db t_merge 8 -db t_merge 8 -db t_merge 8 -db t_merge 9 -db t_merge 9 -db t_merge 9 -db t_merge 9 -db1 t1 0 -db1 t1 1 -db1 t1 2 -db1 t1 3 -db1 t1 4 -db1 t1 5 -db1 t1 6 -db1 t1 7 -db1 t1 8 -db1 t1 9 -db2 t2 0 -db2 t2 1 -db2 t2 2 -db2 t2 3 -db2 t2 4 -db2 t2 5 -db2 t2 6 -db2 t2 7 -db2 t2 8 -db2 t2 9 -db3 t3 0 -db3 t3 1 -db3 t3 2 -db3 t3 3 -db3 t3 4 -db3 t3 5 -db3 t3 6 -db3 t3 7 -db3 t3 8 -db3 t3 9 -CREATE TABLE t_merge_1 as db.t ENGINE=Merge(currentDatabase(), ^t) -SELECT _database, _table, n FROM db.t_merge_1 ORDER BY _database, _table, n -db1 t1 0 -db1 t1 1 -db1 t1 2 -db1 t1 3 -db1 t1 4 -db1 t1 5 -db1 t1 6 -db1 t1 7 -db1 t1 8 -db1 t1 9 -SELECT _database, _table, n FROM merge(currentDatabase(), ^t) ORDER BY _database, _table, n -db1 t1 0 -db1 t1 1 -db1 t1 2 -db1 t1 3 -db1 t1 4 -db1 t1 5 -db1 t1 6 -db1 t1 7 -db1 t1 8 -db1 t1 9 +01902_db t 0 +01902_db t 1 +01902_db t 2 +01902_db t 3 +01902_db t 4 +01902_db t 5 +01902_db t 6 +01902_db t 7 +01902_db t 8 +01902_db t 9 +01902_db t_merge 0 +01902_db t_merge 0 +01902_db t_merge 0 +01902_db t_merge 0 +01902_db t_merge 1 +01902_db t_merge 1 +01902_db t_merge 1 +01902_db t_merge 1 +01902_db t_merge 2 +01902_db t_merge 2 +01902_db t_merge 2 +01902_db t_merge 2 +01902_db t_merge 3 +01902_db t_merge 3 +01902_db t_merge 3 +01902_db t_merge 3 +01902_db t_merge 4 +01902_db t_merge 4 +01902_db t_merge 4 +01902_db t_merge 4 +01902_db t_merge 5 +01902_db t_merge 5 +01902_db t_merge 5 +01902_db t_merge 5 +01902_db t_merge 6 +01902_db t_merge 6 +01902_db t_merge 6 +01902_db t_merge 6 +01902_db t_merge 7 +01902_db t_merge 7 +01902_db t_merge 7 +01902_db t_merge 7 +01902_db t_merge 8 +01902_db t_merge 8 +01902_db t_merge 8 +01902_db t_merge 8 +01902_db t_merge 9 +01902_db t_merge 9 +01902_db t_merge 9 +01902_db t_merge 9 +01902_db1 t1 0 +01902_db1 t1 1 +01902_db1 t1 2 +01902_db1 t1 3 +01902_db1 t1 4 +01902_db1 t1 5 +01902_db1 t1 6 +01902_db1 t1 7 +01902_db1 t1 8 +01902_db1 t1 9 +01902_db2 t2 0 +01902_db2 t2 1 +01902_db2 t2 2 +01902_db2 t2 3 +01902_db2 t2 4 +01902_db2 t2 5 +01902_db2 t2 6 +01902_db2 t2 7 +01902_db2 t2 8 +01902_db2 t2 9 +01902_db3 t3 0 +01902_db3 t3 1 +01902_db3 t3 2 +01902_db3 t3 3 +01902_db3 t3 4 +01902_db3 t3 5 +01902_db3 t3 6 +01902_db3 t3 7 +01902_db3 t3 8 +01902_db3 t3 9 +CREATE TABLE t_merge_1 as 01902_db.t ENGINE=Merge(currentDatabase(), ^t.*) +SELECT _database, _table, n FROM 01902_db.t_merge_1 ORDER BY _database, _table, 
n +01902_db1 t1 0 +01902_db1 t1 1 +01902_db1 t1 2 +01902_db1 t1 3 +01902_db1 t1 4 +01902_db1 t1 5 +01902_db1 t1 6 +01902_db1 t1 7 +01902_db1 t1 8 +01902_db1 t1 9 +SELECT _database, _table, n FROM merge(currentDatabase(), ^t.*) ORDER BY _database, _table, n +01902_db1 t1 0 +01902_db1 t1 1 +01902_db1 t1 2 +01902_db1 t1 3 +01902_db1 t1 4 +01902_db1 t1 5 +01902_db1 t1 6 +01902_db1 t1 7 +01902_db1 t1 8 +01902_db1 t1 9 diff --git a/tests/queries/0_stateless/01902_table_function_merge_db_repr.sql b/tests/queries/0_stateless/01902_table_function_merge_db_repr.sql index dc473fba250..bb8e744246f 100644 --- a/tests/queries/0_stateless/01902_table_function_merge_db_repr.sql +++ b/tests/queries/0_stateless/01902_table_function_merge_db_repr.sql @@ -1,46 +1,44 @@ -DROP DATABASE IF EXISTS db; -DROP DATABASE IF EXISTS db1; -DROP DATABASE IF EXISTS db2; -DROP DATABASE IF EXISTS db3; +DROP DATABASE IF EXISTS 01902_db; +DROP DATABASE IF EXISTS 01902_db1; +DROP DATABASE IF EXISTS 01902_db2; +DROP DATABASE IF EXISTS 01902_db3; -CREATE DATABASE db; -CREATE DATABASE db1; -CREATE DATABASE db2; -CREATE DATABASE db3; +CREATE DATABASE 01902_db; +CREATE DATABASE 01902_db1; +CREATE DATABASE 01902_db2; +CREATE DATABASE 01902_db3; -CREATE TABLE db.t (n Int8) ENGINE=MergeTree ORDER BY n; -CREATE TABLE db1.t1 (n Int8) ENGINE=MergeTree ORDER BY n; -CREATE TABLE db2.t2 (n Int8) ENGINE=MergeTree ORDER BY n; -CREATE TABLE db3.t3 (n Int8) ENGINE=MergeTree ORDER BY n; +CREATE TABLE 01902_db.t (n Int8) ENGINE=MergeTree ORDER BY n; +CREATE TABLE 01902_db1.t1 (n Int8) ENGINE=MergeTree ORDER BY n; +CREATE TABLE 01902_db2.t2 (n Int8) ENGINE=MergeTree ORDER BY n; +CREATE TABLE 01902_db3.t3 (n Int8) ENGINE=MergeTree ORDER BY n; -INSERT INTO db.t SELECT * FROM numbers(10); -INSERT INTO db1.t1 SELECT * FROM numbers(10); -INSERT INTO db2.t2 SELECT * FROM numbers(10); -INSERT INTO db3.t3 SELECT * FROM numbers(10); +INSERT INTO 01902_db.t SELECT * FROM numbers(10); +INSERT INTO 01902_db1.t1 SELECT * FROM numbers(10); +INSERT INTO 01902_db2.t2 SELECT * FROM numbers(10); +INSERT INTO 01902_db3.t3 SELECT * FROM numbers(10); -SELECT 'CREATE TABLE t_merge as db.t ENGINE=Merge(^db, ^t)'; -CREATE TABLE db.t_merge as db.t ENGINE=Merge('^db', '^t'); +SELECT 'CREATE TABLE t_merge as 01902_db.t ENGINE=Merge(^01902_db.*, ^t.*)'; +CREATE TABLE 01902_db.t_merge as 01902_db.t ENGINE=Merge('^01902_db.*', '^t.*'); -SELECT 'SELECT _database, _table, n FROM db.t_merge ORDER BY _database, _table, n'; -SELECT _database, _table, n FROM db.t_merge ORDER BY _database, _table, n; +SELECT 'SELECT _database, _table, n FROM 01902_db.t_merge ORDER BY _database, _table, n'; +SELECT _database, _table, n FROM 01902_db.t_merge ORDER BY _database, _table, n; SELECT 'SELECT _database, _table, n FROM merge(^db, ^t) ORDER BY _database, _table, n'; -SELECT _database, _table, n FROM merge('^db', '^t') ORDER BY _database, _table, n; +SELECT _database, _table, n FROM merge('^01902_db.*', '^t.*') ORDER BY _database, _table, n; -USE db1; +USE 01902_db1; --- evaluated value of expression should not be treat as repr -SELECT 'CREATE TABLE t_merge_1 as db.t ENGINE=Merge(currentDatabase(), ^t)'; -CREATE TABLE db.t_merge_1 as db.t ENGINE=Merge(currentDatabase(), '^t'); +SELECT 'CREATE TABLE t_merge_1 as 01902_db.t ENGINE=Merge(currentDatabase(), ^t.*)'; +CREATE TABLE 01902_db.t_merge_1 as 01902_db.t ENGINE=Merge(currentDatabase(), '^t.*'); -SELECT 'SELECT _database, _table, n FROM db.t_merge_1 ORDER BY _database, _table, n'; -SELECT _database, _table, n FROM db.t_merge_1 ORDER BY 
_database, _table, n;
+SELECT _database, _table, n FROM 01902_db.t_merge_1 ORDER BY _database, _table, n;
 
--- evaluated value of expression should not be treat as repr
-SELECT 'SELECT _database, _table, n FROM merge(currentDatabase(), ^t) ORDER BY _database, _table, n';
-SELECT _database, _table, n FROM merge(currentDatabase(), '^t') ORDER BY _database, _table, n;
+SELECT 'SELECT _database, _table, n FROM merge(currentDatabase(), ^t.*) ORDER BY _database, _table, n';
+SELECT _database, _table, n FROM merge(currentDatabase(), '^t.*') ORDER BY _database, _table, n;
 
-DROP DATABASE db;
-DROP DATABASE db1;
-DROP DATABASE db2;
-DROP DATABASE db3;
+DROP DATABASE 01902_db;
+DROP DATABASE 01902_db1;
+DROP DATABASE 01902_db2;
+DROP DATABASE 01902_db3;

From 323cfdf40c8381611af85d5fd05f20e598bf450a Mon Sep 17 00:00:00 2001
From: feng lv
Date: Mon, 14 Jun 2021 05:35:12 +0000
Subject: [PATCH 175/931] add test to skip list

---
 tests/queries/skip_list.json | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/tests/queries/skip_list.json b/tests/queries/skip_list.json
index 2deefff3057..e6e4bcda1e7 100644
--- a/tests/queries/skip_list.json
+++ b/tests/queries/skip_list.json
@@ -832,6 +832,7 @@
         "01870_modulo_partition_key",
         "01870_buffer_flush", // creates database
         "01889_postgresql_protocol_null_fields",
-        "01889_check_row_policy_defined_using_user_function"
+        "01889_check_row_policy_defined_using_user_function",
+        "01902_table_function_merge_db_repr"
     ]
 }

From 190b4435182b428b5689fdef7f8937694bd47931 Mon Sep 17 00:00:00 2001
From: George
Date: Mon, 14 Jun 2021 18:34:56 +0300
Subject: [PATCH 176/931] First draft

---
 .../reference/quantileexact.md                | 95 +++++++++++++++++++
 .../reference/quantiles.md                    | 95 +++++++++++++++++++
 2 files changed, 190 insertions(+)

diff --git a/docs/en/sql-reference/aggregate-functions/reference/quantileexact.md b/docs/en/sql-reference/aggregate-functions/reference/quantileexact.md
index 06ef7ccfbd3..84cf187cf20 100644
--- a/docs/en/sql-reference/aggregate-functions/reference/quantileexact.md
+++ b/docs/en/sql-reference/aggregate-functions/reference/quantileexact.md
@@ -158,6 +158,101 @@ Result:
 │ 5 │
 └───────────────────────────┘
 ```
+
+## quantileExactExclusive {#quantileexactexclusive}
+
+Exactly computes the [quantile](https://en.wikipedia.org/wiki/Quantile) of a numeric data sequence.
+
+To get exact value, all the passed values are combined into an array, which is then partially sorted. Therefore, the function consumes `O(n)` memory, where `n` is a number of values that were passed. However, for a small number of values, the function is very effective.
+
+When using multiple `quantile*` functions with different levels in a query, the internal states are not combined (that is, the query works less efficiently than it could). In this case, use the [quantilesExactExclusive](../../../sql-reference/aggregate-functions/reference/quantiles.md#quantilesexactexclusive) function.
+
+**Syntax**
+
+``` sql
+quantileExactExclusive(level)(expr)
+```
+
+**Arguments**
+
+- `level` — Level of quantile. Optional parameter. Constant floating-point number from 0 to 1. We recommend using a `level` value in the range of `[0.01, 0.99]`. Default value: 0.5. At `level=0.5` the function calculates [median](https://en.wikipedia.org/wiki/Median).
+- `expr` — Expression over the column values resulting in numeric [data types](../../../sql-reference/data-types/index.md#data_types), [Date](../../../sql-reference/data-types/date.md) or [DateTime](../../../sql-reference/data-types/datetime.md).
+
+**Returned value**
+
+- Quantile of the specified level.
+
+Type:
+
+- [Float64](../../../sql-reference/data-types/float.md) for numeric data type input.
+- [Date](../../../sql-reference/data-types/date.md) if input values have the `Date` type.
+- [DateTime](../../../sql-reference/data-types/datetime.md) if input values have the `DateTime` type.
+
+**Example**
+
+Query:
+
+``` sql
+CREATE TABLE num AS numbers(1000);
+
+SELECT quantileExactExclusive(0.6)(x) FROM (SELECT number AS x FROM num);
+```
+
+Result:
+
+``` text
+┌─quantileExactExclusive(0.6)(x)─┐
+│ 599.6 │
+└────────────────────────────────┘
+```
+
+## quantileExactInclusive {#quantileexactinclusive}
+
+Exactly computes the [quantile](https://en.wikipedia.org/wiki/Quantile) of a numeric data sequence.
+
+To get exact value, all the passed values are combined into an array, which is then partially sorted. Therefore, the function consumes `O(n)` memory, where `n` is a number of values that were passed. However, for a small number of values, the function is very effective.
+
+When using multiple `quantile*` functions with different levels in a query, the internal states are not combined (that is, the query works less efficiently than it could). In this case, use the [quantilesExactInclusive](../../../sql-reference/aggregate-functions/reference/quantiles.md#quantilesexactinclusive) function.
+
+**Syntax**
+
+``` sql
+quantileExactInclusive(level)(expr)
+```
+
+**Arguments**
+
+- `level` — Level of quantile. Optional parameter. Constant floating-point number from 0 to 1. We recommend using a `level` value in the range of `[0.01, 0.99]`. Default value: 0.5. At `level=0.5` the function calculates [median](https://en.wikipedia.org/wiki/Median).
+- `expr` — Expression over the column values resulting in numeric [data types](../../../sql-reference/data-types/index.md#data_types), [Date](../../../sql-reference/data-types/date.md) or [DateTime](../../../sql-reference/data-types/datetime.md).
+
+**Returned value**
+
+- Quantile of the specified level.
+
+Type:
+
+- [Float64](../../../sql-reference/data-types/float.md) for numeric data type input.
+- [Date](../../../sql-reference/data-types/date.md) if input values have the `Date` type.
+- [DateTime](../../../sql-reference/data-types/datetime.md) if input values have the `DateTime` type.
+
+**Example**
+
+Query:
+
+``` sql
+CREATE TABLE num AS numbers(1000);
+
+SELECT quantileExactInclusive(0.6)(x) FROM (SELECT number AS x FROM num);
+```
+
+Result:
+
+``` text
+┌─quantileExactInclusive(0.6)(x)─┐
+│ 599.4 │
+└────────────────────────────────┘
+```
+
 **See Also**
 
 - [median](../../../sql-reference/aggregate-functions/reference/median.md#median)
diff --git a/docs/en/sql-reference/aggregate-functions/reference/quantiles.md b/docs/en/sql-reference/aggregate-functions/reference/quantiles.md
index abce6a9e7f0..72b53f307db 100644
--- a/docs/en/sql-reference/aggregate-functions/reference/quantiles.md
+++ b/docs/en/sql-reference/aggregate-functions/reference/quantiles.md
@@ -7,3 +7,98 @@ toc_priority: 201
 Syntax: `quantiles(level1, level2, …)(x)`
 
 All the quantile functions also have corresponding quantiles functions: `quantiles`, `quantilesDeterministic`, `quantilesTiming`, `quantilesTimingWeighted`, `quantilesExact`, `quantilesExactWeighted`, `quantilesTDigest`. These functions calculate all the quantiles of the listed levels in one pass, and return an array of the resulting values.
+
+## quantilesExactExclusive {#quantilesexactexclusive}
+
+Exactly computes the [quantiles](https://en.wikipedia.org/wiki/Quantile) of a numeric data sequence.
+
+To get exact value, all the passed values are combined into an array, which is then partially sorted. Therefore, the function consumes `O(n)` memory, where `n` is a number of values that were passed. However, for a small number of values, the function is very effective.
+
+Works more efficiently with sets of levels than [quantileExactExclusive](../../../sql-reference/aggregate-functions/reference/quantileexact.md#quantileexactexclusive).
+
+**Syntax**
+
+``` sql
+quantilesExactExclusive(level1, level2, ...)(expr)
+```
+
+**Arguments**
+
+- `level` — Levels of quantiles. Constant floating-point numbers from 0 to 1. We recommend using `level` values in the range of `[0.01, 0.99]`.
+- `expr` — Expression over the column values resulting in numeric [data types](../../../sql-reference/data-types/index.md#data_types), [Date](../../../sql-reference/data-types/date.md) or [DateTime](../../../sql-reference/data-types/datetime.md).
+
+**Returned value**
+
+- [Array](../../../sql-reference/data-types/array.md) of quantiles of the specified levels.
+
+Type of array values:
+
+- [Float64](../../../sql-reference/data-types/float.md) for numeric data type input.
+- [Date](../../../sql-reference/data-types/date.md) if input values have the `Date` type.
+- [DateTime](../../../sql-reference/data-types/datetime.md) if input values have the `DateTime` type.
+
+**Example**
+
+Query:
+
+``` sql
+CREATE TABLE num AS numbers(1000);
+
+SELECT quantilesExactExclusive(0.25, 0.5, 0.75, 0.9, 0.95, 0.99, 0.999)(x) FROM (SELECT number AS x FROM num);
+```
+
+Result:
+
+``` text
+┌─quantilesExactExclusive(0.25, 0.5, 0.75, 0.9, 0.95, 0.99, 0.999)(x)─┐
+│ [249.25,499.5,749.75,899.9,949.9499999999999,989.99,998.999]        │
+└─────────────────────────────────────────────────────────────────────┘
+```
+
+## quantilesExactInclusive {#quantilesexactinclusive}
+
+Exactly computes the [quantiles](https://en.wikipedia.org/wiki/Quantile) of a numeric data sequence.
+
+To get exact value, all the passed values are combined into an array, which is then partially sorted. Therefore, the function consumes `O(n)` memory, where `n` is a number of values that were passed. However, for a small number of values, the function is very effective.
+
+Works more efficiently with sets of levels than [quantileExactInclusive](../../../sql-reference/aggregate-functions/reference/quantileexact.md#quantileexactinclusive).
+
+**Syntax**
+
+``` sql
+quantilesExactInclusive(level1, level2, ...)(expr)
+```
+
+**Arguments**
+
+- `level` — Levels of quantiles. Constant floating-point numbers from 0 to 1. We recommend using `level` values in the range of `[0.01, 0.99]`.
+- `expr` — Expression over the column values resulting in numeric [data types](../../../sql-reference/data-types/index.md#data_types), [Date](../../../sql-reference/data-types/date.md) or [DateTime](../../../sql-reference/data-types/datetime.md).
+
+**Returned value**
+
+- [Array](../../../sql-reference/data-types/array.md) of quantiles of the specified levels.
+
+Type of array values:
+
+- [Float64](../../../sql-reference/data-types/float.md) for numeric data type input.
+- [Date](../../../sql-reference/data-types/date.md) if input values have the `Date` type.
+- [DateTime](../../../sql-reference/data-types/datetime.md) if input values have the `DateTime` type.
+
+
+**Example**
+
+Query:
+
+``` sql
+CREATE TABLE num AS numbers(1000);
+
+SELECT quantilesExactInclusive(0.25, 0.5, 0.75, 0.9, 0.95, 0.99, 0.999)(x) FROM (SELECT number AS x FROM num);
+```
+
+Result:
+
+``` text
+┌─quantilesExactInclusive(0.25, 0.5, 0.75, 0.9, 0.95, 0.99, 0.999)(x)─┐
+│ [249.75,499.5,749.25,899.1,949.05,989.01,998.001]                   │
+└─────────────────────────────────────────────────────────────────────┘
+```

From d08e6ebc8024ba97b323875fcd51532724a20667 Mon Sep 17 00:00:00 2001
From: Mikhail
Date: Tue, 15 Jun 2021 00:16:30 +0300
Subject: [PATCH 177/931] Added setting doc

---
 docs/en/operations/settings/settings.md | 11 +++++++++++
 1 file changed, 11 insertions(+)

diff --git a/docs/en/operations/settings/settings.md b/docs/en/operations/settings/settings.md
index eee27dacceb..4afedb2c32f 100644
--- a/docs/en/operations/settings/settings.md
+++ b/docs/en/operations/settings/settings.md
@@ -1201,6 +1201,17 @@ Enable compilation of queries. By default, 0 (disabled).
 The compilation is only used for part of the query-processing pipeline: for the first stage of aggregation (GROUP BY).
 If this portion of the pipeline was compiled, the query may run faster due to the deployment of short cycles and inlining aggregate function calls. The maximum performance improvement (up to four times faster in rare cases) is seen for queries with multiple simple aggregate functions. Typically, the performance gain is insignificant. In very rare cases, it may slow down query execution.
 
+## compile_expressions {#compile_expressions}
+
+Enables or disables compilation of frequently used simple functions and operators to native code with LLVM at runtime.
+
+Possible values:
+
+- 0 — Disabled.
+- 1 — Enabled.
+
+Default value: `1`.
+
 ## min_count_to_compile {#min-count-to-compile}
 
 How many times to potentially use a compiled chunk of code before running compilation. By default, 3.
From 39e843d9c7a32301ee2a0ed32ca29f6137fb2a92 Mon Sep 17 00:00:00 2001 From: alesapin Date: Tue, 15 Jun 2021 09:45:19 +0300 Subject: [PATCH 178/931] Some code for snapshot deserialization --- src/Coordination/KeeperSnapshotManager.cpp | 14 +- src/Coordination/ZooKeeperSnapshotReader.cpp | 183 +++++++++++++++++++ src/Coordination/ZooKeeperSnapshotReader.h | 23 +++ 3 files changed, 219 insertions(+), 1 deletion(-) create mode 100644 src/Coordination/ZooKeeperSnapshotReader.cpp create mode 100644 src/Coordination/ZooKeeperSnapshotReader.h diff --git a/src/Coordination/KeeperSnapshotManager.cpp b/src/Coordination/KeeperSnapshotManager.cpp index 7520f9b3ba2..6dfc0d787d5 100644 --- a/src/Coordination/KeeperSnapshotManager.cpp +++ b/src/Coordination/KeeperSnapshotManager.cpp @@ -345,11 +345,23 @@ KeeperSnapshotManager::KeeperSnapshotManager(const std::string & snapshots_path_ for (const auto & p : fs::directory_iterator(snapshots_path)) { - if (startsWith(p.path(), "tmp_")) /// Unfinished tmp files + const auto & path = p.path(); + + if (!path.has_filename()) + continue; + + if (startsWith(path.filename(), "tmp_")) /// Unfinished tmp files { std::filesystem::remove(p); continue; } + + /// Not snapshot file + if (!startsWith(path.filename(), "snapshot_")) + { + continue; + } + size_t snapshot_up_to = getSnapshotPathUpToLogIdx(p.path()); existing_snapshots[snapshot_up_to] = p.path(); } diff --git a/src/Coordination/ZooKeeperSnapshotReader.cpp b/src/Coordination/ZooKeeperSnapshotReader.cpp new file mode 100644 index 00000000000..df758f870ee --- /dev/null +++ b/src/Coordination/ZooKeeperSnapshotReader.cpp @@ -0,0 +1,183 @@ +#include +#include +#include +#include +#include +#include +#include + + +namespace DB +{ + +static String parentPath(const String & path) +{ + auto rslash_pos = path.rfind('/'); + if (rslash_pos > 0) + return path.substr(0, rslash_pos); + return "/"; +} + +static std::string getBaseName(const String & path) +{ + size_t basename_start = path.rfind('/'); + return std::string{&path[basename_start + 1], path.length() - basename_start - 1}; +} + +int64_t getZxidFromName(const std::string & filename) +{ + std::filesystem::path path(filename); + std::string extension = path.extension(); + //std::cerr << "Extension:" << extension << std::endl; + char * end; + int64_t zxid = std::strtoul(extension.data() + 1, &end, 16); + return zxid; +} + +void deserializeMagic(ReadBuffer & in) +{ + int32_t magic_header, version; + int64_t dbid; + Coordination::read(magic_header, in); + Coordination::read(version, in); + Coordination::read(dbid, in); + //const char * data = "ZKSN"; + //std::cerr << "Expected Hedader:" << *reinterpret_cast(data) << std::endl; + //std::cerr << "MAGIC HEADER:" << magic_header << std::endl; + //std::cerr << "VERSION:" << version << std::endl; + //std::cerr << "DBID:" << dbid << std::endl; +} + +int64_t deserializeSessionAndTimeout(KeeperStorage & storage, ReadBuffer & in) +{ + int32_t count; + Coordination::read(count, in); + //std::cerr << "Total session and timeout:" << count << std::endl; + int64_t max_session_id = 0; + while (count > 0) + { + int64_t session_id; + int32_t timeout; + + Coordination::read(session_id, in); + Coordination::read(timeout, in); + //std::cerr << "Session id:" << session_id << std::endl; + //std::cerr << "Timeout:" << timeout << std::endl; + storage.addSessionID(session_id, timeout); + max_session_id = std::max(session_id, max_session_id); + count--; + } + std::cerr << "Done deserializing sessions\n"; + return max_session_id; +} + +void 
deserializeACLMap(KeeperStorage & storage, ReadBuffer & in) +{ + int32_t count; + Coordination::read(count, in); + //std::cerr << "ACLs Count:" << count << "\n"; + while (count > 0) + { + int64_t map_index; + Coordination::read(map_index, in); + //std::cerr << "Map index:" << map_index << "\n"; + + Coordination::ACLs acls; + int32_t acls_len; + Coordination::read(acls_len, in); + + //std::cerr << "ACLs len:" << acls_len << "\n"; + while (acls_len > 0) + { + Coordination::ACL acl; + Coordination::read(acl.permissions, in); + Coordination::read(acl.scheme, in); + Coordination::read(acl.id, in); + //std::cerr << "ACL perms:" << acl.permissions << "\n"; + //std::cerr << "ACL scheme:" << acl.scheme << "\n"; + //std::cerr << "ACL id:" << acl.id << "\n"; + acls.push_back(acl); + acls_len--; + } + storage.acl_map.addMapping(map_index, acls); + + count--; + } + std::cerr << "Done deserializing ACLs Total" << count << "\n"; +} + +int64_t deserializeStorageData(KeeperStorage & storage, ReadBuffer & in) +{ + int64_t max_zxid = 0; + std::string path; + Coordination::read(path, in); + //std::cerr << "Read path FIRST length:" << path.length() << std::endl; + //std::cerr << "Read path FIRST data:" << path << std::endl; + size_t count = 0; + while (path != "/") + { + KeeperStorage::Node node{}; + Coordination::read(node.data, in); + Coordination::read(node.acl_id, in); + + /// Deserialize stat + Coordination::read(node.stat.czxid, in); + Coordination::read(node.stat.mzxid, in); + /// For some reason ZXID specified in filename can be smaller + /// then actual zxid from nodes. + max_zxid = std::max(max_zxid, node.stat.mzxid); + + Coordination::read(node.stat.ctime, in); + Coordination::read(node.stat.mtime, in); + Coordination::read(node.stat.version, in); + Coordination::read(node.stat.cversion, in); + Coordination::read(node.stat.aversion, in); + Coordination::read(node.stat.ephemeralOwner, in); + Coordination::read(node.stat.pzxid, in); + if (!path.empty()) + { + node.stat.dataLength = node.data.length(); + node.seq_num = node.stat.cversion; + storage.container.insertOrReplace(path, node); + + if (node.stat.ephemeralOwner != 0) + storage.ephemerals[node.stat.ephemeralOwner].insert(path); + + storage.acl_map.addUsage(node.acl_id); + } + Coordination::read(path, in); + count++; + if (count % 1000 == 0) + std::cerr << "Deserialized nodes:" << count << std::endl; + } + + for (const auto & itr : storage.container) + { + if (itr.key != "/") + { + auto parent_path = parentPath(itr.key); + storage.container.updateValue(parent_path, [&path = itr.key] (KeeperStorage::Node & value) { value.children.insert(getBaseName(path)); value.stat.numChildren++; }); + } + } + + return max_zxid; +} + +void deserializeKeeperStorage(KeeperStorage & storage, const std::string & path) +{ + int64_t zxid = getZxidFromName(path); + //std::cerr << "Got ZXID:" << zxid << std::endl; + + ReadBufferFromFile reader(path); + + deserializeMagic(reader); + auto max_session_id = deserializeSessionAndTimeout(storage, reader); + + storage.session_id_counter = max_session_id; + deserializeACLMap(storage, reader); + + int64_t zxid_from_nodes = deserializeStorageData(storage, reader); + storage.zxid = std::max(zxid, zxid_from_nodes); +} + +} diff --git a/src/Coordination/ZooKeeperSnapshotReader.h b/src/Coordination/ZooKeeperSnapshotReader.h new file mode 100644 index 00000000000..8006f69a6f8 --- /dev/null +++ b/src/Coordination/ZooKeeperSnapshotReader.h @@ -0,0 +1,23 @@ +#pragma once +#include +#include +#include +#include +#include + +namespace DB 
+{ + +int64_t getZxidFromName(const std::string & filename); + +void deserializeMagic(ReadBuffer & in); + +int64_t deserializeSessionAndTimeout(KeeperStorage & storage, ReadBuffer & in); + +void deserializeACLMap(KeeperStorage & storage, ReadBuffer & in); + +int64_t deserializeStorageData(KeeperStorage & storage, ReadBuffer & in); + +void deserializeKeeperStorage(KeeperStorage & storage, const std::string & path); + +} From 40101cbf827ba9a9f17d3687a23090465cf0dfb4 Mon Sep 17 00:00:00 2001 From: Evgeniia Sudarikova Date: Tue, 15 Jun 2021 22:29:11 +0300 Subject: [PATCH 179/931] edited EN, added RU --- docs/en/getting-started/install.md | 6 ++-- .../external-dicts-dict-lifetime.md | 12 +++---- docs/ru/getting-started/install.md | 13 +++++-- .../external-dicts-dict-lifetime.md | 36 +++++++++++++++++-- 4 files changed, 53 insertions(+), 14 deletions(-) diff --git a/docs/en/getting-started/install.md b/docs/en/getting-started/install.md index 4256de49e4a..3de90156a41 100644 --- a/docs/en/getting-started/install.md +++ b/docs/en/getting-started/install.md @@ -94,11 +94,11 @@ For production environments, it’s recommended to use the latest `stable`-versi To run ClickHouse inside Docker follow the guide on [Docker Hub](https://hub.docker.com/r/yandex/clickhouse-server/). Those images use official `deb` packages inside. -### Single Binary +### Single Binary {#from-single-binary} -You can install ClickHouse on Linux using single portable binary from the latest commit of the `master` branch: [https://builds.clickhouse.tech/master/amd64/clickhouse]. +You can install ClickHouse on Linux using a single portable binary from the latest commit of the `master` branch: [https://builds.clickhouse.tech/master/amd64/clickhouse]. -``` +``` bash curl -O 'https://builds.clickhouse.tech/master/amd64/clickhouse' && chmod a+x clickhouse sudo ./clickhouse install ``` diff --git a/docs/en/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-lifetime.md b/docs/en/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-lifetime.md index e339461e428..1d79c9a28bf 100644 --- a/docs/en/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-lifetime.md +++ b/docs/en/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-lifetime.md @@ -57,7 +57,7 @@ In this case, ClickHouse can reload the dictionary earlier if the dictionary con When updating the dictionaries, the ClickHouse server applies different logic depending on the type of [source](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md): - For a text file, it checks the time of modification. If the time differs from the previously recorded time, the dictionary is updated. -- For MySQL source, the time of modification is checked using a `SHOW TABLE STATUS` query (in case of MySQL 8 you need to disable meta-information caching in MySQL by `set global information_schema_stats_expiry=0`. +- For MySQL source, the time of modification is checked using a `SHOW TABLE STATUS` query (in case of MySQL 8 you need to disable meta-information caching in MySQL by `set global information_schema_stats_expiry=0`). - Dictionaries from other sources are updated every time by default. For other sources (ODBC, PostgreSQL, ClickHouse, etc), you can set up a query that will update the dictionaries only if they really changed, rather than each time. To do this, follow these steps: @@ -88,13 +88,13 @@ SOURCE(ODBC(... 
invalidate_query 'SELECT update_time FROM dictionary_source wher
 
 For `Cache`, `ComplexKeyCache`, `SSDCache`, and `SSDComplexKeyCache` dictionaries both synchronous and asynchronous updates are supported.
 
-It is also possible for `Flat`, `Hashed`, `ComplexKeyHashed` dictionaries to only request data that was changed after previous update. If `update_field` is specified in as part of dictionary source configuration value of previous update time in seconds will be added to data request. Depends of source type Executable, HTTP, MySQL, PostgreSQL, ClickHouse, ODBC different logic will be applied to `update_field` before request data from external source.
+It is also possible for `Flat`, `Hashed`, `ComplexKeyHashed` dictionaries to only request data that was changed after the previous update. If `update_field` is specified as part of the dictionary source configuration, the value of the previous update time in seconds will be added to the data request. Depending on the source type (Executable, HTTP, MySQL, PostgreSQL, ClickHouse, ODBC), different logic will be applied to `update_field` before requesting data from an external source.
 
-- If source is HTTP then `update_field` will be added as query parameter with last update time as parameter value.
-- If source is Executable then `update_field` will be added as executable script argument with last update time as argument value.
-- If source is ClickHouse, MySQL, PostgreSQL, ODBC there will be additional part of WHERE, where `update_field` is compared as greater or equal with last update time.
+- If the source is HTTP then `update_field` will be added as a query parameter with the last update time as the parameter value.
+- If the source is Executable then `update_field` will be added as an executable script argument with the last update time as the argument value.
+- If the source is ClickHouse, MySQL, PostgreSQL, ODBC there will be an additional part of WHERE, where `update_field` is compared as greater or equal with the last update time.
 
-If `update_field` option is set. Additional option `update_lag` can be set. Value of `update_lag` option is subtracted from previous update time before request updated data.
+If the `update_field` option is set, the additional option `update_lag` can be set. The value of the `update_lag` option is subtracted from the previous update time before requesting the updated data.
 
 Example of settings:
 
diff --git a/docs/ru/getting-started/install.md b/docs/ru/getting-started/install.md
index d0a54d9043a..c273d64f783 100644
--- a/docs/ru/getting-started/install.md
+++ b/docs/ru/getting-started/install.md
@@ -87,9 +87,18 @@ sudo clickhouse-client-$LATEST_VERSION/install/doinst.sh
 
 Для запуска ClickHouse в Docker нужно следовать инструкции на [Docker Hub](https://hub.docker.com/r/yandex/clickhouse-server/). Внутри образов используются официальные `deb` пакеты.
 
+### Из единого бинарника {#from-single-binary}
+
+Для установки ClickHouse на Linux можно использовать единый переносимый бинарник из последнего коммита ветки `master`: [https://builds.clickhouse.tech/master/amd64/clickhouse].
+
+``` bash
+curl -O 'https://builds.clickhouse.tech/master/amd64/clickhouse' && chmod a+x clickhouse
+sudo ./clickhouse install
+```
+
 ### Из исполняемых файлов для нестандартных окружений {#from-binaries-non-linux}
 
-Для других операционных систем и архитектуры AArch64, сборки ClickHouse предоставляются в виде кросс-компилированного бинарника с последнего коммита ветки master (с задержкой в несколько часов).
+Для других операционных систем и архитектуры AArch64, сборки ClickHouse предоставляются в виде кросс-компилированного бинарника из последнего коммита ветки `master` (с задержкой в несколько часов).
 
 - [macOS](https://builds.clickhouse.tech/master/macos/clickhouse) — `curl -O 'https://builds.clickhouse.tech/master/macos/clickhouse' && chmod a+x ./clickhouse`
 - [AArch64](https://builds.clickhouse.tech/master/aarch64/clickhouse) — `curl -O 'https://builds.clickhouse.tech/master/aarch64/clickhouse' && chmod a+x ./clickhouse`
@@ -97,7 +106,7 @@ sudo clickhouse-client-$LATEST_VERSION/install/doinst.sh
 
 После скачивания можно воспользоваться `clickhouse client` для подключения к серверу или `clickhouse local` для обработки локальных данных.
 
-Чтобы установить ClickHouse в рамках всей системы (с необходимыми конфигурационными файлами, настройками пользователей и т.д.), выполните `sudo ./clickhouse install`. Затем выполните команды `clickhouse start` (чтобы запустить сервер) и `clickhouse-client` (чтобы подключиться к нему).
+Чтобы установить ClickHouse в рамках всей системы (с необходимыми конфигурационными файлами, настройками пользователей и т.д.), выполните `sudo ./clickhouse install`. Затем выполните команды `clickhouse start` (чтобы запустить сервер) и `clickhouse-client` (чтобы подключиться к нему). Данные сборки не рекомендуются для использования в продакшене, так как они недостаточно тщательно протестированы. Также в них присутствуют не все возможности ClickHouse.

diff --git a/docs/ru/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-lifetime.md b/docs/ru/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-lifetime.md
index 1298f05eca0..388d54c21a0 100644
--- a/docs/ru/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-lifetime.md
+++ b/docs/ru/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-lifetime.md
@@ -52,12 +52,12 @@ LIFETIME(MIN 300 MAX 360)
``` -Для словарей `Cache`, `ComplexKeyCache`, `SSDCache` и `SSDComplexKeyCache` поддерживается как синхронное, так и асинхронное обновление. +Для словарей `Cache`, `ComplexKeyCache`, `SSDCache` и `SSDComplexKeyCache` поддерживается как синхронное, так и асинхронное обновление. + +Также словари `Flat`, `Hashed`, `ComplexKeyHashed` могут запрашивать только те данные, которые были изменены после предыдущего обновления. Если `update_field` указана как часть конфигурации источника словаря, к запросу данных будет добавлено время предыдущего обновления в секундах. В зависимости от типа источника (Executable, HTTP, MySQL, PostgreSQL, ClickHouse, ODBC) к `update_field` будет применена соответствующая логика перед запросом данных из внешнего источника. + +- Если источник HTTP, то `update_field` будет добавлена в качестве параметра запроса, а время последнего обновления — в качестве значения параметра. +- Если источник Executable, то `update_field` будет добавлена в качестве аргумента исполняемого скрипта, время последнего обновления — в качестве значения аргумента. +- Если источник ClickHouse, MySQL, PostgreSQL или ODBC, то будет дополнительная часть запроса `WHERE`, где `update_field` будет больше или равна времени последнего обновления. + +Если установлена опция `update_field`, то может быть установлена дополнительная опция `update_lag`. Значение параметра `update_lag` вычитается из времени предыдущего обновления перед запросом обновленных данных. + +Пример настройки: + +``` xml + + ... + + ... + added_time + 15 + + ... + +``` + +или + +``` sql +... +SOURCE(CLICKHOUSE(... update_field 'added_time' update_lag 15)) +... +``` \ No newline at end of file From eef8f367417abdeace8acc232e4a881a9642dd91 Mon Sep 17 00:00:00 2001 From: Evgeniia Sudarikova Date: Tue, 15 Jun 2021 22:54:02 +0300 Subject: [PATCH 180/931] edited link --- docs/en/getting-started/install.md | 2 +- docs/ru/getting-started/install.md | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/en/getting-started/install.md b/docs/en/getting-started/install.md index 3de90156a41..5cec83c3819 100644 --- a/docs/en/getting-started/install.md +++ b/docs/en/getting-started/install.md @@ -96,7 +96,7 @@ To run ClickHouse inside Docker follow the guide on [Docker Hub](https://hub.doc ### Single Binary {#from-single-binary} -You can install ClickHouse on Linux using a single portable binary from the latest commit of the `master` branch: [https://builds.clickhouse.tech/master/amd64/clickhouse]. +You can install ClickHouse on Linux using a single portable binary from the latest commit of the `master` branch: https://builds.clickhouse.tech/master/amd64/clickhouse. ``` bash curl -O 'https://builds.clickhouse.tech/master/amd64/clickhouse' && chmod a+x clickhouse diff --git a/docs/ru/getting-started/install.md b/docs/ru/getting-started/install.md index c273d64f783..2924958ddf4 100644 --- a/docs/ru/getting-started/install.md +++ b/docs/ru/getting-started/install.md @@ -89,7 +89,7 @@ sudo clickhouse-client-$LATEST_VERSION/install/doinst.sh ### Из единого бинарника {#from-single-binary} -Для установки ClickHouse на Linux можно использовать единый переносимый бинарник из последнего коммита ветки `master`: [https://builds.clickhouse.tech/master/amd64/clickhouse]. +Для установки ClickHouse на Linux можно использовать единый переносимый бинарник из последнего коммита ветки `master`: https://builds.clickhouse.tech/master/amd64/clickhouse. 
``` bash
 curl -O 'https://builds.clickhouse.tech/master/amd64/clickhouse' && chmod a+x clickhouse
 sudo ./clickhouse install
 ```

From 8bd7cbd925c6a4895b79854d69da9a89b0130295 Mon Sep 17 00:00:00 2001
From: Anton Popov
Date: Wed, 16 Jun 2021 20:56:20 +0300
Subject: [PATCH 181/931] add compatibility setting for name of columns

---
 src/Core/Settings.h                           |  1 +
 src/Interpreters/ActionsVisitor.cpp           | 28 +++++-----
 src/Interpreters/ExpressionAnalyzer.cpp       | 54 ++++++++++---------
 src/Interpreters/InterpreterSelectQuery.cpp   | 22 ++++----
 .../evaluateConstantExpression.cpp            |  2 +-
 src/Parsers/ASTFunction.cpp                   | 29 ++++++++--
 src/Parsers/ASTFunction.h                     |  4 ++
 src/Parsers/ASTLiteral.cpp                    | 43 ++++++++++++++-
 src/Parsers/ASTLiteral.h                      |  7 +++
 src/Parsers/ASTWithAlias.cpp                  |  8 +++
 src/Parsers/ASTWithAlias.h                    |  3 ++
 src/Parsers/IAST.cpp                          |  8 +++
 src/Parsers/IAST.h                            |  7 +++
 .../configs/legacy.xml                        |  7 +++
 .../test.py                                   |  4 +-
 .../01913_names_of_tuple_literal.reference    |  4 ++
 .../01913_names_of_tuple_literal.sql          |  2 +
 17 files changed, 174 insertions(+), 59 deletions(-)
 create mode 100644 tests/integration/test_distributed_backward_compatability/configs/legacy.xml
 create mode 100644 tests/queries/0_stateless/01913_names_of_tuple_literal.reference
 create mode 100644 tests/queries/0_stateless/01913_names_of_tuple_literal.sql

diff --git a/src/Core/Settings.h b/src/Core/Settings.h
index 2aed174c088..e5122720380 100644
--- a/src/Core/Settings.h
+++ b/src/Core/Settings.h
@@ -464,6 +464,7 @@ class IColumn;
     M(UnionMode, union_default_mode, UnionMode::Unspecified, "Set default Union Mode in SelectWithUnion query. Possible values: empty string, 'ALL', 'DISTINCT'. If empty, query without Union Mode will throw exception.", 0) \
     M(Bool, optimize_aggregators_of_group_by_keys, true, "Eliminates min/max/any/anyLast aggregators of GROUP BY keys in SELECT section", 0) \
     M(Bool, optimize_group_by_function_keys, true, "Eliminates functions of other keys in GROUP BY section", 0) \
+    M(Bool, legacy_column_name_of_tuple_literal, false, "List all names of elements of large tuple literals in their column names instead of a hash. This setting exists only for compatibility reasons. It makes sense to set it to 'true' while doing a rolling update of a cluster from a version lower than 21.7 to a higher one.", 0) \
 \
     M(Bool, query_plan_enable_optimizations, true, "Apply optimizations to query plan", 0) \
     M(UInt64, query_plan_max_optimizations_to_apply, 10000, "Limit the total number of optimizations applied to query plan. If zero, ignored. If limit reached, throw exception", 0) \
diff --git a/src/Interpreters/ActionsVisitor.cpp b/src/Interpreters/ActionsVisitor.cpp
index 066a5d20223..3dc4172db54 100644
--- a/src/Interpreters/ActionsVisitor.cpp
+++ b/src/Interpreters/ActionsVisitor.cpp
@@ -348,7 +348,7 @@ SetPtr makeExplicitSet(
     const ASTPtr & left_arg = args.children.at(0);
     const ASTPtr & right_arg = args.children.at(1);
 
-    auto column_name = left_arg->getColumnName();
+    auto column_name = left_arg->getColumnName(context->getSettingsRef());
     const auto & dag_node = actions.findInIndex(column_name);
     const DataTypePtr & left_arg_type = dag_node.result_type;
 
@@ -638,7 +638,7 @@ std::optional ActionsMatcher::getNameAndTypeFromAST(const ASTPt
 {
     // If the argument is a literal, we generated a unique column name for it.
     // Use it instead of a generic display name.
- auto child_column_name = ast->getColumnName(); + auto child_column_name = ast->getColumnName(data.getContext()->getSettingsRef()); const auto * as_literal = ast->as(); if (as_literal) { @@ -700,7 +700,7 @@ ASTs ActionsMatcher::doUntuple(const ASTFunction * function, ActionsMatcher::Dat func->setAlias(data.getUniqueName("_ut_" + name)); auto function_builder = FunctionFactory::instance().get(func->name, data.getContext()); - data.addFunction(function_builder, {tuple_name_type->name, literal->getColumnName()}, func->getColumnName()); + data.addFunction(function_builder, {tuple_name_type->name, literal->getColumnName(data.getContext()->getSettingsRef())}, func->getColumnName(data.getContext()->getSettingsRef())); columns.push_back(std::move(func)); } @@ -737,7 +737,7 @@ void ActionsMatcher::visit(ASTExpressionList & expression_list, const ASTPtr &, void ActionsMatcher::visit(const ASTIdentifier & identifier, const ASTPtr & ast, Data & data) { - auto column_name = ast->getColumnName(); + auto column_name = ast->getColumnName(data.getContext()->getSettingsRef()); if (data.hasColumn(column_name)) return; @@ -763,7 +763,7 @@ void ActionsMatcher::visit(const ASTIdentifier & identifier, const ASTPtr & ast, void ActionsMatcher::visit(const ASTFunction & node, const ASTPtr & ast, Data & data) { - auto column_name = ast->getColumnName(); + auto column_name = ast->getColumnName(data.getContext()->getSettingsRef()); if (data.hasColumn(column_name)) return; @@ -779,7 +779,7 @@ void ActionsMatcher::visit(const ASTFunction & node, const ASTPtr & ast, Data & ASTPtr arg = node.arguments->children.at(0); visit(arg, data); if (!data.only_consts) - data.addArrayJoin(arg->getColumnName(), column_name); + data.addArrayJoin(arg->getColumnName(data.getContext()->getSettingsRef()), column_name); return; } @@ -801,7 +801,7 @@ void ActionsMatcher::visit(const ASTFunction & node, const ASTPtr & ast, Data & /// We are in the part of the tree that we are not going to compute. You just need to define types. /// Do not subquery and create sets. We replace "in*" function to "in*IgnoreSet". 
- auto argument_name = node.arguments->children.at(0)->getColumnName(); + auto argument_name = node.arguments->children.at(0)->getColumnName(data.getContext()->getSettingsRef()); data.addFunction( FunctionFactory::instance().get(node.name + "IgnoreSet", data.getContext()), @@ -930,7 +930,7 @@ void ActionsMatcher::visit(const ASTFunction & node, const ASTPtr & ast, Data & if (!prepared_set->empty()) column.name = data.getUniqueName("__set"); else - column.name = child->getColumnName(); + column.name = child->getColumnName(data.getContext()->getSettingsRef()); if (!data.hasColumn(column.name)) { @@ -1009,7 +1009,7 @@ void ActionsMatcher::visit(const ASTFunction & node, const ASTPtr & ast, Data & visit(lambda->arguments->children.at(1), data); auto lambda_dag = data.actions_stack.popLevel(); - String result_name = lambda->arguments->children.at(1)->getColumnName(); + String result_name = lambda->arguments->children.at(1)->getColumnName(data.getContext()->getSettingsRef()); lambda_dag->removeUnusedActions(Names(1, result_name)); auto lambda_actions = std::make_shared( @@ -1024,7 +1024,7 @@ void ActionsMatcher::visit(const ASTFunction & node, const ASTPtr & ast, Data & if (findColumn(required_arg, lambda_arguments) == lambda_arguments.end()) captured.push_back(required_arg); - /// We can not name `getColumnName()`, + /// We can not name `getColumnName(data.getContext()->getSettingsRef())`, /// because it does not uniquely define the expression (the types of arguments can be different). String lambda_name = data.getUniqueName("__lambda"); @@ -1054,7 +1054,7 @@ void ActionsMatcher::visit(const ASTFunction & node, const ASTPtr & ast, Data & if (arguments_present) { /// Calculate column name here again, because AST may be changed here (in case of untuple). - data.addFunction(function_builder, argument_names, ast->getColumnName()); + data.addFunction(function_builder, argument_names, ast->getColumnName(data.getContext()->getSettingsRef())); } } @@ -1068,7 +1068,7 @@ void ActionsMatcher::visit(const ASTLiteral & literal, const ASTPtr & /* ast */, // AST here? Anyway, do not modify the column name if it is set already. if (literal.unique_column_name.empty()) { - const auto default_name = literal.getColumnName(); + const auto default_name = literal.getColumnName(data.getContext()->getSettingsRef()); const auto & index = data.actions_stack.getLastActionsIndex(); const auto * existing_column = index.tryGetNode(default_name); @@ -1148,7 +1148,7 @@ SetPtr ActionsMatcher::makeSet(const ASTFunction & node, Data & data, bool no_su } /// We get the stream of blocks for the subquery. Create Set and put it in place of the subquery. - String set_id = right_in_operand->getColumnName(); + String set_id = right_in_operand->getColumnName(data.getContext()->getSettingsRef()); SubqueryForSet & subquery_for_set = data.subqueries_for_sets[set_id]; @@ -1184,7 +1184,7 @@ SetPtr ActionsMatcher::makeSet(const ASTFunction & node, Data & data, bool no_su { const auto & last_actions = data.actions_stack.getLastActions(); const auto & index = data.actions_stack.getLastActionsIndex(); - if (index.contains(left_in_operand->getColumnName())) + if (index.contains(left_in_operand->getColumnName(data.getContext()->getSettingsRef()))) /// An explicit enumeration of values in parentheses. 
return makeExplicitSet(&node, last_actions, false, data.getContext(), data.set_size_limit, data.prepared_sets); else diff --git a/src/Interpreters/ExpressionAnalyzer.cpp b/src/Interpreters/ExpressionAnalyzer.cpp index 216d9fecf5b..1ec88fe9389 100644 --- a/src/Interpreters/ExpressionAnalyzer.cpp +++ b/src/Interpreters/ExpressionAnalyzer.cpp @@ -244,7 +244,7 @@ void ExpressionAnalyzer::analyzeAggregation() ssize_t size = group_asts.size(); getRootActionsNoMakeSet(group_asts[i], true, temp_actions, false); - const auto & column_name = group_asts[i]->getColumnName(); + const auto & column_name = group_asts[i]->getColumnName(getContext()->getSettingsRef()); const auto * node = temp_actions->tryFindInIndex(column_name); if (!node) throw Exception("Unknown identifier (in GROUP BY): " + column_name, ErrorCodes::UNKNOWN_IDENTIFIER); @@ -398,7 +398,7 @@ void SelectQueryExpressionAnalyzer::makeSetsForIndex(const ASTPtr & node) auto temp_actions = std::make_shared(columns_after_join); getRootActions(left_in_operand, true, temp_actions); - if (temp_actions->tryFindInIndex(left_in_operand->getColumnName())) + if (temp_actions->tryFindInIndex(left_in_operand->getColumnName(getContext()->getSettingsRef()))) makeExplicitSet(func, *temp_actions, true, getContext(), settings.size_limits_for_set, prepared_sets); } } @@ -446,7 +446,7 @@ bool ExpressionAnalyzer::makeAggregateDescriptions(ActionsDAGPtr & actions) if (node->arguments) getRootActionsNoMakeSet(node->arguments, true, actions); - aggregate.column_name = node->getColumnName(); + aggregate.column_name = node->getColumnName(getContext()->getSettingsRef()); const ASTs & arguments = node->arguments ? node->arguments->children : ASTs(); aggregate.argument_names.resize(arguments.size()); @@ -454,7 +454,7 @@ bool ExpressionAnalyzer::makeAggregateDescriptions(ActionsDAGPtr & actions) for (size_t i = 0; i < arguments.size(); ++i) { - const std::string & name = arguments[i]->getColumnName(); + const std::string & name = arguments[i]->getColumnName(getContext()->getSettingsRef()); const auto * dag_node = actions->tryFindInIndex(name); if (!dag_node) { @@ -647,7 +647,7 @@ void ExpressionAnalyzer::makeWindowDescriptions(ActionsDAGPtr actions) WindowFunctionDescription window_function; window_function.function_node = function_node; window_function.column_name - = window_function.function_node->getColumnName(); + = window_function.function_node->getColumnName(getContext()->getSettingsRef()); window_function.function_parameters = window_function.function_node->parameters ? 
getAggregateFunctionParametersArray( @@ -666,7 +666,7 @@ void ExpressionAnalyzer::makeWindowDescriptions(ActionsDAGPtr actions) window_function.argument_names.resize(arguments.size()); for (size_t i = 0; i < arguments.size(); ++i) { - const std::string & name = arguments[i]->getColumnName(); + const std::string & name = arguments[i]->getColumnName(getContext()->getSettingsRef()); const auto * node = actions->tryFindInIndex(name); if (!node) @@ -964,7 +964,7 @@ ActionsDAGPtr SelectQueryExpressionAnalyzer::appendPrewhere( auto & step = chain.lastStep(sourceColumns()); getRootActions(select_query->prewhere(), only_types, step.actions()); - String prewhere_column_name = select_query->prewhere()->getColumnName(); + String prewhere_column_name = select_query->prewhere()->getColumnName(getContext()->getSettingsRef()); step.addRequiredOutput(prewhere_column_name); const auto & node = step.actions()->findInIndex(prewhere_column_name); @@ -1061,7 +1061,7 @@ bool SelectQueryExpressionAnalyzer::appendWhere(ExpressionActionsChain & chain, getRootActions(select_query->where(), only_types, step.actions()); - auto where_column_name = select_query->where()->getColumnName(); + auto where_column_name = select_query->where()->getColumnName(getContext()->getSettingsRef()); step.addRequiredOutput(where_column_name); const auto & node = step.actions()->findInIndex(where_column_name); @@ -1086,7 +1086,7 @@ bool SelectQueryExpressionAnalyzer::appendGroupBy(ExpressionActionsChain & chain ASTs asts = select_query->groupBy()->children; for (const auto & ast : asts) { - step.addRequiredOutput(ast->getColumnName()); + step.addRequiredOutput(ast->getColumnName(getContext()->getSettingsRef())); getRootActions(ast, only_types, step.actions()); } @@ -1114,7 +1114,7 @@ void SelectQueryExpressionAnalyzer::appendAggregateFunctionsArguments(Expression for (const auto & name : desc.argument_names) step.addRequiredOutput(name); - /// Collect aggregates removing duplicates by node.getColumnName() + /// Collect aggregates removing duplicates by node.getColumnName(getContext()->getSettingsRef()) /// It's not clear why we recollect aggregates (for query parts) while we're able to use previously collected ones (for entire query) /// @note The original recollection logic didn't remove duplicates. GetAggregatesVisitor::Data data; @@ -1169,7 +1169,7 @@ void SelectQueryExpressionAnalyzer::appendWindowFunctionsArguments( // (2b) Required function argument columns. 
for (const auto & a : f.function_node->arguments->children) { - step.addRequiredOutput(a->getColumnName()); + step.addRequiredOutput(a->getColumnName(getContext()->getSettingsRef())); } } @@ -1191,7 +1191,7 @@ bool SelectQueryExpressionAnalyzer::appendHaving(ExpressionActionsChain & chain, ExpressionActionsChain::Step & step = chain.lastStep(aggregated_columns); getRootActionsForHaving(select_query->having(), only_types, step.actions()); - step.addRequiredOutput(select_query->having()->getColumnName()); + step.addRequiredOutput(select_query->having()->getColumnName(getContext()->getSettingsRef())); return true; } @@ -1215,7 +1215,7 @@ void SelectQueryExpressionAnalyzer::appendSelect(ExpressionActionsChain & chain, continue; } - step.addRequiredOutput(child->getColumnName()); + step.addRequiredOutput(child->getColumnName(getContext()->getSettingsRef())); } } @@ -1243,7 +1243,7 @@ ActionsDAGPtr SelectQueryExpressionAnalyzer::appendOrderBy(ExpressionActionsChai if (!ast || ast->children.empty()) throw Exception("Bad order expression AST", ErrorCodes::UNKNOWN_TYPE_OF_AST_NODE); ASTPtr order_expression = ast->children.at(0); - step.addRequiredOutput(order_expression->getColumnName()); + step.addRequiredOutput(order_expression->getColumnName(getContext()->getSettingsRef())); if (ast->with_fill) with_fill = true; @@ -1293,7 +1293,7 @@ bool SelectQueryExpressionAnalyzer::appendLimitBy(ExpressionActionsChain & chain for (const auto & child : select_query->limitBy()->children) { - auto child_name = child->getColumnName(); + auto child_name = child->getColumnName(getContext()->getSettingsRef()); if (!aggregated_names.count(child_name)) step.addRequiredOutput(std::move(child_name)); } @@ -1309,13 +1309,15 @@ ActionsDAGPtr SelectQueryExpressionAnalyzer::appendProjectResult(ExpressionActio NamesWithAliases result_columns; + const auto & settings = getContext()->getSettingsRef(); + ASTs asts = select_query->select()->children; for (const auto & ast : asts) { - String result_name = ast->getAliasOrColumnName(); + String result_name = ast->getAliasOrColumnName(settings); if (required_result_columns.empty() || required_result_columns.count(result_name)) { - std::string source_name = ast->getColumnName(); + std::string source_name = ast->getColumnName(settings); /* * For temporary columns created by ExpressionAnalyzer for literals, @@ -1357,7 +1359,7 @@ void ExpressionAnalyzer::appendExpression(ExpressionActionsChain & chain, const { ExpressionActionsChain::Step & step = chain.lastStep(sourceColumns()); getRootActions(expr, only_types, step.actions()); - step.addRequiredOutput(expr->getColumnName()); + step.addRequiredOutput(expr->getColumnName(getContext()->getSettingsRef())); } @@ -1374,12 +1376,13 @@ ActionsDAGPtr ExpressionAnalyzer::getActionsDAG(bool add_aliases, bool project_r else asts = ASTs(1, query); + const auto & settings = getContext()->getSettingsRef(); for (const auto & ast : asts) { - std::string name = ast->getColumnName(); + std::string name = ast->getColumnName(settings); std::string alias; if (add_aliases) - alias = ast->getAliasOrColumnName(); + alias = ast->getAliasOrColumnName(settings); else alias = name; result_columns.emplace_back(name, alias); @@ -1514,7 +1517,7 @@ ExpressionAnalysisResult::ExpressionAnalysisResult( if (auto actions = query_analyzer.appendPrewhere(chain, !first_stage, additional_required_columns_after_prewhere)) { - prewhere_info = std::make_shared(actions, query.prewhere()->getColumnName()); + prewhere_info = std::make_shared(actions, 
query.prewhere()->getColumnName(settings)); if (allowEarlyConstantFolding(*prewhere_info->prewhere_actions, settings)) { @@ -1524,7 +1527,7 @@ ExpressionAnalysisResult::ExpressionAnalysisResult( ExpressionActions( prewhere_info->prewhere_actions, ExpressionActionsSettings::fromSettings(context->getSettingsRef())).execute(before_prewhere_sample); - auto & column_elem = before_prewhere_sample.getByName(query.prewhere()->getColumnName()); + auto & column_elem = before_prewhere_sample.getByName(query.prewhere()->getColumnName(settings)); /// If the filter column is a constant, record it. if (column_elem.column) prewhere_constant_filter_description = ConstantFilterDescription(*column_elem.column); @@ -1559,7 +1562,7 @@ ExpressionAnalysisResult::ExpressionAnalysisResult( ExpressionActions( before_where, ExpressionActionsSettings::fromSettings(context->getSettingsRef())).execute(before_where_sample); - auto & column_elem = before_where_sample.getByName(query.where()->getColumnName()); + auto & column_elem = before_where_sample.getByName(query.where()->getColumnName(settings)); /// If the filter column is a constant, record it. if (column_elem.column) where_constant_filter_description = ConstantFilterDescription(*column_elem.column); @@ -1650,7 +1653,7 @@ ExpressionAnalysisResult::ExpressionAnalysisResult( const auto * select_query = query_analyzer.getSelectQuery(); for (const auto & child : select_query->select()->children) { - step.addRequiredOutput(child->getColumnName()); + step.addRequiredOutput(child->getColumnName(settings)); } } @@ -1706,7 +1709,8 @@ void ExpressionAnalysisResult::finalize(const ExpressionActionsChain & chain, si if (hasWhere()) { - where_column_name = query.where()->getColumnName(); + const auto & settings = chain.getContext()->getSettingsRef(); + where_column_name = query.where()->getColumnName(settings); remove_where_filter = chain.steps.at(where_step_num)->required_output.find(where_column_name)->second; } } diff --git a/src/Interpreters/InterpreterSelectQuery.cpp b/src/Interpreters/InterpreterSelectQuery.cpp index 85b9026c642..1b68628a7b2 100644 --- a/src/Interpreters/InterpreterSelectQuery.cpp +++ b/src/Interpreters/InterpreterSelectQuery.cpp @@ -142,7 +142,7 @@ String InterpreterSelectQuery::generateFilterActions(ActionsDAGPtr & actions, co SelectQueryExpressionAnalyzer analyzer(query_ast, syntax_result, context, metadata_snapshot); actions = analyzer.simpleSelectActions(); - auto column_name = expr_list->children.at(0)->getColumnName(); + auto column_name = expr_list->children.at(0)->getColumnName(context->getSettingsRef()); actions->removeUnusedActions(NameSet{column_name}); actions->projectInput(false); @@ -778,7 +778,7 @@ static SortDescription getSortDescription(const ASTSelectQuery & query, ContextP order_descr.reserve(query.orderBy()->children.size()); for (const auto & elem : query.orderBy()->children) { - String name = elem->children.front()->getColumnName(); + String name = elem->children.front()->getColumnName(context->getSettingsRef()); const auto & order_by_elem = elem->as(); std::shared_ptr collator; @@ -797,14 +797,14 @@ static SortDescription getSortDescription(const ASTSelectQuery & query, ContextP return order_descr; } -static SortDescription getSortDescriptionFromGroupBy(const ASTSelectQuery & query) +static SortDescription getSortDescriptionFromGroupBy(const ASTSelectQuery & query, ContextPtr context) { SortDescription order_descr; order_descr.reserve(query.groupBy()->children.size()); for (const auto & elem : query.groupBy()->children) { - 
String name = elem->getColumnName(); + String name = elem->getColumnName(context->getSettingsRef()); order_descr.emplace_back(name, 1, 1); } @@ -1945,13 +1945,13 @@ void InterpreterSelectQuery::executeFetchColumns(QueryProcessingStage::Enum proc { query_info.projection->order_optimizer = std::make_shared( query_info.projection->group_by_elements_actions, - getSortDescriptionFromGroupBy(query), + getSortDescriptionFromGroupBy(query, context), query_info.syntax_analyzer_result); } else { query_info.order_optimizer = std::make_shared( - analysis_result.group_by_elements_actions, getSortDescriptionFromGroupBy(query), query_info.syntax_analyzer_result); + analysis_result.group_by_elements_actions, getSortDescriptionFromGroupBy(query, context), query_info.syntax_analyzer_result); } } @@ -2030,7 +2030,7 @@ void InterpreterSelectQuery::executeFetchColumns(QueryProcessingStage::Enum proc void InterpreterSelectQuery::executeWhere(QueryPlan & query_plan, const ActionsDAGPtr & expression, bool remove_filter) { auto where_step = std::make_unique( - query_plan.getCurrentDataStream(), expression, getSelectQuery().where()->getColumnName(), remove_filter); + query_plan.getCurrentDataStream(), expression, getSelectQuery().where()->getColumnName(context->getSettingsRef()), remove_filter); where_step->setStepDescription("WHERE"); query_plan.addStep(std::move(where_step)); @@ -2077,7 +2077,7 @@ void InterpreterSelectQuery::executeAggregation(QueryPlan & query_plan, const Ac SortDescription group_by_sort_description; if (group_by_info && settings.optimize_aggregation_in_order) - group_by_sort_description = getSortDescriptionFromGroupBy(getSelectQuery()); + group_by_sort_description = getSortDescriptionFromGroupBy(getSelectQuery(), context); else group_by_info = nullptr; @@ -2125,7 +2125,7 @@ void InterpreterSelectQuery::executeMergeAggregated(QueryPlan & query_plan, bool void InterpreterSelectQuery::executeHaving(QueryPlan & query_plan, const ActionsDAGPtr & expression) { auto having_step - = std::make_unique(query_plan.getCurrentDataStream(), expression, getSelectQuery().having()->getColumnName(), false); + = std::make_unique(query_plan.getCurrentDataStream(), expression, getSelectQuery().having()->getColumnName(context->getSettingsRef()), false); having_step->setStepDescription("HAVING"); query_plan.addStep(std::move(having_step)); @@ -2141,7 +2141,7 @@ void InterpreterSelectQuery::executeTotalsAndHaving( query_plan.getCurrentDataStream(), overflow_row, expression, - has_having ? getSelectQuery().having()->getColumnName() : "", + has_having ? getSelectQuery().having()->getColumnName(context->getSettingsRef()) : "", settings.totals_mode, settings.totals_auto_threshold, final); @@ -2458,7 +2458,7 @@ void InterpreterSelectQuery::executeLimitBy(QueryPlan & query_plan) Names columns; for (const auto & elem : query.limitBy()->children) - columns.emplace_back(elem->getColumnName()); + columns.emplace_back(elem->getColumnName(context->getSettingsRef())); UInt64 length = getLimitUIntValue(query.limitByLength(), context, "LIMIT"); UInt64 offset = (query.limitByOffset() ? 
getLimitUIntValue(query.limitByOffset(), context, "OFFSET") : 0); diff --git a/src/Interpreters/evaluateConstantExpression.cpp b/src/Interpreters/evaluateConstantExpression.cpp index 2525f9672ed..90f6ac84afc 100644 --- a/src/Interpreters/evaluateConstantExpression.cpp +++ b/src/Interpreters/evaluateConstantExpression.cpp @@ -39,7 +39,7 @@ std::pair> evaluateConstantExpression(co if (context->getSettingsRef().normalize_function_names) FunctionNameNormalizer().visit(ast.get()); - String name = ast->getColumnName(); + String name = ast->getColumnName(context->getSettingsRef()); auto syntax_result = TreeRewriter(context).analyze(ast, source_columns); ExpressionActionsPtr expr_for_constant_folding = ExpressionAnalyzer(ast, syntax_result, context).getConstActions(); diff --git a/src/Parsers/ASTFunction.cpp b/src/Parsers/ASTFunction.cpp index cc460f600dd..5a666310083 100644 --- a/src/Parsers/ASTFunction.cpp +++ b/src/Parsers/ASTFunction.cpp @@ -22,6 +22,16 @@ namespace ErrorCodes } void ASTFunction::appendColumnNameImpl(WriteBuffer & ostr) const +{ + appendColumnNameImpl(ostr, nullptr); +} + +void ASTFunction::appendColumnNameImpl(WriteBuffer & ostr, const Settings & settings) const +{ + appendColumnNameImpl(ostr, &settings); +} + +void ASTFunction::appendColumnNameImpl(WriteBuffer & ostr, const Settings * settings) const { if (name == "view") throw Exception("Table function view cannot be used as an expression", ErrorCodes::UNEXPECTED_EXPRESSION); @@ -35,19 +45,30 @@ void ASTFunction::appendColumnNameImpl(WriteBuffer & ostr) const { if (it != parameters->children.begin()) writeCString(", ", ostr); - (*it)->appendColumnName(ostr); + + if (settings) + (*it)->appendColumnName(ostr, *settings); + else + (*it)->appendColumnName(ostr); } writeChar(')', ostr); } writeChar('(', ostr); if (arguments) + { for (auto it = arguments->children.begin(); it != arguments->children.end(); ++it) { if (it != arguments->children.begin()) writeCString(", ", ostr); - (*it)->appendColumnName(ostr); + + if (settings) + (*it)->appendColumnName(ostr, *settings); + else + (*it)->appendColumnName(ostr); } + } + writeChar(')', ostr); if (is_window_function) @@ -59,11 +80,11 @@ void ASTFunction::appendColumnNameImpl(WriteBuffer & ostr) const } else { - FormatSettings settings{ostr, true /* one_line */}; + FormatSettings format_settings{ostr, true /* one_line */}; FormatState state; FormatStateStacked frame; writeCString("(", ostr); - window_definition->formatImpl(settings, state, frame); + window_definition->formatImpl(format_settings, state, frame); writeCString(")", ostr); } } diff --git a/src/Parsers/ASTFunction.h b/src/Parsers/ASTFunction.h index 685aaaadd26..8e657afbf6e 100644 --- a/src/Parsers/ASTFunction.h +++ b/src/Parsers/ASTFunction.h @@ -54,6 +54,10 @@ public: protected: void formatImplWithoutAlias(const FormatSettings & settings, FormatState & state, FormatStateStacked frame) const override; void appendColumnNameImpl(WriteBuffer & ostr) const override; + void appendColumnNameImpl(WriteBuffer & ostr, const Settings & settings) const override; + +private: + void appendColumnNameImpl(WriteBuffer & ostr, const Settings * settings) const; }; diff --git a/src/Parsers/ASTLiteral.cpp b/src/Parsers/ASTLiteral.cpp index ed6790499fb..ddad6b45e8d 100644 --- a/src/Parsers/ASTLiteral.cpp +++ b/src/Parsers/ASTLiteral.cpp @@ -16,8 +16,10 @@ void ASTLiteral::updateTreeHashImpl(SipHash & hash_state) const applyVisitor(FieldVisitorHash(hash_state), value); } +namespace +{ + /// Writes 'tuple' word before tuple literals for backward 
compatibility reasons.
-/// TODO: remove, when versions lower than 20.3 will be rarely used.
 class FieldVisitorToColumnName : public StaticVisitor<String>
 {
 public:
@@ -45,14 +47,51 @@ String FieldVisitorToColumnName::operator() (const Tuple & x) const
     return wb.str();
 }
 
+}
+
+void ASTLiteral::appendColumnNameImpl(WriteBuffer & ostr, const Settings & settings) const
+{
+    if (settings.legacy_column_name_of_tuple_literal)
+        appendColumnNameImplLegacy(ostr);
+    else
+        appendColumnNameImpl(ostr);
+}
+
 void ASTLiteral::appendColumnNameImpl(WriteBuffer & ostr) const
 {
     /// 100 - just arbitrary value.
     constexpr auto min_elements_for_hashing = 100;
 
+    /// Special case for very large arrays and tuples. Instead of listing all elements, will use hash of them.
+    /// (Otherwise column name will be too long, that will lead to significant slowdown of expression analysis.)
+    auto type = value.getType();
+    if ((type == Field::Types::Array && value.get<Array>().size() > min_elements_for_hashing)
+        || (type == Field::Types::Tuple && value.get<Tuple>().size() > min_elements_for_hashing))
+    {
+        SipHash hash;
+        applyVisitor(FieldVisitorHash(hash), value);
+        UInt64 low, high;
+        hash.get128(low, high);
+
+        writeCString(type == Field::Types::Array ? "__array_" : "__tuple_", ostr);
+        writeText(low, ostr);
+        ostr.write('_');
+        writeText(high, ostr);
+    }
+    else
+    {
+        String column_name = applyVisitor(FieldVisitorToString(), value);
+        writeString(column_name, ostr);
+    }
+}
+
+void ASTLiteral::appendColumnNameImplLegacy(WriteBuffer & ostr) const
+{
+    /// 100 - just arbitrary value.
+    constexpr auto min_elements_for_hashing = 100;
+
     /// Special case for very large arrays. Instead of listing all elements, will use hash of them.
     /// (Otherwise column name will be too long, that will lead to significant slowdown of expression analysis.)
-    /// TODO: Also do hashing for large tuples, when versions lower than 20.3 will be rarely used, because it breaks backward compatibility.
     auto type = value.getType();
     if ((type == Field::Types::Array && value.get<Array>().size() > min_elements_for_hashing))
     {
diff --git a/src/Parsers/ASTLiteral.h b/src/Parsers/ASTLiteral.h
index 7e472a16bdd..5292ab3c6c0 100644
--- a/src/Parsers/ASTLiteral.h
+++ b/src/Parsers/ASTLiteral.h
@@ -44,6 +44,13 @@ protected:
     void formatImplWithoutAlias(const FormatSettings & settings, FormatState &, FormatStateStacked) const override;
 
     void appendColumnNameImpl(WriteBuffer & ostr) const override;
+    void appendColumnNameImpl(WriteBuffer & ostr, const Settings & settings) const override;
+
+private:
+    /// Legacy version of 'appendColumnNameImpl'. It differs only in how tuple literals are named.
+    /// It is only needed to keep queries with tuple literals in distributed tables
+    /// working during a rolling update.
+ void appendColumnNameImplLegacy(WriteBuffer & ostr) const; }; } diff --git a/src/Parsers/ASTWithAlias.cpp b/src/Parsers/ASTWithAlias.cpp index 88f6568a719..0f5b86763e0 100644 --- a/src/Parsers/ASTWithAlias.cpp +++ b/src/Parsers/ASTWithAlias.cpp @@ -48,6 +48,14 @@ void ASTWithAlias::appendColumnName(WriteBuffer & ostr) const appendColumnNameImpl(ostr); } +void ASTWithAlias::appendColumnName(WriteBuffer & ostr, const Settings & settings) const +{ + if (prefer_alias_to_column_name && !alias.empty()) + writeString(alias, ostr); + else + appendColumnNameImpl(ostr, settings); +} + void ASTWithAlias::appendColumnNameWithoutAlias(WriteBuffer & ostr) const { appendColumnNameImpl(ostr); diff --git a/src/Parsers/ASTWithAlias.h b/src/Parsers/ASTWithAlias.h index ea4419402b0..249be17b74c 100644 --- a/src/Parsers/ASTWithAlias.h +++ b/src/Parsers/ASTWithAlias.h @@ -21,8 +21,10 @@ public: using IAST::IAST; void appendColumnName(WriteBuffer & ostr) const final; + void appendColumnName(WriteBuffer & ostr, const Settings & settings) const final; void appendColumnNameWithoutAlias(WriteBuffer & ostr) const final; String getAliasOrColumnName() const override { return alias.empty() ? getColumnName() : alias; } + String getAliasOrColumnName(const Settings & settings) const override { return alias.empty() ? getColumnName(settings) : alias; } String tryGetAlias() const override { return alias; } void setAlias(const String & to) override { alias = to; } @@ -33,6 +35,7 @@ public: protected: virtual void appendColumnNameImpl(WriteBuffer & ostr) const = 0; + virtual void appendColumnNameImpl(WriteBuffer & ostr, const Settings &) const { appendColumnNameImpl(ostr); } }; /// helper for setting aliases and chaining result to other functions diff --git a/src/Parsers/IAST.cpp b/src/Parsers/IAST.cpp index 3a21d704eb9..0f38fcf98dd 100644 --- a/src/Parsers/IAST.cpp +++ b/src/Parsers/IAST.cpp @@ -109,6 +109,14 @@ String IAST::getColumnName() const } +String IAST::getColumnName(const Settings & settings) const +{ + WriteBufferFromOwnString write_buffer; + appendColumnName(write_buffer, settings); + return write_buffer.str(); +} + + String IAST::getColumnNameWithoutAlias() const { WriteBufferFromOwnString write_buffer; diff --git a/src/Parsers/IAST.h b/src/Parsers/IAST.h index 54e08b2700e..143094e1d7a 100644 --- a/src/Parsers/IAST.h +++ b/src/Parsers/IAST.h @@ -5,6 +5,7 @@ #include #include #include +#include #include #include @@ -41,13 +42,18 @@ public: /** Get the canonical name of the column if the element is a column */ String getColumnName() const; + String getColumnName(const Settings & settings) const; + /** Same as the above but ensure no alias names are used. This is for index analysis */ String getColumnNameWithoutAlias() const; + virtual void appendColumnName(WriteBuffer &) const { throw Exception("Trying to get name of not a column: " + getID(), ErrorCodes::LOGICAL_ERROR); } + virtual void appendColumnName(WriteBuffer & ostr, const Settings &) const { appendColumnName(ostr); } + virtual void appendColumnNameWithoutAlias(WriteBuffer &) const { throw Exception("Trying to get name of not a column: " + getID(), ErrorCodes::LOGICAL_ERROR); @@ -55,6 +61,7 @@ public: /** Get the alias, if any, or the canonical name of the column, if it is not. 
*/ virtual String getAliasOrColumnName() const { return getColumnName(); } + virtual String getAliasOrColumnName(const Settings & settings) const { return getColumnName(settings); } /** Get the alias, if any, or an empty string if it does not exist, or if the element does not support aliases. */ virtual String tryGetAlias() const { return String(); } diff --git a/tests/integration/test_distributed_backward_compatability/configs/legacy.xml b/tests/integration/test_distributed_backward_compatability/configs/legacy.xml new file mode 100644 index 00000000000..01bd56de845 --- /dev/null +++ b/tests/integration/test_distributed_backward_compatability/configs/legacy.xml @@ -0,0 +1,7 @@ + + + + 1 + + + diff --git a/tests/integration/test_distributed_backward_compatability/test.py b/tests/integration/test_distributed_backward_compatability/test.py index eb18019c8df..0d36aaa23f4 100644 --- a/tests/integration/test_distributed_backward_compatability/test.py +++ b/tests/integration/test_distributed_backward_compatability/test.py @@ -5,8 +5,8 @@ from helpers.cluster import ClickHouseCluster cluster = ClickHouseCluster(__file__) node_old = cluster.add_instance('node1', main_configs=['configs/remote_servers.xml'], image='yandex/clickhouse-server', - tag='19.17.8.54', stay_alive=True, with_installed_binary=True) -node_new = cluster.add_instance('node2', main_configs=['configs/remote_servers.xml']) + tag='20.8.9.6', stay_alive=True, with_installed_binary=True) +node_new = cluster.add_instance('node2', main_configs=['configs/remote_servers.xml'], user_configs=['configs/legacy.xml']) @pytest.fixture(scope="module") diff --git a/tests/queries/0_stateless/01913_names_of_tuple_literal.reference b/tests/queries/0_stateless/01913_names_of_tuple_literal.reference new file mode 100644 index 00000000000..a4c05ad853a --- /dev/null +++ b/tests/queries/0_stateless/01913_names_of_tuple_literal.reference @@ -0,0 +1,4 @@ +((1, 2), (2, 3), (3, 4)) +((1,2),(2,3),(3,4)) +tuple(tuple(1, 2), tuple(2, 3), tuple(3, 4)) +((1,2),(2,3),(3,4)) diff --git a/tests/queries/0_stateless/01913_names_of_tuple_literal.sql b/tests/queries/0_stateless/01913_names_of_tuple_literal.sql new file mode 100644 index 00000000000..09de9e8cf37 --- /dev/null +++ b/tests/queries/0_stateless/01913_names_of_tuple_literal.sql @@ -0,0 +1,2 @@ +SELECT ((1, 2), (2, 3), (3, 4)) FORMAT TSVWithNames; +SELECT ((1, 2), (2, 3), (3, 4)) FORMAT TSVWithNames SETTINGS legacy_column_name_of_tuple_literal = 1; From 80cf037f5c416ba1cfdc2e7ce5f9152d428098e9 Mon Sep 17 00:00:00 2001 From: George Date: Thu, 17 Jun 2021 03:42:08 +0300 Subject: [PATCH 182/931] More work --- .../aggregate-functions/reference/quantileexact.md | 14 +++++++++----- .../aggregate-functions/reference/quantiles.md | 8 ++++++-- 2 files changed, 15 insertions(+), 7 deletions(-) diff --git a/docs/en/sql-reference/aggregate-functions/reference/quantileexact.md b/docs/en/sql-reference/aggregate-functions/reference/quantileexact.md index 84cf187cf20..f8cf5c5e70d 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/quantileexact.md +++ b/docs/en/sql-reference/aggregate-functions/reference/quantileexact.md @@ -165,7 +165,9 @@ Exactly computes the [quantile](https://en.wikipedia.org/wiki/Quantile) of a num To get exact value, all the passed values ​​are combined into an array, which is then partially sorted. Therefore, the function consumes `O(n)` memory, where `n` is a number of values that were passed. However, for a small number of values, the function is very effective. 
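For readers new to the R6/R7 terminology that the added lines below use: the labels come from the sample-quantile estimator typology on the linked Wikipedia page. A self-contained sketch of both estimators under that reading (illustrative only; ClickHouse itself computes them over columns with partial sorting, as described above):

#include <algorithm>
#include <cmath>
#include <iostream>
#include <vector>

/// R6 / Excel PERCENTILE.EXC: rank h = level * (n + 1), linearly interpolated.
/// Only well defined for level strictly inside (0, 1); here out-of-range ranks
/// are clamped to the sample, whereas Excel raises an error instead.
double quantileExactExclusiveSketch(std::vector<double> sample, double level)
{
    std::sort(sample.begin(), sample.end());
    const double n = static_cast<double>(sample.size());
    const double h = std::clamp(level * (n + 1), 1.0, n);
    const size_t lo = static_cast<size_t>(h) - 1;               // 0-based lower order statistic
    const size_t hi = std::min(lo + 1, sample.size() - 1);
    return sample[lo] + (h - std::floor(h)) * (sample[hi] - sample[lo]);
}

/// R7 / Excel PERCENTILE.INC: rank h = level * (n - 1) + 1, linearly interpolated.
/// Well defined for any level in the closed interval [0, 1].
double quantileExactInclusiveSketch(std::vector<double> sample, double level)
{
    std::sort(sample.begin(), sample.end());
    const double n = static_cast<double>(sample.size());
    const double h = level * (n - 1) + 1;
    const size_t lo = static_cast<size_t>(h) - 1;
    const size_t hi = std::min(lo + 1, sample.size() - 1);
    return sample[lo] + (h - std::floor(h)) * (sample[hi] - sample[lo]);
}

int main()
{
    const std::vector<double> sample{10, 20, 30, 40};
    std::cout << quantileExactExclusiveSketch(sample, 0.25) << '\n';  // 12.5
    std::cout << quantileExactInclusiveSketch(sample, 0.25) << '\n';  // 17.5
}

For the sample {10, 20, 30, 40} at level 0.25 this prints 12.5 and 17.5, matching what the PERCENTILE.EXC and PERCENTILE.INC spreadsheet functions return for the same inputs.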
-When using multiple `quantile*` functions with different levels in a query, the internal states are not combined (that is, the query works less efficiently than it could). In this case, use the [quantilesExactExclusive](../../../sql-reference/aggregate-functions/reference/quantiles.md#quantilesexactexclusive) function.
+This function is equivalent to the [PERCENTILE.EXC](https://support.microsoft.com/en-us/office/percentile-exc-function-bbaa7204-e9e1-4010-85bf-c31dc5dce4ba) Excel function ([type R6](https://en.wikipedia.org/wiki/Quantile#Estimating_quantiles_from_a_sample)).
+
+When using multiple `quantileExactExclusive` functions with different levels in a query, the internal states are not combined (that is, the query works less efficiently than it could). In this case, use the [quantilesExactExclusive](../../../sql-reference/aggregate-functions/reference/quantiles.md#quantilesexactexclusive) function.
 
 **Syntax**
 
@@ -175,7 +177,7 @@ quantileExactExclusive(level)(expr)
 
 **Arguments**
 
-- `level` — Level of quantile. Optional parameter. Constant floating-point number from 0 to 1. We recommend using a `level` value in the range of `[0.01, 0.99]`. Default value: 0.5. At `level=0.5` the function calculates [median](https://en.wikipedia.org/wiki/Median).
+- `level` — Level of quantile. Optional parameter. Constant floating-point number from 0 to 1. We recommend using a `level` value in the range of `(0, 1)`. Default value: 0.5. At `level=0.5` the function calculates [median](https://en.wikipedia.org/wiki/Median).
 - `expr` — Expression over the column values resulting in numeric [data types](../../../sql-reference/data-types/index.md#data_types), [Date](../../../sql-reference/data-types/date.md) or [DateTime](../../../sql-reference/data-types/datetime.md).
 
 **Returned value**
 
@@ -208,11 +210,13 @@ Result:
 
 ## quantileExactInclusive {#quantileexactinclusive}
 
-Exactly computes the [quantile](https://en.wikipedia.org/wiki/Quantile) of a numeric data sequence.
+Exactly computes the [quantile](https://en.wikipedia.org/wiki/Quantile) of a numeric data sequence.
 
 To get exact value, all the passed values are combined into an array, which is then partially sorted. Therefore, the function consumes `O(n)` memory, where `n` is a number of values that were passed. However, for a small number of values, the function is very effective.
 
-When using multiple `quantile*` functions with different levels in a query, the internal states are not combined (that is, the query works less efficiently than it could). In this case, use the [quantilesExactInclusive](../../../sql-reference/aggregate-functions/reference/quantiles.md#quantilesexactexclusive) function.
+This function is equivalent to the [PERCENTILE.INC](https://support.microsoft.com/en-us/office/percentile-inc-function-680f9539-45eb-410b-9a5e-c1355e5fe2ed) Excel function ([type R7](https://en.wikipedia.org/wiki/Quantile#Estimating_quantiles_from_a_sample)).
+
+When using multiple `quantileExactInclusive` functions with different levels in a query, the internal states are not combined (that is, the query works less efficiently than it could). In this case, use the [quantilesExactInclusive](../../../sql-reference/aggregate-functions/reference/quantiles.md#quantilesexactinclusive) function.
 
 **Syntax**
 
@@ -222,7 +226,7 @@ quantileExactInclusive(level)(expr)
 
 **Arguments**
 
-- `level` — Level of quantile. Optional parameter. Constant floating-point number from 0 to 1. We recommend using a `level` value in the range of `[0.01, 0.99]`. Default value: 0.5.
At `level=0.5` the function calculates [median](https://en.wikipedia.org/wiki/Median).
+- `level` — Level of quantile. Optional parameter. Constant floating-point number from 0 to 1. We recommend using a `level` value in the range of `[0, 1]`. Default value: 0.5. At `level=0.5` the function calculates [median](https://en.wikipedia.org/wiki/Median).
 - `expr` — Expression over the column values resulting in numeric [data types](../../../sql-reference/data-types/index.md#data_types), [Date](../../../sql-reference/data-types/date.md) or [DateTime](../../../sql-reference/data-types/datetime.md).
 
 **Returned value**
diff --git a/docs/en/sql-reference/aggregate-functions/reference/quantiles.md b/docs/en/sql-reference/aggregate-functions/reference/quantiles.md
index 72b53f307db..297f87b6e95 100644
--- a/docs/en/sql-reference/aggregate-functions/reference/quantiles.md
+++ b/docs/en/sql-reference/aggregate-functions/reference/quantiles.md
@@ -14,6 +14,8 @@ Exactly computes the [quantiles](https://en.wikipedia.org/wiki/Quantile) of a nu
 
 To get exact value, all the passed values are combined into an array, which is then partially sorted. Therefore, the function consumes `O(n)` memory, where `n` is a number of values that were passed. However, for a small number of values, the function is very effective.
 
+This function is equivalent to the [PERCENTILE.EXC](https://support.microsoft.com/en-us/office/percentile-exc-function-bbaa7204-e9e1-4010-85bf-c31dc5dce4ba) Excel function ([type R6](https://en.wikipedia.org/wiki/Quantile#Estimating_quantiles_from_a_sample)).
+
 Works more efficiently with sets of levels than [quantilesExactExclusive](../../../sql-reference/aggregate-functions/reference/quantileexact.md#quantileexactexclusive).
 
 **Syntax**
 
quantilesExactExclusive(level1, level2, ...)(expr)
 
 **Arguments**
 
-- `level` — Leveles of quantiles. Constant floating-point numbers from 0 to 1. We recommend using a `level` values in the range of `[0.01, 0.99]`.
+- `level` — Leveles of quantiles. Constant floating-point numbers from 0 to 1. We recommend using a `level` values in the range of `(0, 1)`.
 - `expr` — Expression over the column values resulting in numeric [data types](../../../sql-reference/data-types/index.md#data_types), [Date](../../../sql-reference/data-types/date.md) or [DateTime](../../../sql-reference/data-types/datetime.md).
 
 **Returned value**
 
@@ -61,6 +63,8 @@ Exactly computes the [quantiles](https://en.wikipedia.org/wiki/Quantile) of a nu
 
 To get exact value, all the passed values are combined into an array, which is then partially sorted. Therefore, the function consumes `O(n)` memory, where `n` is a number of values that were passed. However, for a small number of values, the function is very effective.
 
+This function is equivalent to the [PERCENTILE.INC](https://support.microsoft.com/en-us/office/percentile-inc-function-680f9539-45eb-410b-9a5e-c1355e5fe2ed) Excel function ([type R7](https://en.wikipedia.org/wiki/Quantile#Estimating_quantiles_from_a_sample)).
+
 Works more efficiently with sets of levels than [quantilesExactInclusive](../../../sql-reference/aggregate-functions/reference/quantileexact.md#quantilesexactinclusive).
 
 **Syntax**
 
quantilesExactInclusive(level1, level2, ...)(expr)
 
 **Arguments**
 
-- `level` — Leveles of quantiles. Constant floating-point numbers from 0 to 1. We recommend using a `level` values in the range of `[0.01, 0.99]`.
+- `level` — Leveles of quantiles. Constant floating-point numbers from 0 to 1.
We recommend using a `level` values in the range of `[0, 1]`.
 - `expr` — Expression over the column values resulting in numeric [data types](../../../sql-reference/data-types/index.md#data_types), [Date](../../../sql-reference/data-types/date.md) or [DateTime](../../../sql-reference/data-types/datetime.md).
 
 **Returned value**

From 0a959f87076fd5fe8e571ec572a8d313d41dbe59 Mon Sep 17 00:00:00 2001
From: George
Date: Thu, 17 Jun 2021 03:55:24 +0300
Subject: [PATCH 183/931] small fixes

---
 .../aggregate-functions/reference/quantileexact.md            | 4 ++--
 .../sql-reference/aggregate-functions/reference/quantiles.md  | 4 ++--
 2 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/docs/en/sql-reference/aggregate-functions/reference/quantileexact.md b/docs/en/sql-reference/aggregate-functions/reference/quantileexact.md
index f8cf5c5e70d..005d039e7c5 100644
--- a/docs/en/sql-reference/aggregate-functions/reference/quantileexact.md
+++ b/docs/en/sql-reference/aggregate-functions/reference/quantileexact.md
@@ -177,7 +177,7 @@ quantileExactExclusive(level)(expr)
 
 **Arguments**
 
-- `level` — Level of quantile. Optional parameter. Constant floating-point number from 0 to 1. We recommend using a `level` value in the range of `(0, 1)`. Default value: 0.5. At `level=0.5` the function calculates [median](https://en.wikipedia.org/wiki/Median).
+- `level` — Level of quantile. Optional parameter. Constant floating-point number in the range `(0, 1)`. Default value: 0.5. At `level=0.5` the function calculates [median](https://en.wikipedia.org/wiki/Median).
 - `expr` — Expression over the column values resulting in numeric [data types](../../../sql-reference/data-types/index.md#data_types), [Date](../../../sql-reference/data-types/date.md) or [DateTime](../../../sql-reference/data-types/datetime.md).
 
 **Returned value**
@@ -226,7 +226,7 @@ quantileExactInclusive(level)(expr)
 
 **Arguments**
 
-- `level` — Level of quantile. Optional parameter. Constant floating-point number from 0 to 1. We recommend using a `level` value in the range of `[0, 1]`. Default value: 0.5. At `level=0.5` the function calculates [median](https://en.wikipedia.org/wiki/Median).
+- `level` — Level of quantile. Optional parameter. Constant floating-point number in the range `[0, 1]`. Default value: 0.5. At `level=0.5` the function calculates [median](https://en.wikipedia.org/wiki/Median).
 - `expr` — Expression over the column values resulting in numeric [data types](../../../sql-reference/data-types/index.md#data_types), [Date](../../../sql-reference/data-types/date.md) or [DateTime](../../../sql-reference/data-types/datetime.md).
 
 **Returned value**
diff --git a/docs/en/sql-reference/aggregate-functions/reference/quantiles.md b/docs/en/sql-reference/aggregate-functions/reference/quantiles.md
index 297f87b6e95..9723e0ee29c 100644
--- a/docs/en/sql-reference/aggregate-functions/reference/quantiles.md
+++ b/docs/en/sql-reference/aggregate-functions/reference/quantiles.md
@@ -26,7 +26,7 @@ quantilesExactExclusive(level1, level2, ...)(expr)
 
 **Arguments**
 
-- `level` — Leveles of quantiles. Constant floating-point numbers from 0 to 1. We recommend using a `level` values in the range of `(0, 1)`.
+- `level` — Levels of quantiles. Constant floating-point numbers in the range `(0, 1)`.
 - `expr` — Expression over the column values resulting in numeric [data types](../../../sql-reference/data-types/index.md#data_types), [Date](../../../sql-reference/data-types/date.md) or [DateTime](../../../sql-reference/data-types/datetime.md).
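A note on why these "small fixes" settle on different intervals for the two variants: for a sample of size n, the exclusive (R6) rank only stays inside the sample when the level is strictly between 0 and 1, while the inclusive (R7) rank is well defined on the closed interval. In the notation of the sketch earlier in this section:

\[
h_{R6} = p\,(n + 1) \in [1, n] \iff p \in \left[\tfrac{1}{n + 1}, \tfrac{n}{n + 1}\right] \subset (0, 1),
\qquad
h_{R7} = p\,(n - 1) + 1 \in [1, n] \ \text{for all}\ p \in [0, 1].
\]

Hence `(0, 1)` for the exclusive variants and `[0, 1]` for the inclusive ones.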
**Returned value**
@@ -75,7 +75,7 @@ quantilesExactInclusive(level1, level2, ...)(expr)
 
 **Arguments**
 
-- `level` — Leveles of quantiles. Constant floating-point numbers from 0 to 1. We recommend using a `level` values in the range of `[0, 1]`.
+- `level` — Levels of quantiles. Constant floating-point numbers in the range `[0, 1]`.
 - `expr` — Expression over the column values resulting in numeric [data types](../../../sql-reference/data-types/index.md#data_types), [Date](../../../sql-reference/data-types/date.md) or [DateTime](../../../sql-reference/data-types/datetime.md).
 
 **Returned value**

From 2bb1c75fb5dc94a371a90ed5f31b94e4edce7aa4 Mon Sep 17 00:00:00 2001
From: Yuriy Chernyshov
Date: Thu, 17 Jun 2021 10:58:42 +0300
Subject: [PATCH 184/931] Rename & reimport murmurhash sources from smhasher
 repo

---
 contrib/murmurhash/CMakeLists.txt             |   8 +-
 contrib/murmurhash/include/MurmurHash2.h      |  39 ++
 .../include/{murmurhash3.h => MurmurHash3.h}  |  11 +-
 contrib/murmurhash/include/murmurhash2.h      |  31 --
 contrib/murmurhash/src/MurmurHash2.cpp        | 523 ++++++++++++++++++
 .../src/{murmurhash3.cpp => MurmurHash3.cpp}  |  11 +-
 contrib/murmurhash/src/murmurhash2.cpp        | 423 --------------
 src/Functions/FunctionsHashing.h              |   4 +-
 8 files changed, 577 insertions(+), 473 deletions(-)
 create mode 100644 contrib/murmurhash/include/MurmurHash2.h
 rename contrib/murmurhash/include/{murmurhash3.h => MurmurHash3.h} (92%)
 delete mode 100644 contrib/murmurhash/include/murmurhash2.h
 create mode 100644 contrib/murmurhash/src/MurmurHash2.cpp
 rename contrib/murmurhash/src/{murmurhash3.cpp => MurmurHash3.cpp} (97%)
 delete mode 100644 contrib/murmurhash/src/murmurhash2.cpp

diff --git a/contrib/murmurhash/CMakeLists.txt b/contrib/murmurhash/CMakeLists.txt
index c5e467a2d6d..2d9cb3e6382 100644
--- a/contrib/murmurhash/CMakeLists.txt
+++ b/contrib/murmurhash/CMakeLists.txt
@@ -1,7 +1,7 @@
 add_library(murmurhash
-    src/murmurhash2.cpp
-    src/murmurhash3.cpp
-    include/murmurhash2.h
-    include/murmurhash3.h)
+    src/MurmurHash2.cpp
+    src/MurmurHash3.cpp
+    include/MurmurHash2.h
+    include/MurmurHash3.h)
 
 target_include_directories (murmurhash PUBLIC include)
diff --git a/contrib/murmurhash/include/MurmurHash2.h b/contrib/murmurhash/include/MurmurHash2.h
new file mode 100644
index 00000000000..6d289edee29
--- /dev/null
+++ b/contrib/murmurhash/include/MurmurHash2.h
@@ -0,0 +1,39 @@
+//-----------------------------------------------------------------------------
+// MurmurHash2 was written by Austin Appleby, and is placed in the public
+// domain. The author hereby disclaims copyright to this source code.
+ +#ifndef _MURMURHASH2_H_ +#define _MURMURHASH2_H_ + +//----------------------------------------------------------------------------- +// Platform-specific functions and macros + +// Microsoft Visual Studio + +#if defined(_MSC_VER) && (_MSC_VER < 1600) + +typedef unsigned char uint8_t; +typedef unsigned int uint32_t; +typedef unsigned __int64 uint64_t; + +// Other compilers + +#else // defined(_MSC_VER) + +#include + +#endif // !defined(_MSC_VER) + +//----------------------------------------------------------------------------- + +uint32_t MurmurHash2 ( const void * key, int len, uint32_t seed ); +uint64_t MurmurHash64A ( const void * key, int len, uint64_t seed ); +uint64_t MurmurHash64B ( const void * key, int len, uint64_t seed ); +uint32_t MurmurHash2A ( const void * key, int len, uint32_t seed ); +uint32_t MurmurHashNeutral2 ( const void * key, int len, uint32_t seed ); +uint32_t MurmurHashAligned2 ( const void * key, int len, uint32_t seed ); + +//----------------------------------------------------------------------------- + +#endif // _MURMURHASH2_H_ + diff --git a/contrib/murmurhash/include/murmurhash3.h b/contrib/murmurhash/include/MurmurHash3.h similarity index 92% rename from contrib/murmurhash/include/murmurhash3.h rename to contrib/murmurhash/include/MurmurHash3.h index eb16425576a..e1c6d34976c 100644 --- a/contrib/murmurhash/include/murmurhash3.h +++ b/contrib/murmurhash/include/MurmurHash3.h @@ -2,7 +2,8 @@ // MurmurHash3 was written by Austin Appleby, and is placed in the public // domain. The author hereby disclaims copyright to this source code. -#pragma once +#ifndef _MURMURHASH3_H_ +#define _MURMURHASH3_H_ //----------------------------------------------------------------------------- // Platform-specific functions and macros @@ -23,10 +24,6 @@ typedef unsigned __int64 uint64_t; #endif // !defined(_MSC_VER) -#ifdef __cplusplus -extern "C" { -#endif - //----------------------------------------------------------------------------- void MurmurHash3_x86_32 ( const void * key, int len, uint32_t seed, void * out ); @@ -37,6 +34,4 @@ void MurmurHash3_x64_128 ( const void * key, int len, uint32_t seed, void * out //----------------------------------------------------------------------------- -#ifdef __cplusplus -} -#endif +#endif // _MURMURHASH3_H_ diff --git a/contrib/murmurhash/include/murmurhash2.h b/contrib/murmurhash/include/murmurhash2.h deleted file mode 100644 index 0fc95ef1c42..00000000000 --- a/contrib/murmurhash/include/murmurhash2.h +++ /dev/null @@ -1,31 +0,0 @@ -//----------------------------------------------------------------------------- -// MurmurHash2 was written by Austin Appleby, and is placed in the public -// domain. The author hereby disclaims copyright to this source code. 
- -#pragma once - -//----------------------------------------------------------------------------- -// Platform-specific functions and macros - -// Microsoft Visual Studio - -#if defined(_MSC_VER) && (_MSC_VER < 1600) - -typedef unsigned char uint8_t; -typedef unsigned int uint32_t; -typedef unsigned __int64 uint64_t; - -// Other compilers - -#else // defined(_MSC_VER) - -#include - -#endif // !defined(_MSC_VER) - -uint32_t MurmurHash2 (const void * key, int len, uint32_t seed); -uint64_t MurmurHash64A (const void * key, int len, uint64_t seed); -uint64_t MurmurHash64B (const void * key, int len, uint64_t seed); -uint32_t MurmurHash2A (const void * key, int len, uint32_t seed); -uint32_t MurmurHashNeutral2 (const void * key, int len, uint32_t seed); -uint32_t MurmurHashAligned2 (const void * key, int len, uint32_t seed); diff --git a/contrib/murmurhash/src/MurmurHash2.cpp b/contrib/murmurhash/src/MurmurHash2.cpp new file mode 100644 index 00000000000..cd1e53a9b92 --- /dev/null +++ b/contrib/murmurhash/src/MurmurHash2.cpp @@ -0,0 +1,523 @@ +//----------------------------------------------------------------------------- +// MurmurHash2 was written by Austin Appleby, and is placed in the public +// domain. The author hereby disclaims copyright to this source code. + +// Note - This code makes a few assumptions about how your machine behaves - + +// 1. We can read a 4-byte value from any address without crashing +// 2. sizeof(int) == 4 + +// And it has a few limitations - + +// 1. It will not work incrementally. +// 2. It will not produce the same results on little-endian and big-endian +// machines. + +#include "MurmurHash2.h" + +//----------------------------------------------------------------------------- +// Platform-specific functions and macros + +// Microsoft Visual Studio + +#if defined(_MSC_VER) + +#define BIG_CONSTANT(x) (x) + +// Other compilers + +#else // defined(_MSC_VER) + +#define BIG_CONSTANT(x) (x##LLU) + +#endif // !defined(_MSC_VER) + +//----------------------------------------------------------------------------- + +uint32_t MurmurHash2 ( const void * key, int len, uint32_t seed ) +{ + // 'm' and 'r' are mixing constants generated offline. + // They're not really 'magic', they just happen to work well. + + const uint32_t m = 0x5bd1e995; + const int r = 24; + + // Initialize the hash to a 'random' value + + uint32_t h = seed ^ len; + + // Mix 4 bytes at a time into the hash + + const unsigned char * data = (const unsigned char *)key; + + while(len >= 4) + { + uint32_t k = *(uint32_t*)data; + + k *= m; + k ^= k >> r; + k *= m; + + h *= m; + h ^= k; + + data += 4; + len -= 4; + } + + // Handle the last few bytes of the input array + + switch(len) + { + case 3: h ^= data[2] << 16; + case 2: h ^= data[1] << 8; + case 1: h ^= data[0]; + h *= m; + }; + + // Do a few final mixes of the hash to ensure the last few + // bytes are well-incorporated. + + h ^= h >> 13; + h *= m; + h ^= h >> 15; + + return h; +} + +//----------------------------------------------------------------------------- +// MurmurHash2, 64-bit versions, by Austin Appleby + +// The same caveats as 32-bit MurmurHash2 apply here - beware of alignment +// and endian-ness issues if used across multiple platforms. 
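One thing this re-import trades away, visible by comparing the file above with the sources deleted later in the commit: upstream smhasher reads each block with a raw pointer cast such as `*(uint32_t*)data`, while the ClickHouse-modified files being removed had been patched to go through `memcpy` (and `getblock32` in the `MurmurHash3.cpp` hunk further down reverts from `memcpy` back to `p[i]`). A sketch of the two read styles, for comparison only:

#include <cstdint>
#include <cstring>

/// Upstream smhasher style: a type-punned, possibly unaligned load.
/// Fast and fine on x86, but undefined behavior in standard C++ and a
/// potential fault on strict-alignment targets.
inline uint32_t load32_cast(const unsigned char * p)
{
    return *(const uint32_t *)p;
}

/// Style of the deleted ClickHouse variants: memcpy is well defined for any
/// alignment and compiles to the same single load on mainstream compilers.
inline uint32_t load32_memcpy(const unsigned char * p)
{
    uint32_t v;
    std::memcpy(&v, p, sizeof(v));
    return v;
}

int main()
{
    const unsigned char buf[8] = {1, 0, 0, 0, 2, 0, 0, 0};
    return static_cast<int>(load32_memcpy(buf + 4)) - 2;  // 0 on little-endian machines
}

On mainstream compilers both forms generate the same code, so keeping the sources byte-identical to upstream is a reasonable trade; it only matters if the contrib is ever built for a strict-alignment target.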
+ +// 64-bit hash for 64-bit platforms + +uint64_t MurmurHash64A ( const void * key, int len, uint64_t seed ) +{ + const uint64_t m = BIG_CONSTANT(0xc6a4a7935bd1e995); + const int r = 47; + + uint64_t h = seed ^ (len * m); + + const uint64_t * data = (const uint64_t *)key; + const uint64_t * end = data + (len/8); + + while(data != end) + { + uint64_t k = *data++; + + k *= m; + k ^= k >> r; + k *= m; + + h ^= k; + h *= m; + } + + const unsigned char * data2 = (const unsigned char*)data; + + switch(len & 7) + { + case 7: h ^= uint64_t(data2[6]) << 48; + case 6: h ^= uint64_t(data2[5]) << 40; + case 5: h ^= uint64_t(data2[4]) << 32; + case 4: h ^= uint64_t(data2[3]) << 24; + case 3: h ^= uint64_t(data2[2]) << 16; + case 2: h ^= uint64_t(data2[1]) << 8; + case 1: h ^= uint64_t(data2[0]); + h *= m; + }; + + h ^= h >> r; + h *= m; + h ^= h >> r; + + return h; +} + + +// 64-bit hash for 32-bit platforms + +uint64_t MurmurHash64B ( const void * key, int len, uint64_t seed ) +{ + const uint32_t m = 0x5bd1e995; + const int r = 24; + + uint32_t h1 = uint32_t(seed) ^ len; + uint32_t h2 = uint32_t(seed >> 32); + + const uint32_t * data = (const uint32_t *)key; + + while(len >= 8) + { + uint32_t k1 = *data++; + k1 *= m; k1 ^= k1 >> r; k1 *= m; + h1 *= m; h1 ^= k1; + len -= 4; + + uint32_t k2 = *data++; + k2 *= m; k2 ^= k2 >> r; k2 *= m; + h2 *= m; h2 ^= k2; + len -= 4; + } + + if(len >= 4) + { + uint32_t k1 = *data++; + k1 *= m; k1 ^= k1 >> r; k1 *= m; + h1 *= m; h1 ^= k1; + len -= 4; + } + + switch(len) + { + case 3: h2 ^= ((unsigned char*)data)[2] << 16; + case 2: h2 ^= ((unsigned char*)data)[1] << 8; + case 1: h2 ^= ((unsigned char*)data)[0]; + h2 *= m; + }; + + h1 ^= h2 >> 18; h1 *= m; + h2 ^= h1 >> 22; h2 *= m; + h1 ^= h2 >> 17; h1 *= m; + h2 ^= h1 >> 19; h2 *= m; + + uint64_t h = h1; + + h = (h << 32) | h2; + + return h; +} + +//----------------------------------------------------------------------------- +// MurmurHash2A, by Austin Appleby + +// This is a variant of MurmurHash2 modified to use the Merkle-Damgard +// construction. Bulk speed should be identical to Murmur2, small-key speed +// will be 10%-20% slower due to the added overhead at the end of the hash. + +// This variant fixes a minor issue where null keys were more likely to +// collide with each other than expected, and also makes the function +// more amenable to incremental implementations. + +#define mmix(h,k) { k *= m; k ^= k >> r; k *= m; h *= m; h ^= k; } + +uint32_t MurmurHash2A ( const void * key, int len, uint32_t seed ) +{ + const uint32_t m = 0x5bd1e995; + const int r = 24; + uint32_t l = len; + + const unsigned char * data = (const unsigned char *)key; + + uint32_t h = seed; + + while(len >= 4) + { + uint32_t k = *(uint32_t*)data; + + mmix(h,k); + + data += 4; + len -= 4; + } + + uint32_t t = 0; + + switch(len) + { + case 3: t ^= data[2] << 16; + case 2: t ^= data[1] << 8; + case 1: t ^= data[0]; + }; + + mmix(h,t); + mmix(h,l); + + h ^= h >> 13; + h *= m; + h ^= h >> 15; + + return h; +} + +//----------------------------------------------------------------------------- +// CMurmurHash2A, by Austin Appleby + +// This is a sample implementation of MurmurHash2A designed to work +// incrementally. + +// Usage - + +// CMurmurHash2A hasher +// hasher.Begin(seed); +// hasher.Add(data1,size1); +// hasher.Add(data2,size2); +// ... 
+// hasher.Add(dataN,sizeN); +// uint32_t hash = hasher.End() + +class CMurmurHash2A +{ +public: + + void Begin ( uint32_t seed = 0 ) + { + m_hash = seed; + m_tail = 0; + m_count = 0; + m_size = 0; + } + + void Add ( const unsigned char * data, int len ) + { + m_size += len; + + MixTail(data,len); + + while(len >= 4) + { + uint32_t k = *(uint32_t*)data; + + mmix(m_hash,k); + + data += 4; + len -= 4; + } + + MixTail(data,len); + } + + uint32_t End ( void ) + { + mmix(m_hash,m_tail); + mmix(m_hash,m_size); + + m_hash ^= m_hash >> 13; + m_hash *= m; + m_hash ^= m_hash >> 15; + + return m_hash; + } + +private: + + static const uint32_t m = 0x5bd1e995; + static const int r = 24; + + void MixTail ( const unsigned char * & data, int & len ) + { + while( len && ((len<4) || m_count) ) + { + m_tail |= (*data++) << (m_count * 8); + + m_count++; + len--; + + if(m_count == 4) + { + mmix(m_hash,m_tail); + m_tail = 0; + m_count = 0; + } + } + } + + uint32_t m_hash; + uint32_t m_tail; + uint32_t m_count; + uint32_t m_size; +}; + +//----------------------------------------------------------------------------- +// MurmurHashNeutral2, by Austin Appleby + +// Same as MurmurHash2, but endian- and alignment-neutral. +// Half the speed though, alas. + +uint32_t MurmurHashNeutral2 ( const void * key, int len, uint32_t seed ) +{ + const uint32_t m = 0x5bd1e995; + const int r = 24; + + uint32_t h = seed ^ len; + + const unsigned char * data = (const unsigned char *)key; + + while(len >= 4) + { + uint32_t k; + + k = data[0]; + k |= data[1] << 8; + k |= data[2] << 16; + k |= data[3] << 24; + + k *= m; + k ^= k >> r; + k *= m; + + h *= m; + h ^= k; + + data += 4; + len -= 4; + } + + switch(len) + { + case 3: h ^= data[2] << 16; + case 2: h ^= data[1] << 8; + case 1: h ^= data[0]; + h *= m; + }; + + h ^= h >> 13; + h *= m; + h ^= h >> 15; + + return h; +} + +//----------------------------------------------------------------------------- +// MurmurHashAligned2, by Austin Appleby + +// Same algorithm as MurmurHash2, but only does aligned reads - should be safer +// on certain platforms. 
+ +// Performance will be lower than MurmurHash2 + +#define MIX(h,k,m) { k *= m; k ^= k >> r; k *= m; h *= m; h ^= k; } + + +uint32_t MurmurHashAligned2 ( const void * key, int len, uint32_t seed ) +{ + const uint32_t m = 0x5bd1e995; + const int r = 24; + + const unsigned char * data = (const unsigned char *)key; + + uint32_t h = seed ^ len; + + int align = (uint64_t)data & 3; + + if(align && (len >= 4)) + { + // Pre-load the temp registers + + uint32_t t = 0, d = 0; + + switch(align) + { + case 1: t |= data[2] << 16; + case 2: t |= data[1] << 8; + case 3: t |= data[0]; + } + + t <<= (8 * align); + + data += 4-align; + len -= 4-align; + + int sl = 8 * (4-align); + int sr = 8 * align; + + // Mix + + while(len >= 4) + { + d = *(uint32_t *)data; + t = (t >> sr) | (d << sl); + + uint32_t k = t; + + MIX(h,k,m); + + t = d; + + data += 4; + len -= 4; + } + + // Handle leftover data in temp registers + + d = 0; + + if(len >= align) + { + switch(align) + { + case 3: d |= data[2] << 16; + case 2: d |= data[1] << 8; + case 1: d |= data[0]; + } + + uint32_t k = (t >> sr) | (d << sl); + MIX(h,k,m); + + data += align; + len -= align; + + //---------- + // Handle tail bytes + + switch(len) + { + case 3: h ^= data[2] << 16; + case 2: h ^= data[1] << 8; + case 1: h ^= data[0]; + h *= m; + }; + } + else + { + switch(len) + { + case 3: d |= data[2] << 16; + case 2: d |= data[1] << 8; + case 1: d |= data[0]; + case 0: h ^= (t >> sr) | (d << sl); + h *= m; + } + } + + h ^= h >> 13; + h *= m; + h ^= h >> 15; + + return h; + } + else + { + while(len >= 4) + { + uint32_t k = *(uint32_t *)data; + + MIX(h,k,m); + + data += 4; + len -= 4; + } + + //---------- + // Handle tail bytes + + switch(len) + { + case 3: h ^= data[2] << 16; + case 2: h ^= data[1] << 8; + case 1: h ^= data[0]; + h *= m; + }; + + h ^= h >> 13; + h *= m; + h ^= h >> 15; + + return h; + } +} + +//----------------------------------------------------------------------------- + diff --git a/contrib/murmurhash/src/murmurhash3.cpp b/contrib/murmurhash/src/MurmurHash3.cpp similarity index 97% rename from contrib/murmurhash/src/murmurhash3.cpp rename to contrib/murmurhash/src/MurmurHash3.cpp index d6062340d03..aa7982d3eef 100644 --- a/contrib/murmurhash/src/murmurhash3.cpp +++ b/contrib/murmurhash/src/MurmurHash3.cpp @@ -1,3 +1,4 @@ +//----------------------------------------------------------------------------- // MurmurHash3 was written by Austin Appleby, and is placed in the public // domain. The author hereby disclaims copyright to this source code. @@ -6,8 +7,7 @@ // compile and run any of them on any platform, but your performance with the // non-native version will be less than optimal. 
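For consumers the rename changes nothing but include paths, as the `FunctionsHashing.h` hunk at the end of this commit shows. A minimal standalone caller, assuming the renamed headers are on the include path (signatures as declared in `MurmurHash2.h` and `MurmurHash3.h` above):

#include <cstdint>
#include <cstdio>
#include <cstring>

#include "MurmurHash2.h"
#include "MurmurHash3.h"

int main()
{
    const char key[] = "ClickHouse";
    const int len = static_cast<int>(std::strlen(key));

    /// 64-bit MurmurHash2 returns its result directly.
    std::printf("%llu\n", static_cast<unsigned long long>(MurmurHash64A(key, len, 0)));

    /// 32-bit MurmurHash3 writes its result through the out pointer.
    uint32_t out = 0;
    MurmurHash3_x86_32(key, len, 0, &out);
    std::printf("%u\n", out);
    return 0;
}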
-#include "murmurhash3.h" -#include +#include "MurmurHash3.h" //----------------------------------------------------------------------------- // Platform-specific functions and macros @@ -54,9 +54,7 @@ inline uint64_t rotl64 ( uint64_t x, int8_t r ) FORCE_INLINE uint32_t getblock32 ( const uint32_t * p, int i ) { - uint32_t res; - memcpy(&res, p + i, sizeof(res)); - return res; + return p[i]; } FORCE_INLINE uint64_t getblock64 ( const uint64_t * p, int i ) @@ -332,3 +330,6 @@ void MurmurHash3_x64_128 ( const void * key, const int len, ((uint64_t*)out)[0] = h1; ((uint64_t*)out)[1] = h2; } + +//----------------------------------------------------------------------------- + diff --git a/contrib/murmurhash/src/murmurhash2.cpp b/contrib/murmurhash/src/murmurhash2.cpp deleted file mode 100644 index 7b659f50b4c..00000000000 --- a/contrib/murmurhash/src/murmurhash2.cpp +++ /dev/null @@ -1,423 +0,0 @@ -// MurmurHash2 was written by Austin Appleby, and is placed in the public -// domain. The author hereby disclaims copyright to this source code. - -// Note - This code makes a few assumptions about how your machine behaves - - -// 1. We can read a 4-byte value from any address without crashing -// 2. sizeof(int) == 4 - -// And it has a few limitations - - -// 1. It will not work incrementally. -// 2. It will not produce the same results on little-endian and big-endian -// machines. - -#include "murmurhash2.h" -#include - -// Platform-specific functions and macros -// Microsoft Visual Studio - -#if defined(_MSC_VER) - -#define BIG_CONSTANT(x) (x) - -// Other compilers - -#else // defined(_MSC_VER) - -#define BIG_CONSTANT(x) (x##LLU) - -#endif // !defined(_MSC_VER) - - -uint32_t MurmurHash2(const void * key, int len, uint32_t seed) -{ - // 'm' and 'r' are mixing constants generated offline. - // They're not really 'magic', they just happen to work well. - - const uint32_t m = 0x5bd1e995; - const int r = 24; - - // Initialize the hash to a 'random' value - - uint32_t h = seed ^ len; - - // Mix 4 bytes at a time into the hash - - const unsigned char * data = reinterpret_cast(key); - - while (len >= 4) - { - uint32_t k; - memcpy(&k, data, sizeof(k)); - k *= m; - k ^= k >> r; - k *= m; - - h *= m; - h ^= k; - - data += 4; - len -= 4; - } - - // Handle the last few bytes of the input array - - switch (len) - { - case 3: h ^= data[2] << 16; - case 2: h ^= data[1] << 8; - case 1: h ^= data[0]; - h *= m; - }; - - // Do a few final mixes of the hash to ensure the last few - // bytes are well-incorporated. - - h ^= h >> 13; - h *= m; - h ^= h >> 15; - - return h; -} - -// MurmurHash2, 64-bit versions, by Austin Appleby - -// The same caveats as 32-bit MurmurHash2 apply here - beware of alignment -// and endian-ness issues if used across multiple platforms. 
- -// 64-bit hash for 64-bit platforms - -uint64_t MurmurHash64A(const void * key, int len, uint64_t seed) -{ - const uint64_t m = BIG_CONSTANT(0xc6a4a7935bd1e995); - const int r = 47; - - uint64_t h = seed ^ (len * m); - - const uint64_t * data = reinterpret_cast(key); - const uint64_t * end = data + (len/8); - - while (data != end) - { - uint64_t k = *data++; - - k *= m; - k ^= k >> r; - k *= m; - - h ^= k; - h *= m; - } - - const unsigned char * data2 = reinterpret_cast(data); - - switch (len & 7) - { - case 7: h ^= static_cast(data2[6]) << 48; - case 6: h ^= static_cast(data2[5]) << 40; - case 5: h ^= static_cast(data2[4]) << 32; - case 4: h ^= static_cast(data2[3]) << 24; - case 3: h ^= static_cast(data2[2]) << 16; - case 2: h ^= static_cast(data2[1]) << 8; - case 1: h ^= static_cast(data2[0]); - h *= m; - }; - - h ^= h >> r; - h *= m; - h ^= h >> r; - - return h; -} - - -// 64-bit hash for 32-bit platforms - -uint64_t MurmurHash64B(const void * key, int len, uint64_t seed) -{ - const uint32_t m = 0x5bd1e995; - const int r = 24; - - uint32_t h1 = static_cast(seed) ^ len; - uint32_t h2 = static_cast(seed >> 32); - - const uint32_t * data = reinterpret_cast(key); - - while (len >= 8) - { - uint32_t k1 = *data++; - k1 *= m; k1 ^= k1 >> r; k1 *= m; - h1 *= m; h1 ^= k1; - len -= 4; - - uint32_t k2 = *data++; - k2 *= m; k2 ^= k2 >> r; k2 *= m; - h2 *= m; h2 ^= k2; - len -= 4; - } - - if (len >= 4) - { - uint32_t k1 = *data++; - k1 *= m; k1 ^= k1 >> r; k1 *= m; - h1 *= m; h1 ^= k1; - len -= 4; - } - - switch (len) - { - case 3: h2 ^= reinterpret_cast(data)[2] << 16; - case 2: h2 ^= reinterpret_cast(data)[1] << 8; - case 1: h2 ^= reinterpret_cast(data)[0]; - h2 *= m; - }; - - h1 ^= h2 >> 18; h1 *= m; - h2 ^= h1 >> 22; h2 *= m; - h1 ^= h2 >> 17; h1 *= m; - h2 ^= h1 >> 19; h2 *= m; - - uint64_t h = h1; - - h = (h << 32) | h2; - - return h; -} - -// MurmurHash2A, by Austin Appleby - -// This is a variant of MurmurHash2 modified to use the Merkle-Damgard -// construction. Bulk speed should be identical to Murmur2, small-key speed -// will be 10%-20% slower due to the added overhead at the end of the hash. - -// This variant fixes a minor issue where null keys were more likely to -// collide with each other than expected, and also makes the function -// more amenable to incremental implementations. - -#define mmix(h,k) { k *= m; k ^= k >> r; k *= m; h *= m; h ^= k; } - -uint32_t MurmurHash2A(const void * key, int len, uint32_t seed) -{ - const uint32_t m = 0x5bd1e995; - const int r = 24; - uint32_t l = len; - - const unsigned char * data = reinterpret_cast(key); - - uint32_t h = seed; - - while (len >= 4) - { - uint32_t k = *reinterpret_cast(data); - mmix(h,k); - data += 4; - len -= 4; - } - - uint32_t t = 0; - - switch (len) - { - case 3: t ^= data[2] << 16; - case 2: t ^= data[1] << 8; - case 1: t ^= data[0]; - }; - - mmix(h,t); - mmix(h,l); - - h ^= h >> 13; - h *= m; - h ^= h >> 15; - - return h; -} - -// MurmurHashNeutral2, by Austin Appleby - -// Same as MurmurHash2, but endian- and alignment-neutral. -// Half the speed though, alas. 
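// An illustrative usage sketch, assuming the variants above are linked in.
// The point: MurmurHash64A (mixes 8-byte words) and MurmurHash64B (runs two
// 32-bit lanes) are distinct hash functions that disagree on the same input,
// so persisted hashes must be produced and checked with one fixed variant.
#include <cstdint>
#include <cstdio>

uint64_t MurmurHash64A(const void * key, int len, uint64_t seed);
uint64_t MurmurHash64B(const void * key, int len, uint64_t seed);

int main()
{
    const char key[] = "example";
    const uint64_t seed = 0;
    std::printf("64A: %016llx\n", static_cast<unsigned long long>(MurmurHash64A(key, 7, seed)));
    std::printf("64B: %016llx\n", static_cast<unsigned long long>(MurmurHash64B(key, 7, seed)));
    return 0;
}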
-
-uint32_t MurmurHashNeutral2(const void * key, int len, uint32_t seed)
-{
-    const uint32_t m = 0x5bd1e995;
-    const int r = 24;
-
-    uint32_t h = seed ^ len;
-
-    const unsigned char * data = reinterpret_cast<const unsigned char *>(key);
-
-    while (len >= 4)
-    {
-        uint32_t k;
-
-        k  = data[0];
-        k |= data[1] << 8;
-        k |= data[2] << 16;
-        k |= data[3] << 24;
-
-        k *= m;
-        k ^= k >> r;
-        k *= m;
-
-        h *= m;
-        h ^= k;
-
-        data += 4;
-        len -= 4;
-    }
-
-    switch (len)
-    {
-    case 3: h ^= data[2] << 16;
-    case 2: h ^= data[1] << 8;
-    case 1: h ^= data[0];
-            h *= m;
-    };
-
-    h ^= h >> 13;
-    h *= m;
-    h ^= h >> 15;
-
-    return h;
-}
-
-//-----------------------------------------------------------------------------
-// MurmurHashAligned2, by Austin Appleby
-
-// Same algorithm as MurmurHash2, but only does aligned reads - should be safer
-// on certain platforms.
-
-// Performance will be lower than MurmurHash2
-
-#define MIX(h,k,m) { k *= m; k ^= k >> r; k *= m; h *= m; h ^= k; }
-
-
-uint32_t MurmurHashAligned2(const void * key, int len, uint32_t seed)
-{
-    const uint32_t m = 0x5bd1e995;
-    const int r = 24;
-
-    const unsigned char * data = reinterpret_cast<const unsigned char *>(key);
-
-    uint32_t h = seed ^ len;
-
-    int align = reinterpret_cast<uintptr_t>(data) & 3;
-
-    if (align && (len >= 4))
-    {
-        // Pre-load the temp registers
-
-        uint32_t t = 0, d = 0;
-
-        switch (align)
-        {
-        case 1: t |= data[2] << 16;
-        case 2: t |= data[1] << 8;
-        case 3: t |= data[0];
-        }
-
-        t <<= (8 * align);
-
-        data += 4-align;
-        len -= 4-align;
-
-        int sl = 8 * (4-align);
-        int sr = 8 * align;
-
-        // Mix
-
-        while (len >= 4)
-        {
-            d = *(reinterpret_cast<const uint32_t *>(data));
-            t = (t >> sr) | (d << sl);
-
-            uint32_t k = t;
-
-            MIX(h,k,m);
-
-            t = d;
-
-            data += 4;
-            len -= 4;
-        }
-
-        // Handle leftover data in temp registers
-
-        d = 0;
-
-        if (len >= align)
-        {
-            switch (align)
-            {
-            case 3: d |= data[2] << 16;
-            case 2: d |= data[1] << 8;
-            case 1: d |= data[0];
-            }
-
-            uint32_t k = (t >> sr) | (d << sl);
-            MIX(h,k,m);
-
-            data += align;
-            len -= align;
-
-            //----------
-            // Handle tail bytes
-
-            switch (len)
-            {
-            case 3: h ^= data[2] << 16;
-            case 2: h ^= data[1] << 8;
-            case 1: h ^= data[0];
-                    h *= m;
-            };
-        }
-        else
-        {
-            switch (len)
-            {
-            case 3: d |= data[2] << 16;
-            case 2: d |= data[1] << 8;
-            case 1: d |= data[0];
-            case 0: h ^= (t >> sr) | (d << sl);
-                    h *= m;
-            }
-        }
-
-        h ^= h >> 13;
-        h *= m;
-        h ^= h >> 15;
-
-        return h;
-    }
-    else
-    {
-        while (len >= 4)
-        {
-            uint32_t k = *reinterpret_cast<const uint32_t *>(data);
-
-            MIX(h,k,m);
-
-            data += 4;
-            len -= 4;
-        }
-
-        // Handle tail bytes
-
-        switch (len)
-        {
-        case 3: h ^= data[2] << 16;
-        case 2: h ^= data[1] << 8;
-        case 1: h ^= data[0];
-                h *= m;
-        };
-
-        h ^= h >> 13;
-        h *= m;
-        h ^= h >> 15;
-
-        return h;
-    }
-}
diff --git a/src/Functions/FunctionsHashing.h b/src/Functions/FunctionsHashing.h
index 690991759a3..311d89590e0 100644
--- a/src/Functions/FunctionsHashing.h
+++ b/src/Functions/FunctionsHashing.h
@@ -4,8 +4,8 @@
 #include
 #include
 #if !defined(ARCADIA_BUILD)
-#    include <murmurhash2.h>
-#    include <murmurhash3.h>
+#    include <MurmurHash2.h>
+#    include <MurmurHash3.h>
 #    include "config_functions.h"
 #    include "config_core.h"
 #endif

From 37180e084276b6573fc6382ce5b33f6384e2311c Mon Sep 17 00:00:00 2001
From: "Matwey V. Kornilov"
Date: Thu, 17 Jun 2021 13:14:56 +0300
Subject: [PATCH 185/931] Add missed #include <optional>

std::optional<> is used multiple times here.

Signed-off-by: Matwey V. Kornilov
---
 src/Core/NamesAndTypes.h | 1 +
 1 file changed, 1 insertion(+)

diff --git a/src/Core/NamesAndTypes.h b/src/Core/NamesAndTypes.h
index fc86c7f6a1d..0c871f08d8b 100644
--- a/src/Core/NamesAndTypes.h
+++ b/src/Core/NamesAndTypes.h
@@ -2,6 +2,7 @@
 #include
 #include
+#include <optional>
 #include
 #include
 #include

From ff3857fbe73d354bf446d85466b127462212e5b3 Mon Sep 17 00:00:00 2001
From: l1tsolaiki
Date: Thu, 17 Jun 2021 12:24:10 +0300
Subject: [PATCH 186/931] Add root parsing

---
 src/Functions/JSONPath/ASTs/ASTJSONPathRoot.h | 15 ++++++++
 .../JSONPath/Generators/GeneratorJSONPath.h   | 21 ++++++++---
 src/Functions/JSONPath/Generators/IVisitor.h  |  4 +++
 .../JSONPath/Generators/VisitorJSONPathRoot.h | 35 +++++++++++++++++++
 .../JSONPath/Parsers/ParserJSONPathQuery.cpp  | 28 +++++++--------
 .../JSONPath/Parsers/ParserJSONPathRoot.cpp   | 28 +++++++++++++++
 .../JSONPath/Parsers/ParserJSONPathRoot.h     | 18 ++++++++++
 src/Parsers/Lexer.cpp                         |  3 +-
 .../01889_sql_json_functions.reference        |  5 +++
 .../0_stateless/01889_sql_json_functions.sql  |  5 +++
 10 files changed, 143 insertions(+), 19 deletions(-)
 create mode 100644 src/Functions/JSONPath/ASTs/ASTJSONPathRoot.h
 create mode 100644 src/Functions/JSONPath/Generators/VisitorJSONPathRoot.h
 create mode 100644 src/Functions/JSONPath/Parsers/ParserJSONPathRoot.cpp
 create mode 100644 src/Functions/JSONPath/Parsers/ParserJSONPathRoot.h

diff --git a/src/Functions/JSONPath/ASTs/ASTJSONPathRoot.h b/src/Functions/JSONPath/ASTs/ASTJSONPathRoot.h
new file mode 100644
index 00000000000..1c6469c5b75
--- /dev/null
+++ b/src/Functions/JSONPath/ASTs/ASTJSONPathRoot.h
@@ -0,0 +1,15 @@
+#pragma once
+
+#include <Parsers/IAST.h>
+
+namespace DB
+{
+class ASTJSONPathRoot : public IAST
+{
+public:
+    String getID(char) const override { return "ASTJSONPathRoot"; }
+
+    ASTPtr clone() const override { return std::make_shared<ASTJSONPathRoot>(*this); }
+};
+
+}
diff --git a/src/Functions/JSONPath/Generators/GeneratorJSONPath.h b/src/Functions/JSONPath/Generators/GeneratorJSONPath.h
index 39f385fafc2..2583ef8c921 100644
--- a/src/Functions/JSONPath/Generators/GeneratorJSONPath.h
+++ b/src/Functions/JSONPath/Generators/GeneratorJSONPath.h
@@ -3,6 +3,7 @@
 #include
 #include
 #include
+#include <Functions/JSONPath/Generators/VisitorJSONPathRoot.h>
 #include
 #include

@@ -19,6 +20,10 @@ template <typename JSONParser>
 class GeneratorJSONPath : public IGenerator<JSONParser>
 {
 public:
+    /**
+     * Traverses the children ASTs of ASTJSONPathQuery and creates a vector of corresponding visitors
+     * @param query_ptr_ pointer to ASTJSONPathQuery
+     */
     GeneratorJSONPath(ASTPtr query_ptr_)
    {
        query_ptr = query_ptr_;
@@ -31,13 +36,15 @@ public:

        for (auto child_ast : query->children)
        {
-            if (typeid_cast<ASTJSONPathMemberAccess *>(child_ast.get()))
+            if (typeid_cast<ASTJSONPathRoot *>(child_ast.get())) {
+                visitors.push_back(std::make_shared<VisitorJSONPathRoot<JSONParser>>(child_ast));
+            }
+            else if (typeid_cast<ASTJSONPathMemberAccess *>(child_ast.get())) {
                visitors.push_back(std::make_shared<VisitorJSONPathMemberAccess<JSONParser>>(child_ast));
            }
-            else if (child_ast->getID() == "ASTJSONPathRange")
+            else if (typeid_cast<ASTJSONPathRange *>(child_ast.get()))
            {
                visitors.push_back(std::make_shared<VisitorJSONPathRange<JSONParser>>(child_ast));
            }
        }

@@ -46,7 +53,13 @@ public:
    const char * getName() const override { return "GeneratorJSONPath"; }

    /**
-     * The only generator which is called from JSONPath functions.
+     * This method exposes the API for traversing all paths described by JSONPath
+     * to the SQL/JSON functions.
+     * Expected usage is to call this method iteratively from inside the function
+     * and to execute custom logic with the received element, or to handle an error.
+     * On each such call getNextItem will yield the next item into the element argument
+     * and modify its internal state to prepare for the next call.
+ * * @param element root of JSON document * @return is the generator exhausted */ diff --git a/src/Functions/JSONPath/Generators/IVisitor.h b/src/Functions/JSONPath/Generators/IVisitor.h index d9917087cb0..1461b842829 100644 --- a/src/Functions/JSONPath/Generators/IVisitor.h +++ b/src/Functions/JSONPath/Generators/IVisitor.h @@ -36,6 +36,10 @@ public: virtual ~IVisitor() = default; private: + /** + * This variable is for detecting whether a visitor's next visit will be able + * to yield a new item. + */ bool is_exhausted = false; }; diff --git a/src/Functions/JSONPath/Generators/VisitorJSONPathRoot.h b/src/Functions/JSONPath/Generators/VisitorJSONPathRoot.h new file mode 100644 index 00000000000..d8b88ce0255 --- /dev/null +++ b/src/Functions/JSONPath/Generators/VisitorJSONPathRoot.h @@ -0,0 +1,35 @@ +#pragma once + +#include +#include +#include + +namespace DB +{ +template +class VisitorJSONPathRoot : public IVisitor +{ +public: + VisitorJSONPathRoot(ASTPtr) { } + + const char * getName() const override { return "VisitorJSONPathRoot"; } + + VisitorStatus apply(typename JSONParser::Element & /*element*/) const override + { + /// No-op on document, since we are already passed document's root + return VisitorStatus::Ok; + } + + VisitorStatus visit(typename JSONParser::Element & element) override + { + apply(element); + this->setExhausted(true); + return VisitorStatus::Ok; + } + + void reinitialize() override { this->setExhausted(false); } + + void updateState() override { } +}; + +} diff --git a/src/Functions/JSONPath/Parsers/ParserJSONPathQuery.cpp b/src/Functions/JSONPath/Parsers/ParserJSONPathQuery.cpp index 0f171c0a82c..0ab09733890 100644 --- a/src/Functions/JSONPath/Parsers/ParserJSONPathQuery.cpp +++ b/src/Functions/JSONPath/Parsers/ParserJSONPathQuery.cpp @@ -1,6 +1,7 @@ #include -#include #include +#include +#include #include namespace DB @@ -18,27 +19,26 @@ bool ParserJSONPathQuery::parseImpl(Pos & pos, ASTPtr & query, Expected & expect query = std::make_shared(); ParserJSONPathMemberAccess parser_jsonpath_member_access; ParserJSONPathRange parser_jsonpath_range; + ParserJSONPathRoot parser_jsonpath_root; - if (pos->type != TokenType::DollarSign) - { + ASTPtr path_root; + if (!parser_jsonpath_root.parse(pos, path_root, expected)) { return false; } - ++pos; + query->children.push_back(path_root); - bool res = false; - ASTPtr subquery; - while (parser_jsonpath_member_access.parse(pos, subquery, expected) - || parser_jsonpath_range.parse(pos, subquery, expected)) + ASTPtr accessor; + while (parser_jsonpath_member_access.parse(pos, accessor, expected) + || parser_jsonpath_range.parse(pos, accessor, expected)) { - if (subquery) + if (accessor) { - query->children.push_back(subquery); - subquery = nullptr; + query->children.push_back(accessor); + accessor = nullptr; } - res = true; } - /// if we had at least one success and no fails - return res && pos->type == TokenType::EndOfStream; + /// parsing was successful if we reached the end of query by this point + return pos->type == TokenType::EndOfStream; } } diff --git a/src/Functions/JSONPath/Parsers/ParserJSONPathRoot.cpp b/src/Functions/JSONPath/Parsers/ParserJSONPathRoot.cpp new file mode 100644 index 00000000000..a67d284e40c --- /dev/null +++ b/src/Functions/JSONPath/Parsers/ParserJSONPathRoot.cpp @@ -0,0 +1,28 @@ +#include +#include + +#include + +namespace DB +{ +/** + * + * @param pos token iterator + * @param node node of ASTJSONPathRoot + * @param expected stuff for logging + * @return was parse successful + */ +bool 
ParserJSONPathRoot::parseImpl(Pos & pos, ASTPtr & node, Expected & expected)
+{
+    if (pos->type != TokenType::DollarSign)
+    {
+        expected.add(pos, "dollar sign (start of jsonpath)");
+        return false;
+    }
+    auto path_root = std::make_shared<ASTJSONPathRoot>();
+    node = path_root;
+    ++pos;
+    return true;
+}
+
+}
diff --git a/src/Functions/JSONPath/Parsers/ParserJSONPathRoot.h b/src/Functions/JSONPath/Parsers/ParserJSONPathRoot.h
new file mode 100644
index 00000000000..59fed28d63e
--- /dev/null
+++ b/src/Functions/JSONPath/Parsers/ParserJSONPathRoot.h
@@ -0,0 +1,18 @@
+#pragma once
+
+#include <Parsers/IParserBase.h>
+
+
+namespace DB
+{
+class ParserJSONPathRoot : public IParserBase
+{
+private:
+    const char * getName() const override { return "ParserJSONPathRoot"; }
+    bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected) override;
+
+public:
+    explicit ParserJSONPathRoot() = default;
+};
+
+}
diff --git a/src/Parsers/Lexer.cpp b/src/Parsers/Lexer.cpp
index 4de72ebc2fd..be956ee705a 100644
--- a/src/Parsers/Lexer.cpp
+++ b/src/Parsers/Lexer.cpp
@@ -338,8 +338,9 @@ Token Lexer::nextTokenImpl()
            }

        default:
-            if (*pos == '$' && pos + 1 < end && !isWordCharASCII(pos[1]))
+            if (*pos == '$' && ((pos + 1 < end && !isWordCharASCII(pos[1])) || pos + 1 == end))
            {
+                /// Capture standalone dollar sign
                return Token(TokenType::DollarSign, token_begin, ++pos);
            }
            if (isWordCharASCII(*pos) || *pos == '$')
diff --git a/tests/queries/0_stateless/01889_sql_json_functions.reference b/tests/queries/0_stateless/01889_sql_json_functions.reference
index 7457aca18ed..e38058ffc50 100644
--- a/tests/queries/0_stateless/01889_sql_json_functions.reference
+++ b/tests/queries/0_stateless/01889_sql_json_functions.reference
@@ -1,4 +1,5 @@
 --JSON_VALUE--
+
 1
 1.2
 true
@@ -9,6 +10,7 @@ null

 --JSON_QUERY--
+[{"hello":1}]
 [1]
 [1.2]
 [true]
@@ -20,6 +22,9 @@ null

 --JSON_EXISTS--
 1
+0
+1
+1
 1
 0
 1
diff --git a/tests/queries/0_stateless/01889_sql_json_functions.sql b/tests/queries/0_stateless/01889_sql_json_functions.sql
index 6d67b73f305..a1749b3be24 100644
--- a/tests/queries/0_stateless/01889_sql_json_functions.sql
+++ b/tests/queries/0_stateless/01889_sql_json_functions.sql
@@ -1,4 +1,5 @@
 SELECT '--JSON_VALUE--';
+SELECT JSON_VALUE('$', '{"hello":1}'); -- root is a complex object => default value (empty string)
 SELECT JSON_VALUE('$.hello', '{"hello":1}');
 SELECT JSON_VALUE('$.hello', '{"hello":1.2}');
 SELECT JSON_VALUE('$.hello', '{"hello":true}');
@@ -10,6 +11,7 @@ SELECT JSON_VALUE('$.hello', '{hello:world}'); -- invalid json => default value
 SELECT JSON_VALUE('$.hello', '');

 SELECT '--JSON_QUERY--';
+SELECT JSON_QUERY('$', '{"hello":1}');
 SELECT JSON_QUERY('$.hello', '{"hello":1}');
 SELECT JSON_QUERY('$.hello', '{"hello":1.2}');
 SELECT JSON_QUERY('$.hello', '{"hello":true}');
@@ -21,6 +23,9 @@ SELECT JSON_QUERY('$.hello', '{hello:{"world":"!"}}}'); -- invalid json => defau
 SELECT JSON_QUERY('$.hello', '');

 SELECT '--JSON_EXISTS--';
+SELECT JSON_EXISTS('$', '{"hello":1}');
+SELECT JSON_EXISTS('$', '');
+SELECT JSON_EXISTS('$', '{}');
 SELECT JSON_EXISTS('$.hello', '{"hello":1}');
 SELECT JSON_EXISTS('$.world', '{"hello":1,"world":2}');
 SELECT JSON_EXISTS('$.world', '{"hello":{"world":1}}');

From aa1ac2a5061745180e008bfa48f8e69afcb22caa Mon Sep 17 00:00:00 2001
From: "Matwey V. Kornilov"
Date: Thu, 17 Jun 2021 14:41:28 +0300
Subject: [PATCH 187/931] Add missed #include <optional>

This is required for std::optional<std::string> language;

Signed-off-by: Matwey V.
Kornilov --- src/Columns/Collator.h | 1 + 1 file changed, 1 insertion(+) diff --git a/src/Columns/Collator.h b/src/Columns/Collator.h index df60aaba434..93748f21f7f 100644 --- a/src/Columns/Collator.h +++ b/src/Columns/Collator.h @@ -1,6 +1,7 @@ #pragma once #include +#include #include #include #include From f44d6792b07733c39980f4b0ff633610ccd4e3fa Mon Sep 17 00:00:00 2001 From: l1tsolaiki Date: Thu, 17 Jun 2021 14:48:05 +0300 Subject: [PATCH 188/931] Fix ext::range --- src/Functions/FunctionSQLJSON.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/Functions/FunctionSQLJSON.h b/src/Functions/FunctionSQLJSON.h index b13b03cb089..b605645499e 100644 --- a/src/Functions/FunctionSQLJSON.h +++ b/src/Functions/FunctionSQLJSON.h @@ -18,7 +18,7 @@ #include #include #include -#include +#include #if !defined(ARCADIA_BUILD) # include "config_functions.h" @@ -133,7 +133,7 @@ public: /// Parse JSON for every row Impl impl; - for (const auto i : ext::range(0, input_rows_count)) + for (const auto i : collections::range(0, input_rows_count)) { std::string_view json{ reinterpret_cast(&chars_json[offsets_json[i - 1]]), offsets_json[i] - offsets_json[i - 1] - 1}; From d513d14b2c3367111f8784656c720cf2bc737ed1 Mon Sep 17 00:00:00 2001 From: alesapin Date: Thu, 17 Jun 2021 16:29:11 +0300 Subject: [PATCH 189/931] Add some functions for data conversion --- src/Coordination/ZooKeeperDataReader.cpp | 516 +++++++++++++++++++ src/Coordination/ZooKeeperDataReader.h | 17 + src/Coordination/ZooKeeperSnapshotReader.cpp | 183 ------- src/Coordination/ZooKeeperSnapshotReader.h | 23 - 4 files changed, 533 insertions(+), 206 deletions(-) create mode 100644 src/Coordination/ZooKeeperDataReader.cpp create mode 100644 src/Coordination/ZooKeeperDataReader.h delete mode 100644 src/Coordination/ZooKeeperSnapshotReader.cpp delete mode 100644 src/Coordination/ZooKeeperSnapshotReader.h diff --git a/src/Coordination/ZooKeeperDataReader.cpp b/src/Coordination/ZooKeeperDataReader.cpp new file mode 100644 index 00000000000..60882993c0f --- /dev/null +++ b/src/Coordination/ZooKeeperDataReader.cpp @@ -0,0 +1,516 @@ +#include +#include +#include +#include +#include +#include +#include + + +namespace DB +{ + +namespace ErrorCodes +{ + extern const int NOT_IMPLEMENTED; + extern const int CORRUPTED_DATA; +} + +static String parentPath(const String & path) +{ + auto rslash_pos = path.rfind('/'); + if (rslash_pos > 0) + return path.substr(0, rslash_pos); + return "/"; +} + +static std::string getBaseName(const String & path) +{ + size_t basename_start = path.rfind('/'); + return std::string{&path[basename_start + 1], path.length() - basename_start - 1}; +} + +int64_t getZxidFromName(const std::string & filename) +{ + std::filesystem::path path(filename); + std::string extension = path.extension(); + char * end; + int64_t zxid = std::strtoul(extension.data() + 1, &end, 16); + return zxid; +} + +void deserializeSnapshotMagic(ReadBuffer & in) +{ + int32_t magic_header, version; + int64_t dbid; + Coordination::read(magic_header, in); + Coordination::read(version, in); + Coordination::read(dbid, in); + static constexpr int32_t SNP_HEADER = 1514885966; /// "ZKSN" + if (magic_header != SNP_HEADER) + throw Exception(ErrorCodes::CORRUPTED_DATA ,"Incorrect magic header in file, expected {}, got {}", SNP_HEADER, magic_header); +} + +int64_t deserializeSessionAndTimeout(KeeperStorage & storage, ReadBuffer & in) +{ + int32_t count; + Coordination::read(count, in); + int64_t max_session_id = 0; + while (count > 0) + { + int64_t 
session_id; + int32_t timeout; + + Coordination::read(session_id, in); + Coordination::read(timeout, in); + storage.addSessionID(session_id, timeout); + max_session_id = std::max(session_id, max_session_id); + count--; + } + return max_session_id; +} + +void deserializeACLMap(KeeperStorage & storage, ReadBuffer & in) +{ + int32_t count; + Coordination::read(count, in); + while (count > 0) + { + int64_t map_index; + Coordination::read(map_index, in); + + Coordination::ACLs acls; + int32_t acls_len; + Coordination::read(acls_len, in); + + while (acls_len > 0) + { + Coordination::ACL acl; + Coordination::read(acl.permissions, in); + Coordination::read(acl.scheme, in); + Coordination::read(acl.id, in); + acls.push_back(acl); + acls_len--; + } + storage.acl_map.addMapping(map_index, acls); + + count--; + } +} + +int64_t deserializeStorageData(KeeperStorage & storage, ReadBuffer & in) +{ + int64_t max_zxid = 0; + std::string path; + Coordination::read(path, in); + size_t count = 0; + while (path != "/") + { + KeeperStorage::Node node{}; + Coordination::read(node.data, in); + Coordination::read(node.acl_id, in); + + /// Deserialize stat + Coordination::read(node.stat.czxid, in); + Coordination::read(node.stat.mzxid, in); + /// For some reason ZXID specified in filename can be smaller + /// then actual zxid from nodes. In this case we will use zxid from nodes. + max_zxid = std::max(max_zxid, node.stat.mzxid); + + Coordination::read(node.stat.ctime, in); + Coordination::read(node.stat.mtime, in); + Coordination::read(node.stat.version, in); + Coordination::read(node.stat.cversion, in); + Coordination::read(node.stat.aversion, in); + Coordination::read(node.stat.ephemeralOwner, in); + Coordination::read(node.stat.pzxid, in); + if (!path.empty()) + { + node.stat.dataLength = node.data.length(); + node.seq_num = node.stat.cversion; + storage.container.insertOrReplace(path, node); + + if (node.stat.ephemeralOwner != 0) + storage.ephemerals[node.stat.ephemeralOwner].insert(path); + + storage.acl_map.addUsage(node.acl_id); + } + Coordination::read(path, in); + count++; + if (count % 1000 == 0) + std::cerr << "Deserialized nodes from snapshot:" << count << std::endl; + } + + for (const auto & itr : storage.container) + { + if (itr.key != "/") + { + auto parent_path = parentPath(itr.key); + storage.container.updateValue(parent_path, [&path = itr.key] (KeeperStorage::Node & value) { value.children.insert(getBaseName(path)); value.stat.numChildren++; }); + } + } + + return max_zxid; +} + +void deserializeKeeperStorageFromSnapshot(KeeperStorage & storage, const std::string & snapshot_path) +{ + int64_t zxid = getZxidFromName(snapshot_path); + + ReadBufferFromFile reader(snapshot_path); + + deserializeSnapshotMagic(reader); + auto max_session_id = deserializeSessionAndTimeout(storage, reader); + + storage.session_id_counter = max_session_id; + deserializeACLMap(storage, reader); + + int64_t zxid_from_nodes = deserializeStorageData(storage, reader); + storage.zxid = std::max(zxid, zxid_from_nodes); +} + +void deserializeKeeperStorageFromSnapshotsDir(KeeperStorage & storage, const std::string & path) +{ + namespace fs = std::filesystem; + std::map existing_snapshots; + for (const auto & p : fs::directory_iterator(path)) + { + const auto & log_path = p.path(); + if (!log_path.has_filename() || !startsWith(log_path.filename(), "snapshot.")) + continue; + int64_t zxid = getZxidFromName(log_path); + existing_snapshots[zxid] = p.path(); + } + /// deserialize only from latest snapshot + if 
(!existing_snapshots.empty()) + deserializeKeeperStorageFromSnapshot(storage, existing_snapshots.rbegin()->second); +} + +void deserializeLogMagic(ReadBuffer & in) +{ + int32_t magic_header, version; + int64_t dbid; + Coordination::read(magic_header, in); + Coordination::read(version, in); + Coordination::read(dbid, in); + + static constexpr int32_t LOG_HEADER = 1514884167; /// "ZKLG" + if (magic_header != LOG_HEADER) + throw Exception(ErrorCodes::CORRUPTED_DATA ,"Incorrect magic header in file, expected {}, got {}", LOG_HEADER, magic_header); +} + + +/// For some reason zookeeper stores slightly different records in log then +/// requests. For example: +/// class CreateTxn { +/// ustring path; +/// buffer data; +/// vector acl; +/// boolean ephemeral; +/// int parentCVersion; +/// } +/// But Create Request: +/// class CreateRequest { +/// ustring path; +/// buffer data; +/// vector acl; +/// int flags; +/// } +/// +/// However type is the same OpNum... +/// +/// Also there is a comment in ZooKeeper's code base about log structure, but +/// it's almost completely incorrect. Actual ZooKeeper log structure starting from version 3.6+: +/// +/// Magic Header: "ZKLG" + 4 byte version + 8 byte dbid. +/// After that goes serialized transactions, in the following format: +/// 8 byte checksum +/// 4 byte transaction length +/// 8 byte session_id (author of the transaction) +/// 4 byte user XID +/// 8 byte ZXID +/// 8 byte transaction time +/// 4 byte transaction type (OpNum) +/// [Transaction body depending on transaction type] +/// 12 bytes tail (starting from 3.6+): 4 byte version + 8 byte checksum of data tree +/// 1 byte -- 0x42 +/// +/// Transaction body is quite simple for all kinds of transactions except +/// Multitransactions. Their structure is following: +/// 4 byte sub transactions count +/// 4 byte sub transaction length +/// [Transaction body depending on transaction type] +/// and so on +/// +/// Gotchas: +/// +/// 1) For some reason ZooKeeper store ErrorTxn's in log. It's +/// reasonable for Multitransactions, but why they store standalone errors +/// is not clear. +/// +/// 2) For some reason there is no 12 bytes tail (version + checksum of +/// tree) after standalone ErrorTxn. +/// +/// 3) The most strange thing: In one of our production logs (about 1.2GB +/// size) we have found Multitransaction with two sub transactions: Error1 +/// and Error2, both -1 OpCode. Normal Error transaction has 4 bytes length +/// (for error code), but the Error1 has 550 bytes length. What is more +/// strange, that this 550 bytes obviously was a part of Create transaction, +/// but the operation code was -1. We have added debug prints to original +/// zookeeper (3.6.3) and found that it just reads 550 bytes of this "Error" +/// transaction, tooks the first 4 bytes as an error code (it was 79, non +/// existing code) and skip all remaining 546 bytes. NOTE: it looks like a bug +/// in ZooKeeper. +/// +namespace +{ + +Coordination::ZooKeeperRequestPtr deserializeCreateTxn(ReadBuffer & in) +{ + std::shared_ptr result = std::make_shared(); + Coordination::read(result->path, in); + Coordination::read(result->data, in); + Coordination::read(result->acls, in); + Coordination::read(result->is_ephemeral, in); + /// How we should use it? 
It should just increment on request execution + int32_t parent_c_version; + Coordination::read(parent_c_version, in); + return result; +} + +Coordination::ZooKeeperRequestPtr deserializeDeleteTxn(ReadBuffer & in) +{ + std::shared_ptr result = std::make_shared(); + Coordination::read(result->path, in); + return result; +} + +Coordination::ZooKeeperRequestPtr deserializeSetTxn(ReadBuffer & in) +{ + std::shared_ptr result = std::make_shared(); + Coordination::read(result->path, in); + Coordination::read(result->data, in); + Coordination::read(result->version, in); + return result; +} + +Coordination::ZooKeeperRequestPtr deserializeCheckVersionTxn(ReadBuffer & in) +{ + std::shared_ptr result = std::make_shared(); + Coordination::read(result->path, in); + Coordination::read(result->version, in); + return result; +} + +Coordination::ZooKeeperRequestPtr deserializeCreateSession(ReadBuffer & in) +{ + std::shared_ptr result = std::make_shared(); + int32_t timeout; + Coordination::read(timeout, in); + result->session_timeout_ms = timeout; + return result; +} + +Coordination::ZooKeeperRequestPtr deserializeCloseSession(ReadBuffer & in) +{ + std::shared_ptr result = std::make_shared(); + std::vector data; + Coordination::read(data, in); + return result; +} + +Coordination::ZooKeeperRequestPtr deserializeErrorTxn(ReadBuffer & in) +{ + int32_t error; + Coordination::read(error, in); + return nullptr; +} + +Coordination::ZooKeeperRequestPtr deserializeMultiTxn(ReadBuffer & in); + +Coordination::ZooKeeperRequestPtr deserializeTxnImpl(ReadBuffer & in, bool subtxn) +{ + int32_t type; + Coordination::read(type, in); + Coordination::ZooKeeperRequestPtr result; + int32_t sub_txn_length = 0; + if (subtxn) + Coordination::read(sub_txn_length, in); + + int64_t in_count_before = in.count(); + + switch (type) + { + case 1: + result = deserializeCreateTxn(in); + break; + case 2: + result = deserializeDeleteTxn(in); + break; + case 5: + result = deserializeSetTxn(in); + break; + case 13: + result = deserializeCheckVersionTxn(in); + break; + case 14: + result = deserializeMultiTxn(in); + break; + case -10: + result = deserializeCreateSession(in); + break; + case -11: + result = deserializeCloseSession(in); + break; + case -1: + result = deserializeErrorTxn(in); + break; + default: + throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Not implemented operation {}", type); + } + + if (subtxn) + { + int64_t bytes_read = in.count() - in_count_before; + if (bytes_read < sub_txn_length) + in.ignore(sub_txn_length - bytes_read); + } + + return result; +} + +Coordination::ZooKeeperRequestPtr deserializeMultiTxn(ReadBuffer & in) +{ + int32_t length; + Coordination::read(length, in); + + std::shared_ptr result = std::make_shared(); + bool error_found = false; + while (length > 0) + { + auto subrequest = deserializeTxnImpl(in, true); + if (subrequest) + result->requests.push_back(subrequest); + else + error_found = true; + length--; + } + return result; +} + +bool isErrorRequest(Coordination::ZooKeeperRequestPtr request) +{ + return request == nullptr; +} + +bool hasErrorsInMultiRequest(Coordination::ZooKeeperRequestPtr request) +{ + for (const auto & subrequest : dynamic_cast(request.get())->requests) + if (dynamic_cast(subrequest.get())->getOpNum() == Coordination::OpNum::Error) + return true; + return false; +} + +} + +bool deserializeTxn(KeeperStorage & storage, ReadBuffer & in) +{ + int64_t checksum; + Coordination::read(checksum, in); + /// Zero padding is possible until file end + if (checksum == 0) + return false; + + int32_t 
txn_len; + Coordination::read(txn_len, in); + int64_t count_before = in.count(); + int64_t session_id; + Coordination::read(session_id, in); + int32_t xid; + Coordination::read(xid, in); + int64_t zxid; + Coordination::read(zxid, in); + int64_t time; + Coordination::read(time, in); + + Coordination::ZooKeeperRequestPtr request = deserializeTxnImpl(in, false); + + /// For Error requests ZooKeeper doesn't store version + tree_digest + if (!isErrorRequest(request)) + { + int32_t version; + int64_t tree_digest; + Coordination::read(version, in); + Coordination::read(tree_digest, in); + } + + int64_t bytes_read = in.count() - count_before; + if (bytes_read < txn_len) + in.ignore(txn_len - bytes_read); + + /// We don't need to apply error requests + if (isErrorRequest(request)) + return true; + + request->xid = xid; + + if (zxid > storage.zxid) + { + /// Separate processing of session id requests + if (request->getOpNum() == Coordination::OpNum::SessionID) + { + const Coordination::ZooKeeperSessionIDRequest & session_id_request = dynamic_cast(*request); + storage.getSessionID(session_id_request.session_timeout_ms); + } + else + { + /// Skip failed multirequests + if (request->getOpNum() == Coordination::OpNum::Multi && hasErrorsInMultiRequest(request)) + return true; + + storage.processRequest(request, session_id, zxid); + } + } + + return true; +} + +void deserializeLogAndApplyToStorage(KeeperStorage & storage, const std::string & log_path) +{ + ReadBufferFromFile reader(log_path); + deserializeLogMagic(reader); + size_t counter = 0; + while (!reader.eof() && deserializeTxn(storage, reader)) + { + counter++; + if (counter % 1000 == 0) + std::cerr << "Deserialized from log: " << counter << std::endl; + + int8_t forty_two; + Coordination::read(forty_two, reader); + if (forty_two != 0x42) + throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Forty two check byte ({}) is not equal 0x42", forty_two); + } +} + +void deserializeLogsAndApplyToStorage(KeeperStorage & storage, const std::string & path) +{ + namespace fs = std::filesystem; + std::map existing_logs; + for (const auto & p : fs::directory_iterator(path)) + { + const auto & log_path = p.path(); + if (!log_path.has_filename() || !startsWith(log_path.filename(), "log.")) + continue; + int64_t zxid = getZxidFromName(log_path); + existing_logs[zxid] = p.path(); + } + + for (auto [zxid, log_path] : existing_logs) + { + if (zxid > storage.zxid) + deserializeLogAndApplyToStorage(storage, log_path); + } +} + +} diff --git a/src/Coordination/ZooKeeperDataReader.h b/src/Coordination/ZooKeeperDataReader.h new file mode 100644 index 00000000000..2716c9487b3 --- /dev/null +++ b/src/Coordination/ZooKeeperDataReader.h @@ -0,0 +1,17 @@ +#pragma once +#include +#include +#include + +namespace DB +{ + +void deserializeKeeperStorageFromSnapshot(KeeperStorage & storage, const std::string & snapshot_path, Poco::Logger * log = nullptr); + +void deserializeKeeperStorageFromSnapshotsDir(KeeperStorage & storage, const std::string & path, Poco::Logger * log = nullptr); + +void deserializeLogAndApplyToStorage(KeeperStorage & storage, const std::string & log_path, Poco::Logger * log = nullptr); + +void deserializeLogsAndApplyToStorage(KeeperStorage & storage, const std::string & path, Poco::Logger * log = nullptr); + +} diff --git a/src/Coordination/ZooKeeperSnapshotReader.cpp b/src/Coordination/ZooKeeperSnapshotReader.cpp deleted file mode 100644 index df758f870ee..00000000000 --- a/src/Coordination/ZooKeeperSnapshotReader.cpp +++ /dev/null @@ -1,183 +0,0 @@ -#include 
-#include -#include -#include -#include -#include -#include - - -namespace DB -{ - -static String parentPath(const String & path) -{ - auto rslash_pos = path.rfind('/'); - if (rslash_pos > 0) - return path.substr(0, rslash_pos); - return "/"; -} - -static std::string getBaseName(const String & path) -{ - size_t basename_start = path.rfind('/'); - return std::string{&path[basename_start + 1], path.length() - basename_start - 1}; -} - -int64_t getZxidFromName(const std::string & filename) -{ - std::filesystem::path path(filename); - std::string extension = path.extension(); - //std::cerr << "Extension:" << extension << std::endl; - char * end; - int64_t zxid = std::strtoul(extension.data() + 1, &end, 16); - return zxid; -} - -void deserializeMagic(ReadBuffer & in) -{ - int32_t magic_header, version; - int64_t dbid; - Coordination::read(magic_header, in); - Coordination::read(version, in); - Coordination::read(dbid, in); - //const char * data = "ZKSN"; - //std::cerr << "Expected Hedader:" << *reinterpret_cast(data) << std::endl; - //std::cerr << "MAGIC HEADER:" << magic_header << std::endl; - //std::cerr << "VERSION:" << version << std::endl; - //std::cerr << "DBID:" << dbid << std::endl; -} - -int64_t deserializeSessionAndTimeout(KeeperStorage & storage, ReadBuffer & in) -{ - int32_t count; - Coordination::read(count, in); - //std::cerr << "Total session and timeout:" << count << std::endl; - int64_t max_session_id = 0; - while (count > 0) - { - int64_t session_id; - int32_t timeout; - - Coordination::read(session_id, in); - Coordination::read(timeout, in); - //std::cerr << "Session id:" << session_id << std::endl; - //std::cerr << "Timeout:" << timeout << std::endl; - storage.addSessionID(session_id, timeout); - max_session_id = std::max(session_id, max_session_id); - count--; - } - std::cerr << "Done deserializing sessions\n"; - return max_session_id; -} - -void deserializeACLMap(KeeperStorage & storage, ReadBuffer & in) -{ - int32_t count; - Coordination::read(count, in); - //std::cerr << "ACLs Count:" << count << "\n"; - while (count > 0) - { - int64_t map_index; - Coordination::read(map_index, in); - //std::cerr << "Map index:" << map_index << "\n"; - - Coordination::ACLs acls; - int32_t acls_len; - Coordination::read(acls_len, in); - - //std::cerr << "ACLs len:" << acls_len << "\n"; - while (acls_len > 0) - { - Coordination::ACL acl; - Coordination::read(acl.permissions, in); - Coordination::read(acl.scheme, in); - Coordination::read(acl.id, in); - //std::cerr << "ACL perms:" << acl.permissions << "\n"; - //std::cerr << "ACL scheme:" << acl.scheme << "\n"; - //std::cerr << "ACL id:" << acl.id << "\n"; - acls.push_back(acl); - acls_len--; - } - storage.acl_map.addMapping(map_index, acls); - - count--; - } - std::cerr << "Done deserializing ACLs Total" << count << "\n"; -} - -int64_t deserializeStorageData(KeeperStorage & storage, ReadBuffer & in) -{ - int64_t max_zxid = 0; - std::string path; - Coordination::read(path, in); - //std::cerr << "Read path FIRST length:" << path.length() << std::endl; - //std::cerr << "Read path FIRST data:" << path << std::endl; - size_t count = 0; - while (path != "/") - { - KeeperStorage::Node node{}; - Coordination::read(node.data, in); - Coordination::read(node.acl_id, in); - - /// Deserialize stat - Coordination::read(node.stat.czxid, in); - Coordination::read(node.stat.mzxid, in); - /// For some reason ZXID specified in filename can be smaller - /// then actual zxid from nodes. 
- max_zxid = std::max(max_zxid, node.stat.mzxid); - - Coordination::read(node.stat.ctime, in); - Coordination::read(node.stat.mtime, in); - Coordination::read(node.stat.version, in); - Coordination::read(node.stat.cversion, in); - Coordination::read(node.stat.aversion, in); - Coordination::read(node.stat.ephemeralOwner, in); - Coordination::read(node.stat.pzxid, in); - if (!path.empty()) - { - node.stat.dataLength = node.data.length(); - node.seq_num = node.stat.cversion; - storage.container.insertOrReplace(path, node); - - if (node.stat.ephemeralOwner != 0) - storage.ephemerals[node.stat.ephemeralOwner].insert(path); - - storage.acl_map.addUsage(node.acl_id); - } - Coordination::read(path, in); - count++; - if (count % 1000 == 0) - std::cerr << "Deserialized nodes:" << count << std::endl; - } - - for (const auto & itr : storage.container) - { - if (itr.key != "/") - { - auto parent_path = parentPath(itr.key); - storage.container.updateValue(parent_path, [&path = itr.key] (KeeperStorage::Node & value) { value.children.insert(getBaseName(path)); value.stat.numChildren++; }); - } - } - - return max_zxid; -} - -void deserializeKeeperStorage(KeeperStorage & storage, const std::string & path) -{ - int64_t zxid = getZxidFromName(path); - //std::cerr << "Got ZXID:" << zxid << std::endl; - - ReadBufferFromFile reader(path); - - deserializeMagic(reader); - auto max_session_id = deserializeSessionAndTimeout(storage, reader); - - storage.session_id_counter = max_session_id; - deserializeACLMap(storage, reader); - - int64_t zxid_from_nodes = deserializeStorageData(storage, reader); - storage.zxid = std::max(zxid, zxid_from_nodes); -} - -} diff --git a/src/Coordination/ZooKeeperSnapshotReader.h b/src/Coordination/ZooKeeperSnapshotReader.h deleted file mode 100644 index 8006f69a6f8..00000000000 --- a/src/Coordination/ZooKeeperSnapshotReader.h +++ /dev/null @@ -1,23 +0,0 @@ -#pragma once -#include -#include -#include -#include -#include - -namespace DB -{ - -int64_t getZxidFromName(const std::string & filename); - -void deserializeMagic(ReadBuffer & in); - -int64_t deserializeSessionAndTimeout(KeeperStorage & storage, ReadBuffer & in); - -void deserializeACLMap(KeeperStorage & storage, ReadBuffer & in); - -int64_t deserializeStorageData(KeeperStorage & storage, ReadBuffer & in); - -void deserializeKeeperStorage(KeeperStorage & storage, const std::string & path); - -} From 3c0a390d3772968438a03730e7cd82683ec3ff3a Mon Sep 17 00:00:00 2001 From: meoww-bot <14239840+meoww-bot@users.noreply.github.com> Date: Fri, 18 Jun 2021 00:31:58 +0800 Subject: [PATCH 190/931] Create zh translation for postgresql.md --- .../table-functions/postgresql.md | 120 ++++++++++++++++++ 1 file changed, 120 insertions(+) create mode 100644 docs/zh/sql-reference/table-functions/postgresql.md diff --git a/docs/zh/sql-reference/table-functions/postgresql.md b/docs/zh/sql-reference/table-functions/postgresql.md new file mode 100644 index 00000000000..df29c2c2891 --- /dev/null +++ b/docs/zh/sql-reference/table-functions/postgresql.md @@ -0,0 +1,120 @@ +--- +toc_priority: 42 +toc_title: postgresql +--- + +# postgresql {#postgresql} + +允许对存储在远程 PostgreSQL 服务器上的数据进行 `SELECT` 和 `INSERT` 查询. + +**语法** + +``` sql +postgresql('host:port', 'database', 'table', 'user', 'password'[, `schema`]) +``` + +**参数** + +- `host:port` — PostgreSQL 服务器地址. +- `database` — 远程数据库名称. +- `table` — 远程表名称. +- `user` — PostgreSQL 用户. +- `password` — 用户密码. +- `schema` — 非默认的表结构. 可选. + +**返回值** + +一个表对象,其列数与原 PostgreSQL 表的列数相同。 + +!!! 
info "Note" + 在`INSERT`查询中,为了区分表函数`postgresql(..)`和表名以及表的列名列表,你必须使用关键字`FUNCTION`或`TABLE FUNCTION`。请看下面的例子。 + +## 实施细节 {#implementation-details} + +`SELECT`查询在 PostgreSQL 上以 `COPY (SELECT ...) TO STDOUT` 的方式在只读的 PostgreSQL 事务中运行,每次在`SELECT`查询后提交。 + +简单的`WHERE`子句,如`=`、`!=`、`>`、`>=`、`<`、`<=`和`IN`,在PostgreSQL服务器上执行。 + +所有的连接、聚合、排序,`IN [ 数组 ]`条件和`LIMIT`采样约束只有在对PostgreSQL的查询结束后才会在ClickHouse中执行。 + +PostgreSQL 上的`INSERT`查询以`COPY "table_name" (field1, field2, ... fieldN) FROM STDIN`的方式在 PostgreSQL 事务中运行,每次`INSERT`语句后自动提交。 + +PostgreSQL 数组类型将转换为 ClickHouse 数组。 + +!!! info "Note" + 要小心,在 PostgreSQL 中,像 Integer[] 这样的数组数据类型列可以在不同的行中包含不同维度的数组,但在 ClickHouse 中,只允许在所有的行中有相同维度的多维数组。 + +支持设置 PostgreSQL 字典源中 Replicas 的优先级。地图中的数字越大,优先级就越低。`0`代表最高的优先级。 + +**示例** + +PostgreSQL 中的表: + +``` text +postgres=# CREATE TABLE "public"."test" ( +"int_id" SERIAL, +"int_nullable" INT NULL DEFAULT NULL, +"float" FLOAT NOT NULL, +"str" VARCHAR(100) NOT NULL DEFAULT '', +"float_nullable" FLOAT NULL DEFAULT NULL, +PRIMARY KEY (int_id)); + +CREATE TABLE + +postgres=# INSERT INTO test (int_id, str, "float") VALUES (1,'test',2); +INSERT 0 1 + +postgresql> SELECT * FROM test; + int_id | int_nullable | float | str | float_nullable + --------+--------------+-------+------+---------------- + 1 | | 2 | test | +(1 row) +``` + +从 ClickHouse 检索数据: + +```sql +SELECT * FROM postgresql('localhost:5432', 'test', 'test', 'postgresql_user', 'password') WHERE str IN ('test'); +``` + +``` text +┌─int_id─┬─int_nullable─┬─float─┬─str──┬─float_nullable─┐ +│ 1 │ ᴺᵁᴸᴸ │ 2 │ test │ ᴺᵁᴸᴸ │ +└────────┴──────────────┴───────┴──────┴────────────────┘ +``` + +插入数据: + +```sql +INSERT INTO TABLE FUNCTION postgresql('localhost:5432', 'test', 'test', 'postgrsql_user', 'password') (int_id, float) VALUES (2, 3); +SELECT * FROM postgresql('localhost:5432', 'test', 'test', 'postgresql_user', 'password'); +``` + +``` text +┌─int_id─┬─int_nullable─┬─float─┬─str──┬─float_nullable─┐ +│ 1 │ ᴺᵁᴸᴸ │ 2 │ test │ ᴺᵁᴸᴸ │ +│ 2 │ ᴺᵁᴸᴸ │ 3 │ │ ᴺᵁᴸᴸ │ +└────────┴──────────────┴───────┴──────┴────────────────┘ +``` + +使用非默认的表结构: + +```text +postgres=# CREATE SCHEMA "nice.schema"; + +postgres=# CREATE TABLE "nice.schema"."nice.table" (a integer); + +postgres=# INSERT INTO "nice.schema"."nice.table" SELECT i FROM generate_series(0, 99) as t(i) +``` + +```sql +CREATE TABLE pg_table_schema_with_dots (a UInt32) + ENGINE PostgreSQL('localhost:5432', 'clickhouse', 'nice.table', 'postgrsql_user', 'password', 'nice.schema'); +``` + +**另请参阅** + +- [PostgreSQL 表引擎](../../engines/table-engines/integrations/postgresql.md) +- [使用 PostgreSQL 作为外部字典的来源](../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md#dicts-external_dicts_dict_sources-postgresql) + +[原始文章](https://clickhouse.tech/docs/en/sql-reference/table-functions/postgresql/) From 1a6abb4db468a12e9858db160c913168c669bf8f Mon Sep 17 00:00:00 2001 From: alesapin Date: Thu, 17 Jun 2021 19:32:50 +0300 Subject: [PATCH 191/931] Better --- programs/CMakeLists.txt | 76 ++++++++++++++++++- programs/config_tools.h.in | 1 + programs/keeper-converter/CMakeLists.txt | 9 +++ programs/keeper-converter/KeeperConverter.cpp | 61 +++++++++++++++ .../clickhouse-keeper-converter.cpp | 2 + programs/main.cpp | 6 ++ src/Coordination/ZooKeeperDataReader.cpp | 57 +++++++++----- src/Coordination/ZooKeeperDataReader.h | 8 +- 8 files changed, 193 insertions(+), 27 deletions(-) create mode 100644 programs/keeper-converter/CMakeLists.txt create mode 100644 programs/keeper-converter/KeeperConverter.cpp create mode 100644 
programs/keeper-converter/clickhouse-keeper-converter.cpp diff --git a/programs/CMakeLists.txt b/programs/CMakeLists.txt index 2af0331c70b..c2d56ee6a17 100644 --- a/programs/CMakeLists.txt +++ b/programs/CMakeLists.txt @@ -49,11 +49,15 @@ option (ENABLE_CLICKHOUSE_GIT_IMPORT "A tool to analyze Git repositories" option (ENABLE_CLICKHOUSE_KEEPER "ClickHouse alternative to ZooKeeper" ${ENABLE_CLICKHOUSE_ALL}) + +option (ENABLE_CLICKHOUSE_KEEPER_CONVERTER "Util allows to convert ZooKeeper logs and snapshots into clickhouse-keeper snapshot" ${ENABLE_CLICKHOUSE_ALL}) + if (NOT USE_NURAFT) # RECONFIGURE_MESSAGE_LEVEL should not be used here, # since USE_NURAFT is set to OFF for FreeBSD and Darwin. - message (STATUS "clickhouse-keeper will not be built (lack of NuRaft)") + message (STATUS "clickhouse-keeper and clickhouse-keeper-converter will not be built (lack of NuRaft)") set(ENABLE_CLICKHOUSE_KEEPER OFF) + set(ENABLE_CLICKHOUSE_KEEPER_CONVERTER OFF) endif() if (CLICKHOUSE_SPLIT_BINARY) @@ -149,6 +153,12 @@ else() message(STATUS "ClickHouse keeper mode: OFF") endif() +if (ENABLE_CLICKHOUSE_KEEPER_CONVERTER) + message(STATUS "ClickHouse keeper-converter mode: ON") +else() + message(STATUS "ClickHouse keeper-converter mode: OFF") +endif() + if(NOT (MAKE_STATIC_LIBRARIES OR SPLIT_SHARED_LIBRARIES)) set(CLICKHOUSE_ONE_SHARED ON) endif() @@ -270,6 +280,10 @@ if (ENABLE_CLICKHOUSE_KEEPER) add_subdirectory (keeper) endif() +if (ENABLE_CLICKHOUSE_KEEPER_CONVERTER) + add_subdirectory (keeper-converter) +endif() + if (ENABLE_CLICKHOUSE_ODBC_BRIDGE) add_subdirectory (odbc-bridge) endif () @@ -279,9 +293,51 @@ if (ENABLE_CLICKHOUSE_LIBRARY_BRIDGE) endif () if (CLICKHOUSE_ONE_SHARED) - add_library(clickhouse-lib SHARED ${CLICKHOUSE_SERVER_SOURCES} ${CLICKHOUSE_CLIENT_SOURCES} ${CLICKHOUSE_LOCAL_SOURCES} ${CLICKHOUSE_BENCHMARK_SOURCES} ${CLICKHOUSE_COPIER_SOURCES} ${CLICKHOUSE_EXTRACT_FROM_CONFIG_SOURCES} ${CLICKHOUSE_COMPRESSOR_SOURCES} ${CLICKHOUSE_FORMAT_SOURCES} ${CLICKHOUSE_OBFUSCATOR_SOURCES} ${CLICKHOUSE_GIT_IMPORT_SOURCES} ${CLICKHOUSE_ODBC_BRIDGE_SOURCES} ${CLICKHOUSE_KEEPER_SOURCES}) - target_link_libraries(clickhouse-lib ${CLICKHOUSE_SERVER_LINK} ${CLICKHOUSE_CLIENT_LINK} ${CLICKHOUSE_LOCAL_LINK} ${CLICKHOUSE_BENCHMARK_LINK} ${CLICKHOUSE_COPIER_LINK} ${CLICKHOUSE_EXTRACT_FROM_CONFIG_LINK} ${CLICKHOUSE_COMPRESSOR_LINK} ${CLICKHOUSE_FORMAT_LINK} ${CLICKHOUSE_OBFUSCATOR_LINK} ${CLICKHOUSE_GIT_IMPORT_LINK} ${CLICKHOUSE_ODBC_BRIDGE_LINK} ${CLICKHOUSE_KEEPER_LINK}) - target_include_directories(clickhouse-lib ${CLICKHOUSE_SERVER_INCLUDE} ${CLICKHOUSE_CLIENT_INCLUDE} ${CLICKHOUSE_LOCAL_INCLUDE} ${CLICKHOUSE_BENCHMARK_INCLUDE} ${CLICKHOUSE_COPIER_INCLUDE} ${CLICKHOUSE_EXTRACT_FROM_CONFIG_INCLUDE} ${CLICKHOUSE_COMPRESSOR_INCLUDE} ${CLICKHOUSE_FORMAT_INCLUDE} ${CLICKHOUSE_OBFUSCATOR_INCLUDE} ${CLICKHOUSE_GIT_IMPORT_INCLUDE} ${CLICKHOUSE_ODBC_BRIDGE_INCLUDE} ${CLICKHOUSE_KEEPER_INCLUDE}) + add_library(clickhouse-lib SHARED + ${CLICKHOUSE_SERVER_SOURCES} + ${CLICKHOUSE_CLIENT_SOURCES} + ${CLICKHOUSE_LOCAL_SOURCES} + ${CLICKHOUSE_BENCHMARK_SOURCES} + ${CLICKHOUSE_COPIER_SOURCES} + ${CLICKHOUSE_EXTRACT_FROM_CONFIG_SOURCES} + ${CLICKHOUSE_COMPRESSOR_SOURCES} + ${CLICKHOUSE_FORMAT_SOURCES} + ${CLICKHOUSE_OBFUSCATOR_SOURCES} + ${CLICKHOUSE_GIT_IMPORT_SOURCES} + ${CLICKHOUSE_ODBC_BRIDGE_SOURCES} + ${CLICKHOUSE_KEEPER_SOURCES} + ${CLICKHOUSE_KEEPER_CONVERTER_SOURCES}) + + target_link_libraries(clickhouse-lib + ${CLICKHOUSE_SERVER_LINK} + ${CLICKHOUSE_CLIENT_LINK} + ${CLICKHOUSE_LOCAL_LINK} + 
${CLICKHOUSE_BENCHMARK_LINK} + ${CLICKHOUSE_COPIER_LINK} + ${CLICKHOUSE_EXTRACT_FROM_CONFIG_LINK} + ${CLICKHOUSE_COMPRESSOR_LINK} + ${CLICKHOUSE_FORMAT_LINK} + ${CLICKHOUSE_OBFUSCATOR_LINK} + ${CLICKHOUSE_GIT_IMPORT_LINK} + ${CLICKHOUSE_ODBC_BRIDGE_LINK} + ${CLICKHOUSE_KEEPER_LINK} + ${CLICKHOUSE_KEEPER_CONVERTER_LINK}) + + target_include_directories(clickhouse-lib + ${CLICKHOUSE_SERVER_INCLUDE} + ${CLICKHOUSE_CLIENT_INCLUDE} + ${CLICKHOUSE_LOCAL_INCLUDE} + ${CLICKHOUSE_BENCHMARK_INCLUDE} + ${CLICKHOUSE_COPIER_INCLUDE} + ${CLICKHOUSE_EXTRACT_FROM_CONFIG_INCLUDE} + ${CLICKHOUSE_COMPRESSOR_INCLUDE} + ${CLICKHOUSE_FORMAT_INCLUDE} + ${CLICKHOUSE_OBFUSCATOR_INCLUDE} + ${CLICKHOUSE_GIT_IMPORT_INCLUDE} + ${CLICKHOUSE_ODBC_BRIDGE_INCLUDE} + ${CLICKHOUSE_KEEPER_INCLUDE} + ${CLICKHOUSE_KEEPER_CONVERTER_INCLUDE}) + set_target_properties(clickhouse-lib PROPERTIES SOVERSION ${VERSION_MAJOR}.${VERSION_MINOR} VERSION ${VERSION_SO} OUTPUT_NAME clickhouse DEBUG_POSTFIX "") install (TARGETS clickhouse-lib LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR} COMPONENT clickhouse) endif() @@ -312,6 +368,10 @@ if (CLICKHOUSE_SPLIT_BINARY) list (APPEND CLICKHOUSE_ALL_TARGETS clickhouse-keeper) endif () + if (ENABLE_CLICKHOUSE_KEEPER_CONVERTER) + list (APPEND CLICKHOUSE_ALL_TARGETS clickhouse-keeper-converter) + endif () + set_target_properties(${CLICKHOUSE_ALL_TARGETS} PROPERTIES RUNTIME_OUTPUT_DIRECTORY ..) add_custom_target (clickhouse-bundle ALL DEPENDS ${CLICKHOUSE_ALL_TARGETS}) @@ -362,6 +422,9 @@ else () if (ENABLE_CLICKHOUSE_KEEPER) clickhouse_target_link_split_lib(clickhouse keeper) endif() + if (ENABLE_CLICKHOUSE_KEEPER_CONVERTER) + clickhouse_target_link_split_lib(clickhouse keeper-converter) + endif() if (ENABLE_CLICKHOUSE_INSTALL) clickhouse_target_link_split_lib(clickhouse install) endif () @@ -422,6 +485,11 @@ else () install (FILES "${CMAKE_CURRENT_BINARY_DIR}/clickhouse-keeper" DESTINATION ${CMAKE_INSTALL_BINDIR} COMPONENT clickhouse) list(APPEND CLICKHOUSE_BUNDLE clickhouse-keeper) endif () + if (ENABLE_CLICKHOUSE_KEEPER_CONVERTER) + add_custom_target (clickhouse-keeper-converter ALL COMMAND ${CMAKE_COMMAND} -E create_symlink clickhouse clickhouse-keeper-converter DEPENDS clickhouse) + install (FILES "${CMAKE_CURRENT_BINARY_DIR}/clickhouse-keeper-converter" DESTINATION ${CMAKE_INSTALL_BINDIR} COMPONENT clickhouse) + list(APPEND CLICKHOUSE_BUNDLE clickhouse-keeper-converter) + endif () install (TARGETS clickhouse RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR} COMPONENT clickhouse) diff --git a/programs/config_tools.h.in b/programs/config_tools.h.in index 50ba0c16a83..62fc076861c 100644 --- a/programs/config_tools.h.in +++ b/programs/config_tools.h.in @@ -17,3 +17,4 @@ #cmakedefine01 ENABLE_CLICKHOUSE_ODBC_BRIDGE #cmakedefine01 ENABLE_CLICKHOUSE_LIBRARY_BRIDGE #cmakedefine01 ENABLE_CLICKHOUSE_KEEPER +#cmakedefine01 ENABLE_CLICKHOUSE_KEEPER_CONVERTER diff --git a/programs/keeper-converter/CMakeLists.txt b/programs/keeper-converter/CMakeLists.txt new file mode 100644 index 00000000000..d529f94d388 --- /dev/null +++ b/programs/keeper-converter/CMakeLists.txt @@ -0,0 +1,9 @@ +set (CLICKHOUSE_KEEPER_CONVERTER_SOURCES KeeperConverter.cpp) + +set (CLICKHOUSE_KEEPER_CONVERTER_LINK + PRIVATE + boost::program_options + dbms +) + +clickhouse_program_add(keeper-converter) diff --git a/programs/keeper-converter/KeeperConverter.cpp b/programs/keeper-converter/KeeperConverter.cpp new file mode 100644 index 00000000000..15dbc8bd220 --- /dev/null +++ b/programs/keeper-converter/KeeperConverter.cpp @@ -0,0 +1,61 @@ +#include 
+#include
+#include
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+
+int mainEntryClickHouseKeeperConverter(int argc, char ** argv)
+{
+    using namespace DB;
+    namespace po = boost::program_options;
+
+    po::options_description desc = createOptionsDescription("Allowed options", getTerminalWidth());
+    desc.add_options()
+        ("help,h", "produce help message")
+        ("zookeeper-logs-dir", po::value<std::string>(), "Path to directory with ZooKeeper logs")
+        ("zookeeper-snapshots-dir", po::value<std::string>(), "Path to directory with ZooKeeper snapshots")
+        ("output-dir", po::value<std::string>(), "Directory to place output clickhouse-keeper snapshot")
+    ;
+    po::variables_map options;
+    po::store(po::command_line_parser(argc, argv).options(desc).run(), options);
+    Poco::AutoPtr<Poco::ConsoleChannel> console_channel(new Poco::ConsoleChannel);
+
+    Poco::Logger * logger = &Poco::Logger::get("KeeperConverter");
+    logger->setChannel(console_channel);
+
+    if (options.count("help"))
+    {
+        std::cout << "Usage: " << argv[0] << " --zookeeper-logs-dir /var/lib/zookeeper/data/version-2 --zookeeper-snapshots-dir /var/lib/zookeeper/data/version-2 --output-dir /var/lib/clickhouse/coordination/snapshots" << std::endl;
+        std::cout << desc << std::endl;
+        return 0;
+    }
+
+    try
+    {
+        DB::KeeperStorage storage(500, "");
+
+        DB::deserializeKeeperStorageFromSnapshotsDir(storage, options["zookeeper-snapshots-dir"].as<std::string>(), logger);
+        DB::deserializeLogsAndApplyToStorage(storage, options["zookeeper-logs-dir"].as<std::string>(), logger);
+        DB::SnapshotMetadataPtr snapshot_meta = std::make_shared<DB::SnapshotMetadata>(storage.getZXID(), 1, std::make_shared<nuraft::cluster_config>());
+        DB::KeeperStorageSnapshot snapshot(&storage, snapshot_meta);
+
+        DB::KeeperSnapshotManager manager(options["output-dir"].as<std::string>(), 1);
+        auto snp = manager.serializeSnapshotToBuffer(snapshot);
+        auto path = manager.serializeSnapshotBufferToDisk(*snp, storage.getZXID());
+        std::cout << "Snapshot serialized to path:" << path << std::endl;
+    }
+    catch (...)
+ { + std::cerr << getCurrentExceptionMessage(true) << '\n'; + return getCurrentExceptionCode(); + } + + return 0; +} diff --git a/programs/keeper-converter/clickhouse-keeper-converter.cpp b/programs/keeper-converter/clickhouse-keeper-converter.cpp new file mode 100644 index 00000000000..3cb6f99f837 --- /dev/null +++ b/programs/keeper-converter/clickhouse-keeper-converter.cpp @@ -0,0 +1,2 @@ +int mainEntryClickHouseKeeperConverter(int argc, char ** argv); +int main(int argc_, char ** argv_) { return mainEntryClickHouseKeeperConverter(argc_, argv_); } diff --git a/programs/main.cpp b/programs/main.cpp index c5df2596422..b03d6a4a590 100644 --- a/programs/main.cpp +++ b/programs/main.cpp @@ -59,6 +59,9 @@ int mainEntryClickHouseGitImport(int argc, char ** argv); #if ENABLE_CLICKHOUSE_KEEPER int mainEntryClickHouseKeeper(int argc, char ** argv); #endif +#if ENABLE_CLICKHOUSE_KEEPER +int mainEntryClickHouseKeeperConverter(int argc, char ** argv); +#endif #if ENABLE_CLICKHOUSE_INSTALL int mainEntryClickHouseInstall(int argc, char ** argv); int mainEntryClickHouseStart(int argc, char ** argv); @@ -119,6 +122,9 @@ std::pair clickhouse_applications[] = #if ENABLE_CLICKHOUSE_KEEPER {"keeper", mainEntryClickHouseKeeper}, #endif +#if ENABLE_CLICKHOUSE_KEEPER_CONVERTER + {"keeper-converter", mainEntryClickHouseKeeperConverter}, +#endif #if ENABLE_CLICKHOUSE_INSTALL {"install", mainEntryClickHouseInstall}, {"start", mainEntryClickHouseStart}, diff --git a/src/Coordination/ZooKeeperDataReader.cpp b/src/Coordination/ZooKeeperDataReader.cpp index 60882993c0f..a2ab85e0625 100644 --- a/src/Coordination/ZooKeeperDataReader.cpp +++ b/src/Coordination/ZooKeeperDataReader.cpp @@ -45,6 +45,8 @@ void deserializeSnapshotMagic(ReadBuffer & in) int64_t dbid; Coordination::read(magic_header, in); Coordination::read(version, in); + if (version != 2) + throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Cannot deserialize ZooKeeper data other than version 2, got version {}", version); Coordination::read(dbid, in); static constexpr int32_t SNP_HEADER = 1514885966; /// "ZKSN" if (magic_header != SNP_HEADER) @@ -98,7 +100,7 @@ void deserializeACLMap(KeeperStorage & storage, ReadBuffer & in) } } -int64_t deserializeStorageData(KeeperStorage & storage, ReadBuffer & in) +int64_t deserializeStorageData(KeeperStorage & storage, ReadBuffer & in, Poco::Logger * log) { int64_t max_zxid = 0; std::string path; @@ -138,7 +140,7 @@ int64_t deserializeStorageData(KeeperStorage & storage, ReadBuffer & in) Coordination::read(path, in); count++; if (count % 1000 == 0) - std::cerr << "Deserialized nodes from snapshot:" << count << std::endl; + LOG_INFO(log, "Deserialized nodes from snapshot: {}", count); } for (const auto & itr : storage.container) @@ -153,23 +155,31 @@ int64_t deserializeStorageData(KeeperStorage & storage, ReadBuffer & in) return max_zxid; } -void deserializeKeeperStorageFromSnapshot(KeeperStorage & storage, const std::string & snapshot_path) +void deserializeKeeperStorageFromSnapshot(KeeperStorage & storage, const std::string & snapshot_path, Poco::Logger * log) { + LOG_INFO(log, "Deserializing storage snapshot {}", snapshot_path); int64_t zxid = getZxidFromName(snapshot_path); ReadBufferFromFile reader(snapshot_path); deserializeSnapshotMagic(reader); + + LOG_INFO(log, "Magic deserialized, looks OK"); auto max_session_id = deserializeSessionAndTimeout(storage, reader); + LOG_INFO(log, "Sessions and timeouts deserialized"); storage.session_id_counter = max_session_id; deserializeACLMap(storage, reader); + LOG_INFO(log, "ACLs 
deserialized"); - int64_t zxid_from_nodes = deserializeStorageData(storage, reader); + LOG_INFO(log, "Deserializing data from snapshot"); + int64_t zxid_from_nodes = deserializeStorageData(storage, reader, log); storage.zxid = std::max(zxid, zxid_from_nodes); + + LOG_INFO(log, "Finished, snapshot ZXID {}", storage.zxid); } -void deserializeKeeperStorageFromSnapshotsDir(KeeperStorage & storage, const std::string & path) +void deserializeKeeperStorageFromSnapshotsDir(KeeperStorage & storage, const std::string & path, Poco::Logger * log) { namespace fs = std::filesystem; std::map existing_snapshots; @@ -181,9 +191,13 @@ void deserializeKeeperStorageFromSnapshotsDir(KeeperStorage & storage, const std int64_t zxid = getZxidFromName(log_path); existing_snapshots[zxid] = p.path(); } + + LOG_INFO(log, "Totally have {} snapshots, will use latest", existing_snapshots.size()); /// deserialize only from latest snapshot if (!existing_snapshots.empty()) - deserializeKeeperStorageFromSnapshot(storage, existing_snapshots.rbegin()->second); + deserializeKeeperStorageFromSnapshot(storage, existing_snapshots.rbegin()->second, log); + else + throw Exception(ErrorCodes::CORRUPTED_DATA, "No snapshots found on path {}. At least one snapshot must exist.", path); } void deserializeLogMagic(ReadBuffer & in) @@ -197,6 +211,9 @@ void deserializeLogMagic(ReadBuffer & in) static constexpr int32_t LOG_HEADER = 1514884167; /// "ZKLG" if (magic_header != LOG_HEADER) throw Exception(ErrorCodes::CORRUPTED_DATA ,"Incorrect magic header in file, expected {}, got {}", LOG_HEADER, magic_header); + + if (version != 2) + throw Exception(ErrorCodes::NOT_IMPLEMENTED,"Cannot deserialize ZooKeeper data other than version 2, got version {}", version); } @@ -435,15 +452,7 @@ bool deserializeTxn(KeeperStorage & storage, ReadBuffer & in) Coordination::ZooKeeperRequestPtr request = deserializeTxnImpl(in, false); - /// For Error requests ZooKeeper doesn't store version + tree_digest - if (!isErrorRequest(request)) - { - int32_t version; - int64_t tree_digest; - Coordination::read(version, in); - Coordination::read(tree_digest, in); - } - + /// Skip all other bytes int64_t bytes_read = in.count() - count_before; if (bytes_read < txn_len) in.ignore(txn_len - bytes_read); @@ -475,25 +484,31 @@ bool deserializeTxn(KeeperStorage & storage, ReadBuffer & in) return true; } -void deserializeLogAndApplyToStorage(KeeperStorage & storage, const std::string & log_path) +void deserializeLogAndApplyToStorage(KeeperStorage & storage, const std::string & log_path, Poco::Logger * log) { ReadBufferFromFile reader(log_path); + + LOG_INFO(log, "Deserializing log {}", log_path); deserializeLogMagic(reader); + LOG_INFO(log, "Header looks OK"); + size_t counter = 0; while (!reader.eof() && deserializeTxn(storage, reader)) { counter++; if (counter % 1000 == 0) - std::cerr << "Deserialized from log: " << counter << std::endl; + LOG_INFO(log, "Deserialized txns log: {}", counter); int8_t forty_two; Coordination::read(forty_two, reader); if (forty_two != 0x42) throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Forty two check byte ({}) is not equal 0x42", forty_two); } + + LOG_INFO(log, "Finished {} deserialization, totally read {} records", log_path, counter); } -void deserializeLogsAndApplyToStorage(KeeperStorage & storage, const std::string & path) +void deserializeLogsAndApplyToStorage(KeeperStorage & storage, const std::string & path, Poco::Logger * log) { namespace fs = std::filesystem; std::map existing_logs; @@ -506,10 +521,14 @@ void 
deserializeLogsAndApplyToStorage(KeeperStorage & storage, const std::string
         existing_logs[zxid] = p.path();
     }
 
+    LOG_INFO(log, "Totally have {} logs", existing_logs.size());
+
     for (auto [zxid, log_path] : existing_logs)
     {
         if (zxid > storage.zxid)
-            deserializeLogAndApplyToStorage(storage, log_path);
+            deserializeLogAndApplyToStorage(storage, log_path, log);
+        else
+            LOG_INFO(log, "Skipping log {}, its ZXID {} is smaller than the storage's ZXID {}", log_path, zxid, storage.zxid);
     }
 }
 
diff --git a/src/Coordination/ZooKeeperDataReader.h b/src/Coordination/ZooKeeperDataReader.h
index 2716c9487b3..5f26457c113 100644
--- a/src/Coordination/ZooKeeperDataReader.h
+++ b/src/Coordination/ZooKeeperDataReader.h
@@ -6,12 +6,12 @@
 namespace DB
 {
 
-void deserializeKeeperStorageFromSnapshot(KeeperStorage & storage, const std::string & snapshot_path, Poco::Logger * log = nullptr);
+void deserializeKeeperStorageFromSnapshot(KeeperStorage & storage, const std::string & snapshot_path, Poco::Logger * log);
 
-void deserializeKeeperStorageFromSnapshotsDir(KeeperStorage & storage, const std::string & path, Poco::Logger * log = nullptr);
+void deserializeKeeperStorageFromSnapshotsDir(KeeperStorage & storage, const std::string & path, Poco::Logger * log);
 
-void deserializeLogAndApplyToStorage(KeeperStorage & storage, const std::string & log_path, Poco::Logger * log = nullptr);
+void deserializeLogAndApplyToStorage(KeeperStorage & storage, const std::string & log_path, Poco::Logger * log);
 
-void deserializeLogsAndApplyToStorage(KeeperStorage & storage, const std::string & path, Poco::Logger * log = nullptr);
+void deserializeLogsAndApplyToStorage(KeeperStorage & storage, const std::string & path, Poco::Logger * log);
 
 }

From a62957dba67fae2c9710f28698568fa488978e70 Mon Sep 17 00:00:00 2001
From: meoww-bot <14239840+meoww-bot@users.noreply.github.com>
Date: Fri, 18 Jun 2021 00:44:14 +0800
Subject: [PATCH 192/931] Add zh translation for embedded-rocksdb.md

---
 .../integrations/embedded-rocksdb.md          | 42 +++++++++++++++++++
 1 file changed, 42 insertions(+)
 create mode 100644 docs/zh/engines/table-engines/integrations/embedded-rocksdb.md

diff --git a/docs/zh/engines/table-engines/integrations/embedded-rocksdb.md b/docs/zh/engines/table-engines/integrations/embedded-rocksdb.md
new file mode 100644
index 00000000000..7c04600894e
--- /dev/null
+++ b/docs/zh/engines/table-engines/integrations/embedded-rocksdb.md
@@ -0,0 +1,42 @@
+---
+toc_priority: 9
+toc_title: EmbeddedRocksDB
+---
+
+# EmbeddedRocksDB 引擎 {#EmbeddedRocksDB-engine}
+
+这个引擎允许 ClickHouse 与 [rocksdb](http://rocksdb.org/) 进行集成。
+
+## 创建一张表 {#table_engine-EmbeddedRocksDB-creating-a-table}
+
+``` sql
+CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]
+(
+    name1 [type1] [DEFAULT|MATERIALIZED|ALIAS expr1],
+    name2 [type2] [DEFAULT|MATERIALIZED|ALIAS expr2],
+    ...
+) ENGINE = EmbeddedRocksDB PRIMARY KEY(primary_key_name)
+```
+
+必要参数:
+
+- `primary_key_name` – 列列表中的任意列名.
+- 必须指定 `primary key`, 仅支持主键中的一个列. 主键将被序列化为二进制的`rocksdb key`.
+- 主键以外的列将以相应的顺序在二进制中序列化为`rocksdb`值.
+- 带有键 `equals` 或 `in` 过滤的查询将被优化为从 `rocksdb` 进行多键查询 (见下面的示意).
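+
+例如, 下面这样带有 `IN` 键过滤的查询只需要从 `rocksdb` 读取对应的键 (一个简单的示意, 假设使用下文示例中创建的 `test` 表):
+
+``` sql
+SELECT * FROM test WHERE key IN ('some-key', 'another-key');
+```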
+
+示例:
+
+``` sql
+CREATE TABLE test
+(
+    `key` String,
+    `v1` UInt32,
+    `v2` String,
+    `v3` Float32,
+)
+ENGINE = EmbeddedRocksDB
+PRIMARY KEY key
+```
+
+[原始文章](https://clickhouse.tech/docs/en/engines/table-engines/integrations/embedded-rocksdb/)

From 1747c254dc461a504f8bb3b578fa35d798592896 Mon Sep 17 00:00:00 2001
From: alesapin
Date: Thu, 17 Jun 2021 21:36:50 +0300
Subject: [PATCH 193/931] Remove unused flag

---
 src/Coordination/ZooKeeperDataReader.cpp | 3 ---
 1 file changed, 3 deletions(-)

diff --git a/src/Coordination/ZooKeeperDataReader.cpp b/src/Coordination/ZooKeeperDataReader.cpp
index a2ab85e0625..5ce1f418c27 100644
--- a/src/Coordination/ZooKeeperDataReader.cpp
+++ b/src/Coordination/ZooKeeperDataReader.cpp
@@ -402,14 +402,11 @@ Coordination::ZooKeeperRequestPtr deserializeMultiTxn(ReadBuffer & in)
     Coordination::read(length, in);
 
     std::shared_ptr<Coordination::ZooKeeperMultiRequest> result = std::make_shared<Coordination::ZooKeeperMultiRequest>();
-    bool error_found = false;
     while (length > 0)
     {
         auto subrequest = deserializeTxnImpl(in, true);
         if (subrequest)
             result->requests.push_back(subrequest);
-        else
-            error_found = true;
         length--;
     }
     return result;

From d69790945dfa591c668eb53d92b5910452a7e815 Mon Sep 17 00:00:00 2001
From: Dmitriy
Date: Thu, 17 Jun 2021 23:29:01 +0300
Subject: [PATCH 194/931] Add the fail_on_connection_loss configuration
 parameter
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Added a description of the fail_on_connection_loss parameter.

---
 .../external-dictionaries/external-dicts-dict-sources.md | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/docs/en/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md b/docs/en/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md
index 619e4b8701b..dfea660bd0f 100644
--- a/docs/en/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md
+++ b/docs/en/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md
@@ -482,6 +482,7 @@ Example of settings:
         <table>table_name</table>
         <where>id=10</where>
         <invalidate_query>SQL_QUERY</invalidate_query>
+        <fail_on_connection_loss>true</fail_on_connection_loss>
     </mysql>
 </source>
 ```
@@ -499,6 +500,7 @@ SOURCE(MYSQL(
     table 'table_name'
     where 'id=10'
     invalidate_query 'SQL_QUERY'
+    fail_on_connection_loss 'true'
 ))
 ```
@@ -523,6 +525,8 @@ Setting fields:
 
 - `invalidate_query` – Query for checking the dictionary status. Optional parameter. Read more in the section [Updating dictionaries](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-lifetime.md).
 
+- `fail_on_connection_loss` – The configuration parameter that controls unexpected connection loss during query execution. By default, false. ClickHouse connects to MySQL servers using secure protocols ([SSL](../../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-openssl)/[TLS](https://en.wikipedia.org/wiki/Transport_Layer_Security)). Occasionally errors might occur in the transport communication between client and server, in which case the server would abort the client connection. Note that retrying leads to increased response times. Retries can be avoided by setting the parameter to true.
+
 MySQL can be connected on a local host via sockets. To do this, set `host` and `socket`.
 
 Example of settings:
@@ -538,6 +542,7 @@ Example of settings:
         <table>table_name</table>
         <where>id=10</where>
         <invalidate_query>SQL_QUERY</invalidate_query>
+        <fail_on_connection_loss>true</fail_on_connection_loss>
     </mysql>
 </source>
 ```

From 8c97247fe1e7e719f02eff9cd98be8adb869d651 Mon Sep 17 00:00:00 2001
From: George
Date: Fri, 18 Jun 2021 00:11:52 +0300
Subject: [PATCH 195/931] unrelated fix

---
 .../aggregate-functions/reference/quantileexact.md | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/docs/en/sql-reference/aggregate-functions/reference/quantileexact.md b/docs/en/sql-reference/aggregate-functions/reference/quantileexact.md
index 005d039e7c5..3953bd81232 100644
--- a/docs/en/sql-reference/aggregate-functions/reference/quantileexact.md
+++ b/docs/en/sql-reference/aggregate-functions/reference/quantileexact.md
@@ -111,8 +111,7 @@ Result:
 
 Similar to `quantileExact`, this computes the exact [quantile](https://en.wikipedia.org/wiki/Quantile) of a numeric data sequence.
 
-All the passed values are combined into an array, which is then fully sorted,
-to get the exact value. The sorting [algorithm's](https://en.cppreference.com/w/cpp/algorithm/sort) complexity is `O(N·log(N))`, where `N = std::distance(first, last)` comparisons.
+All the passed values are combined into an array, which is then fully sorted, to get the exact value. The sorting [algorithm's](https://en.cppreference.com/w/cpp/algorithm/sort) complexity is `O(N·log(N))`, where `N = std::distance(first, last)` comparisons.
 
 The return value depends on the quantile level and the number of elements in the selection, i.e. if the level is 0.5, then the function returns the higher median value for an even number of elements and the middle median value for an odd number of elements. Median is calculated similarly to the [median_high](https://docs.python.org/3/library/statistics.html#statistics.median_high) implementation which is used in python. For all other levels, the element at the index corresponding to the value of `level * size_of_array` is returned.
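To make the lower/higher median distinction described in the patch above concrete, here is a minimal sketch on a hypothetical even-sized input (the values 1, 2, 3, 4):

``` sql
SELECT
    quantileExactLow(0.5)(number + 1) AS low_median,   -- 2, like Python's median_low
    quantileExactHigh(0.5)(number + 1) AS high_median  -- 3, like Python's median_high
FROM numbers(4);
```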
From e2765991b04d3ef312c9e0e3370d1b23ad49cf52 Mon Sep 17 00:00:00 2001 From: l1tsolaiki Date: Fri, 18 Jun 2021 01:37:59 +0300 Subject: [PATCH 196/931] Separate star from ranges for better code --- src/Functions/JSONPath/ASTs/ASTJSONPathStar.h | 15 +++++ .../JSONPath/Generators/GeneratorJSONPath.h | 8 ++- .../Generators/VisitorJSONPathMemberAccess.h | 11 ++-- .../Generators/VisitorJSONPathRange.h | 57 +++------------- .../JSONPath/Generators/VisitorJSONPathStar.h | 66 +++++++++++++++++++ .../JSONPath/Parsers/ParserJSONPathQuery.cpp | 8 ++- .../JSONPath/Parsers/ParserJSONPathRange.cpp | 28 ++------ .../JSONPath/Parsers/ParserJSONPathStar.cpp | 31 +++++++++ .../JSONPath/Parsers/ParserJSONPathStar.h | 18 +++++ 9 files changed, 163 insertions(+), 79 deletions(-) create mode 100644 src/Functions/JSONPath/ASTs/ASTJSONPathStar.h create mode 100644 src/Functions/JSONPath/Generators/VisitorJSONPathStar.h create mode 100644 src/Functions/JSONPath/Parsers/ParserJSONPathStar.cpp create mode 100644 src/Functions/JSONPath/Parsers/ParserJSONPathStar.h diff --git a/src/Functions/JSONPath/ASTs/ASTJSONPathStar.h b/src/Functions/JSONPath/ASTs/ASTJSONPathStar.h new file mode 100644 index 00000000000..2aada47c459 --- /dev/null +++ b/src/Functions/JSONPath/ASTs/ASTJSONPathStar.h @@ -0,0 +1,15 @@ +#pragma once + +#include + +namespace DB +{ +class ASTJSONPathStar : public IAST +{ +public: + String getID(char) const override { return "ASTJSONPathStar"; } + + ASTPtr clone() const override { return std::make_shared(*this); } +}; + +} diff --git a/src/Functions/JSONPath/Generators/GeneratorJSONPath.h b/src/Functions/JSONPath/Generators/GeneratorJSONPath.h index 2583ef8c921..071a7ac3089 100644 --- a/src/Functions/JSONPath/Generators/GeneratorJSONPath.h +++ b/src/Functions/JSONPath/Generators/GeneratorJSONPath.h @@ -4,6 +4,7 @@ #include #include #include +#include #include #include @@ -36,7 +37,8 @@ public: for (auto child_ast : query->children) { - if (typeid_cast(child_ast.get())) { + if (typeid_cast(child_ast.get())) + { visitors.push_back(std::make_shared>(child_ast)); } else if (typeid_cast(child_ast.get())) @@ -47,6 +49,10 @@ public: { visitors.push_back(std::make_shared>(child_ast)); } + else if (typeid_cast(child_ast.get())) + { + visitors.push_back(std::make_shared>(child_ast)); + } } } diff --git a/src/Functions/JSONPath/Generators/VisitorJSONPathMemberAccess.h b/src/Functions/JSONPath/Generators/VisitorJSONPathMemberAccess.h index fd83c478227..b0c601458b6 100644 --- a/src/Functions/JSONPath/Generators/VisitorJSONPathMemberAccess.h +++ b/src/Functions/JSONPath/Generators/VisitorJSONPathMemberAccess.h @@ -10,15 +10,15 @@ template class VisitorJSONPathMemberAccess : public IVisitor { public: - VisitorJSONPathMemberAccess(ASTPtr member_access_ptr_) : member_access_ptr(member_access_ptr_) { } + VisitorJSONPathMemberAccess(ASTPtr member_access_ptr_) + : member_access_ptr(member_access_ptr_->as()) { } const char * getName() const override { return "VisitorJSONPathMemberAccess"; } VisitorStatus apply(typename JSONParser::Element & element) const override { - const auto * member_access = member_access_ptr->as(); typename JSONParser::Element result; - element.getObject().find(std::string_view(member_access->member_name), result); + element.getObject().find(std::string_view(member_access_ptr->member_name), result); element = result; return VisitorStatus::Ok; } @@ -30,9 +30,8 @@ public: this->setExhausted(true); return VisitorStatus::Error; } - const auto * member_access = member_access_ptr->as(); typename 
JSONParser::Element result; - if (!element.getObject().find(std::string_view(member_access->member_name), result)) + if (!element.getObject().find(std::string_view(member_access_ptr->member_name), result)) { this->setExhausted(true); return VisitorStatus::Error; @@ -47,7 +46,7 @@ public: void updateState() override { } private: - ASTPtr member_access_ptr; + ASTJSONPathMemberAccess * member_access_ptr; }; } diff --git a/src/Functions/JSONPath/Generators/VisitorJSONPathRange.h b/src/Functions/JSONPath/Generators/VisitorJSONPathRange.h index 601c5ea80b9..57e208271d0 100644 --- a/src/Functions/JSONPath/Generators/VisitorJSONPathRange.h +++ b/src/Functions/JSONPath/Generators/VisitorJSONPathRange.h @@ -10,18 +10,10 @@ template class VisitorJSONPathRange : public IVisitor { public: - VisitorJSONPathRange(ASTPtr range_ptr_) : range_ptr(range_ptr_) + VisitorJSONPathRange(ASTPtr range_ptr_) : range_ptr(range_ptr_->as()) { - const auto * range = range_ptr->as(); current_range = 0; - if (range->is_star) - { - current_index = 0; - } - else - { - current_index = range->ranges[current_range].first; - } + current_index = range_ptr->ranges[current_range].first; } const char * getName() const override { return "VisitorJSONPathRange"; } @@ -30,12 +22,7 @@ public: { typename JSONParser::Element result; typename JSONParser::Array array = element.getArray(); - if (current_index >= array.size()) - { - return VisitorStatus::Error; - } - result = array[current_index]; - element = result; + element = array[current_index]; return VisitorStatus::Ok; } @@ -47,32 +34,21 @@ public: return VisitorStatus::Error; } - const auto * range = range_ptr->as(); VisitorStatus status; if (current_index < element.getArray().size()) { apply(element); status = VisitorStatus::Ok; } - else if (!range->is_star) - { - status = VisitorStatus::Ignore; - } else { status = VisitorStatus::Ignore; - this->setExhausted(true); } - if (!range->is_star) + if (current_index + 1 == range_ptr->ranges[current_range].second + && current_range + 1 == range_ptr->ranges.size()) { - if (current_index + 1 == range->ranges[current_range].second) - { - if (current_range + 1 == range->ranges.size()) - { - this->setExhausted(true); - } - } + this->setExhausted(true); } return status; @@ -80,36 +56,23 @@ public: void reinitialize() override { - const auto * range = range_ptr->as(); current_range = 0; - if (range->is_star) - { - current_index = 0; - } - else - { - current_index = range->ranges[current_range].first; - } + current_index = range_ptr->ranges[current_range].first; this->setExhausted(false); } void updateState() override { - const auto * range = range_ptr->as(); current_index++; - if (range->is_star) - { - return; - } - if (current_index == range->ranges[current_range].second) + if (current_index == range_ptr->ranges[current_range].second) { current_range++; - current_index = range->ranges[current_range].first; + current_index = range_ptr->ranges[current_range].first; } } private: - ASTPtr range_ptr; + ASTJSONPathRange * range_ptr; size_t current_range; UInt32 current_index; }; diff --git a/src/Functions/JSONPath/Generators/VisitorJSONPathStar.h b/src/Functions/JSONPath/Generators/VisitorJSONPathStar.h new file mode 100644 index 00000000000..bc840597f2a --- /dev/null +++ b/src/Functions/JSONPath/Generators/VisitorJSONPathStar.h @@ -0,0 +1,66 @@ +#pragma once + +#include +#include +#include + +namespace DB +{ +template +class VisitorJSONPathStar : public IVisitor +{ +public: + VisitorJSONPathStar(ASTPtr) + { + current_index = 0; + } + + const char * 
getName() const override { return "VisitorJSONPathStar"; } + + VisitorStatus apply(typename JSONParser::Element & element) const override + { + typename JSONParser::Element result; + typename JSONParser::Array array = element.getArray(); + element = array[current_index]; + return VisitorStatus::Ok; + } + + VisitorStatus visit(typename JSONParser::Element & element) override + { + if (!element.isArray()) + { + this->setExhausted(true); + return VisitorStatus::Error; + } + + VisitorStatus status; + if (current_index < element.getArray().size()) + { + apply(element); + status = VisitorStatus::Ok; + } + else + { + status = VisitorStatus::Ignore; + this->setExhausted(true); + } + + return status; + } + + void reinitialize() override + { + current_index = 0; + this->setExhausted(false); + } + + void updateState() override + { + current_index++; + } + +private: + UInt32 current_index; +}; + +} diff --git a/src/Functions/JSONPath/Parsers/ParserJSONPathQuery.cpp b/src/Functions/JSONPath/Parsers/ParserJSONPathQuery.cpp index 0ab09733890..c18b2ad9b31 100644 --- a/src/Functions/JSONPath/Parsers/ParserJSONPathQuery.cpp +++ b/src/Functions/JSONPath/Parsers/ParserJSONPathQuery.cpp @@ -3,6 +3,7 @@ #include #include #include +#include namespace DB @@ -19,17 +20,20 @@ bool ParserJSONPathQuery::parseImpl(Pos & pos, ASTPtr & query, Expected & expect query = std::make_shared(); ParserJSONPathMemberAccess parser_jsonpath_member_access; ParserJSONPathRange parser_jsonpath_range; + ParserJSONPathStar parser_jsonpath_star; ParserJSONPathRoot parser_jsonpath_root; ASTPtr path_root; - if (!parser_jsonpath_root.parse(pos, path_root, expected)) { + if (!parser_jsonpath_root.parse(pos, path_root, expected)) + { return false; } query->children.push_back(path_root); ASTPtr accessor; while (parser_jsonpath_member_access.parse(pos, accessor, expected) - || parser_jsonpath_range.parse(pos, accessor, expected)) + || parser_jsonpath_range.parse(pos, accessor, expected) + || parser_jsonpath_star.parse(pos, accessor, expected)) { if (accessor) { diff --git a/src/Functions/JSONPath/Parsers/ParserJSONPathRange.cpp b/src/Functions/JSONPath/Parsers/ParserJSONPathRange.cpp index 3da0d508b27..f8496cd67d0 100644 --- a/src/Functions/JSONPath/Parsers/ParserJSONPathRange.cpp +++ b/src/Functions/JSONPath/Parsers/ParserJSONPathRange.cpp @@ -5,6 +5,7 @@ #include #include #include +#include namespace DB { @@ -31,26 +32,16 @@ bool ParserJSONPathRange::parseImpl(Pos & pos, ASTPtr & node, Expected & expecte auto range = std::make_shared(); node = range; + ParserNumber number_p; + ASTPtr number_ptr; while (pos->type != TokenType::ClosingSquareBracket) { - if (pos->type != TokenType::Number && pos->type != TokenType::Asterisk) + if (pos->type != TokenType::Number) { return false; } - if (pos->type == TokenType::Asterisk) - { - if (range->is_star) - { - throw Exception("Multiple asterisks in square array range are not allowed", ErrorCodes::BAD_ARGUMENTS); - } - range->is_star = true; - ++pos; - continue; - } std::pair range_indices; - ParserNumber number_p; - ASTPtr number_ptr; if (!number_p.parse(pos, number_ptr, expected)) { return false; @@ -64,16 +55,7 @@ bool ParserJSONPathRange::parseImpl(Pos & pos, ASTPtr & node, Expected & expecte } else if (pos->type == TokenType::BareWord) { - /// Range case - ParserIdentifier name_p; - ASTPtr word; - if (!name_p.parse(pos, word, expected)) - { - return false; - } - String to_identifier; - if (!tryGetIdentifierNameInto(word, to_identifier) || to_identifier != "to") - { + if 
(!ParserKeyword("TO").ignore(pos, expected)) { return false; } if (!number_p.parse(pos, number_ptr, expected)) diff --git a/src/Functions/JSONPath/Parsers/ParserJSONPathStar.cpp b/src/Functions/JSONPath/Parsers/ParserJSONPathStar.cpp new file mode 100644 index 00000000000..c0d2b376794 --- /dev/null +++ b/src/Functions/JSONPath/Parsers/ParserJSONPathStar.cpp @@ -0,0 +1,31 @@ +#include + +#include + +namespace DB +{ +bool ParserJSONPathStar::parseImpl(Pos & pos, ASTPtr & node, Expected & expected) +{ + + if (pos->type != TokenType::OpeningSquareBracket) + { + return false; + } + ++pos; + if (pos->type != TokenType::Asterisk) { + return false; + } + ++pos; + if (pos->type != TokenType::ClosingSquareBracket) { + expected.add(pos, "Closing square bracket"); + return false; + } + ++pos; + + auto star = std::make_shared(); + node = star; + + return true; +} + +} diff --git a/src/Functions/JSONPath/Parsers/ParserJSONPathStar.h b/src/Functions/JSONPath/Parsers/ParserJSONPathStar.h new file mode 100644 index 00000000000..543823357de --- /dev/null +++ b/src/Functions/JSONPath/Parsers/ParserJSONPathStar.h @@ -0,0 +1,18 @@ +#pragma once + +#include + + +namespace DB +{ +class ParserJSONPathStar : public IParserBase +{ +private: + const char * getName() const override { return "ParserJSONPathStar"; } + bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected) override; + +public: + explicit ParserJSONPathStar() = default; +}; + +} From 1863a9beb044104e7dd7bab0f97508f46a41a3bb Mon Sep 17 00:00:00 2001 From: l1tsolaiki Date: Fri, 18 Jun 2021 02:15:15 +0300 Subject: [PATCH 197/931] Change stringstream to WriteBuffer --- src/Functions/FunctionSQLJSON.h | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/src/Functions/FunctionSQLJSON.h b/src/Functions/FunctionSQLJSON.h index b605645499e..9bfb4291ba8 100644 --- a/src/Functions/FunctionSQLJSON.h +++ b/src/Functions/FunctionSQLJSON.h @@ -8,6 +8,8 @@ #include #include #include +#include +#include #include #include #include @@ -279,11 +281,11 @@ public: return false; } - std::stringstream out; // STYLE_CHECK_ALLOW_STD_STRING_STREAM + String result; + WriteBufferFromString out(result); out << current_element.getElement(); - auto output_str = out.str(); ColumnString & col_str = assert_cast(dest); - col_str.insertData(output_str.data(), output_str.size()); + col_str.insertData(result.data(), result.size()); return true; } }; @@ -307,7 +309,9 @@ public: GeneratorJSONPath generator_json_path(query_ptr); Element current_element = root; VisitorStatus status; - std::stringstream out; // STYLE_CHECK_ALLOW_STD_STRING_STREAM + String result; + WriteBufferFromString out(result); + /// Create json array of results: [res1, res2, ...] 
out << "[";
     bool success = false;
@@ -334,8 +338,7 @@ public:
             return false;
         }
         ColumnString & col_str = assert_cast<ColumnString &>(dest);
-        auto output_str = out.str();
-        col_str.insertData(output_str.data(), output_str.size());
+        col_str.insertData(reinterpret_cast<const char *>(result.data()), result.size());
         return true;
     }
 };

From 9f36eb6210f0f7cac9995c6a537f4a30b2c14243 Mon Sep 17 00:00:00 2001
From: George
Date: Fri, 18 Jun 2021 05:13:38 +0300
Subject: [PATCH 198/931] Fixes

---
 .../aggregate-functions/reference/quantileexact.md | 12 +++++++-----
 .../aggregate-functions/reference/quantiles.md     |  7 ++++---
 2 files changed, 11 insertions(+), 8 deletions(-)

diff --git a/docs/en/sql-reference/aggregate-functions/reference/quantileexact.md b/docs/en/sql-reference/aggregate-functions/reference/quantileexact.md
index 3953bd81232..e7890f231bb 100644
--- a/docs/en/sql-reference/aggregate-functions/reference/quantileexact.md
+++ b/docs/en/sql-reference/aggregate-functions/reference/quantileexact.md
@@ -2,7 +2,9 @@ toc_priority: 202
 ---
 
-# quantileExact {#quantileexact}
+# quantileExact Functions {#quantileexact-functions}
+
+## quantileExact {#quantileexact}
 
 Exactly computes the [quantile](https://en.wikipedia.org/wiki/Quantile) of a numeric data sequence.
 
@@ -49,7 +51,7 @@ Result:
 └───────────────────────┘
 ```
 
-# quantileExactLow {#quantileexactlow}
+## quantileExactLow {#quantileexactlow}
 
 Similar to `quantileExact`, this computes the exact [quantile](https://en.wikipedia.org/wiki/Quantile) of a numeric data sequence.
 
@@ -107,7 +109,7 @@ Result:
 │                        4 │
 └──────────────────────────┘
 ```
-# quantileExactHigh {#quantileexacthigh}
+## quantileExactHigh {#quantileexacthigh}
 
 Similar to `quantileExact`, this computes the exact [quantile](https://en.wikipedia.org/wiki/Quantile) of a numeric data sequence.
 
@@ -176,7 +178,7 @@ quantileExactExclusive(level)(expr)
 
 **Arguments**
 
-- `level` — Level of quantile. Optional parameter. Constant floating-point number in the range `(0, 1)`. Default value: 0.5. At `level=0.5` the function calculates [median](https://en.wikipedia.org/wiki/Median).
+- `level` — Level of quantile. Optional. Possible values: (0, 1). Default value: 0.5. At `level=0.5` the function calculates [median](https://en.wikipedia.org/wiki/Median). [Float](../../../sql-reference/data-types/float.md).
 - `expr` — Expression over the column values resulting in numeric [data types](../../../sql-reference/data-types/index.md#data_types), [Date](../../../sql-reference/data-types/date.md) or [DateTime](../../../sql-reference/data-types/datetime.md).
 
 **Returned value**
@@ -225,7 +227,7 @@ quantileExactInclusive(level)(expr)
 
 **Arguments**
 
-- `level` — Level of quantile. Optional parameter. Constant floating-point number in the range `[0, 1]`. Default value: 0.5. At `level=0.5` the function calculates [median](https://en.wikipedia.org/wiki/Median).
+- `level` — Level of quantile. Optional. Possible values: [0, 1]. Default value: 0.5. At `level=0.5` the function calculates [median](https://en.wikipedia.org/wiki/Median). [Float](../../../sql-reference/data-types/float.md).
 - `expr` — Expression over the column values resulting in numeric [data types](../../../sql-reference/data-types/index.md#data_types), [Date](../../../sql-reference/data-types/date.md) or [DateTime](../../../sql-reference/data-types/datetime.md).
**Returned value**

diff --git a/docs/en/sql-reference/aggregate-functions/reference/quantiles.md b/docs/en/sql-reference/aggregate-functions/reference/quantiles.md
index 9723e0ee29c..6fcc7f2d0fe 100644
--- a/docs/en/sql-reference/aggregate-functions/reference/quantiles.md
+++ b/docs/en/sql-reference/aggregate-functions/reference/quantiles.md
@@ -2,7 +2,8 @@ toc_priority: 201
 ---
 
-# quantiles {#quantiles}
+# quantiles Functions {#quantiles-functions}
+## quantiles {#quantiles}
 
 Syntax: `quantiles(level1, level2, …)(x)`
 
@@ -26,7 +27,7 @@ quantilesExactExclusive(level1, level2, ...)(expr)
 
 **Arguments**
 
-- `level` — Leveles of quantiles. Constant floating-point numbers in the range `(0, 1)`.
+- `level` — Levels of quantiles. Possible values: (0, 1).
 - `expr` — Expression over the column values resulting in numeric [data types](../../../sql-reference/data-types/index.md#data_types), [Date](../../../sql-reference/data-types/date.md) or [DateTime](../../../sql-reference/data-types/datetime.md).
 
 **Returned value**
@@ -75,7 +76,7 @@ quantilesExactInclusive(level1, level2, ...)(expr)
 
 **Arguments**
 
-- `level` — Leveles of quantiles. Constant floating-point numbers in the range `[0, 1]`.
+- `level` — Levels of quantiles. Possible values: [0, 1].
 - `expr` — Expression over the column values resulting in numeric [data types](../../../sql-reference/data-types/index.md#data_types), [Date](../../../sql-reference/data-types/date.md) or [DateTime](../../../sql-reference/data-types/datetime.md).
 
 **Returned value**

From 2d8f45a0981ab15a6bf202d325b9a7783451fc7a Mon Sep 17 00:00:00 2001
From: alesapin
Date: Fri, 18 Jun 2021 11:55:59 +0300
Subject: [PATCH 199/931] Add some initialization

---
 src/Coordination/ZooKeeperDataReader.cpp | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/Coordination/ZooKeeperDataReader.cpp b/src/Coordination/ZooKeeperDataReader.cpp
index 5ce1f418c27..e0d0fbc85b6 100644
--- a/src/Coordination/ZooKeeperDataReader.cpp
+++ b/src/Coordination/ZooKeeperDataReader.cpp
@@ -349,7 +349,7 @@ Coordination::ZooKeeperRequestPtr deserializeTxnImpl(ReadBuffer & in, bool subtx
 {
     int32_t type;
     Coordination::read(type, in);
-    Coordination::ZooKeeperRequestPtr result;
+    Coordination::ZooKeeperRequestPtr result = nullptr;
     int32_t sub_txn_length = 0;
     if (subtxn)
         Coordination::read(sub_txn_length, in);

From 5a2b01328fe0e7d69b9df4991fe098aa1b72aabe Mon Sep 17 00:00:00 2001
From: Mikhail
Date: Fri, 18 Jun 2021 13:41:28 +0300
Subject: [PATCH 200/931] =?UTF-8?q?=D0=9F=D0=B5=D1=80=D0=B5=D0=B2=D0=BE?=
 =?UTF-8?q?=D0=B4?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 docs/ru/operations/settings/settings.md | 10 ++++++++++
 1 file changed, 10 insertions(+)

diff --git a/docs/ru/operations/settings/settings.md b/docs/ru/operations/settings/settings.md
index bbafe428838..60d26ac5825 100644
--- a/docs/ru/operations/settings/settings.md
+++ b/docs/ru/operations/settings/settings.md
@@ -1189,6 +1189,16 @@ load_balancing = round_robin
 
 Компиляция предусмотрена только для части конвейера обработки запроса - для первой стадии агрегации (GROUP BY).
 В случае, если эта часть конвейера была скомпилирована, запрос может работать быстрее, за счёт разворачивания коротких циклов и инлайнинга вызовов агрегатных функций.
 Максимальный прирост производительности (до четырёх раз в редких случаях) достигается на запросах с несколькими простыми агрегатными функциями.
 Как правило, прирост производительности незначителен.
В очень редких случаях возможно замедление выполнения запроса. +## compile_expressions {#compile_expressions} + +Включает или выключает компиляцию часто используемых функций и операторов. Компиляция производится в нативный код платформы с помощью LLVM во время выполнения. + +Возможные значения: + +- 0 — выключена. +- 1 — включена. + +Значение по умолчанию: `1`. ## min_count_to_compile {#min-count-to-compile} После скольких раз, когда скомпилированный кусок кода мог пригодиться, выполнить его компиляцию. По умолчанию - 3. From 5a014cb1e1eff5e0255044bc766102f7e0c448fa Mon Sep 17 00:00:00 2001 From: alesapin Date: Fri, 18 Jun 2021 14:02:15 +0300 Subject: [PATCH 201/931] Add sometest --- docker/test/integration/base/Dockerfile | 19 ++++- .../__init__.py | 1 + .../configs/keeper_config.xml | 23 ++++++ .../configs/logs_conf.xml | 12 +++ .../test_keeper_zookeeper_converter/test.py | 76 +++++++++++++++++++ 5 files changed, 130 insertions(+), 1 deletion(-) create mode 100644 tests/integration/test_keeper_zookeeper_converter/__init__.py create mode 100644 tests/integration/test_keeper_zookeeper_converter/configs/keeper_config.xml create mode 100644 tests/integration/test_keeper_zookeeper_converter/configs/logs_conf.xml create mode 100644 tests/integration/test_keeper_zookeeper_converter/test.py diff --git a/docker/test/integration/base/Dockerfile b/docker/test/integration/base/Dockerfile index 1c962f1bf8f..e15697da029 100644 --- a/docker/test/integration/base/Dockerfile +++ b/docker/test/integration/base/Dockerfile @@ -1,6 +1,8 @@ # docker build -t yandex/clickhouse-integration-test . FROM yandex/clickhouse-test-base +SHELL ["/bin/bash", "-c"] + RUN apt-get update \ && env DEBIAN_FRONTEND=noninteractive apt-get -y install \ tzdata \ @@ -20,7 +22,9 @@ RUN apt-get update \ krb5-user \ iproute2 \ lsof \ - g++ + g++ \ + default-jre + RUN rm -rf \ /var/lib/apt/lists/* \ /var/cache/debconf \ @@ -30,6 +34,19 @@ RUN apt-get clean # Install MySQL ODBC driver RUN curl 'https://cdn.mysql.com//Downloads/Connector-ODBC/8.0/mysql-connector-odbc-8.0.21-linux-glibc2.12-x86-64bit.tar.gz' --output 'mysql-connector.tar.gz' && tar -xzf mysql-connector.tar.gz && cd mysql-connector-odbc-8.0.21-linux-glibc2.12-x86-64bit/lib && mv * /usr/local/lib && ln -s /usr/local/lib/libmyodbc8a.so /usr/lib/x86_64-linux-gnu/odbc/libmyodbc.so +# Unfortunately this is required for a single test for conversion data from zookeeper to clickhouse-keeper. +# ZooKeeper is not started by default, but consumes some space in containers. 
+# 777 perms used to allow anybody to start/stop ZooKeeper +ENV ZOOKEEPER_VERSION='3.6.3' +RUN curl -O "https://mirrors.estointernet.in/apache/zookeeper/zookeeper-${ZOOKEEPER_VERSION}/apache-zookeeper-${ZOOKEEPER_VERSION}-bin.tar.gz" +RUN tar -zxvf apache-zookeeper-${ZOOKEEPER_VERSION}-bin.tar.gz && mv apache-zookeeper-${ZOOKEEPER_VERSION}-bin /opt/zookeeper && chmod -R 777 /opt/zookeeper && rm apache-zookeeper-${ZOOKEEPER_VERSION}-bin.tar.gz +RUN echo $'tickTime=2500 \n\ +tickTime=2500 \n\ +dataDir=/zookeeper \n\ +clientPort=2181 \n\ +maxClientCnxns=80' > /opt/zookeeper/conf/zoo.cfg +RUN mkdir /zookeeper && chmod -R 777 /zookeeper + ENV TZ=Europe/Moscow RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone diff --git a/tests/integration/test_keeper_zookeeper_converter/__init__.py b/tests/integration/test_keeper_zookeeper_converter/__init__.py new file mode 100644 index 00000000000..e5a0d9b4834 --- /dev/null +++ b/tests/integration/test_keeper_zookeeper_converter/__init__.py @@ -0,0 +1 @@ +#!/usr/bin/env python3 diff --git a/tests/integration/test_keeper_zookeeper_converter/configs/keeper_config.xml b/tests/integration/test_keeper_zookeeper_converter/configs/keeper_config.xml new file mode 100644 index 00000000000..ceaca04762e --- /dev/null +++ b/tests/integration/test_keeper_zookeeper_converter/configs/keeper_config.xml @@ -0,0 +1,23 @@ + + + 9181 + 1 + /var/lib/clickhouse/coordination/logs + /var/lib/clickhouse/coordination/snapshots + + + 5000 + 10000 + trace + 75 + + + + + 1 + localhost + 44444 + + + + diff --git a/tests/integration/test_keeper_zookeeper_converter/configs/logs_conf.xml b/tests/integration/test_keeper_zookeeper_converter/configs/logs_conf.xml new file mode 100644 index 00000000000..318a6bca95d --- /dev/null +++ b/tests/integration/test_keeper_zookeeper_converter/configs/logs_conf.xml @@ -0,0 +1,12 @@ + + 3 + + trace + /var/log/clickhouse-server/log.log + /var/log/clickhouse-server/log.err.log + 1000M + 10 + /var/log/clickhouse-server/stderr.log + /var/log/clickhouse-server/stdout.log + + diff --git a/tests/integration/test_keeper_zookeeper_converter/test.py b/tests/integration/test_keeper_zookeeper_converter/test.py new file mode 100644 index 00000000000..97c16b09cd9 --- /dev/null +++ b/tests/integration/test_keeper_zookeeper_converter/test.py @@ -0,0 +1,76 @@ +#!/usr/bin/env python3 + +#!/usr/bin/env python3 +import pytest +from helpers.cluster import ClickHouseCluster +from kazoo.client import KazooClient, KazooState +from kazoo.security import ACL, make_digest_acl, make_acl +from kazoo.exceptions import AuthFailedError, InvalidACLError, NoAuthError, KazooException + +cluster = ClickHouseCluster(__file__) + +node = cluster.add_instance('node', main_configs=['configs/keeper_config.xml', 'configs/logs_conf.xml'], stay_alive=True) + +def start_zookeeper(): + node.exec_in_container(['bash', '-c', '/opt/zookeeper/bin/zkServer.sh start']) + +def stop_zookeeper(): + node.exec_in_container(['bash', '-c', '/opt/zookeeper/bin/zkServer.sh stop']) + +def clear_clickhouse_data(): + node.exec_in_container(['bash', '-c', 'rm -fr /var/lib/clickhouse/coordination/logs/* /var/lib/clickhouse/coordination/snapshots/*']) + +def convert_zookeeper_data(): + cmd = '/usr/bin/clickhouse keeper-converter --zookeeper-logs-dir /zookeeper/version-2/ --zookeeper-snapshots-dir /zookeeper/version-2/ --output-dir /var/lib/clickhouse/coordination/snapshots' + node.exec_in_container(['bash', '-c', cmd]) + +def stop_clickhouse(): + node.stop_clickhouse() + +def 
start_clickhouse():
+    node.start_clickhouse()
+
+def copy_zookeeper_data():
+    stop_zookeeper()
+    stop_clickhouse()
+    clear_clickhouse_data()
+    convert_zookeeper_data()
+    start_zookeeper()
+    start_clickhouse()
+
+@pytest.fixture(scope="module")
+def started_cluster():
+    try:
+        cluster.start()
+
+        yield cluster
+
+    finally:
+        cluster.shutdown()
+
+def get_fake_zk(timeout=30.0):
+    _fake_zk_instance = KazooClient(hosts=cluster.get_instance_ip('node') + ":9181", timeout=timeout)
+    _fake_zk_instance.start()
+    return _fake_zk_instance
+
+def get_genuine_zk(timeout=30.0):
+    _genuine_zk_instance = KazooClient(hosts=cluster.get_instance_ip('node') + ":2181", timeout=timeout)
+    _genuine_zk_instance.start()
+    return _genuine_zk_instance
+
+def compare_states(zk1, zk2):
+    # Stub so the module parses; a real recursive comparison is implemented in a later commit.
+    pass
+
+def test_smoke(started_cluster):
+    start_zookeeper()
+
+    genuine_connection = get_genuine_zk()
+    genuine_connection.create("/test", b"data")
+
+    assert genuine_connection.get("/test")[0] == b"data"
+
+    copy_zookeeper_data()
+
+    fake_connection = get_fake_zk()
+    assert fake_connection.get("/test")[0] == b"data"
+    assert genuine_connection.get("/test")[0] == b"data"

From 3173b285fdf5ba7417fb5474c06f12a136b33f87 Mon Sep 17 00:00:00 2001
From: meoww-bot <14239840+meoww-bot@users.noreply.github.com>
Date: Fri, 18 Jun 2021 21:34:14 +0800
Subject: [PATCH 202/931] Add zh translation for s3.md

---
 .../engines/table-engines/integrations/s3.md  | 213 ++++++++++++++++++
 1 file changed, 213 insertions(+)
 create mode 100644 docs/zh/engines/table-engines/integrations/s3.md

diff --git a/docs/zh/engines/table-engines/integrations/s3.md b/docs/zh/engines/table-engines/integrations/s3.md
new file mode 100644
index 00000000000..5b934dae2c4
--- /dev/null
+++ b/docs/zh/engines/table-engines/integrations/s3.md
@@ -0,0 +1,213 @@
+---
+toc_priority: 7
+toc_title: S3
+---
+
+# S3 表引擎 {#table-engine-s3}
+
+这个引擎提供与[Amazon S3](https://aws.amazon.com/s3/)生态系统的集成。这个引擎类似于[HDFS](../../../engines/table-engines/integrations/hdfs.md)引擎,但提供了 S3 特有的功能。
+
+## 创建表 {#creating-a-table}
+
+``` sql
+CREATE TABLE s3_engine_table (name String, value UInt32)
+ENGINE = S3(path, [aws_access_key_id, aws_secret_access_key,] format, [compression])
+```
+
+**引擎参数**
+
+- `path` — 带有文件路径的 Bucket url。在只读模式下支持以下通配符: `*`, `?`, `{abc,def}` 和 `{N..M}` 其中 `N`, `M` 是数字, `'abc'`, `'def'` 是字符串. 更多信息见[下文](#wildcards-in-path).
+- `format` — 文件的[格式](../../../interfaces/formats.md#formats).
+- `aws_access_key_id`, `aws_secret_access_key` - [AWS](https://aws.amazon.com/) 账号的长期凭证. 你可以使用凭证来对你的请求进行认证. 参数是可选的. 如果没有指定凭据, 将从配置文件中读取凭据. 更多信息参见 [使用 S3 来存储数据](../mergetree-family/mergetree.md#table_engine-mergetree-s3).
+- `compression` — 压缩类型. 支持的值: `none`, `gzip/gz`, `brotli/br`, `xz/LZMA`, `zstd/zst`. 参数是可选的. 默认情况下, 通过文件扩展名自动检测压缩类型.
+
+**示例**
+
+1. 创建 `s3_engine_table` 表:
+
+``` sql
+CREATE TABLE s3_engine_table (name String, value UInt32) ENGINE=S3('https://storage.yandexcloud.net/my-test-bucket-768/test-data.csv.gz', 'CSV', 'gzip');
+```
+
+2. 填充文件:
+
+``` sql
+INSERT INTO s3_engine_table VALUES ('one', 1), ('two', 2), ('three', 3);
+```
+
+3. 查询数据:
+
+``` sql
+SELECT * FROM s3_engine_table LIMIT 2;
+```
+
+```text
+┌─name─┬─value─┐
+│ one  │     1 │
+│ two  │     2 │
+└──────┴───────┘
+```
+
+## 虚拟列 {#virtual-columns}
+
+- `_path` — 文件路径.
+- `_file` — 文件名.
+
+有关虚拟列的更多信息,见 [这里](../../../engines/table-engines/index.md#table_engines-virtual_columns).
+
+## 实施细节 {#implementation-details}
+
+- 读取和写入可以是并行的
+- 以下是不支持的:
+    - `ALTER` 和 `SELECT...SAMPLE` 操作.
+    - 索引.
+    - 复制.
+ +## 路径中的通配符 {#wildcards-in-path} + +`path` 参数可以使用类 bash 的通配符来指定多个文件。对于正在处理的文件应该存在并匹配到整个路径模式。 文件列表的确定是在 `SELECT` 的时候进行(而不是在 `CREATE` 的时候)。 + +- `*` — 替代任何数量的任何字符,除了 `/` 以及空字符串。 +- `?` — 代替任何单个字符. +- `{some_string,another_string,yet_another_one}` — 替代 `'some_string', 'another_string', 'yet_another_one'`字符串. +- `{N..M}` — 替换 N 到 M 范围内的任何数字,包括两个边界的值. N 和 M 可以以 0 开头,比如 `000..078` + +带 `{}` 的结构类似于 [远程](../../../sql-reference/table-functions/remote.md) 表函数。 + +**示例** + +1. 假设我们在 S3 上有几个 CSV 格式的文件,URI如下: + +- ‘https://storage.yandexcloud.net/my-test-bucket-768/some_prefix/some_file_1.csv’ +- ‘https://storage.yandexcloud.net/my-test-bucket-768/some_prefix/some_file_2.csv’ +- ‘https://storage.yandexcloud.net/my-test-bucket-768/some_prefix/some_file_3.csv’ +- ‘https://storage.yandexcloud.net/my-test-bucket-768/another_prefix/some_file_1.csv’ +- ‘https://storage.yandexcloud.net/my-test-bucket-768/another_prefix/some_file_2.csv’ +- ‘https://storage.yandexcloud.net/my-test-bucket-768/another_prefix/some_file_3.csv’ + +有几种方法来创建由所有六个文件组成的数据表: + +第一种方式: + +``` sql +CREATE TABLE table_with_range (name String, value UInt32) ENGINE = S3('https://storage.yandexcloud.net/my-test-bucket-768/{some,another}_prefix/some_file_{1..3}', 'CSV'); +``` + +另一种方式: + +``` sql +CREATE TABLE table_with_question_mark (name String, value UInt32) ENGINE = S3('https://storage.yandexcloud.net/my-test-bucket-768/{some,another}_prefix/some_file_?', 'CSV'); +``` + +表由两个目录中的所有文件组成(所有文件应满足查询中描述的格式和模式)。 + +``` sql +CREATE TABLE table_with_asterisk (name String, value UInt32) ENGINE = S3('https://storage.yandexcloud.net/my-test-bucket-768/{some,another}_prefix/*', 'CSV'); +``` + +如果文件列表中包含有从零开头的数字范围,请对每个数字分别使用带括号的结构,或者使用`?`。 + +**示例** + +使用文件`file-000.csv`, `file-001.csv`, … , `file-999.csv`来创建表: + +``` sql +CREATE TABLE big_table (name String, value UInt32) ENGINE = S3('https://storage.yandexcloud.net/my-test-bucket-768/big_prefix/file-{000..999}.csv', 'CSV'); +``` + +## 虚拟列 {#virtual-columns} + +- `_path` — 文件路径. +- `_file` — 文件名. 
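+
+一个简单的示意 (假设使用上文通配符示例中创建的 `table_with_asterisk` 表), 可以查看每一行数据来自哪个文件:
+
+``` sql
+SELECT _path, _file, name, value FROM table_with_asterisk LIMIT 3;
+```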
+ +**另请参阅** + +- [虚拟列](../../../engines/table-engines/index.md#table_engines-virtual_columns) + +## S3 相关的设置 {#settings} + +以下设置可以在查询执行前设置,也可以放在配置文件中。 + +- `s3_max_single_part_upload_size` - 使用单文件上传至 S3 的对象的最大文件大小。默认值是`64Mb`。 +- `s3_min_upload_part_size` - 使用[S3多文件块上传](https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html)时,文件块的最小文件大小。默认值是`512Mb`。 +- `s3_max_redirects` - 允许的最大S3重定向跳数。默认值是`10`。 +- `s3_single_read_retries` - 单次读取时的最大尝试次数。默认值是`4`。 + +安全考虑:如果恶意用户可以指定任意的 S3 网址,`s3_max_redirects`参数必须设置为零,以避免[SSRF](https://en.wikipedia.org/wiki/Server-side_request_forgery)攻击;或者,必须在服务器配置中指定`remote_host_filter`。 + +## 基于 Endpoint 的设置 {#endpoint-settings} + +在配置文件中可以为给定的端点指定以下设置(将通过URL的准确前缀来匹配)。 + +- `endpoint` - 指定一个端点的前缀。必要参数。 +- `access_key_id`和`secret_access_key` - 用于指定端点的登陆凭据。可选参数。 +- `use_environment_credentials` - 如果设置为`true`,S3客户端将尝试从环境变量和[Amazon EC2](https://en.wikipedia.org/wiki/Amazon_Elastic_Compute_Cloud)元数据中为指定的端点获取证书。可选参数,默认值是`false`。 +- `region` - 指定S3的区域名称。可选参数。 +- `use_insecure_imds_request` - 如果设置为`true`,S3客户端将使用不安全的 IMDS 请求,同时从Amazon EC2 元数据获取证书。可选参数,默认值是`false`。 +- `header` - 添加指定的HTTP头到给定端点的请求中。可选参数,可以使用多次此参数来添加多个值。 +- `server_side_encryption_customer_key_base64` - 如果指定,需要指定访问 SSE-C 加密的 S3 对象所需的头信息。可选参数。 +- `max_single_read_retries` - 单次读取时的最大尝试次数。默认值是`4`。可选参数。 + +**示例:** + +``` xml + + + https://storage.yandexcloud.net/my-test-bucket-768/ + + + + + + + + + + +``` + +## 用法 {#usage-examples} + +假设我们在 S3 上有几个 CSV 格式的文件,URI 如下: + +- 'https://storage.yandexcloud.net/my-test-bucket-768/some_prefix/some_file_1.csv' +- 'https://storage.yandexcloud.net/my-test-bucket-768/some_prefix/some_file_2.csv' +- 'https://storage.yandexcloud.net/my-test-bucket-768/some_prefix/some_file_3.csv' +- 'https://storage.yandexcloud.net/my-test-bucket-768/another_prefix/some_file_1.csv' +- 'https://storage.yandexcloud.net/my-test-bucket-768/another_prefix/some_file_2.csv' +- 'https://storage.yandexcloud.net/my-test-bucket-768/another_prefix/some_file_3.csv' + + +1. 有几种方式来制作由所有六个文件组成的表格,其中一种方式如下: + +``` sql +CREATE TABLE table_with_range (name String, value UInt32) +ENGINE = S3('https://storage.yandexcloud.net/my-test-bucket-768/{some,another}_prefix/some_file_{1..3}', 'CSV'); +``` + +2. 另一种方式: + +``` sql +CREATE TABLE table_with_question_mark (name String, value UInt32) +ENGINE = S3('https://storage.yandexcloud.net/my-test-bucket-768/{some,another}_prefix/some_file_?', 'CSV'); +``` + +3. 表由两个目录中的所有文件组成(所有文件应满足查询中描述的格式和模式): + +``` sql +CREATE TABLE table_with_asterisk (name String, value UInt32) +ENGINE = S3('https://storage.yandexcloud.net/my-test-bucket-768/{some,another}_prefix/*', 'CSV'); +``` + +!!! warning "Warning" + 如果文件列表中包含有从0开头的数字范围,请对每个数字分别使用带括号的结构,或者使用`?`. + +4. 
从文件`file-000.csv`, `file-001.csv`, … , `file-999.csv`创建表: + +``` sql +CREATE TABLE big_table (name String, value UInt32) +ENGINE = S3('https://storage.yandexcloud.net/my-test-bucket-768/big_prefix/file-{000..999}.csv', 'CSV'); +``` + +## 另请参阅 + +- [S3 表函数](../../../sql-reference/table-functions/s3.md) From a1e3a3f2c11c6d389eb28ac3a73ddc33eb640b57 Mon Sep 17 00:00:00 2001 From: Yatsishin Ilya <2159081+qoega@users.noreply.github.com> Date: Fri, 18 Jun 2021 17:21:53 +0300 Subject: [PATCH 203/931] update h3 --- contrib/h3 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/contrib/h3 b/contrib/h3 index e209086ae1b..9cb6ff75836 160000 --- a/contrib/h3 +++ b/contrib/h3 @@ -1 +1 @@ -Subproject commit e209086ae1b5477307f545a0f6111780edc59940 +Subproject commit 9cb6ff758365b9cf4cb5d669b664d2d448a14373 From ccad32fe96150db81c4aa811a23b89668e857540 Mon Sep 17 00:00:00 2001 From: Dmitriy Date: Fri, 18 Jun 2021 21:20:53 +0300 Subject: [PATCH 204/931] Translate to Russian MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Выполнил перевод на русский язык. --- .../external-dictionaries/external-dicts-dict-sources.md | 2 +- .../external-dictionaries/external-dicts-dict-sources.md | 6 ++++++ 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/docs/en/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md b/docs/en/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md index dfea660bd0f..04903cea94c 100644 --- a/docs/en/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md +++ b/docs/en/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md @@ -525,7 +525,7 @@ Setting fields: - `invalidate_query` – Query for checking the dictionary status. Optional parameter. Read more in the section [Updating dictionaries](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-lifetime.md). -- `fail_on_connection_loss` – The configuration parameter that controls unexpected connection loss during query execution. By default, false. ClickHouse connects to MySQL servers using secure protocols ([SSL](../../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-openssl)/[TLS](https://en.wikipedia.org/wiki/Transport_Layer_Security)). Occasionally errors might occur in the transport communication between client and server, in which case the server would abort the client connection. Note that retrying leads to increased response times. Retries can be avoided by setting the parameter to true. +- `fail_on_connection_loss` – The configuration parameter that controls unexpected connection loss during query execution. If `true`, then there will be an exception about connection loss straight away if there was no connection when the query was executed. If `false`, then there will be attempts to retry queries three times. By default, `false`. MySQL can be connected on a local host via sockets. To do this, set `host` and `socket`. diff --git a/docs/ru/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md b/docs/ru/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md index a0378251ece..82455c62561 100644 --- a/docs/ru/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md +++ b/docs/ru/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md @@ -451,6 +451,7 @@ LIFETIME(MIN 300 MAX 360) table_name
id=10 SQL_QUERY + true ``` @@ -468,6 +469,7 @@ SOURCE(MYSQL( table 'table_name' where 'id=10' invalidate_query 'SQL_QUERY' + fail_on_connection_loss 'true' )) ``` @@ -492,6 +494,8 @@ SOURCE(MYSQL( - `invalidate_query` — запрос для проверки статуса словаря. Необязательный параметр. Читайте подробнее в разделе [Обновление словарей](external-dicts-dict-lifetime.md). +- `fail_on_connection_loss` – Параметр конфигурации, контролирующий неожиданную потерю соединения во время выполнения запроса. Если `true`, то сразу же возникнет исключение о том, что при выполнении запроса соединения не было. Если `false`, то будут попытки повторить запросы три раза. По умолчанию `false`. + MySQL можно подключить на локальном хосте через сокеты, для этого необходимо задать `host` и `socket`. Пример настройки: @@ -507,6 +511,7 @@ MySQL можно подключить на локальном хосте чер table_name
id=10 SQL_QUERY + true ``` @@ -523,6 +528,7 @@ SOURCE(MYSQL( table 'table_name' where 'id=10' invalidate_query 'SQL_QUERY' + fail_on_connection_loss 'true' )) ``` From b5dae909dde10b24618176c296428ea6045b98ae Mon Sep 17 00:00:00 2001 From: alesapin Date: Fri, 18 Jun 2021 21:36:19 +0300 Subject: [PATCH 205/931] Add some tests --- src/Coordination/KeeperSnapshotManager.cpp | 6 + src/Coordination/KeeperStorage.cpp | 18 +- src/Coordination/ZooKeeperDataReader.cpp | 12 +- .../test_keeper_zookeeper_converter/test.py | 174 +++++++++++++++++- 4 files changed, 196 insertions(+), 14 deletions(-) diff --git a/src/Coordination/KeeperSnapshotManager.cpp b/src/Coordination/KeeperSnapshotManager.cpp index 42bc810f28e..40c898efdb5 100644 --- a/src/Coordination/KeeperSnapshotManager.cpp +++ b/src/Coordination/KeeperSnapshotManager.cpp @@ -99,6 +99,10 @@ namespace node.acl_id = acl_map.convertACLs(acls); } + /// Some strange ACLID during deserialization from ZooKeeper + if (node.acl_id == std::numeric_limits::max()) + node.acl_id = 0; + acl_map.addUsage(node.acl_id); readBinary(node.is_sequental, in); @@ -217,12 +221,14 @@ SnapshotMetadataPtr KeeperStorageSnapshot::deserialize(KeeperStorage & storage, if (current_version >= SnapshotVersion::V1) { size_t acls_map_size; + readBinary(acls_map_size, in); size_t current_map_size = 0; while (current_map_size < acls_map_size) { uint64_t acl_id; readBinary(acl_id, in); + size_t acls_size; readBinary(acls_size, in); Coordination::ACLs acls; diff --git a/src/Coordination/KeeperStorage.cpp b/src/Coordination/KeeperStorage.cpp index 3ae29edb77a..5418afb2501 100644 --- a/src/Coordination/KeeperStorage.cpp +++ b/src/Coordination/KeeperStorage.cpp @@ -263,6 +263,7 @@ struct KeeperStorageCreateRequest final : public KeeperStorageRequest } else { + auto & session_auth_ids = storage.session_and_auth[session_id]; KeeperStorage::Node created_node; @@ -280,6 +281,7 @@ struct KeeperStorageCreateRequest final : public KeeperStorageRequest created_node.acl_id = acl_id; created_node.stat.czxid = zxid; created_node.stat.mzxid = zxid; + created_node.stat.pzxid = zxid; created_node.stat.ctime = std::chrono::system_clock::now().time_since_epoch() / std::chrono::milliseconds(1); created_node.stat.mtime = created_node.stat.ctime; created_node.stat.numChildren = 0; @@ -302,12 +304,15 @@ struct KeeperStorageCreateRequest final : public KeeperStorageRequest } auto child_path = getBaseName(path_created); - container.updateValue(parent_path, [child_path] (KeeperStorage::Node & parent) + int64_t prev_parent_zxid; + container.updateValue(parent_path, [child_path, zxid, &prev_parent_zxid] (KeeperStorage::Node & parent) { /// Increment sequential number even if node is not sequential ++parent.seq_num; parent.children.insert(child_path); ++parent.stat.cversion; + prev_parent_zxid = parent.stat.pzxid; + parent.stat.pzxid = zxid; ++parent.stat.numChildren; }); @@ -317,7 +322,7 @@ struct KeeperStorageCreateRequest final : public KeeperStorageRequest if (request.is_ephemeral) ephemerals[session_id].emplace(path_created); - undo = [&storage, session_id, path_created, is_ephemeral = request.is_ephemeral, parent_path, child_path, acl_id] + undo = [&storage, prev_parent_zxid, session_id, path_created, is_ephemeral = request.is_ephemeral, parent_path, child_path, acl_id] { storage.container.erase(path_created); storage.acl_map.removeUsage(acl_id); @@ -325,11 +330,12 @@ struct KeeperStorageCreateRequest final : public KeeperStorageRequest if (is_ephemeral) storage.ephemerals[session_id].erase(path_created); 
-        storage.container.updateValue(parent_path, [child_path] (KeeperStorage::Node & undo_parent)
+        storage.container.updateValue(parent_path, [child_path, prev_parent_zxid] (KeeperStorage::Node & undo_parent)
         {
             --undo_parent.stat.cversion;
             --undo_parent.stat.numChildren;
             --undo_parent.seq_num;
+            undo_parent.stat.pzxid = prev_parent_zxid;
             undo_parent.children.erase(child_path);
         });
     };
@@ -536,6 +542,7 @@ struct KeeperStorageSetRequest final : public KeeperStorageRequest
         }
         else if (request.version == -1 || request.version == it->value.stat.version)
         {
+
             auto prev_node = it->value;

             auto itr = container.updateValue(request.path, [zxid, request] (KeeperStorage::Node & value)
@@ -901,10 +908,15 @@ KeeperStorage::ResponsesForSessions KeeperStorage::processRequest(const Coordina
     KeeperStorage::ResponsesForSessions results;
     if (new_last_zxid)
     {
+        LOG_INFO(&Poco::Logger::get("DEBUG"), "GOT ZXID {}", *new_last_zxid);
         if (zxid >= *new_last_zxid)
             throw Exception(ErrorCodes::LOGICAL_ERROR, "Got new ZXID {} smaller or equal than current {}. It's a bug", *new_last_zxid, zxid);
         zxid = *new_last_zxid;
     }
+    else
+    {
+        LOG_INFO(&Poco::Logger::get("DEBUG"), "NO ZXID PROVIDED");
+    }

     session_expiry_queue.update(session_id, session_and_timeout[session_id]);
     if (zk_request->getOpNum() == Coordination::OpNum::Close)
diff --git a/src/Coordination/ZooKeeperDataReader.cpp b/src/Coordination/ZooKeeperDataReader.cpp
index e0d0fbc85b6..4a324abe93d 100644
--- a/src/Coordination/ZooKeeperDataReader.cpp
+++ b/src/Coordination/ZooKeeperDataReader.cpp
@@ -308,6 +308,9 @@ Coordination::ZooKeeperRequestPtr deserializeSetTxn(ReadBuffer & in)
     Coordination::read(result->path, in);
     Coordination::read(result->data, in);
     Coordination::read(result->version, in);
+    /// The txn log stores the resulting version (i.e. version + 1), while the request must carry the expected current version, so subtract 1.
+    result->version -= 1;
+
     return result;
 }

@@ -405,8 +408,7 @@ Coordination::ZooKeeperRequestPtr deserializeMultiTxn(ReadBuffer & in)
     while (length > 0)
     {
         auto subrequest = deserializeTxnImpl(in, true);
-        if (subrequest)
-            result->requests.push_back(subrequest);
+        result->requests.push_back(subrequest);
         length--;
     }
     return result;
@@ -420,14 +422,14 @@ bool isErrorRequest(Coordination::ZooKeeperRequestPtr request)
 bool hasErrorsInMultiRequest(Coordination::ZooKeeperRequestPtr request)
 {
     for (const auto & subrequest : dynamic_cast(request.get())->requests)
-        if (dynamic_cast(subrequest.get())->getOpNum() == Coordination::OpNum::Error)
+        if (subrequest == nullptr)
             return true;
     return false;
 }

 }

-bool deserializeTxn(KeeperStorage & storage, ReadBuffer & in)
+bool deserializeTxn(KeeperStorage & storage, ReadBuffer & in, Poco::Logger * log)
 {
     int64_t checksum;
     Coordination::read(checksum, in);
@@ -490,7 +492,7 @@ void deserializeLogAndApplyToStorage(KeeperStorage & storage, const std::string
     LOG_INFO(log, "Header looks OK");

     size_t counter = 0;
-    while (!reader.eof() && deserializeTxn(storage, reader))
+    while (!reader.eof() && deserializeTxn(storage, reader, log))
     {
         counter++;
         if (counter % 1000 == 0)
diff --git a/tests/integration/test_keeper_zookeeper_converter/test.py b/tests/integration/test_keeper_zookeeper_converter/test.py
index 97c16b09cd9..5c6ed90eb35 100644
--- a/tests/integration/test_keeper_zookeeper_converter/test.py
+++ b/tests/integration/test_keeper_zookeeper_converter/test.py
@@ -1,11 +1,10 @@
-#!/usr/bin/env python3
-
 #!/usr/bin/env python3
 import pytest
 from helpers.cluster import ClickHouseCluster
 from kazoo.client import KazooClient, KazooState
 from kazoo.security import ACL, make_digest_acl, make_acl
 from kazoo.exceptions import AuthFailedError, InvalidACLError, NoAuthError, KazooException
+import os

 cluster = ClickHouseCluster(__file__)

@@ -17,6 +16,14 @@ def start_zookeeper():
 def stop_zookeeper():
     node.exec_in_container(['bash', '-c', '/opt/zookeeper/bin/zkServer.sh stop'])

+def clear_zookeeper():
+    node.exec_in_container(['bash', '-c', 'rm -fr /zookeeper/*'])
+
+def restart_and_clear_zookeeper():
+    stop_zookeeper()
+    clear_zookeeper()
+    start_zookeeper()
+
 def clear_clickhouse_data():
     node.exec_in_container(['bash', '-c', 'rm -fr /var/lib/clickhouse/coordination/logs/* /var/lib/clickhouse/coordination/snapshots/*'])

@@ -59,10 +66,39 @@ def get_genuine_zk(timeout=30.0):
     _genuine_zk_instance.start()
     return _genuine_zk_instance

-def compare_states(zk1, zk2):
+def compare_stats(stat1, stat2, path):
+    assert stat1.czxid == stat2.czxid, "path " + path + " czxids not equal for stats: " + str(stat1.czxid) + " != " + str(stat2.czxid)
+    assert stat1.mzxid == stat2.mzxid, "path " + path + " mzxids not equal for stats: " + str(stat1.mzxid) + " != " + str(stat2.mzxid)
+    assert stat1.version == stat2.version, "path " + path + " versions not equal for stats: " + str(stat1.version) + " != " + str(stat2.version)
+    assert stat1.cversion == stat2.cversion, "path " + path + " cversions not equal for stats: " + str(stat1.cversion) + " != " + str(stat2.cversion)
+    assert stat1.aversion == stat2.aversion, "path " + path + " aversions not equal for stats: " + str(stat1.aversion) + " != " + str(stat2.aversion)
+    assert stat1.ephemeralOwner == stat2.ephemeralOwner, "path " + path + " ephemeralOwners not equal for stats: " + str(stat1.ephemeralOwner) + " != " + str(stat2.ephemeralOwner)
+    assert stat1.dataLength == stat2.dataLength, "path " + path + " dataLengths not equal for stats: " + str(stat1.dataLength) + " != " + str(stat2.dataLength)
+    assert stat1.numChildren == stat2.numChildren, "path " + path + " numChildren not equal for stats: " + str(stat1.numChildren) + " != " + str(stat2.numChildren)
+    assert stat1.pzxid == stat2.pzxid, "path " + path + " pzxid not equal for stats: " + str(stat1.pzxid) + " != " + str(stat2.pzxid)
+
+def compare_states(zk1, zk2, path="/"):
+    data1, stat1 = zk1.get(path)
+    data2, stat2 = zk2.get(path)
+    print("Left Stat", stat1)
+    print("Right Stat", stat2)
+    assert data1 == data2, "Data not equal on path " + str(path)
+    # the stats of these two system paths legitimately differ, so skip them
+    if path not in ("/", "/zookeeper"):
+        compare_stats(stat1, stat2, path)
+
+    first_children = list(sorted(zk1.get_children(path)))
+    second_children = list(sorted(zk2.get_children(path)))
+    print("Got children left", first_children)
+    print("Got children right", second_children)
+    assert first_children == second_children, "Children are not equal on path " + path
+
+    for child in first_children:
+        print("Checking child", os.path.join(path, child))
+        compare_states(zk1, zk2, os.path.join(path, child))

 def test_smoke(started_cluster):
-    start_zookeeper()
+    restart_and_clear_zookeeper()

     genuine_connection = get_genuine_zk()
     genuine_connection.create("/test", b"data")
@@ -71,6 +107,132 @@ def test_smoke(started_cluster):

     copy_zookeeper_data()

+    genuine_connection = get_genuine_zk()
     fake_connection = get_fake_zk()
-    assert fake_connection.get("/test")[0] == b"data"
-    assert genuine_connection.get("/test")[0] == b"data"
+
+    compare_states(genuine_connection, fake_connection)
+
+def get_bytes(s):
+    return s.encode()
+
+def test_simple_crud_requests(started_cluster):
+    restart_and_clear_zookeeper()
+
+    genuine_connection = get_genuine_zk()
+    for i in range(100):
+        genuine_connection.create("/test_create" + str(i), get_bytes("data" + str(i)))
+
+    # some set queries
+    for i in range(10):
+        for j in range(i + 1):
+            genuine_connection.set("/test_create" + str(i), get_bytes("value" + str(j)))
+
+    for i in range(10, 20):
+        genuine_connection.delete("/test_create" + str(i))
+
+    path = "/test_create_deep"
+    for i in range(10):
+        genuine_connection.create(path, get_bytes("data" + str(i)))
+        path = os.path.join(path, str(i))
+
+
+    genuine_connection.create("/test_sequential", b"")
+    for i in range(10):
+        genuine_connection.create("/test_sequential/" + "a" * i + "-", get_bytes("dataX" + str(i)), sequence=True)
+
+    genuine_connection.create("/test_ephemeral", b"")
+    for i in range(10):
+        genuine_connection.create("/test_ephemeral/" + str(i), get_bytes("dataX" + str(i)), ephemeral=True)
+
+    copy_zookeeper_data()
+
+    genuine_connection = get_genuine_zk()
+    fake_connection = get_fake_zk()
+
+    compare_states(genuine_connection, fake_connection)
+
+    # especially ensure that counters are the same
+    genuine_connection.create("/test_sequential/" + "a" * 10 + "-", get_bytes("dataX" + str(i)), sequence=True)
+    fake_connection.create("/test_sequential/" + "a" * 10 + "-", get_bytes("dataX" + str(i)), sequence=True)
+
+    first_children = list(sorted(genuine_connection.get_children("/test_sequential")))
+    second_children = list(sorted(fake_connection.get_children("/test_sequential")))
+    assert first_children == second_children, "Children are not equal on path /test_sequential"
+
+
+def test_multi_and_failed_requests(started_cluster):
+    restart_and_clear_zookeeper()
+
+    genuine_connection = get_genuine_zk()
+    genuine_connection.create('/test_multitransactions')
+    for i in range(10):
+        t = genuine_connection.transaction()
+        t.create('/test_multitransactions/freddy' + str(i), get_bytes('data' + str(i)))
+        t.create('/test_multitransactions/fred' + str(i), get_bytes('value' + str(i)), ephemeral=True)
+        t.create('/test_multitransactions/smith' + str(i), get_bytes('entity' + str(i)), sequence=True)
+        t.set_data('/test_multitransactions', get_bytes("somedata" + str(i)))
+        t.commit()
+
+    with pytest.raises(Exception):
+        genuine_connection.set('/test_multitransactions/freddy0', get_bytes('mustfail' + str(i)), version=1)
+
+    t = genuine_connection.transaction()
+
+    t.create('/test_bad_transaction', get_bytes('data' + str(1)))
+    t.check('/test_multitransactions', version=32)
+    t.create('/test_bad_transaction1', get_bytes('data' + str(2)))
+    # should fail
+    t.commit()
+
+    assert genuine_connection.exists('/test_bad_transaction') is None
+    assert genuine_connection.exists('/test_bad_transaction1') is None
+
+    t = genuine_connection.transaction()
+    t.create('/test_bad_transaction2', get_bytes('data' + str(1)))
+    t.delete('/test_multitransactions/freddy0', version=5)
+
+    # should fail
+    t.commit()
+    assert genuine_connection.exists('/test_bad_transaction2') is None
+    assert genuine_connection.exists('/test_multitransactions/freddy0') is not None
+
+    copy_zookeeper_data()
+
+    genuine_connection = get_genuine_zk()
+    fake_connection = get_fake_zk()
+
+    compare_states(genuine_connection, fake_connection)
+
+
+#def test_acls(started_cluster):
+#    restart_and_clear_zookeeper()
+#    genuine_connection = get_genuine_zk()
+#    genuine_connection.add_auth('digest', 'user1:password1')
+#    genuine_connection.add_auth('digest', 'user2:password2')
+#    genuine_connection.add_auth('digest', 'user3:password3')
+#
+#    genuine_connection.create("/test_multi_all_acl", b"data", 
acl=[make_acl("auth", "", all=True)]) +# +# other_connection = get_genuine_zk() +# other_connection.add_auth('digest', 'user1:password1') +# other_connection.set("/test_multi_all_acl", b"X") +# assert other_connection.get("/test_multi_all_acl")[0] == b"X" +# +# yet_other_auth_connection = get_genuine_zk() +# yet_other_auth_connection.add_auth('digest', 'user2:password2') +# +# yet_other_auth_connection.set("/test_multi_all_acl", b"Y") +# +# copy_zookeeper_data() +# +# genuine_connection = get_genuine_zk() +# genuine_connection.add_auth('digest', 'user1:password1') +# genuine_connection.add_auth('digest', 'user2:password2') +# genuine_connection.add_auth('digest', 'user3:password3') +# +# fake_connection = get_fake_zk() +# fake_connection.add_auth('digest', 'user1:password1') +# fake_connection.add_auth('digest', 'user2:password2') +# fake_connection.add_auth('digest', 'user3:password3') +# +# compare_states(genuine_connection, fake_connection) From 1510a8a01301481abda7c09d6ebcffa4138b9379 Mon Sep 17 00:00:00 2001 From: meoww-bot <14239840+meoww-bot@users.noreply.github.com> Date: Sun, 20 Jun 2021 01:24:09 +0800 Subject: [PATCH 206/931] Create zh translation for rabbitmq.md --- .../table-engines/integrations/rabbitmq.md | 167 ++++++++++++++++++ 1 file changed, 167 insertions(+) create mode 100644 docs/zh/engines/table-engines/integrations/rabbitmq.md diff --git a/docs/zh/engines/table-engines/integrations/rabbitmq.md b/docs/zh/engines/table-engines/integrations/rabbitmq.md new file mode 100644 index 00000000000..c43218da14f --- /dev/null +++ b/docs/zh/engines/table-engines/integrations/rabbitmq.md @@ -0,0 +1,167 @@ +--- +toc_priority: 10 +toc_title: RabbitMQ +--- + +# RabbitMQ 引擎 {#rabbitmq-engine} + +该引擎允许 ClickHouse 与 [RabbitMQ](https://www.rabbitmq.com) 进行集成. + +`RabbitMQ` 可以让你: + +- 发布或订阅数据流。 +- 在数据流可用时进行处理。 + +## 创建一张表 {#table_engine-rabbitmq-creating-a-table} + +``` sql +CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster] +( + name1 [type1] [DEFAULT|MATERIALIZED|ALIAS expr1], + name2 [type2] [DEFAULT|MATERIALIZED|ALIAS expr2], + ... +) ENGINE = RabbitMQ SETTINGS + rabbitmq_host_port = 'host:port', + rabbitmq_exchange_name = 'exchange_name', + rabbitmq_format = 'data_format'[,] + [rabbitmq_exchange_type = 'exchange_type',] + [rabbitmq_routing_key_list = 'key1,key2,...',] + [rabbitmq_row_delimiter = 'delimiter_symbol',] + [rabbitmq_schema = '',] + [rabbitmq_num_consumers = N,] + [rabbitmq_num_queues = N,] + [rabbitmq_queue_base = 'queue',] + [rabbitmq_deadletter_exchange = 'dl-exchange',] + [rabbitmq_persistent = 0,] + [rabbitmq_skip_broken_messages = N,] + [rabbitmq_max_block_size = N,] + [rabbitmq_flush_interval_ms = N] +``` + +必要参数: + +- `rabbitmq_host_port` – 主机名:端口号 (比如, `localhost:5672`). +- `rabbitmq_exchange_name` – RabbitMQ exchange 名称. +- `rabbitmq_format` – 消息格式. 使用与SQL`FORMAT`函数相同的标记,如`JSONEachRow`。 更多信息,请参阅 [Formats](../../../interfaces/formats.md) 部分. + +可选参数: + +- `rabbitmq_exchange_type` – RabbitMQ exchange 的类型: `direct`, `fanout`, `topic`, `headers`, `consistent_hash`. 默认是: `fanout`. +- `rabbitmq_routing_key_list` – 一个以逗号分隔的路由键列表. +- `rabbitmq_row_delimiter` – 用于消息结束的分隔符. +- `rabbitmq_schema` – 如果格式需要模式定义,必须使用该参数。比如, [Cap’n Proto](https://capnproto.org/) 需要模式文件的路径以及根 `schema.capnp:Message` 对象的名称. +- `rabbitmq_num_consumers` – 每个表的消费者数量。默认:`1`。如果一个消费者的吞吐量不够,可以指定更多的消费者. +- `rabbitmq_num_queues` – 队列的总数。默认值: `1`. 增加这个数字可以显著提高性能. +- `rabbitmq_queue_base` - 指定一个队列名称的提示。这个设置的使用情况如下. 
+- `rabbitmq_deadletter_exchange` - 为[dead letter exchange](https://www.rabbitmq.com/dlx.html)指定名称。你可以用这个 exchange 的名称创建另一个表,并在消息被重新发布到 dead letter exchange 的情况下收集它们。默认情况下,没有指定 dead letter exchange。
+- `rabbitmq_persistent` - 如果设置为 1 (true), 在插入查询中交付模式将被设置为 2 (将消息标记为 'persistent'). 默认是: `0`.
+- `rabbitmq_skip_broken_messages` – RabbitMQ 消息解析器对每块模式不兼容消息的容忍度。默认值:`0`. 如果 `rabbitmq_skip_broken_messages = N`,那么引擎将跳过 *N* 个无法解析的 RabbitMQ 消息(一条消息等于一行数据)。
+- `rabbitmq_max_block_size`
+- `rabbitmq_flush_interval_ms`
+
+同时,格式的设置也可以与 rabbitmq 相关的设置一起添加。
+
+示例:
+
+``` sql
+  CREATE TABLE queue (
+    key UInt64,
+    value UInt64,
+    date DateTime
+  ) ENGINE = RabbitMQ SETTINGS rabbitmq_host_port = 'localhost:5672',
+                            rabbitmq_exchange_name = 'exchange1',
+                            rabbitmq_format = 'JSONEachRow',
+                            rabbitmq_num_consumers = 5,
+                            date_time_input_format = 'best_effort';
+```
+
+RabbitMQ 服务器配置应使用 ClickHouse 配置文件添加。
+
+必要配置:
+
+``` xml
+ <rabbitmq>
+    <username>root</username>
+    <password>clickhouse</password>
+ </rabbitmq>
+```
+
+可选配置:
+
+``` xml
+ <rabbitmq>
+    <vhost>clickhouse</vhost>
+ </rabbitmq>
+```
+
+## 描述 {#description}
+
+`SELECT`对于读取消息不是特别有用(除了调试),因为每个消息只能读取一次。使用[物化视图](../../../sql-reference/statements/create/view.md)创建实时线程更为实用。要做到这一点:
+
+1. 使用引擎创建一个 RabbitMQ 消费者,并将其视为一个数据流。
+2. 创建一个具有所需结构的表。
+3. 创建一个物化视图,转换来自引擎的数据并将其放入先前创建的表中。
+
+当`物化视图`加入引擎时,它开始在后台收集数据。这允许您持续接收来自 RabbitMQ 的消息,并使用 `SELECT` 将它们转换为所需格式。
+一个 RabbitMQ 表可以有多个你需要的物化视图。
+
+数据可以根据`rabbitmq_exchange_type`和指定的`rabbitmq_routing_key_list`进行分发。
+每个表不能有多于一个 exchange。一个 exchange 可以在多个表之间共享 - 因为可以使用路由让数据同时进入多个表。
+
+Exchange 类型的选项:
+
+- `direct` - 路由基于精确匹配的键。例如表的键列表: `key1,key2,key3,key4,key5`, 消息键可以等同于它们中的任意一个.
+- `fanout` - 路由到所有的表 (exchange 名称相同的情况) 无论是什么键都是这样.
+- `topic` - 路由是基于带有点分隔键的模式. 比如: `*.logs`, `records.*.*.2020`, `*.2018,*.2019,*.2020`.
+- `headers` - 路由是基于`key=value`的匹配,设置为`x-match=all`或`x-match=any`. 例如表的键列表: `x-match=all,format=logs,type=report,year=2020`.
+- `consistent_hash` - 数据在所有绑定的表之间均匀分布 (exchange 名称相同的情况). 请注意,这种 exchange 类型必须启用 RabbitMQ 插件: `rabbitmq-plugins enable rabbitmq_consistent_hash_exchange`.
+
+设置`rabbitmq_queue_base`可用于以下情况:
+
+- 让不同的表共享队列,这样就可以为同一个队列注册多个消费者,这使得性能更好。如果使用`rabbitmq_num_consumers`和/或`rabbitmq_num_queues`设置,在这些参数相同的情况下,实现队列的精确匹配。
+- 以便在不是所有消息都被成功消费时,能够从某些持久队列恢复读取。要从一个特定的队列恢复消费 - 在`rabbitmq_queue_base`设置中设置其名称,不要指定`rabbitmq_num_consumers`和`rabbitmq_num_queues`(默认为1)。要恢复为一个特定的表所声明的所有队列的消费 - 只要指定相同的设置:`rabbitmq_queue_base`, `rabbitmq_num_consumers`, `rabbitmq_num_queues`。默认情况下,队列名称对表来说是唯一的。
+- 以重复使用队列,因为它们被声明为持久的,并且不会自动删除(可以通过任何 RabbitMQ CLI 工具删除)。
+
+为了提高性能,收到的消息被分组为大小为 [max_insert_block_size](../../../operations/server-configuration-parameters/settings.md#settings-max_insert_block_size) 的块。如果在[stream_flush_interval_ms](../../../operations/server-configuration-parameters/settings.md)毫秒内没有形成数据块,无论数据块是否完整,数据都会被刷到表中。
+
+如果`rabbitmq_num_consumers`和/或`rabbitmq_num_queues`设置与`rabbitmq_exchange_type`一起被指定,那么:
+
+- 必须启用`rabbitmq-consistent-hash-exchange` 插件. 
+- 必须指定已发布信息的 `message_id`属性(对于每个信息/批次都是唯一的)。 + +对于插入查询时有消息元数据,消息元数据被添加到每个发布的消息中:`messageID`和`republished`标志(如果值为true,则表示消息发布不止一次) - 可以通过消息头访问。 + +不要在插入和物化视图中使用同一个表。 + +示例: + +``` sql + CREATE TABLE queue ( + key UInt64, + value UInt64 + ) ENGINE = RabbitMQ SETTINGS rabbitmq_host_port = 'localhost:5672', + rabbitmq_exchange_name = 'exchange1', + rabbitmq_exchange_type = 'headers', + rabbitmq_routing_key_list = 'format=logs,type=report,year=2020', + rabbitmq_format = 'JSONEachRow', + rabbitmq_num_consumers = 5; + + CREATE TABLE daily (key UInt64, value UInt64) + ENGINE = MergeTree() ORDER BY key; + + CREATE MATERIALIZED VIEW consumer TO daily + AS SELECT key, value FROM queue; + + SELECT key, value FROM daily ORDER BY key; +``` + +## 虚拟列 {#virtual-columns} + +- `_exchange_name` - RabbitMQ exchange 名称. +- `_channel_id` - 接收消息的消费者所声明的频道ID. +- `_delivery_tag` - 收到消息的DeliveryTag. 以每个频道为范围. +- `_redelivered` - 消息的`redelivered`标志. +- `_message_id` - 收到的消息的ID;如果在消息发布时被设置,则为非空. +- `_timestamp` - 收到的消息的时间戳;如果在消息发布时被设置,则为非空. + +[原始文章](https://clickhouse.tech/docs/en/engines/table-engines/integrations/rabbitmq/) From c56147c98ef42b25c4206e8ef3af14d857b1a2c3 Mon Sep 17 00:00:00 2001 From: meoww-bot <14239840+meoww-bot@users.noreply.github.com> Date: Sun, 20 Jun 2021 01:35:40 +0800 Subject: [PATCH 207/931] Create zh translation for s3.md --- docs/zh/sql-reference/table-functions/s3.md | 132 ++++++++++++++++++++ 1 file changed, 132 insertions(+) create mode 100644 docs/zh/sql-reference/table-functions/s3.md diff --git a/docs/zh/sql-reference/table-functions/s3.md b/docs/zh/sql-reference/table-functions/s3.md new file mode 100644 index 00000000000..0a446dbc460 --- /dev/null +++ b/docs/zh/sql-reference/table-functions/s3.md @@ -0,0 +1,132 @@ +--- +toc_priority: 45 +toc_title: s3 +--- + +# S3 表函数 {#s3-table-function} + +提供类似于表的接口来 select/insert [Amazon S3](https://aws.amazon.com/s3/)中的文件。这个表函数类似于[hdfs](../../sql-reference/table-functions/hdfs.md),但提供了 S3 特有的功能。 + +**语法** + +``` sql +s3(path, [aws_access_key_id, aws_secret_access_key,] format, structure, [compression]) +``` + +**参数** + +- `path` — 带有文件路径的 Bucket url。在只读模式下支持以下通配符: `*`, `?`, `{abc,def}` 和 `{N..M}` 其中 `N`, `M` 是数字, `'abc'`, `'def'` 是字符串. 更多信息见[下文](#wildcards-in-path). +- `format` — 文件的[格式](../../../interfaces/formats.md#formats). +- `structure` — 表的结构. 格式像这样 `'column1_name column1_type, column2_name column2_type, ...'`. +- `compression` — 压缩类型. 支持的值: `none`, `gzip/gz`, `brotli/br`, `xz/LZMA`, `zstd/zst`. 参数是可选的. 默认情况下,通过文件扩展名自动检测压缩类型. 
+
+**返回值**
+
+一个具有指定结构的表,用于读取或写入指定文件中的数据。
+
+**示例**
+
+从 S3 文件`https://storage.yandexcloud.net/my-test-bucket-768/data.csv`中选择表格的前两行:
+
+``` sql
+SELECT *
+FROM s3('https://storage.yandexcloud.net/my-test-bucket-768/data.csv', 'CSV', 'column1 UInt32, column2 UInt32, column3 UInt32')
+LIMIT 2;
+```
+
+``` text
+┌─column1─┬─column2─┬─column3─┐
+│       1 │       2 │       3 │
+│       3 │       2 │       1 │
+└─────────┴─────────┴─────────┘
+```
+
+类似的情况,但来源是`gzip`压缩的文件:
+
+``` sql
+SELECT *
+FROM s3('https://storage.yandexcloud.net/my-test-bucket-768/data.csv.gz', 'CSV', 'column1 UInt32, column2 UInt32, column3 UInt32', 'gzip')
+LIMIT 2;
+```
+
+``` text
+┌─column1─┬─column2─┬─column3─┐
+│       1 │       2 │       3 │
+│       3 │       2 │       1 │
+└─────────┴─────────┴─────────┘
+```
+
+## 用法 {#usage-examples}
+
+假设我们在S3上有几个文件,URI如下:
+
+- 'https://storage.yandexcloud.net/my-test-bucket-768/some_prefix/some_file_1.csv'
+- 'https://storage.yandexcloud.net/my-test-bucket-768/some_prefix/some_file_2.csv'
+- 'https://storage.yandexcloud.net/my-test-bucket-768/some_prefix/some_file_3.csv'
+- 'https://storage.yandexcloud.net/my-test-bucket-768/some_prefix/some_file_4.csv'
+- 'https://storage.yandexcloud.net/my-test-bucket-768/another_prefix/some_file_1.csv'
+- 'https://storage.yandexcloud.net/my-test-bucket-768/another_prefix/some_file_2.csv'
+- 'https://storage.yandexcloud.net/my-test-bucket-768/another_prefix/some_file_3.csv'
+- 'https://storage.yandexcloud.net/my-test-bucket-768/another_prefix/some_file_4.csv'
+
+计算以数字1至3结尾的文件的总行数:
+
+``` sql
+SELECT count(*)
+FROM s3('https://storage.yandexcloud.net/my-test-bucket-768/{some,another}_prefix/some_file_{1..3}.csv', 'CSV', 'name String, value UInt32')
+```
+
+``` text
+┌─count()─┐
+│      18 │
+└─────────┘
+```
+
+计算这两个目录中所有文件的总行数:
+
+``` sql
+SELECT count(*)
+FROM s3('https://storage.yandexcloud.net/my-test-bucket-768/{some,another}_prefix/*', 'CSV', 'name String, value UInt32')
+```
+
+``` text
+┌─count()─┐
+│      24 │
+└─────────┘
+```
+
+!!! warning "Warning"
+    如果文件列表中包含以零开头的数字范围,请对每个数字分别使用带括号的结构,或者使用`?`。
+
+计算名为 `file-000.csv`, `file-001.csv`, … , `file-999.csv` 文件的总行数:
+
+``` sql
+SELECT count(*)
+FROM s3('https://storage.yandexcloud.net/my-test-bucket-768/big_prefix/file-{000..999}.csv', 'CSV', 'name String, value UInt32');
+```
+
+``` text
+┌─count()─┐
+│      12 │
+└─────────┘
+```
+
+插入数据到 `test-data.csv.gz` 文件:
+
+``` sql
+INSERT INTO FUNCTION s3('https://storage.yandexcloud.net/my-test-bucket-768/test-data.csv.gz', 'CSV', 'name String, value UInt32', 'gzip')
+VALUES ('test-data', 1), ('test-data-2', 2);
+```
+
+从已有的表插入数据到 `test-data.csv.gz` 文件:
+
+``` sql
+INSERT INTO FUNCTION s3('https://storage.yandexcloud.net/my-test-bucket-768/test-data.csv.gz', 'CSV', 'name String, value UInt32', 'gzip')
+SELECT name, value FROM existing_table;
+```
+
+**另请参阅**
+
+- [S3 引擎](../../engines/table-engines/integrations/s3.md)
+
+[原始文章](https://clickhouse.tech/docs/en/sql-reference/table-functions/s3/)

From 89e7857012c4c4703dd343fafc97a81dc10b043f Mon Sep 17 00:00:00 2001
From: Dmitriy
Date: Sun, 20 Jun 2021 01:25:11 +0300
Subject: [PATCH 208/931] Create s3Cluster.md
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Made a rough draft of the description of the s3Cluster table function. I
will be correcting the examples that do not work.
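Since the draft examples assume a running cluster with MinIO, here is a minimal sketch (an editorial illustration, not part of this patch) of how one might smoke-test them over the HTTP interface; the local host, port, and the MinIO credentials are assumptions that simply mirror the examples added below:

``` python
import urllib.request

# Hypothetical local server; the query mirrors the count example in the draft below.
query = (
    "SELECT count(*) FROM s3Cluster('cluster_simple', "
    "'http://minio1:9001/root/data/{clickhouse,database}/*', "
    "'minio', 'minio123', 'CSV', "
    "'name String, value UInt32, polygon Array(Array(Tuple(Float64, Float64)))')"
)
request = urllib.request.Request("http://localhost:8123/", data=query.encode("utf-8"))
with urllib.request.urlopen(request) as response:
    print(response.read().decode("utf-8"))
```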
---
 .../table-functions/s3Cluster.md              | 47 +++++++++++++++++++
 1 file changed, 47 insertions(+)
 create mode 100644 docs/en/sql-reference/table-functions/s3Cluster.md

diff --git a/docs/en/sql-reference/table-functions/s3Cluster.md b/docs/en/sql-reference/table-functions/s3Cluster.md
new file mode 100644
index 00000000000..b49da53f01a
--- /dev/null
+++ b/docs/en/sql-reference/table-functions/s3Cluster.md
@@ -0,0 +1,47 @@
+---
+toc_priority: 55
+toc_title: s3Cluster
+---
+
+# S3Cluster Table Function {#s3Cluster-table-function}
+
+Allows processing files from [Amazon S3](https://aws.amazon.com/s3/) in parallel from many nodes in a specified cluster. On the initiator, it creates a connection to all nodes in the cluster, expands the asterisks in the S3 file path, and dispatches each file dynamically. On a worker node, it asks the initiator for the next task to process and processes it. This is repeated until all tasks are finished.
+
+**Syntax**
+
+``` sql
+s3Cluster(cluster_name, source, [access_key_id, secret_access_key,] format, structure)
+```
+
+**Arguments**
+
+- `cluster_name` — Name of a cluster that is used to build a set of addresses and connection parameters to remote and local servers.
+- `source` — Bucket URL with path to file. Supports the following wildcards in readonly mode: `*`, `?`, `{abc,def}` and `{N..M}` where `N`, `M` — numbers, `'abc'`, `'def'` — strings. For more information see [here](../../engines/table-engines/integrations/s3.md#wildcards-in-path).
+- `format` — The [format](../../interfaces/formats.md#formats) of the file.
+- `structure` — Structure of the table. Format `'column1_name column1_type, column2_name column2_type, ...'`.
+
+**Returned value**
+
+A table with the specified structure for reading or writing data in the specified file.
+
+**Examples**
+
+Selecting the data from the cluster `cluster_simple` using source `http://minio1:9001/root/data/{clickhouse,database}/*`:
+
+``` sql
+SELECT * from s3Cluster('cluster_simple', 'http://minio1:9001/root/data/{clickhouse,database}/*', 'minio', 'minio123', 'CSV', 'name String, value UInt32, polygon Array(Array(Tuple(Float64, Float64)))') ORDER BY (name, value, polygon);
+```
+
+Count the total number of rows in all files of the cluster `cluster_simple`:
+
+``` sql
+SELECT count(*) from s3Cluster('cluster_simple', 'http://minio1:9001/root/data/{clickhouse,database}/*', 'minio', 'minio123', 'CSV', 'name String, value UInt32, polygon Array(Array(Tuple(Float64, Float64)))');
+```
+
+!!! warning "Warning"
+    If your listing of files contains number ranges with leading zeros, use the construction with braces for each digit separately or use `?`. 
+ +**See Also** + +- [S3 engine](../../engines/table-engines/integrations/s3.md) +- [S3 table function](../../sql-reference/table-functions/s3.md) From 7e0e18260892aaf2779f4ef0c4f451026d77d296 Mon Sep 17 00:00:00 2001 From: meoww-bot <14239840+meoww-bot@users.noreply.github.com> Date: Sun, 20 Jun 2021 18:13:16 +0800 Subject: [PATCH 209/931] fix wrong link --- docs/zh/sql-reference/table-functions/s3.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/zh/sql-reference/table-functions/s3.md b/docs/zh/sql-reference/table-functions/s3.md index 0a446dbc460..c55412f4ddd 100644 --- a/docs/zh/sql-reference/table-functions/s3.md +++ b/docs/zh/sql-reference/table-functions/s3.md @@ -16,7 +16,7 @@ s3(path, [aws_access_key_id, aws_secret_access_key,] format, structure, [compres **参数** - `path` — 带有文件路径的 Bucket url。在只读模式下支持以下通配符: `*`, `?`, `{abc,def}` 和 `{N..M}` 其中 `N`, `M` 是数字, `'abc'`, `'def'` 是字符串. 更多信息见[下文](#wildcards-in-path). -- `format` — 文件的[格式](../../../interfaces/formats.md#formats). +- `format` — 文件的[格式](../../interfaces/formats.md#formats). - `structure` — 表的结构. 格式像这样 `'column1_name column1_type, column2_name column2_type, ...'`. - `compression` — 压缩类型. 支持的值: `none`, `gzip/gz`, `brotli/br`, `xz/LZMA`, `zstd/zst`. 参数是可选的. 默认情况下,通过文件扩展名自动检测压缩类型. From e409ead64ad093df06619a383f85be5dc37fb3a5 Mon Sep 17 00:00:00 2001 From: Anton Popov Date: Sun, 20 Jun 2021 23:18:05 +0300 Subject: [PATCH 210/931] fix tests --- src/Core/Settings.h | 2 +- tests/queries/0_stateless/01447_json_strings.reference | 4 ++-- .../queries/0_stateless/01449_json_compact_strings.reference | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/src/Core/Settings.h b/src/Core/Settings.h index e5122720380..9b4999f4e1a 100644 --- a/src/Core/Settings.h +++ b/src/Core/Settings.h @@ -464,7 +464,7 @@ class IColumn; M(UnionMode, union_default_mode, UnionMode::Unspecified, "Set default Union Mode in SelectWithUnion query. Possible values: empty string, 'ALL', 'DISTINCT'. If empty, query without Union Mode will throw exception.", 0) \ M(Bool, optimize_aggregators_of_group_by_keys, true, "Eliminates min/max/any/anyLast aggregators of GROUP BY keys in SELECT section", 0) \ M(Bool, optimize_group_by_function_keys, true, "Eliminates functions of other keys in GROUP BY section", 0) \ - M(Bool, legacy_column_name_of_tuple_literal, false, "List all names of element of large tuple literals in their column names instead of hash. This settings exists only for compatibity reasons. It makes sense to set to 'true', while doing rolling update of cluster from version lower than 21.7 to higher.", 0) \ + M(Bool, legacy_column_name_of_tuple_literal, false, "List all names of element of large tuple literals in their column names instead of hash. This settings exists only for compatibility reasons. It makes sense to set to 'true', while doing rolling update of cluster from version lower than 21.7 to higher.", 0) \ \ M(Bool, query_plan_enable_optimizations, true, "Apply optimizations to query plan", 0) \ M(UInt64, query_plan_max_optimizations_to_apply, 10000, "Limit the total number of optimizations applied to query plan. If zero, ignored. 
If limit reached, throw exception", 0) \ diff --git a/tests/queries/0_stateless/01447_json_strings.reference b/tests/queries/0_stateless/01447_json_strings.reference index ab88e2f3696..7892cb82922 100644 --- a/tests/queries/0_stateless/01447_json_strings.reference +++ b/tests/queries/0_stateless/01447_json_strings.reference @@ -14,7 +14,7 @@ "type": "Array(UInt8)" }, { - "name": "tuple(1, 'a')", + "name": "(1, 'a')", "type": "Tuple(UInt8, String)" }, { @@ -33,7 +33,7 @@ "1": "1", "'a'": "a", "[1, 2, 3]": "[1,2,3]", - "tuple(1, 'a')": "(1,'a')", + "(1, 'a')": "(1,'a')", "NULL": "ᴺᵁᴸᴸ", "nan": "nan" } diff --git a/tests/queries/0_stateless/01449_json_compact_strings.reference b/tests/queries/0_stateless/01449_json_compact_strings.reference index 1c6f073c0d0..53dba71d6ff 100644 --- a/tests/queries/0_stateless/01449_json_compact_strings.reference +++ b/tests/queries/0_stateless/01449_json_compact_strings.reference @@ -14,7 +14,7 @@ "type": "Array(UInt8)" }, { - "name": "tuple(1, 'a')", + "name": "(1, 'a')", "type": "Tuple(UInt8, String)" }, { From 556ec9e533e9eb547944a9458e67f338ffccc1ab Mon Sep 17 00:00:00 2001 From: Pavel Kruglov Date: Mon, 21 Jun 2021 12:06:28 +0300 Subject: [PATCH 211/931] Fix segfault in arrow and support Decimal256 --- .../Formats/Impl/ArrowColumnToCHColumn.cpp | 22 +++++++++---- .../Formats/Impl/CHColumnToArrowColumn.cpp | 30 +++++++++++++----- .../queries/0_stateless/01273_arrow.reference | 2 ++ tests/queries/0_stateless/01273_arrow.sh | 7 ++++ .../0_stateless/arrow_all_types_1.arrow | Bin 0 -> 4490 bytes .../0_stateless/arrow_all_types_2.arrow | Bin 0 -> 2714 bytes .../0_stateless/arrow_all_types_5.arrow | Bin 0 -> 2658 bytes 7 files changed, 47 insertions(+), 14 deletions(-) create mode 100644 tests/queries/0_stateless/arrow_all_types_1.arrow create mode 100644 tests/queries/0_stateless/arrow_all_types_2.arrow create mode 100644 tests/queries/0_stateless/arrow_all_types_5.arrow diff --git a/src/Processors/Formats/Impl/ArrowColumnToCHColumn.cpp b/src/Processors/Formats/Impl/ArrowColumnToCHColumn.cpp index edf131cd49e..16f27058121 100644 --- a/src/Processors/Formats/Impl/ArrowColumnToCHColumn.cpp +++ b/src/Processors/Formats/Impl/ArrowColumnToCHColumn.cpp @@ -225,18 +225,19 @@ namespace DB } } + template static void fillColumnWithDecimalData(std::shared_ptr & arrow_column, IColumn & internal_column) { - auto & column = assert_cast &>(internal_column); + auto & column = assert_cast &>(internal_column); auto & column_data = column.getData(); column_data.reserve(arrow_column->length()); for (size_t chunk_i = 0, num_chunks = static_cast(arrow_column->num_chunks()); chunk_i < num_chunks; ++chunk_i) { - auto & chunk = static_cast(*(arrow_column->chunk(chunk_i))); + auto & chunk = static_cast(*(arrow_column->chunk(chunk_i))); for (size_t value_i = 0, length = static_cast(chunk.length()); value_i < length; ++value_i) { - column_data.emplace_back(chunk.IsNull(value_i) ? Decimal128(0) : *reinterpret_cast(chunk.Value(value_i))); // TODO: copy column + column_data.emplace_back(chunk.IsNull(value_i) ? 
DecimalType(0) : *reinterpret_cast(chunk.Value(value_i))); // TODO: copy column } } } @@ -335,8 +336,11 @@ namespace DB case arrow::Type::TIMESTAMP: fillColumnWithTimestampData(arrow_column, internal_column); break; - case arrow::Type::DECIMAL: - fillColumnWithDecimalData(arrow_column, internal_column /*, internal_nested_type*/); + case arrow::Type::DECIMAL128: + fillColumnWithDecimalData(arrow_column, internal_column /*, internal_nested_type*/); + break; + case arrow::Type::DECIMAL256: + fillColumnWithDecimalData(arrow_column, internal_column /*, internal_nested_type*/); break; case arrow::Type::MAP: [[fallthrough]]; case arrow::Type::LIST: @@ -442,12 +446,18 @@ namespace DB return makeNullable(getInternalType(arrow_type, nested_type, column_name, format_name)); } - if (arrow_type->id() == arrow::Type::DECIMAL) + if (arrow_type->id() == arrow::Type::DECIMAL128) { const auto * decimal_type = static_cast(arrow_type.get()); return std::make_shared>(decimal_type->precision(), decimal_type->scale()); } + if (arrow_type->id() == arrow::Type::DECIMAL256) + { + const auto * decimal_type = static_cast(arrow_type.get()); + return std::make_shared>(decimal_type->precision(), decimal_type->scale()); + } + if (arrow_type->id() == arrow::Type::LIST) { const auto * list_type = static_cast(arrow_type.get()); diff --git a/src/Processors/Formats/Impl/CHColumnToArrowColumn.cpp b/src/Processors/Formats/Impl/CHColumnToArrowColumn.cpp index cc487535e37..230b28c657e 100644 --- a/src/Processors/Formats/Impl/CHColumnToArrowColumn.cpp +++ b/src/Processors/Formats/Impl/CHColumnToArrowColumn.cpp @@ -421,11 +421,20 @@ namespace DB || std::is_same_v> || std::is_same_v>) { - fillArrowArrayWithDecimalColumnData(column, null_bytemap, array_builder, format_name, start, end); + fillArrowArrayWithDecimalColumnData(column, null_bytemap, array_builder, format_name, start, end); + return true; } + if constexpr (std::is_same_v>) + { + fillArrowArrayWithDecimalColumnData(column, null_bytemap, array_builder, format_name, start, end); + return true; + } + return false; }; - callOnIndexAndDataType(column_type->getTypeId(), fill_decimal); + + if (!callOnIndexAndDataType(column_type->getTypeId(), fill_decimal)) + throw Exception{ErrorCodes::LOGICAL_ERROR, "Cannot fill arrow array with decimal data with type {}", column_type_name}; } #define DISPATCH(CPP_NUMERIC_TYPE, ARROW_BUILDER_TYPE) \ else if (#CPP_NUMERIC_TYPE == column_type_name) \ @@ -445,7 +454,7 @@ namespace DB } } - template + template static void fillArrowArrayWithDecimalColumnData( ColumnPtr write_column, const PaddedPODArray * null_bytemap, @@ -455,7 +464,7 @@ namespace DB size_t end) { const auto & column = assert_cast(*write_column); - arrow::DecimalBuilder & builder = assert_cast(*array_builder); + ArrowBuilder & builder = assert_cast(*array_builder); arrow::Status status; for (size_t value_i = start; value_i < end; ++value_i) @@ -463,8 +472,10 @@ namespace DB if (null_bytemap && (*null_bytemap)[value_i]) status = builder.AppendNull(); else - status = builder.Append( - arrow::Decimal128(reinterpret_cast(&column.getElement(value_i).value))); // TODO: try copy column + { + FieldType element = FieldType(column.getElement(value_i).value); + status = builder.Append(ArrowDecimalType(reinterpret_cast(&element))); // TODO: try copy column + } checkStatus(status, write_column->getName(), format_name); } @@ -512,15 +523,18 @@ namespace DB if constexpr ( std::is_same_v> || std::is_same_v> - || std::is_same_v>) + || std::is_same_v> + || std::is_same_v>) { const auto & 
decimal_type = assert_cast(column_type.get()); arrow_type = arrow::decimal(decimal_type->getPrecision(), decimal_type->getScale()); + return true; } return false; }; - callOnIndexAndDataType(column_type->getTypeId(), create_arrow_type); + if (!callOnIndexAndDataType(column_type->getTypeId(), create_arrow_type)) + throw Exception{ErrorCodes::LOGICAL_ERROR, "Cannot convert decimal type {} to arrow type", column_type->getFamilyName()}; return arrow_type; } diff --git a/tests/queries/0_stateless/01273_arrow.reference b/tests/queries/0_stateless/01273_arrow.reference index 0dc503f65e4..9f74ab344e5 100644 --- a/tests/queries/0_stateless/01273_arrow.reference +++ b/tests/queries/0_stateless/01273_arrow.reference @@ -58,3 +58,5 @@ dest from null: -108 108 -1016 1116 -1032 1132 -1064 1164 -1.032 -1.064 string-0 fixedstring\0\0\0\0 2001-02-03 2002-02-03 04:05:06 127 255 32767 65535 2147483647 4294967295 9223372036854775807 9223372036854775807 -1.032 -1.064 string-2 fixedstring-2\0\0 2004-06-07 2004-02-03 04:05:06 \N \N \N \N \N \N \N \N \N \N \N \N \N \N +0.1230 0.12312312 0.1231231231230000 0.12312312312312312300000000000000 +0.1230 0.12312312 0.1231231231230000 0.12312312312312312300000000000000 diff --git a/tests/queries/0_stateless/01273_arrow.sh b/tests/queries/0_stateless/01273_arrow.sh index ad8a6f0fdb9..fa9b277e4aa 100755 --- a/tests/queries/0_stateless/01273_arrow.sh +++ b/tests/queries/0_stateless/01273_arrow.sh @@ -103,3 +103,10 @@ ${CLICKHOUSE_CLIENT} --query="DROP TABLE arrow_types2" ${CLICKHOUSE_CLIENT} --query="DROP TABLE arrow_types3" ${CLICKHOUSE_CLIENT} --query="DROP TABLE arrow_types4" +${CLICKHOUSE_CLIENT} --query="DROP TABLE IF EXISTS arrow_decimal" +${CLICKHOUSE_CLIENT} --query="CREATE TABLE arrow_decimal (d1 Decimal32(4), d2 Decimal64(8), d3 Decimal128(16), d4 Decimal256(32)) ENGINE = Memory" +${CLICKHOUSE_CLIENT} --query="INSERT INTO TABLE arrow_decimal VALUES (0.123, 0.123123123, 0.123123123123, 0.123123123123123123)" +${CLICKHOUSE_CLIENT} --query="SELECT * FROM arrow_decimal FORMAT Arrow" | ${CLICKHOUSE_CLIENT} --query="INSERT INTO arrow_decimal FORMAT Arrow" +${CLICKHOUSE_CLIENT} --query="SELECT * FROM arrow_decimal" +${CLICKHOUSE_CLIENT} --query="DROP TABLE arrow_decimal" + diff --git a/tests/queries/0_stateless/arrow_all_types_1.arrow b/tests/queries/0_stateless/arrow_all_types_1.arrow new file mode 100644 index 0000000000000000000000000000000000000000..0baff392ff85fc4e943b748aa0127ce9181d9b0d GIT binary patch literal 4490 zcmeHLF>6y%6h3LwB-Ef8iUg7J28RwxQ!OQfN-7S;Md@OP3@tR8K&)U>=++Jn89E4d zaLgqB2*FYu9K|V%&Q5~36s$hK@7#Oh^Tv?9yc6y__q*Ts&b#lvd+vKT%d4yRA1;W* zu)?HBNodnjl$=aS0X0}A3oJkd9JBr?avrERAyP+~LpjNayhk}iSx2d$oIvvd>Lfg|W|} z@lk8HvANxO*4lb3ecW?pgf<7Qm*E=x;Xa*ql5vxL$TM#H=|*R1Q9dCiZcDleZ3N!> zJJ8;^j@BIRH-{fBBX8X~Iwr~ZUY_%3HoG56VjxPi?MYQq)N@!ClHo%`nnWjn6h|hK-MsBAa zu<@0EH*Ec^2E1Y8YXNWA;;#g}VLSgpz#BGx&GUxW1HED6o1VYm{cfN)Y~!~d@P@7b zq`5BI^CG65HN3BPl{gGF$Z(7NZF^aM{*Gt&F3o?wlKZZ8J`=a*Uc(zngwG_gUTfp_ zJQ6^u{&3nsB;|&{MGs1hrHqTcA-msl-d1*4k#;38jdyU+z$fK-~ZF>zV5a*KRHk_&gToG%fPJ>!2LDP0dhyQ#X{u cdViX_bf2luAl+wTHk|c+W)pd{tpCCM0cKN?d;kCd literal 0 HcmV?d00001 diff --git a/tests/queries/0_stateless/arrow_all_types_2.arrow b/tests/queries/0_stateless/arrow_all_types_2.arrow new file mode 100644 index 0000000000000000000000000000000000000000..42be61fe5f7dca501db2ee975df43894858c6e42 GIT binary patch literal 2714 zcmeHJziU%b6h5zcO&;-~8A=Hv1?g zs31}iN?Bql%CtmiL4puifFe9Y_Y}DdG@KG?L+7F6K;$EI5BeBdhmOJd1l@-|gEpZd z^x$`z!c1|zYt9E_a<8xTJN@qS4)Is;64;T)uL3g%g_eA_%YV|8`9fHleC|tdKIy*h 
ztZ(*TbvK^M2y3oS;1pL?)@iu~=Jn@LV&2}1wf@qgd`3vEGS?_6&3PdvU~C**Tanr06}vxltCip-cA zDw&F{xO%qhP7v4kKx?|gM&L$LGW`Gp=E1OO73X0FZAvV&7=tGwHkqXmni{C0#;ufr zW8}AI<`jO;A1N}K%Ub_7l*h4sQLJ(1^hL3)AzPPzobQ^V@x-9Bhhm;@O5K|5saWH! zEMgmqb(}}JxMGdJ%*7RJ{y{FTSo`ni;)*NIf8eb0k8*s)%0F@On)0&u=^PI0{zbXC zVx2#G@m|#T<+7YRJlNZ|OY8Bfg$$z8)VSKAxqaao|KcgKUQ&8Ebab%oWH7QEx+yNH z$c{Ux)}3zeR^J?|vhVQf!k6pCABTC%7|#sfFKfWw;kAadb!U0`$`vTjB;URFU8v!6 z=s^U1STkK^bD@1B0rBq9?K{Ui?d9X2b1gDCUs{eI+pcbhl|E9O7xU*F<%CjbBd literal 0 HcmV?d00001 diff --git a/tests/queries/0_stateless/arrow_all_types_5.arrow b/tests/queries/0_stateless/arrow_all_types_5.arrow new file mode 100644 index 0000000000000000000000000000000000000000..26763a2bd0a9c9d004cdf2a938369a4d2068bb93 GIT binary patch literal 2658 zcmeHJu}&L75S_CR`vg|x0!1z&bfrs^AmkziNGYN;iD;670wkPZDIrmEktkDAQly}C zg(y=}P^3sfiBh?WC{v}T0FL+OX3Vk5{eZ<-b8~OsyV>2j-94|at-XA`D3ZgHx=34S zv(l1=%t(SeSQG;bP=af0SCI!mqngM%ybHgGM1H})!#ChP_yshl@B{cZybq7zM^|}{ z#5MN!$l4Gz_ts`M$c7&V#DAbm5yvHc2bei%jMVdf_Bzj*FGl2`!`yS=-VS#MTVJxz z!<~0B#+-W<+6J^HLm&KMo`tr~yyY=8nfLw2&1`u|ej}$yP4-IK6uggjqJ8ohtqsh# zfgdfBlj0ezNjX3HufrXtmw(W#0ROGf&VVzij8!8M^&K5oQSbld=5Ss>lS2EfLTkBo z&b;Y)A zJs0y%!TWn))W;DMXcK9|)gnf8n|H7u=8HBc9- Date: Mon, 21 Jun 2021 12:23:24 +0300 Subject: [PATCH 212/931] Add tests for Parquet --- docs/en/interfaces/formats.md | 1 + .../0_stateless/00900_long_parquet.reference | 2 ++ tests/queries/0_stateless/00900_long_parquet.sh | 8 ++++++++ tests/queries/0_stateless/01273_arrow.sh | 1 - .../queries/0_stateless/arrow_all_types_1.arrow | Bin 4490 -> 0 bytes .../queries/0_stateless/arrow_all_types_2.arrow | Bin 2714 -> 0 bytes .../queries/0_stateless/arrow_all_types_5.arrow | Bin 2658 -> 0 bytes 7 files changed, 11 insertions(+), 1 deletion(-) delete mode 100644 tests/queries/0_stateless/arrow_all_types_1.arrow delete mode 100644 tests/queries/0_stateless/arrow_all_types_2.arrow delete mode 100644 tests/queries/0_stateless/arrow_all_types_5.arrow diff --git a/docs/en/interfaces/formats.md b/docs/en/interfaces/formats.md index 25127b0ea00..eb288721231 100644 --- a/docs/en/interfaces/formats.md +++ b/docs/en/interfaces/formats.md @@ -1302,6 +1302,7 @@ The table below shows supported data types and how they match ClickHouse [data t | `STRING`, `BINARY` | [String](../sql-reference/data-types/string.md) | `UTF8` | | `STRING`, `BINARY` | [FixedString](../sql-reference/data-types/fixedstring.md) | `UTF8` | | `DECIMAL` | [Decimal](../sql-reference/data-types/decimal.md) | `DECIMAL` | +| `DECIMAL256` | [Decimal256](../sql-reference/data-types/decimal.md)| `DECIMAL256` | | `LIST` | [Array](../sql-reference/data-types/array.md) | `LIST` | Arrays can be nested and can have a value of the `Nullable` type as an argument. 
diff --git a/tests/queries/0_stateless/00900_long_parquet.reference b/tests/queries/0_stateless/00900_long_parquet.reference index 230d1f5ca48..bf0f66bb217 100644 --- a/tests/queries/0_stateless/00900_long_parquet.reference +++ b/tests/queries/0_stateless/00900_long_parquet.reference @@ -72,3 +72,5 @@ dest from null: 3 [] [] [] [[[1,2,3],[1,2,3]],[[1,2,3]],[[],[1,2,3]]] [[['Some string','Some string'],[]],[['Some string']],[[]]] [[NULL,1,2],[NULL],[1,2],[]] [['Some string',NULL,'Some string'],[NULL],[]] [[[1,2,3],[1,2,3]],[[1,2,3]],[[],[1,2,3]]] [[['Some string','Some string'],[]],[['Some string']],[[]]] [[NULL,1,2],[NULL],[1,2],[]] [['Some string',NULL,'Some string'],[NULL],[]] +0.1230 0.12312312 0.1231231231230000 0.12312312312312312300000000000000 +0.1230 0.12312312 0.1231231231230000 0.12312312312312312300000000000000 diff --git a/tests/queries/0_stateless/00900_long_parquet.sh b/tests/queries/0_stateless/00900_long_parquet.sh index 8c19c7cecab..c30e1148abe 100755 --- a/tests/queries/0_stateless/00900_long_parquet.sh +++ b/tests/queries/0_stateless/00900_long_parquet.sh @@ -166,3 +166,11 @@ ${CLICKHOUSE_CLIENT} --query="INSERT INTO parquet_nested_arrays VALUES ([[[1,2,3 ${CLICKHOUSE_CLIENT} --query="SELECT * FROM parquet_nested_arrays FORMAT Parquet" | ${CLICKHOUSE_CLIENT} --query="INSERT INTO parquet_nested_arrays FORMAT Parquet" ${CLICKHOUSE_CLIENT} --query="SELECT * FROM parquet_nested_arrays" ${CLICKHOUSE_CLIENT} --query="DROP TABLE parquet_nested_arrays" + + +${CLICKHOUSE_CLIENT} --query="DROP TABLE IF EXISTS parquet_decimal" +${CLICKHOUSE_CLIENT} --query="CREATE TABLE parquet_decimal (d1 Decimal32(4), d2 Decimal64(8), d3 Decimal128(16), d4 Decimal256(32)) ENGINE = Memory" +${CLICKHOUSE_CLIENT} --query="INSERT INTO TABLE parquet_decimal VALUES (0.123, 0.123123123, 0.123123123123, 0.123123123123123123)" +${CLICKHOUSE_CLIENT} --query="SELECT * FROM parquet_decimal FORMAT Arrow" | ${CLICKHOUSE_CLIENT} --query="INSERT INTO parquet_decimal FORMAT Arrow" +${CLICKHOUSE_CLIENT} --query="SELECT * FROM parquet_decimal" +${CLICKHOUSE_CLIENT} --query="DROP TABLE parquet_decimal" diff --git a/tests/queries/0_stateless/01273_arrow.sh b/tests/queries/0_stateless/01273_arrow.sh index fa9b277e4aa..bd6e3089859 100755 --- a/tests/queries/0_stateless/01273_arrow.sh +++ b/tests/queries/0_stateless/01273_arrow.sh @@ -109,4 +109,3 @@ ${CLICKHOUSE_CLIENT} --query="INSERT INTO TABLE arrow_decimal VALUES (0.123, 0.1 ${CLICKHOUSE_CLIENT} --query="SELECT * FROM arrow_decimal FORMAT Arrow" | ${CLICKHOUSE_CLIENT} --query="INSERT INTO arrow_decimal FORMAT Arrow" ${CLICKHOUSE_CLIENT} --query="SELECT * FROM arrow_decimal" ${CLICKHOUSE_CLIENT} --query="DROP TABLE arrow_decimal" - diff --git a/tests/queries/0_stateless/arrow_all_types_1.arrow b/tests/queries/0_stateless/arrow_all_types_1.arrow deleted file mode 100644 index 0baff392ff85fc4e943b748aa0127ce9181d9b0d..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 4490 zcmeHLF>6y%6h3LwB-Ef8iUg7J28RwxQ!OQfN-7S;Md@OP3@tR8K&)U>=++Jn89E4d zaLgqB2*FYu9K|V%&Q5~36s$hK@7#Oh^Tv?9yc6y__q*Ts&b#lvd+vKT%d4yRA1;W* zu)?HBNodnjl$=aS0X0}A3oJkd9JBr?avrERAyP+~LpjNayhk}iSx2d$oIvvd>Lfg|W|} z@lk8HvANxO*4lb3ecW?pgf<7Qm*E=x;Xa*ql5vxL$TM#H=|*R1Q9dCiZcDleZ3N!> zJJ8;^j@BIRH-{fBBX8X~Iwr~ZUY_%3HoG56VjxPi?MYQq)N@!ClHo%`nnWjn6h|hK-MsBAa zu<@0EH*Ec^2E1Y8YXNWA;;#g}VLSgpz#BGx&GUxW1HED6o1VYm{cfN)Y~!~d@P@7b zq`5BI^CG65HN3BPl{gGF$Z(7NZF^aM{*Gt&F3o?wlKZZ8J`=a*Uc(zngwG_gUTfp_ zJQ6^u{&3nsB;|&{MGs1hrHqTcA-msl-d1*4k#;38jdyU+z$fK-~ZF>zV5a*KRHk_&gToG%fPJ>!2LDP0dhyQ#X{u 
cdViX_bf2luAl+wTHk|c+W)pd{tpCCM0cKN?d;kCd diff --git a/tests/queries/0_stateless/arrow_all_types_2.arrow b/tests/queries/0_stateless/arrow_all_types_2.arrow deleted file mode 100644 index 42be61fe5f7dca501db2ee975df43894858c6e42..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 2714 zcmeHJziU%b6h5zcO&;-~8A=Hv1?g zs31}iN?Bql%CtmiL4puifFe9Y_Y}DdG@KG?L+7F6K;$EI5BeBdhmOJd1l@-|gEpZd z^x$`z!c1|zYt9E_a<8xTJN@qS4)Is;64;T)uL3g%g_eA_%YV|8`9fHleC|tdKIy*h ztZ(*TbvK^M2y3oS;1pL?)@iu~=Jn@LV&2}1wf@qgd`3vEGS?_6&3PdvU~C**Tanr06}vxltCip-cA zDw&F{xO%qhP7v4kKx?|gM&L$LGW`Gp=E1OO73X0FZAvV&7=tGwHkqXmni{C0#;ufr zW8}AI<`jO;A1N}K%Ub_7l*h4sQLJ(1^hL3)AzPPzobQ^V@x-9Bhhm;@O5K|5saWH! zEMgmqb(}}JxMGdJ%*7RJ{y{FTSo`ni;)*NIf8eb0k8*s)%0F@On)0&u=^PI0{zbXC zVx2#G@m|#T<+7YRJlNZ|OY8Bfg$$z8)VSKAxqaao|KcgKUQ&8Ebab%oWH7QEx+yNH z$c{Ux)}3zeR^J?|vhVQf!k6pCABTC%7|#sfFKfWw;kAadb!U0`$`vTjB;URFU8v!6 z=s^U1STkK^bD@1B0rBq9?K{Ui?d9X2b1gDCUs{eI+pcbhl|E9O7xU*F<%CjbBd diff --git a/tests/queries/0_stateless/arrow_all_types_5.arrow b/tests/queries/0_stateless/arrow_all_types_5.arrow deleted file mode 100644 index 26763a2bd0a9c9d004cdf2a938369a4d2068bb93..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 2658 zcmeHJu}&L75S_CR`vg|x0!1z&bfrs^AmkziNGYN;iD;670wkPZDIrmEktkDAQly}C zg(y=}P^3sfiBh?WC{v}T0FL+OX3Vk5{eZ<-b8~OsyV>2j-94|at-XA`D3ZgHx=34S zv(l1=%t(SeSQG;bP=af0SCI!mqngM%ybHgGM1H})!#ChP_yshl@B{cZybq7zM^|}{ z#5MN!$l4Gz_ts`M$c7&V#DAbm5yvHc2bei%jMVdf_Bzj*FGl2`!`yS=-VS#MTVJxz z!<~0B#+-W<+6J^HLm&KMo`tr~yyY=8nfLw2&1`u|ej}$yP4-IK6uggjqJ8ohtqsh# zfgdfBlj0ezNjX3HufrXtmw(W#0ROGf&VVzij8!8M^&K5oQSbld=5Ss>lS2EfLTkBo z&b;Y)A zJs0y%!TWn))W;DMXcK9|)gnf8n|H7u=8HBc9- Date: Mon, 21 Jun 2021 13:05:09 +0300 Subject: [PATCH 213/931] Update docs/ru/operations/settings/settings.md Co-authored-by: olgarev <56617294+olgarev@users.noreply.github.com> --- docs/ru/operations/settings/settings.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/ru/operations/settings/settings.md b/docs/ru/operations/settings/settings.md index 60d26ac5825..c453bc39535 100644 --- a/docs/ru/operations/settings/settings.md +++ b/docs/ru/operations/settings/settings.md @@ -1195,8 +1195,8 @@ load_balancing = round_robin Возможные значения: -- 0 — выключена. -- 1 — включена. +- 0 — компиляция выключена. +- 1 — компиляция включена. Значение по умолчанию: `1`. 
## min_count_to_compile {#min-count-to-compile} From 0f9fc33a4e917c2925228ce5a3b66eafd9b042f8 Mon Sep 17 00:00:00 2001 From: kssenii Date: Mon, 21 Jun 2021 11:27:10 +0000 Subject: [PATCH 214/931] Fix postgres arrays --- .../fetchPostgreSQLTableStructure.cpp | 83 ++++++++++++++----- .../test_storage_postgresql/test.py | 16 ++++ 2 files changed, 80 insertions(+), 19 deletions(-) diff --git a/src/Databases/PostgreSQL/fetchPostgreSQLTableStructure.cpp b/src/Databases/PostgreSQL/fetchPostgreSQLTableStructure.cpp index a310315dcc8..ff3e4008af0 100644 --- a/src/Databases/PostgreSQL/fetchPostgreSQLTableStructure.cpp +++ b/src/Databases/PostgreSQL/fetchPostgreSQLTableStructure.cpp @@ -25,14 +25,19 @@ namespace ErrorCodes } -static DataTypePtr convertPostgreSQLDataType(String & type, bool is_nullable, uint16_t dimensions) +static DataTypePtr convertPostgreSQLDataType(String & type, bool is_nullable, uint16_t dimensions, const std::function & recheck_array) { DataTypePtr res; + bool is_array = false; /// Get rid of trailing '[]' for arrays - if (dimensions) + if (type.ends_with("[]")) + { + is_array = true; + while (type.ends_with("[]")) type.resize(type.size() - 2); + } if (type == "smallint") res = std::make_shared(); @@ -88,8 +93,24 @@ static DataTypePtr convertPostgreSQLDataType(String & type, bool is_nullable, ui res = std::make_shared(); if (is_nullable) res = std::make_shared(res); - while (dimensions--) - res = std::make_shared(res); + + if (is_array) + { + /// In some cases att_ndims does not return correct number of dimensions + /// (it might return incorrect 0 number, for example, when a postgres table is created via 'as select * from table_with_arrays'). + /// So recheck all arrays separately afterwards. (Cannot check here on the same connection because another query is in execution). + if (!dimensions) + { + /// Return 1d array type and recheck all arrays dims with array_ndims + res = std::make_shared(res); + recheck_array(); + } + else + { + while (dimensions--) + res = std::make_shared(res); + } + } return res; } @@ -98,7 +119,7 @@ static DataTypePtr convertPostgreSQLDataType(String & type, bool is_nullable, ui std::shared_ptr fetchPostgreSQLTableStructure( postgres::ConnectionHolderPtr connection_holder, const String & postgres_table_name, bool use_nulls) { - auto columns = NamesAndTypesList(); + auto columns = NamesAndTypes(); if (postgres_table_name.find('\'') != std::string::npos || postgres_table_name.find('\\') != std::string::npos) @@ -115,22 +136,46 @@ std::shared_ptr fetchPostgreSQLTableStructure( "AND NOT attisdropped AND attnum > 0", postgres_table_name); try { - pqxx::read_transaction tx(connection_holder->get()); - auto stream{pqxx::stream_from::query(tx, query)}; - - std::tuple row; - while (stream >> row) + std::set recheck_arrays_indexes; { - columns.push_back(NameAndTypePair( - std::get<0>(row), - convertPostgreSQLDataType( - std::get<1>(row), - use_nulls && (std::get<2>(row) == "f"), /// 'f' means that postgres `not_null` is false, i.e. value is nullable - std::get<3>(row)))); + pqxx::read_transaction tx(connection_holder->get()); + auto stream{pqxx::stream_from::query(tx, query)}; + + std::tuple row; + size_t i = 0; + auto recheck_array = [&]() { recheck_arrays_indexes.insert(i); }; + while (stream >> row) + { + auto data_type = convertPostgreSQLDataType(std::get<1>(row), + use_nulls && (std::get<2>(row) == "f"), /// 'f' means that postgres `not_null` is false, i.e. 
value is nullable
+                    std::get<3>(row),
+                    recheck_array);
+                columns.push_back(NameAndTypePair(std::get<0>(row), data_type));
+                ++i;
+            }
+            stream.complete();
+            tx.commit();
+        }
+
+        for (auto & i : recheck_arrays_indexes)
+        {
+            const auto & name_and_type = columns[i];
+
+            pqxx::nontransaction tx(connection_holder->get());
+            /// All rows must contain the same number of dimensions, so LIMIT 1 is enough. If the number of dimensions differs between rows,
+            /// such arrays cannot be used as a ClickHouse Array at all.
+            pqxx::result result{tx.exec(fmt::format("SELECT array_ndims({}) FROM {} LIMIT 1", name_and_type.name, postgres_table_name))};
+            auto dimensions = result[0][0].as();
+
+            /// A column is put into recheck only as a 1d array, so start from its nested type.
+            DataTypePtr type = assert_cast(name_and_type.type.get())->getNestedType();
+            while (dimensions--)
+                type = std::make_shared(type);
+
+            columns[i] = NameAndTypePair(name_and_type.name, type);
+        }
     }
+
     catch (const pqxx::undefined_table &)
     {
         throw Exception(fmt::format(
@@ -146,7 +191,7 @@
     if (columns.empty())
         return nullptr;

-    return std::make_shared(columns);
+    return std::make_shared(NamesAndTypesList(columns.begin(), columns.end()));
 }

 }
diff --git a/tests/integration/test_storage_postgresql/test.py b/tests/integration/test_storage_postgresql/test.py
index f81033822c8..05c7ba9365d 100644
--- a/tests/integration/test_storage_postgresql/test.py
+++ b/tests/integration/test_storage_postgresql/test.py
@@ -308,6 +308,22 @@ def test_postgres_distributed(started_cluster):
     assert(result == 'host2\nhost4\n' or result == 'host3\nhost4\n')

+def test_postgres_ndim(started_cluster):
+    conn = get_postgres_conn(started_cluster, started_cluster.postgres_ip, True)
+    cursor = conn.cursor()
+    cursor.execute('CREATE TABLE arr1 (a Integer[])')
+    cursor.execute("INSERT INTO arr1 SELECT '{{1}, {2}}'")
+
+    # The point is to create the table via 'AS SELECT *'; in that case postgres attndims is not correct (it is 0).
+    cursor.execute('CREATE TABLE arr2 AS SELECT * FROM arr1')
+    cursor.execute("SELECT attndims AS dims FROM pg_attribute WHERE attrelid = 'arr2'::regclass; ")
+    result = cursor.fetchall()[0]
+    assert(int(result[0]) == 0)
+
+    result = node1.query('''SELECT toTypeName(a) FROM postgresql('postgres1:5432', 'clickhouse', 'arr2', 'postgres', 'mysecretpassword')''')
+    assert(result.strip() == "Array(Array(Nullable(Int32)))")
+
+
 if __name__ == '__main__':
     cluster.start()
     input("Cluster created, press any key to destroy...")

From 115edd3e42afb2aba40154036a288bacde99d6d7 Mon Sep 17 00:00:00 2001
From: Nicolae Vartolomei
Date: Mon, 21 Jun 2021 12:21:26 +0100
Subject: [PATCH 215/931] Fix hang and incorrect exit code returned from
 clickhouse-test

Variables aren't shared when using multiprocessing; use shared memory
instead:
https://docs.python.org/3/library/multiprocessing.html#shared-ctypes-objects.

There appears to be a deadlock when multiple threads try to send the
SIGTERM signal at the same time. Avoid it by making sure SIGTERM is sent
only once for the process group.
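As a standalone sketch of the shared-memory primitives this fix relies on (an editorial illustration, not part of the patch): a plain module-level variable mutated in a worker process only changes that process's copy, while `multiprocessing.Value` and `multiprocessing.Event` are visible to the parent as well.

``` python
import multiprocessing

def worker(exit_code, server_died):
    # Assigning to a plain global here would only change this process's copy;
    # Value and Event live in shared memory, so the parent observes the change.
    with exit_code.get_lock():
        exit_code.value = 1
    server_died.set()

if __name__ == "__main__":
    exit_code = multiprocessing.Value("i", 0)  # shared int, as in the patch
    server_died = multiprocessing.Event()      # shared flag, as in the patch
    proc = multiprocessing.Process(target=worker, args=(exit_code, server_died))
    proc.start()
    proc.join()
    assert exit_code.value == 1
    assert server_died.is_set()
```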
--- tests/clickhouse-test | 47 ++++++++++++++++++++++++++----------------- 1 file changed, 28 insertions(+), 19 deletions(-) diff --git a/tests/clickhouse-test b/tests/clickhouse-test index e508abab70c..dc8c5dbd2f6 100755 --- a/tests/clickhouse-test +++ b/tests/clickhouse-test @@ -48,15 +48,23 @@ MAX_RETRIES = 5 class Terminated(KeyboardInterrupt): pass + def signal_handler(sig, frame): raise Terminated(f'Terminated with {sig} signal') def stop_tests(): - # send signal to all processes in group to avoid hung check triggering - # (to avoid terminating clickhouse-test itself, the signal should be ignored) - signal.signal(signal.SIGTERM, signal.SIG_IGN) - os.killpg(os.getpgid(os.getpid()), signal.SIGTERM) - signal.signal(signal.SIGTERM, signal.SIG_DFL) + global stop_tests_triggered_lock + global stop_tests_triggered + + with stop_tests_triggered_lock: + if not stop_tests_triggered.is_set(): + stop_tests_triggered.set() + + # send signal to all processes in group to avoid hung check triggering + # (to avoid terminating clickhouse-test itself, the signal should be ignored) + signal.signal(signal.SIGTERM, signal.SIG_IGN) + os.killpg(os.getpgid(os.getpid()), signal.SIGTERM) + signal.signal(signal.SIGTERM, signal.SIG_DFL) def json_minify(string): """ @@ -326,18 +334,20 @@ def colored(text, args, color=None, on_color=None, attrs=None): return text -SERVER_DIED = False -exit_code = 0 stop_time = None +exit_code = multiprocessing.Value("i", 0) +server_died = multiprocessing.Event() +stop_tests_triggered_lock = multiprocessing.Lock() +stop_tests_triggered = multiprocessing.Event() queue = multiprocessing.Queue(maxsize=1) restarted_tests = [] # (test, stderr) # def run_tests_array(all_tests, suite, suite_dir, suite_tmp_dir, run_total): def run_tests_array(all_tests_with_params): all_tests, num_tests, suite, suite_dir, suite_tmp_dir = all_tests_with_params - global exit_code - global SERVER_DIED global stop_time + global exit_code + global server_died OP_SQUARE_BRACKET = colored("[", args, attrs=['bold']) CL_SQUARE_BRACKET = colored("]", args, attrs=['bold']) @@ -379,7 +389,7 @@ def run_tests_array(all_tests_with_params): else: break - if SERVER_DIED: + if server_died.is_set(): stop_tests() break @@ -441,7 +451,7 @@ def run_tests_array(all_tests_with_params): if failed_to_check or clickhouse_proc.returncode != 0: failures += 1 print("Server does not respond to health check") - SERVER_DIED = True + server_died.set() stop_tests() break @@ -494,10 +504,10 @@ def run_tests_array(all_tests_with_params): # Stop on fatal errors like segmentation fault. They are sent to client via logs. if ' ' in stderr: - SERVER_DIED = True + server_died.set() if testcase_args.stop and ('Connection refused' in stderr or 'Attempt to read after eof' in stderr) and not 'Received exception from server' in stderr: - SERVER_DIED = True + server_died.set() if os.path.isfile(stdout_file): status += ", result:\n\n" @@ -583,7 +593,7 @@ def run_tests_array(all_tests_with_params): f" {skipped_total} tests skipped. {(datetime.now() - start_time).total_seconds():.2f} s elapsed" f' ({multiprocessing.current_process().name}).', args, "red", attrs=["bold"])) - exit_code = 1 + exit_code.value = 1 else: print(colored(f"\n{passed_total} tests passed. {skipped_total} tests skipped." 
f" {(datetime.now() - start_time).total_seconds():.2f} s elapsed" @@ -750,7 +760,7 @@ def do_run_tests(jobs, suite, suite_dir, suite_tmp_dir, all_tests, parallel_test def main(args): - global SERVER_DIED + global server_died global stop_time global exit_code global server_logs_level @@ -853,7 +863,7 @@ def main(args): total_tests_run = 0 for suite in sorted(os.listdir(base_dir), key=sute_key_func): - if SERVER_DIED: + if server_died.is_set(): break suite_dir = os.path.join(base_dir, suite) @@ -953,8 +963,7 @@ def main(args): else: print(bt) - - exit_code = 1 + exit_code.value = 1 else: print(colored("\nNo queries hung.", args, "green", attrs=["bold"])) @@ -971,7 +980,7 @@ def main(args): else: print("All tests have finished.") - sys.exit(exit_code) + sys.exit(exit_code.value) def find_binary(name): From 82f0a5f2ddcdb73d747b640f762eb6b17712a5f4 Mon Sep 17 00:00:00 2001 From: Anton Popov Date: Mon, 21 Jun 2021 14:43:38 +0300 Subject: [PATCH 216/931] fix usage of WITH FILL together with WITH TOTALS --- src/Processors/QueryPlan/FillingStep.cpp | 5 ++++- .../01921_with_fill_with_totals.reference | 22 +++++++++++++++++++ .../01921_with_fill_with_totals.sql | 8 +++++++ 3 files changed, 34 insertions(+), 1 deletion(-) create mode 100644 tests/queries/0_stateless/01921_with_fill_with_totals.reference create mode 100644 tests/queries/0_stateless/01921_with_fill_with_totals.sql diff --git a/src/Processors/QueryPlan/FillingStep.cpp b/src/Processors/QueryPlan/FillingStep.cpp index a4306ffed2b..5393f1f5133 100644 --- a/src/Processors/QueryPlan/FillingStep.cpp +++ b/src/Processors/QueryPlan/FillingStep.cpp @@ -38,8 +38,11 @@ FillingStep::FillingStep(const DataStream & input_stream_, SortDescription sort_ void FillingStep::transformPipeline(QueryPipeline & pipeline, const BuildQueryPipelineSettings &) { - pipeline.addSimpleTransform([&](const Block & header) + pipeline.addSimpleTransform([&](const Block & header, QueryPipeline::StreamType stream_type) -> ProcessorPtr { + if (stream_type == QueryPipeline::StreamType::Totals) + return nullptr; + return std::make_shared(header, sort_description); }); } diff --git a/tests/queries/0_stateless/01921_with_fill_with_totals.reference b/tests/queries/0_stateless/01921_with_fill_with_totals.reference new file mode 100644 index 00000000000..47c8c60e3c3 --- /dev/null +++ b/tests/queries/0_stateless/01921_with_fill_with_totals.reference @@ -0,0 +1,22 @@ +20 0 +19 0 +18 0 +17 0 +16 0 +15 0 +14 0 +13 0 +12 0 +11 0 +10 0 +9 0 +8 0 +7 7 +6 0 +5 0 +4 4 +3 0 +2 0 +1 1 + +0 12 diff --git a/tests/queries/0_stateless/01921_with_fill_with_totals.sql b/tests/queries/0_stateless/01921_with_fill_with_totals.sql new file mode 100644 index 00000000000..9d201848141 --- /dev/null +++ b/tests/queries/0_stateless/01921_with_fill_with_totals.sql @@ -0,0 +1,8 @@ +SELECT + number, + sum(number) +FROM numbers(10) +WHERE number % 3 = 1 +GROUP BY number + WITH TOTALS +ORDER BY number DESC WITH FILL FROM 20; From eb85c5a8e72450e5690ae4ae69f07697bacd5347 Mon Sep 17 00:00:00 2001 From: vdimir Date: Fri, 18 Jun 2021 17:28:52 +0300 Subject: [PATCH 217/931] Fix assert with non uint8 in prewhere --- src/Columns/FilterDescription.cpp | 14 -------------- src/Columns/FilterDescription.h | 3 --- src/Interpreters/ExpressionAnalyzer.cpp | 17 ++--------------- src/Interpreters/ExpressionAnalyzer.h | 2 -- .../MergeTree/MergeTreeBaseSelectProcessor.cpp | 11 +++++++++-- .../01917_prewhere_column_type.reference | 1 + .../0_stateless/01917_prewhere_column_type.sql | 16 ++++++++++++++++ 7 files changed, 28 
insertions(+), 36 deletions(-) create mode 100644 tests/queries/0_stateless/01917_prewhere_column_type.reference create mode 100644 tests/queries/0_stateless/01917_prewhere_column_type.sql diff --git a/src/Columns/FilterDescription.cpp b/src/Columns/FilterDescription.cpp index d216094eaab..c9968d841c2 100644 --- a/src/Columns/FilterDescription.cpp +++ b/src/Columns/FilterDescription.cpp @@ -87,18 +87,4 @@ FilterDescription::FilterDescription(const IColumn & column_) ErrorCodes::ILLEGAL_TYPE_OF_COLUMN_FOR_FILTER); } - -void checkColumnCanBeUsedAsFilter(const ColumnWithTypeAndName & column_elem) -{ - ConstantFilterDescription const_filter; - if (column_elem.column) - const_filter = ConstantFilterDescription(*column_elem.column); - - if (!const_filter.always_false && !const_filter.always_true) - { - auto column = column_elem.column ? column_elem.column : column_elem.type->createColumn(); - FilterDescription filter(*column); - } -} - } diff --git a/src/Columns/FilterDescription.h b/src/Columns/FilterDescription.h index 89474ea523c..05812fea283 100644 --- a/src/Columns/FilterDescription.h +++ b/src/Columns/FilterDescription.h @@ -32,7 +32,4 @@ struct FilterDescription struct ColumnWithTypeAndName; -/// Will throw an exception if column_elem is cannot be used as a filter column. -void checkColumnCanBeUsedAsFilter(const ColumnWithTypeAndName & column_elem); - } diff --git a/src/Interpreters/ExpressionAnalyzer.cpp b/src/Interpreters/ExpressionAnalyzer.cpp index fe52b30da7b..fd3d4fc8781 100644 --- a/src/Interpreters/ExpressionAnalyzer.cpp +++ b/src/Interpreters/ExpressionAnalyzer.cpp @@ -953,10 +953,8 @@ ActionsDAGPtr SelectQueryExpressionAnalyzer::appendPrewhere( ExpressionActionsChain & chain, bool only_types, const Names & additional_required_columns) { const auto * select_query = getSelectQuery(); - ActionsDAGPtr prewhere_actions; - if (!select_query->prewhere()) - return prewhere_actions; + return nullptr; Names first_action_names; if (!chain.steps.empty()) @@ -973,6 +971,7 @@ ActionsDAGPtr SelectQueryExpressionAnalyzer::appendPrewhere( throw Exception("Invalid type for filter in PREWHERE: " + filter_type->getName(), ErrorCodes::ILLEGAL_TYPE_OF_COLUMN_FOR_FILTER); + ActionsDAGPtr prewhere_actions; { /// Remove unused source_columns from prewhere actions. 
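        /// (Editorial aside, not part of this patch: with the
        /// checkColumnCanBeUsedAsFilter() pre-check removed, analysis now
        /// accepts any PREWHERE filter whose type is usable in a boolean
        /// context, so a query like
        ///
        ///     SELECT s FROM t1 PREWHERE f;   -- f is Float32
        ///
        /// passes this stage; the concrete column type is re-validated at
        /// execution time instead -- see the WhichDataType branch added to
        /// MergeTreeBaseSelectProcessor::executePrewhereActions below.)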
auto tmp_actions_dag = std::make_shared(sourceColumns()); @@ -1038,18 +1037,6 @@ ActionsDAGPtr SelectQueryExpressionAnalyzer::appendPrewhere( return prewhere_actions; } -void SelectQueryExpressionAnalyzer::appendPreliminaryFilter(ExpressionActionsChain & chain, ActionsDAGPtr actions_dag, String column_name) -{ - ExpressionActionsChain::Step & step = chain.lastStep(sourceColumns()); - - // FIXME: assert(filter_info); - auto * expression_step = typeid_cast(&step); - expression_step->actions_dag = std::move(actions_dag); - step.addRequiredOutput(column_name); - - chain.addStep(); -} - bool SelectQueryExpressionAnalyzer::appendWhere(ExpressionActionsChain & chain, bool only_types) { const auto * select_query = getSelectQuery(); diff --git a/src/Interpreters/ExpressionAnalyzer.h b/src/Interpreters/ExpressionAnalyzer.h index 70ff5643b7c..2427f65c694 100644 --- a/src/Interpreters/ExpressionAnalyzer.h +++ b/src/Interpreters/ExpressionAnalyzer.h @@ -357,8 +357,6 @@ private: ArrayJoinActionPtr appendArrayJoin(ExpressionActionsChain & chain, ActionsDAGPtr & before_array_join, bool only_types); bool appendJoinLeftKeys(ExpressionActionsChain & chain, bool only_types); JoinPtr appendJoin(ExpressionActionsChain & chain); - /// Add preliminary rows filtration. Actions are created in other expression analyzer to prevent any possible alias injection. - void appendPreliminaryFilter(ExpressionActionsChain & chain, ActionsDAGPtr actions_dag, String column_name); /// remove_filter is set in ExpressionActionsChain::finalize(); /// Columns in `additional_required_columns` will not be removed (they can be used for e.g. sampling or FINAL modifier). ActionsDAGPtr appendPrewhere(ExpressionActionsChain & chain, bool only_types, const Names & additional_required_columns); diff --git a/src/Storages/MergeTree/MergeTreeBaseSelectProcessor.cpp b/src/Storages/MergeTree/MergeTreeBaseSelectProcessor.cpp index d9cb949042c..4ff593ea1c1 100644 --- a/src/Storages/MergeTree/MergeTreeBaseSelectProcessor.cpp +++ b/src/Storages/MergeTree/MergeTreeBaseSelectProcessor.cpp @@ -17,6 +17,7 @@ namespace DB namespace ErrorCodes { + extern const int ILLEGAL_TYPE_OF_COLUMN_FOR_FILTER; extern const int LOGICAL_ERROR; } @@ -430,8 +431,14 @@ void MergeTreeBaseSelectProcessor::executePrewhereActions(Block & block, const P block.erase(prewhere_info->prewhere_column_name); else { - auto & ctn = block.getByName(prewhere_info->prewhere_column_name); - ctn.column = ctn.type->createColumnConst(block.rows(), 1u)->convertToFullColumnIfConst(); + WhichDataType which(prewhere_column.type); + if (which.isInt() || which.isUInt()) + prewhere_column.column = prewhere_column.type->createColumnConst(block.rows(), 1u)->convertToFullColumnIfConst(); + else if (which.isFloat()) + prewhere_column.column = prewhere_column.type->createColumnConst(block.rows(), 1.0f)->convertToFullColumnIfConst(); + else + throw Exception("Illegal type " + prewhere_column.type->getName() + " of column for filter.", + ErrorCodes::ILLEGAL_TYPE_OF_COLUMN_FOR_FILTER); } } } diff --git a/tests/queries/0_stateless/01917_prewhere_column_type.reference b/tests/queries/0_stateless/01917_prewhere_column_type.reference new file mode 100644 index 00000000000..58c9bdf9d01 --- /dev/null +++ b/tests/queries/0_stateless/01917_prewhere_column_type.reference @@ -0,0 +1 @@ +111 diff --git a/tests/queries/0_stateless/01917_prewhere_column_type.sql b/tests/queries/0_stateless/01917_prewhere_column_type.sql new file mode 100644 index 00000000000..4046eb4d891 --- /dev/null +++ 
b/tests/queries/0_stateless/01917_prewhere_column_type.sql @@ -0,0 +1,16 @@ +DROP TABLE IF EXISTS t1; + +CREATE TABLE t1 ( s String, f Float32, e UInt16 ) ENGINE = MergeTree ORDER BY tuple(); + +INSERT INTO t1 VALUES ('111', 1, 1); + +SELECT s FROM t1 WHERE f AND (e = 1); -- { serverError 59 } +SELECT s FROM t1 PREWHERE f; -- { serverError 59 } +SELECT s FROM t1 PREWHERE f WHERE (e = 1); -- { serverError 59 } +SELECT s FROM t1 PREWHERE f WHERE f AND (e = 1); -- { serverError 59 } + +SELECT s FROM t1 WHERE e AND (e = 1); +SELECT s FROM t1 PREWHERE e; -- { serverError 59 } +SELECT s FROM t1 PREWHERE e WHERE (e = 1); -- { serverError 59 } +SELECT s FROM t1 PREWHERE e WHERE f AND (e = 1); -- { serverError 59 } + From e1b509c8b1acd781c5626fe9f273f66cf121b796 Mon Sep 17 00:00:00 2001 From: Tiaonmmn Date: Mon, 21 Jun 2021 20:53:09 +0800 Subject: [PATCH 218/931] Update mergetree.md Update translation and add new contents. --- .../mergetree-family/mergetree.md | 449 ++++++++++++------ 1 file changed, 302 insertions(+), 147 deletions(-) diff --git a/docs/zh/engines/table-engines/mergetree-family/mergetree.md b/docs/zh/engines/table-engines/mergetree-family/mergetree.md index 353dd5f5bc8..45e080fd640 100644 --- a/docs/zh/engines/table-engines/mergetree-family/mergetree.md +++ b/docs/zh/engines/table-engines/mergetree-family/mergetree.md @@ -6,21 +6,21 @@ Clickhouse 中最强大的表引擎当属 `MergeTree` (合并树)引擎及 主要特点: -- 存储的数据按主键排序。 +- 存储的数据按主键排序。 - 这使得你能够创建一个小型的稀疏索引来加快数据检索。 + 这使得您能够创建一个小型的稀疏索引来加快数据检索。 -- 支持数据分区,如果指定了 [分区键](custom-partitioning-key.md) 的话。 +- 如果指定了 [分区键](custom-partitioning-key.md) 的话,可以使用分区。 在相同数据集和相同结果集的情况下 ClickHouse 中某些带分区的操作会比普通操作更快。查询中指定了分区键时 ClickHouse 会自动截取分区数据。这也有效增加了查询性能。 -- 支持数据副本。 +- 支持数据副本。 `ReplicatedMergeTree` 系列的表提供了数据副本功能。更多信息,请参阅 [数据副本](replication.md) 一节。 -- 支持数据采样。 +- 支持数据采样。 - 需要的话,你可以给表设置一个采样方法。 + 需要的话,您可以给表设置一个采样方法。 !!! note "注意" [合并](../special/merge.md#merge) 引擎并不属于 `*MergeTree` 系列。 @@ -50,54 +50,58 @@ ORDER BY expr **子句** -- `ENGINE` - 引擎名和参数。 `ENGINE = MergeTree()`. `MergeTree` 引擎没有参数。 +- `ENGINE` - 引擎名和参数。 `ENGINE = MergeTree()`. `MergeTree` 引擎没有参数。 - -- `ORDER BY` — 排序键。 +- `ORDER BY` — 排序键。 可以是一组列的元组或任意的表达式。 例如: `ORDER BY (CounterID, EventDate)` 。 - - 如果没有使用 `PRIMARY KEY` 显式的指定主键,ClickHouse 会使用排序键作为主键。 - + + 如果没有使用 `PRIMARY KEY` 显式指定的主键,ClickHouse 会使用排序键作为主键。 + 如果不需要排序,可以使用 `ORDER BY tuple()`. 
参考 [选择主键](https://clickhouse.tech/docs/en/engines/table-engines/mergetree-family/mergetree/#selecting-the-primary-key) -- `PARTITION BY` — [分区键](custom-partitioning-key.md) 。 +- `PARTITION BY` — [分区键](custom-partitioning-key.md) ,可选项。 要按月分区,可以使用表达式 `toYYYYMM(date_column)` ,这里的 `date_column` 是一个 [Date](../../../engines/table-engines/mergetree-family/mergetree.md) 类型的列。分区名的格式会是 `"YYYYMM"` 。 -- `PRIMARY KEY` - 主键,如果要 [选择与排序键不同的主键](#choosing-a-primary-key-that-differs-from-the-sorting-key),可选。 +- `PRIMARY KEY` - 如果要 [选择与排序键不同的主键](#choosing-a-primary-key-that-differs-from-the-sorting-key),在这里指定,可选项。 默认情况下主键跟排序键(由 `ORDER BY` 子句指定)相同。 因此,大部分情况下不需要再专门指定一个 `PRIMARY KEY` 子句。 -- `SAMPLE BY` — 用于抽样的表达式。 +- `SAMPLE BY` - 用于抽样的表达式,可选项。 如果要用抽样表达式,主键中必须包含这个表达式。例如: `SAMPLE BY intHash32(UserID) ORDER BY (CounterID, EventDate, intHash32(UserID))` 。 - -- TTL 指定行存储的持续时间并定义数据片段在硬盘和卷上的移动逻辑的规则列表,可选。 + +- `TTL` - 指定行存储的持续时间并定义数据片段在硬盘和卷上的移动逻辑的规则列表,可选项。 表达式中必须存在至少一个 `Date` 或 `DateTime` 类型的列,比如: - + `TTL date + INTERVAl 1 DAY` - + 规则的类型 `DELETE|TO DISK 'xxx'|TO VOLUME 'xxx'`指定了当满足条件(到达指定时间)时所要执行的动作:移除过期的行,还是将数据片段(如果数据片段中的所有行都满足表达式的话)移动到指定的磁盘(`TO DISK 'xxx'`) 或 卷(`TO VOLUME 'xxx'`)。默认的规则是移除(`DELETE`)。可以在列表中指定多个规则,但最多只能有一个`DELETE`的规则。 - + 更多细节,请查看 [表和列的 TTL](#table_engine-mergetree-ttl) -- `SETTINGS` — 控制 `MergeTree` 行为的额外参数: +- `SETTINGS` — 控制 `MergeTree` 行为的额外参数,可选项: - - `index_granularity` — 索引粒度。索引中相邻的『标记』间的数据行数。默认值,8192 。参考[数据存储](#mergetree-data-storage)。 - - `index_granularity_bytes` — 索引粒度,以字节为单位,默认值: 10Mb。如果想要仅按数据行数限制索引粒度, 请设置为0(不建议)。 - - `enable_mixed_granularity_parts` — 是否启用通过 `index_granularity_bytes` 控制索引粒度的大小。在19.11版本之前, 只有 `index_granularity` 配置能够用于限制索引粒度的大小。当从具有很大的行(几十上百兆字节)的表中查询数据时候,`index_granularity_bytes` 配置能够提升ClickHouse的性能。如果你的表里有很大的行,可以开启这项配置来提升`SELECT` 查询的性能。 - - `use_minimalistic_part_header_in_zookeeper` — 是否在 ZooKeeper 中启用最小的数据片段头 。如果设置了 `use_minimalistic_part_header_in_zookeeper=1` ,ZooKeeper 会存储更少的数据。更多信息参考『服务配置参数』这章中的 [设置描述](../../../operations/server-configuration-parameters/settings.md#server-settings-use_minimalistic_part_header_in_zookeeper) 。 - - `min_merge_bytes_to_use_direct_io` — 使用直接 I/O 来操作磁盘的合并操作时要求的最小数据量。合并数据片段时,ClickHouse 会计算要被合并的所有数据的总存储空间。如果大小超过了 `min_merge_bytes_to_use_direct_io` 设置的字节数,则 ClickHouse 将使用直接 I/O 接口(`O_DIRECT` 选项)对磁盘读写。如果设置 `min_merge_bytes_to_use_direct_io = 0` ,则会禁用直接 I/O。默认值:`10 * 1024 * 1024 * 1024` 字节。 + - `index_granularity` — 索引粒度。索引中相邻的『标记』间的数据行数。默认值8192 。参考[数据存储](#mergetree-data-storage)。 + - `index_granularity_bytes` — 索引粒度,以字节为单位,默认值: 10Mb。如果想要仅按数据行数限制索引粒度, 请设置为0(不建议)。 + - `min_index_granularity_bytes` - 允许的最小数据粒度,默认值:1024b。该选项用于防止误操作,添加了一个非常低索引粒度的表。参考[数据存储](#mergetree-data-storage) + - `enable_mixed_granularity_parts` — 是否启用通过 `index_granularity_bytes` 控制索引粒度的大小。在19.11版本之前, 只有 `index_granularity` 配置能够用于限制索引粒度的大小。当从具有很大的行(几十上百兆字节)的表中查询数据时候,`index_granularity_bytes` 配置能够提升ClickHouse的性能。如果您的表里有很大的行,可以开启这项配置来提升`SELECT` 查询的性能。 + - `use_minimalistic_part_header_in_zookeeper` — ZooKeeper中数据片段存储方式 。如果`use_minimalistic_part_header_in_zookeeper=1` ,ZooKeeper 会存储更少的数据。更多信息参考[服务配置参数]([Server Settings | ClickHouse Documentation](https://clickhouse.tech/docs/zh/operations/server-configuration-parameters/settings/))这章中的 [设置描述](../../../operations/server-configuration-parameters/settings.md#server-settings-use_minimalistic_part_header_in_zookeeper) 。 + - `min_merge_bytes_to_use_direct_io` — 使用直接 I/O 来操作磁盘的合并操作时要求的最小数据量。合并数据片段时,ClickHouse 会计算要被合并的所有数据的总存储空间。如果大小超过了 `min_merge_bytes_to_use_direct_io` 设置的字节数,则 ClickHouse 将使用直接 I/O 接口(`O_DIRECT` 选项)对磁盘读写。如果设置 
`min_merge_bytes_to_use_direct_io = 0` ,则会禁用直接 I/O。默认值:`10 * 1024 * 1024 * 1024` 字节。 - - `merge_with_ttl_timeout` — TTL合并频率的最小间隔时间,单位:秒。默认值: 86400 (1 天)。 - - `write_final_mark` — 是否启用在数据片段尾部写入最终索引标记。默认值: 1(不建议更改)。 - - `merge_max_block_size` — 在块中进行合并操作时的最大行数限制。默认值:8192 - - `storage_policy` — 存储策略。 参见 [使用具有多个块的设备进行数据存储](#table_engine-mergetree-multiple-volumes). - - `min_bytes_for_wide_part`,`min_rows_for_wide_part` 在数据片段中可以使用`Wide`格式进行存储的最小字节数/行数。你可以不设置、只设置一个,或全都设置。参考:[数据存储](#mergetree-data-storage) + - `merge_with_ttl_timeout` — TTL合并频率的最小间隔时间,单位:秒。默认值: 86400 (1 天)。 + - `write_final_mark` — 是否启用在数据片段尾部写入最终索引标记。默认值: 1(不要关闭)。 + - `merge_max_block_size` — 在块中进行合并操作时的最大行数限制。默认值:8192 + - `storage_policy` — 存储策略。 参见 [使用具有多个块的设备进行数据存储](#table_engine-mergetree-multiple-volumes). + - `min_bytes_for_wide_part`,`min_rows_for_wide_part` 在数据片段中可以使用`Wide`格式进行存储的最小字节数/行数。您可以不设置、只设置一个,或全都设置。参考:[数据存储](#mergetree-data-storage) + - `max_parts_in_total` - 所有分区中最大块的数量(意义不明) + - `max_compress_block_size` - 在数据压缩写入表前,未压缩数据块的最大大小。您可以在全局设置中设置该值(参见[max_compress_block_size](https://clickhouse.tech/docs/zh/operations/settings/settings/#max-compress-block-size))。建表时指定该值会覆盖全局设置。 + - `min_compress_block_size` - 在数据压缩写入表前,未压缩数据块的最小大小。您可以在全局设置中设置该值(参见[min_compress_block_size](https://clickhouse.tech/docs/zh/operations/settings/settings/#min-compress-block-size))。建表时指定该值会覆盖全局设置。 + - `max_partitions_to_read` - 一次查询中可访问的分区最大数。您可以在全局设置中设置该值(参见[max_partitions_to_read](https://clickhouse.tech/docs/zh/operations/settings/settings/#max_partitions_to_read))。 **示例配置** @@ -107,12 +111,11 @@ ENGINE MergeTree() PARTITION BY toYYYYMM(EventDate) ORDER BY (CounterID, EventDa 在这个例子中,我们设置了按月进行分区。 -同时我们设置了一个按用户 ID 哈希的抽样表达式。这使得你可以对该表中每个 `CounterID` 和 `EventDate` 的数据伪随机分布。如果你在查询时指定了 [SAMPLE](../../../engines/table-engines/mergetree-family/mergetree.md#select-sample-clause) 子句。 ClickHouse会返回对于用户子集的一个均匀的伪随机数据采样。 +同时我们设置了一个按用户 ID 哈希的抽样表达式。这使得您可以对该表中每个 `CounterID` 和 `EventDate` 的数据伪随机分布。如果您在查询时指定了 [SAMPLE](../../../engines/table-engines/mergetree-family/mergetree.md#select-sample-clause) 子句。 ClickHouse会返回对于用户子集的一个均匀的伪随机数据采样。 `index_granularity` 可省略因为 8192 是默认设置 。
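(补充示意,非本补丁内容:基于上面的建表示例,带采样的近似查询大致如下,其中 `SAMPLE 0.1` 表示在约 10% 的用户子集上计算结果:)

``` sql
SELECT CounterID, count()
FROM table
SAMPLE 0.1
GROUP BY CounterID
```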

- 已弃用的建表方法 !!! attention "注意" @@ -127,10 +130,10 @@ ENGINE MergeTree() PARTITION BY toYYYYMM(EventDate) ORDER BY (CounterID, EventDa **MergeTree() 参数** -- `date-column` — 类型为 [日期](../../../engines/table-engines/mergetree-family/mergetree.md) 的列名。ClickHouse 会自动依据这个列按月创建分区。分区名格式为 `"YYYYMM"` 。 -- `sampling_expression` — 采样表达式。 -- `(primary, key)` — 主键。类型 — [元组()](../../../engines/table-engines/mergetree-family/mergetree.md) -- `index_granularity` — 索引粒度。即索引中相邻『标记』间的数据行数。设为 8192 可以适用大部分场景。 +- `date-column` — 类型为 [日期](../../../engines/table-engines/mergetree-family/mergetree.md) 的列名。ClickHouse 会自动依据这个列按月创建分区。分区名格式为 `"YYYYMM"` 。 +- `sampling_expression` — 采样表达式。 +- `(primary, key)` — 主键。类型 — [元组()](../../../engines/table-engines/mergetree-family/mergetree.md) +- `index_granularity` — 索引粒度。即索引中相邻『标记』间的数据行数。设为 8192 可以适用大部分场景。 **示例** @@ -152,51 +155,55 @@ ENGINE MergeTree() PARTITION BY toYYYYMM(EventDate) ORDER BY (CounterID, EventDa 数据存储格式由 `min_bytes_for_wide_part` 和 `min_rows_for_wide_part` 表引擎参数控制。如果数据片段中的字节数或行数少于相应的设置值,数据片段会以 `Compact` 格式存储,否则会以 `Wide` 格式存储。 每个数据片段被逻辑的分割成颗粒(granules)。颗粒是 ClickHouse 中进行数据查询时的最小不可分割数据集。ClickHouse 不会对行或值进行拆分,所以每个颗粒总是包含整数个行。每个颗粒的第一行通过该行的主键值进行标记, -ClickHouse 会为每个数据片段创建一个索引文件来存储这些标记。对于每列,无论它是否包含在主键当中,ClickHouse 都会存储类似标记。这些标记让你可以在列文件中直接找到数据。 +ClickHouse 会为每个数据片段创建一个索引文件来存储这些标记。对于每列,无论它是否包含在主键当中,ClickHouse 都会存储类似标记。这些标记让您可以在列文件中直接找到数据。 -颗粒的大小通过表引擎参数 `index_granularity` 和 `index_granularity_bytes` 控制。取决于行的大小,颗粒的行数的在 `[1, index_granularity]` 范围中。如果单行的大小超过了 `index_granularity_bytes` 设置的值,那么一个颗粒的大小会超过 `index_granularity_bytes`。在这种情况下,颗粒的大小等于该行的大小。 +颗粒的大小通过表引擎参数 `index_granularity` 和 `index_granularity_bytes` 控制。颗粒的行数的在 `[1, index_granularity]` 范围中,这取决于行的大小。如果单行的大小超过了 `index_granularity_bytes` 设置的值,那么一个颗粒的大小会超过 `index_granularity_bytes`。在这种情况下,颗粒的大小等于该行的大小。 ## 主键和索引在查询中的表现 {#primary-keys-and-indexes-in-queries} 我们以 `(CounterID, Date)` 以主键。排序好的索引的图示会是下面这样: +``` text 全部数据 : [-------------------------------------------------------------------------] CounterID: [aaaaaaaaaaaaaaaaaabbbbcdeeeeeeeeeeeeefgggggggghhhhhhhhhiiiiiiiiikllllllll] Date: [1111111222222233331233211111222222333211111112122222223111112223311122333] 标记: | | | | | | | | | | | a,1 a,2 a,3 b,3 e,2 e,3 g,1 h,2 i,1 i,3 l,3 标记号: 0 1 2 3 4 5 6 7 8 9 10 +``` 如果指定查询如下: -- `CounterID in ('a', 'h')`,服务器会读取标记号在 `[0, 3)` 和 `[6, 8)` 区间中的数据。 -- `CounterID IN ('a', 'h') AND Date = 3`,服务器会读取标记号在 `[1, 3)` 和 `[7, 8)` 区间中的数据。 -- `Date = 3`,服务器会读取标记号在 `[1, 10]` 区间中的数据。 +- `CounterID in ('a', 'h')`,服务器会读取标记号在 `[0, 3)` 和 `[6, 8)` 区间中的数据。 +- `CounterID IN ('a', 'h') AND Date = 3`,服务器会读取标记号在 `[1, 3)` 和 `[7, 8)` 区间中的数据。 +- `Date = 3`,服务器会读取标记号在 `[1, 10]` 区间中的数据。 上面例子可以看出使用索引通常会比全表描述要高效。 稀疏索引会引起额外的数据读取。当读取主键单个区间范围的数据时,每个数据块中最多会多读 `index_granularity * 2` 行额外的数据。 -稀疏索引使得你可以处理极大量的行,因为大多数情况下,这些索引常驻与内存(RAM)中。 +稀疏索引使得您可以处理极大量的行,因为大多数情况下,这些索引常驻于内存。 -ClickHouse 不要求主键惟一,所以你可以插入多条具有相同主键的行。 +ClickHouse 不要求主键唯一,所以您可以插入多条具有相同主键的行。 + +您可以在`PRIMARY KEY`与`ORDER BY`条件中使用`可为空的`类型的表达式,但强烈建议不要这么做。为了启用这项功能,请打开[allow_nullable_key](https://clickhouse.tech/docs/zh/operations/settings/settings/#allow-nullable-key),[NULLS_LAST](https://clickhouse.tech/docs/zh/sql-reference/statements/select/order-by/#sorting-of-special-values)规则也适用于`ORDER BY`条件中有NULL值的情况下。 ### 主键的选择 {#zhu-jian-de-xuan-ze} -主键中列的数量并没有明确的限制。依据数据结构,你可以在主键包含多些或少些列。这样可以: +主键中列的数量并没有明确的限制。依据数据结构,您可以在主键包含多些或少些列。这样可以: -- 改善索引的性能。 +- 改善索引的性能。 如果当前主键是 `(a, b)` ,在下列情况下添加另一个 `c` 列会提升性能: - - - 查询会使用 `c` 列作为条件 - - 很长的数据范围( `index_granularity` 的数倍)里 `(a, b)` 都是相同的值,并且这样的情况很普遍。换言之,就是加入另一列后,可以让你的查询略过很长的数据范围。 -- 
改善数据压缩。 + - 查询会使用 `c` 列作为条件 + - 很长的数据范围( `index_granularity` 的数倍)里 `(a, b)` 都是相同的值,并且这样的情况很普遍。换言之,就是加入另一列后,可以让您的查询略过很长的数据范围。 + +- 改善数据压缩。 ClickHouse 以主键排序片段数据,所以,数据的一致性越高,压缩越好。 -- 在[CollapsingMergeTree](collapsingmergetree.md#table_engine-collapsingmergetree) 和 [SummingMergeTree](summingmergetree.md) 引擎里进行数据合并时会提供额外的处理逻辑。 +- 在[CollapsingMergeTree](collapsingmergetree.md#table_engine-collapsingmergetree) 和 [SummingMergeTree](summingmergetree.md) 引擎里进行数据合并时会提供额外的处理逻辑。 在这种情况下,指定与主键不同的 *排序键* 也是有意义的。 @@ -206,9 +213,9 @@ ClickHouse 不要求主键惟一,所以你可以插入多条具有相同主键 想要根据初始顺序进行数据查询,使用 [单线程查询](../../../operations/settings/settings.md#settings-max_threads) -### 选择与排序键不同主键 {#choosing-a-primary-key-that-differs-from-the-sorting-key} +### 选择与排序键不同的主键 {#choosing-a-primary-key-that-differs-from-the-sorting-key} -指定一个跟排序键不一样的主键是可以的,此时排序键用于在数据片段中进行排序,主键用于在索引文件中进行标记的写入。这种情况下,主键表达式元组必须是排序键表达式元组的前缀。 +Clickhouse可以做到指定一个跟排序键不一样的主键,此时排序键用于在数据片段中进行排序,主键用于在索引文件中进行标记的写入。这种情况下,主键表达式元组必须是排序键表达式元组的前缀(即主键为(a,b),排序列必须为(a,b,******))。 当使用 [SummingMergeTree](summingmergetree.md) 和 [AggregatingMergeTree](aggregatingmergetree.md) 引擎时,这个特性非常有用。通常在使用这类引擎时,表里的列分两种:*维度* 和 *度量* 。典型的查询会通过任意的 `GROUP BY` 对度量列进行聚合并通过维度列进行过滤。由于 SummingMergeTree 和 AggregatingMergeTree 会对排序键相同的行进行聚合,所以把所有的维度放进排序键是很自然的做法。但这将导致排序键中包含大量的列,并且排序键会伴随着新添加的维度不断的更新。 @@ -218,14 +225,20 @@ ClickHouse 不要求主键惟一,所以你可以插入多条具有相同主键 ### 索引和分区在查询中的应用 {#use-of-indexes-and-partitions-in-queries} -对于 `SELECT` 查询,ClickHouse 分析是否可以使用索引。如果 `WHERE/PREWHERE` 子句具有下面这些表达式(作为谓词链接一子项或整个)则可以使用索引:包含一个表示与主键/分区键中的部分字段或全部字段相等/不等的比较表达式;基于主键/分区键的字段上的 `IN` 或 固定前缀的`LIKE` 表达式;基于主键/分区键的字段上的某些函数;基于主键/分区键的表达式的逻辑表达式。 +对于 `SELECT` 查询,ClickHouse 分析是否可以使用索引。如果 `WHERE/PREWHERE` 子句具有下面这些表达式(作为完整WHERE条件的一部分或全部)则可以使用索引:进行相等/不相等的比较;对主键列或分区列进行`IN`运算、有固定前缀的`LIKE`运算(如name like 'test%')、函数运算(部分函数适用),还有对上述表达式进行逻辑运算。 + + -因此,在索引键的一个或多个区间上快速地执行查询都是可能的。下面例子中,指定标签;指定标签和日期范围;指定标签和日期;指定多个标签和日期范围等执行查询,都会非常快。 + + +因此,在索引键的一个或多个区间上快速地执行查询是可能的。下面例子中,指定标签;指定标签和日期范围;指定标签和日期;指定多个标签和日期范围等执行查询,都会非常快。 当引擎配置如下时: +``` sql ENGINE MergeTree() PARTITION BY toYYYYMM(EventDate) ORDER BY (CounterID, EventDate) SETTINGS index_granularity=8192 +``` 这种情况下,这些查询: @@ -237,7 +250,7 @@ SELECT count() FROM table WHERE ((EventDate >= toDate('2014-01-01') AND EventDat ClickHouse 会依据主键索引剪掉不符合的数据,依据按月分区的分区键剪掉那些不包含符合数据的分区。 -上文的查询显示,即使索引用于复杂表达式。因为读表操作是组织好的,所以,使用索引不会比完整扫描慢。 +上文的查询显示,即使索引用于复杂表达式,因为读表操作经过优化,所以使用索引不会比完整扫描慢。 下面这个例子中,不会使用索引。 @@ -247,17 +260,16 @@ SELECT count() FROM table WHERE CounterID = 34 OR URL LIKE '%upyachka%' 要检查 ClickHouse 执行一个查询时能否使用索引,可设置 [force_index_by_date](../../../operations/settings/settings.md#settings-force_index_by_date) 和 [force_primary_key](../../../operations/settings/settings.md) 。 -按月分区的分区键是只能读取包含适当范围日期的数据块。这种情况下,数据块会包含很多天(最多整月)的数据。在块中,数据按主键排序,主键第一列可能不包含日期。因此,仅使用日期而没有带主键前几个字段作为条件的查询将会导致需要读取超过这个指定日期以外的数据。 +使用按月分区的分区列允许只读取包含适当日期区间的数据块,这种情况下,数据块会包含很多天(最多整月)的数据。在块中,数据按主键排序,主键第一列可能不包含日期。因此,仅使用日期而没有用主键字段作为条件的查询将会导致需要读取超过这个指定日期以外的数据。 ### 部分单调主键的使用 -考虑这样的场景,比如一个月中的几天。它们在一个月的范围内形成一个[单调序列](https://zh.wikipedia.org/wiki/单调函数) ,但如果扩展到更大的时间范围它们就不再单调了。这就是一个部分单调序列。如果用户使用部分单调的主键创建表,ClickHouse同样会创建一个稀疏索引。当用户从这类表中查询数据时,ClickHouse 会对查询条件进行分析。如果用户希望获取两个索引标记之间的数据并且这两个标记在一个月以内,ClickHouse 可以在这种特殊情况下使用到索引,因为它可以计算出查询参数与索引标记之间的距离。 +考虑这样的场景,比如一个月中的天数。它们在一个月的范围内形成一个[单调序列](https://zh.wikipedia.org/wiki/单调函数) ,但如果扩展到更大的时间范围它们就不再单调了。这就是一个部分单调序列。如果用户使用部分单调的主键创建表,ClickHouse同样会创建一个稀疏索引。当用户从这类表中查询数据时,ClickHouse 会对查询条件进行分析。如果用户希望获取两个索引标记之间的数据并且这两个标记在一个月以内,ClickHouse 可以在这种特殊情况下使用到索引,因为它可以计算出查询参数与索引标记之间的距离。 如果查询参数范围内的主键不是单调序列,那么 ClickHouse 
无法使用索引。在这种情况下,ClickHouse 会进行全表扫描。 ClickHouse 在任何主键代表一个部分单调序列的情况下都会使用这个逻辑。 - ### 跳数索引 {#tiao-shu-suo-yin-fen-duan-hui-zong-suo-yin-shi-yan-xing-de} 此索引在 `CREATE` 语句的列部分里定义。 @@ -267,11 +279,7 @@ INDEX index_name expr TYPE type(...) GRANULARITY granularity_value ``` `*MergeTree` 系列的表可以指定跳数索引。 - -这些索引是由数据块按粒度分割后的每部分在指定表达式上汇总信息 `granularity_value` 组成(粒度大小用表引擎里 `index_granularity` 的指定)。 -这些汇总信息有助于用 `where` 语句跳过大片不满足的数据,从而减少 `SELECT` 查询从磁盘读取的数据量, - -这些索引会在数据块上聚合指定表达式的信息,这些信息以 granularity_value 指定的粒度组成 (粒度的大小通过在表引擎中定义 index_granularity 定义)。这些汇总信息有助于跳过大片不满足 `where` 条件的数据,从而减少 `SELECT` 查询从磁盘读取的数据量。 +跳数索引是指数据片段按照粒度(建表时指定的`index_granularity`)分割成小块后,将上述SQL的granularity_value数量的小块组合成一个大的块,对这些大块写入索引信息,这样有助于使用`where`筛选时跳过大量不必要的数据,减少`SELECT`需要读取的数据量。 **示例** @@ -295,34 +303,32 @@ SELECT count() FROM table WHERE s < 'z' SELECT count() FROM table WHERE u64 * i32 == 10 AND u64 * length(s) >= 1234 ``` -#### 索引的可用类型 {#table_engine-mergetree-data_skipping-indexes} +#### 可用的索引类型 {#table_engine-mergetree-data_skipping-indexes} -- `minmax` +- `minmax` 存储指定表达式的极值(如果表达式是 `tuple` ,则存储 `tuple` 中每个元素的极值),这些信息用于跳过数据块,类似主键。 -- `set(max_rows)` - 存储指定表达式的不重复值(不超过 `max_rows` 个,`max_rows=0` 则表示『无限制』)。这些信息可用于检查 数据块是否满足 `WHERE` 条件。 +- `set(max_rows)` + 存储指定表达式的不重复值(不超过 `max_rows` 个,`max_rows=0` 则表示『无限制』)。这些信息可用于检查数据块是否满足 `WHERE` 条件。 -- `ngrambf_v1(n, size_of_bloom_filter_in_bytes, number_of_hash_functions, random_seed)` +- `ngrambf_v1(n, size_of_bloom_filter_in_bytes, number_of_hash_functions, random_seed)` 存储一个包含数据块中所有 n元短语(ngram) 的 [布隆过滤器](https://en.wikipedia.org/wiki/Bloom_filter) 。只可用在字符串上。 可用于优化 `equals` , `like` 和 `in` 表达式的性能。 - `n` – 短语长度。 - `size_of_bloom_filter_in_bytes` – 布隆过滤器大小,单位字节。(因为压缩得好,可以指定比较大的值,如 256 或 512)。 - `number_of_hash_functions` – 布隆过滤器中使用的哈希函数的个数。 - `random_seed` – 哈希函数的随机种子。 + - `n` – 短语长度。 + - `size_of_bloom_filter_in_bytes` – 布隆过滤器大小,字节为单位。(因为压缩得好,可以指定比较大的值,如 256 或 512)。 + - `number_of_hash_functions` – 布隆过滤器中使用的哈希函数的个数。 + - `random_seed` – 哈希函数的随机种子。 -- `tokenbf_v1(size_of_bloom_filter_in_bytes, number_of_hash_functions, random_seed)` - 跟 `ngrambf_v1` 类似,不同于 ngrams 存储字符串指定长度的所有片段。它只存储被非字母数字字符分割的片段。 +- `tokenbf_v1(size_of_bloom_filter_in_bytes, number_of_hash_functions, random_seed)` + 跟 `ngrambf_v1` 类似,但是存储的是token而不是ngrams。Token是由非字母数字的符号分割的序列。 -- `bloom_filter(bloom_filter([false_positive])` – 为指定的列存储布隆过滤器 +- `bloom_filter(bloom_filter([false_positive])` – 为指定的列存储布隆过滤器 + + 可选参数`false_positive`用来指定从布隆过滤器收到错误响应的几率。取值范围是 (0,1),默认值:0.025 - 可选的参数 false_positive 用来指定从布隆过滤器收到错误响应的几率。取值范围是 (0,1),默认值:0.025 - 支持的数据类型:`Int*`, `UInt*`, `Float*`, `Enum`, `Date`, `DateTime`, `String`, `FixedString`, `Array`, `LowCardinality`, `Nullable`。 - + 以下函数会用到这个索引: [equals](../../../sql-reference/functions/comparison-functions.md), [notEquals](../../../sql-reference/functions/comparison-functions.md), [in](../../../sql-reference/functions/in-functions.md), [notIn](../../../sql-reference/functions/in-functions.md), [has](../../../sql-reference/functions/array-functions.md) - - ``` sql INDEX sample_index (u64 * length(s)) TYPE minmax GRANULARITY 4 @@ -332,56 +338,56 @@ INDEX sample_index3 (lower(str), str) TYPE ngrambf_v1(3, 256, 2, 0) GRANULARITY #### 函数支持 {#functions-support} -WHERE 子句中的条件包含对列的函数调用,如果列是索引的一部分,ClickHouse 会在执行函数时尝试使用索引。不同的函数对索引的支持是不同的。 +WHERE 子句中的条件可以包含对某列数据进行运算的函数表达式,如果列是索引的一部分,ClickHouse会在执行函数时尝试使用索引。不同的函数对索引的支持是不同的。 `set` 索引会对所有函数生效,其他索引对函数的生效情况见下表 -| 函数 (操作符) / 索引 | primary key | minmax | ngrambf_v1 | tokenbf_v1 | bloom_filter | 
-|------------------------------------------------------------------------------------------------------------|-------------|--------|-------------|-------------|---------------| -| [equals (=, ==)](../../../sql-reference/functions/comparison-functions.md#function-equals) | ✔ | ✔ | ✔ | ✔ | ✔ | -| [notEquals(!=, \<\>)](../../../sql-reference/functions/comparison-functions.md#function-notequals) | ✔ | ✔ | ✔ | ✔ | ✔ | -| [like](../../../sql-reference/functions/string-search-functions.md#function-like) | ✔ | ✔ | ✔ | ✔ | ✔ | -| [notLike](../../../sql-reference/functions/string-search-functions.md#function-notlike) | ✔ | ✔ | ✗ | ✗ | ✗ | -| [startsWith](../../../sql-reference/functions/string-functions.md#startswith) | ✔ | ✔ | ✔ | ✔ | ✗ | -| [endsWith](../../../sql-reference/functions/string-functions.md#endswith) | ✗ | ✗ | ✔ | ✔ | ✗ | -| [multiSearchAny](../../../sql-reference/functions/string-search-functions.md#function-multisearchany) | ✗ | ✗ | ✔ | ✗ | ✗ | -| [in](../../../sql-reference/functions/in-functions.md#in-functions) | ✔ | ✔ | ✔ | ✔ | ✔ | -| [notIn](../../../sql-reference/functions/in-functions.md#in-functions) | ✔ | ✔ | ✔ | ✔ | ✔ | -| [less (\<)](../../../sql-reference/functions/comparison-functions.md#function-less) | ✔ | ✔ | ✗ | ✗ | ✗ | -| [greater (\>)](../../../sql-reference/functions/comparison-functions.md#function-greater) | ✔ | ✔ | ✗ | ✗ | ✗ | -| [lessOrEquals (\<=)](../../../sql-reference/functions/comparison-functions.md#function-lessorequals) | ✔ | ✔ | ✗ | ✗ | ✗ | -| [greaterOrEquals (\>=)](../../../sql-reference/functions/comparison-functions.md#function-greaterorequals) | ✔ | ✔ | ✗ | ✗ | ✗ | -| [empty](../../../sql-reference/functions/array-functions.md#function-empty) | ✔ | ✔ | ✗ | ✗ | ✗ | -| [notEmpty](../../../sql-reference/functions/array-functions.md#function-notempty) | ✔ | ✔ | ✗ | ✗ | ✗ | -| hasToken | ✗ | ✗ | ✗ | ✔ | ✗ | +| 函数 (操作符) / 索引 | primary key | minmax | ngrambf_v1 | tokenbf_v1 | bloom_filter | +| ------------------------------------------------------------ | ----------- | ------ | ---------- | ---------- | ------------ | +| [equals (=, ==)](../../../sql-reference/functions/comparison-functions.md#function-equals) | ✔ | ✔ | ✔ | ✔ | ✔ | +| [notEquals(!=, \<\>)](../../../sql-reference/functions/comparison-functions.md#function-notequals) | ✔ | ✔ | ✔ | ✔ | ✔ | +| [like](../../../sql-reference/functions/string-search-functions.md#function-like) | ✔ | ✔ | ✔ | ✔ | ✔ | +| [notLike](../../../sql-reference/functions/string-search-functions.md#function-notlike) | ✔ | ✔ | ✗ | ✗ | ✗ | +| [startsWith](../../../sql-reference/functions/string-functions.md#startswith) | ✔ | ✔ | ✔ | ✔ | ✗ | +| [endsWith](../../../sql-reference/functions/string-functions.md#endswith) | ✗ | ✗ | ✔ | ✔ | ✗ | +| [multiSearchAny](../../../sql-reference/functions/string-search-functions.md#function-multisearchany) | ✗ | ✗ | ✔ | ✗ | ✗ | +| [in](../../../sql-reference/functions/in-functions.md#in-functions) | ✔ | ✔ | ✔ | ✔ | ✔ | +| [notIn](../../../sql-reference/functions/in-functions.md#in-functions) | ✔ | ✔ | ✔ | ✔ | ✔ | +| [less (\<)](../../../sql-reference/functions/comparison-functions.md#function-less) | ✔ | ✔ | ✗ | ✗ | ✗ | +| [greater (\>)](../../../sql-reference/functions/comparison-functions.md#function-greater) | ✔ | ✔ | ✗ | ✗ | ✗ | +| [lessOrEquals (\<=)](../../../sql-reference/functions/comparison-functions.md#function-lessorequals) | ✔ | ✔ | ✗ | ✗ | ✗ | +| [greaterOrEquals (\>=)](../../../sql-reference/functions/comparison-functions.md#function-greaterorequals) | ✔ | ✔ | ✗ | ✗ | ✗ | 
+| [empty](../../../sql-reference/functions/array-functions.md#function-empty) | ✔ | ✔ | ✗ | ✗ | ✗ | +| [notEmpty](../../../sql-reference/functions/array-functions.md#function-notempty) | ✔ | ✔ | ✗ | ✗ | ✗ | +| hasToken | ✗ | ✗ | ✗ | ✔ | ✗ | 常量参数小于 ngram 大小的函数不能使用 `ngrambf_v1` 进行查询优化。 !!! note "注意" -布隆过滤器可能会包含不符合条件的匹配,所以 `ngrambf_v1`, `tokenbf_v1` 和 `bloom_filter` 索引不能用于负向的函数,例如: +布隆过滤器可能会包含不符合条件的匹配,所以 `ngrambf_v1`, `tokenbf_v1` 和 `bloom_filter` 索引不能用于结果返回为假的函数,例如: -- 可以用来优化的场景 - - `s LIKE '%test%'` - - `NOT s NOT LIKE '%test%'` - - `s = 1` - - `NOT s != 1` - - `startsWith(s, 'test')` -- 不能用来优化的场景 - - `NOT s LIKE '%test%'` - - `s NOT LIKE '%test%'` - - `NOT s = 1` - - `s != 1` - - `NOT startsWith(s, 'test')` +- 可以用来优化的场景 + - `s LIKE '%test%'` + - `NOT s NOT LIKE '%test%'` + - `s = 1` + - `NOT s != 1` + - `startsWith(s, 'test')` +- 不能用来优化的场景 + - `NOT s LIKE '%test%'` + - `s NOT LIKE '%test%'` + - `NOT s = 1` + - `s != 1` + - `NOT startsWith(s, 'test')` ## 并发数据访问 {#concurrent-data-access} -应对表的并发访问,我们使用多版本机制。换言之,当同时读和更新表时,数据从当前查询到的一组片段中读取。没有冗长的的锁。插入不会阻碍读取。 +对于表的并发访问,我们使用多版本机制。换言之,当一张表同时被读和更新时,数据从当前查询到的一组片段中读取。没有冗长的的锁。插入不会阻碍读取。 对表的读操作是自动并行的。 ## 列和表的 TTL {#table_engine-mergetree-ttl} -TTL 可以设置值的生命周期,它既可以为整张表设置,也可以为每个列字段单独设置。表级别的 TTL 还会指定数据在磁盘和卷上自动转移的逻辑。 +TTL用于设置值的生命周期,它既可以为整张表设置,也可以为每个列字段单独设置。表级别的 TTL 还会指定数据在磁盘和卷上自动转移的逻辑。 TTL 表达式的计算结果必须是 [日期](../../../engines/table-engines/mergetree-family/mergetree.md) 或 [日期时间](../../../engines/table-engines/mergetree-family/mergetree.md) 类型的字段。 @@ -405,7 +411,7 @@ TTL date_time + INTERVAL 15 HOUR `TTL`子句不能被用于主键字段。 -示例: +**示例:** 创建表时指定 `TTL` @@ -443,16 +449,23 @@ ALTER TABLE example_table 表可以设置一个用于移除过期行的表达式,以及多个用于在磁盘或卷上自动转移数据片段的表达式。当表中的行过期时,ClickHouse 会删除所有对应的行。对于数据片段的转移特性,必须所有的行都满足转移条件。 ``` sql -TTL expr [DELETE|TO DISK 'aaa'|TO VOLUME 'bbb'], ... +TTL expr + [DELETE|TO DISK 'xxx'|TO VOLUME 'xxx'][, DELETE|TO DISK 'aaa'|TO VOLUME 'bbb'] ... + [WHERE conditions] + [GROUP BY key_expr [SET v1 = aggr_func(v1) [, v2 = aggr_func(v2) ...]] ] + ``` TTL 规则的类型紧跟在每个 TTL 表达式后面,它会影响满足表达式时(到达指定时间时)应当执行的操作: -- `DELETE` - 删除过期的行(默认操作); -- `TO DISK 'aaa'` - 将数据片段移动到磁盘 `aaa`; -- `TO VOLUME 'bbb'` - 将数据片段移动到卷 `bbb`. +- `DELETE` - 删除过期的行(默认操作); +- `TO DISK 'aaa'` - 将数据片段移动到磁盘 `aaa`; +- `TO VOLUME 'bbb'` - 将数据片段移动到卷 `bbb`. 
+- `GROUP BY` - 聚合过期的行 -示例: +使用`WHERE`从句,您可以指定哪些过期的行会被删除或聚合(不适用于移动)。`GROUP BY`表达式必须是表主键的前缀。如果某列不是`GROUP BY`表达式的一部分,也没有在SET从句显示引用,结果行中相应列的值是随机的(就好像使用了`any`函数)。 + +**示例**: 创建时指定 TTL @@ -477,19 +490,49 @@ ALTER TABLE example_table MODIFY TTL d + INTERVAL 1 DAY; ``` +创建一张表,设置一个月后数据过期,这些过期的行中日期为星期一的删除: + +``` sql +CREATE TABLE table_with_where +( + d DateTime, + a Int +) +ENGINE = MergeTree +PARTITION BY toYYYYMM(d) +ORDER BY d +TTL d + INTERVAL 1 MONTH DELETE WHERE toDayOfWeek(d) = 1; +``` + +创建一张表,设置过期的列会被聚合。列`x`包含每组行中的最大值,`y`为最小值,`d`为可能任意值。 + +``` sql +CREATE TABLE table_for_aggregation +( + d DateTime, + k1 Int, + k2 Int, + x Int, + y Int +) +ENGINE = MergeTree +ORDER BY (k1, k2) +TTL d + INTERVAL 1 MONTH GROUP BY k1, k2 SET x = max(x), y = min(y); +``` + **删除数据** ClickHouse 在数据片段合并时会删除掉过期的数据。 -当ClickHouse发现数据过期时, 它将会执行一个计划外的合并。要控制这类合并的频率, 你可以设置 `merge_with_ttl_timeout`。如果该值被设置的太低, 它将引发大量计划外的合并,这可能会消耗大量资源。 +当ClickHouse发现数据过期时, 它将会执行一个计划外的合并。要控制这类合并的频率, 您可以设置 `merge_with_ttl_timeout`。如果该值被设置的太低, 它将引发大量计划外的合并,这可能会消耗大量资源。 -如果在合并的过程中执行 `SELECT` 查询, 则可能会得到过期的数据。为了避免这种情况,可以在 `SELECT` 之前使用 [OPTIMIZE](../../../engines/table-engines/mergetree-family/mergetree.md#misc_operations-optimize) 查询。 +如果在合并的过程中执行 `SELECT` 查询, 则可能会得到过期的数据。为了避免这种情况,可以在 `SELECT` 之前使用 [OPTIMIZE](../../../engines/table-engines/mergetree-family/mergetree.md#misc_operations-optimize) 。 -## 使用具有多个块的设备进行数据存储 {#table_engine-mergetree-multiple-volumes} +## 使用多个块设备进行数据存储 {#table_engine-mergetree-multiple-volumes} ### 介绍 {#introduction} -MergeTree 系列表引擎可以将数据存储在多块设备上。这对某些可以潜在被划分为“冷”“热”的表来说是很有用的。近期数据被定期的查询但只需要很小的空间。相反,详尽的历史数据很少被用到。如果有多块磁盘可用,那么“热”的数据可以放置在快速的磁盘上(比如 NVMe 固态硬盘或内存),“冷”的数据可以放在相对较慢的磁盘上(比如机械硬盘)。 +MergeTree 系列表引擎可以将数据存储在多个块设备上。这对某些可以潜在被划分为“冷”“热”的表来说是很有用的。最新数据被定期的查询但只需要很小的空间。相反,详尽的历史数据很少被用到。如果有多块磁盘可用,那么“热”的数据可以放置在快速的磁盘上(比如 NVMe 固态硬盘或内存),“冷”的数据可以放在相对较慢的磁盘上(比如机械硬盘)。 数据片段是 `MergeTree` 引擎表的最小可移动单元。属于同一个数据片段的数据被存储在同一块磁盘上。数据片段会在后台自动的在磁盘间移动,也可以通过 [ALTER](../../../sql-reference/statements/alter.md#alter_move-partition) 查询来移动。 @@ -497,12 +540,14 @@ MergeTree 系列表引擎可以将数据存储在多块设备上。这对某些 - 磁盘 — 挂载到文件系统的块设备 - 默认磁盘 — 在服务器设置中通过 [path](../../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-path) 参数指定的数据存储 -- 卷 — 磁盘的等效有序集合 (类似于 [JBOD](https://en.wikipedia.org/wiki/Non-RAID_drive_architectures)) +- 卷 — 相同磁盘的顺序列表 (类似于 [JBOD](https://en.wikipedia.org/wiki/Non-RAID_drive_architectures)) - 存储策略 — 卷的集合及他们之间的数据移动规则 + 以上名称的信息在Clickhouse中系统表[system.storage_policies](https://clickhouse.tech/docs/zh/operations/system-tables/storage_policies/#system_tables-storage_policies)和[system.disks](https://clickhouse.tech/docs/zh/operations/system-tables/disks/#system_tables-disks)体现。为了应用存储策略,可以在建表时使用`storage_policy`设置。 + ### 配置 {#table_engine-mergetree-multiple-volumes_configure} -磁盘、卷和存储策略应当在主文件 `config.xml` 或 `config.d` 目录中的独立文件中的 `` 标签内定义。 +磁盘、卷和存储策略应当在主配置文件 `config.xml` 或 `config.d` 目录中的独立文件中的 `` 标签内定义。 配置结构: @@ -530,9 +575,9 @@ MergeTree 系列表引擎可以将数据存储在多块设备上。这对某些 标签: -- `` — 磁盘名,名称必须与其他磁盘不同. -- `path` — 服务器将用来存储数据 (`data` 和 `shadow` 目录) 的路径, 应当以 ‘/’ 结尾. -- `keep_free_space_bytes` — 需要保留的剩余磁盘空间. +- `` — 磁盘名,名称必须与其他磁盘不同. +- `path` — 服务器将用来存储数据 (`data` 和 `shadow` 目录) 的路径, 应当以 ‘/’ 结尾. +- `keep_free_space_bytes` — 需要保留的剩余磁盘空间. 
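(补充示意,非本补丁内容:结合上面的标签说明,一个最小的磁盘定义大致如下;`fast_ssd` 与 `disk1` 是后文策略示例中引用的磁盘名,路径为假设值:)

``` xml
<storage_configuration>
    <disks>
        <fast_ssd>
            <path>/mnt/fast_ssd/clickhouse/</path>
        </fast_ssd>
        <disk1>
            <path>/mnt/hdd1/clickhouse/</path>
            <keep_free_space_bytes>10485760</keep_free_space_bytes>
        </disk1>
    </disks>
</storage_configuration>
```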
磁盘定义的顺序无关紧要。 @@ -567,11 +612,12 @@ MergeTree 系列表引擎可以将数据存储在多块设备上。这对某些 标签: -- `policy_name_N` — 策略名称,不能重复。 -- `volume_name_N` — 卷名称,不能重复。 -- `disk` — 卷中的磁盘。 -- `max_data_part_size_bytes` — 任意卷上的磁盘可以存储的数据片段的最大大小。 -- `move_factor` — 当可用空间少于这个因子时,数据将自动的向下一个卷(如果有的话)移动 (默认值为 0.1)。 +- `policy_name_N` — 策略名称,不能重复。 +- `volume_name_N` — 卷名称,不能重复。 +- `disk` — 卷中的磁盘。 +- `max_data_part_size_bytes` — 卷中的磁盘可以存储的数据片段的最大大小。 +- `move_factor` — 当可用空间少于这个因子时,数据将自动的向下一个卷(如果有的话)移动 (默认值为 0.1)。 +- `prefer_not_to_merge` - 禁止在这个卷中进行数据合并。该选项启用时,对该卷的数据不能进行合并。这个选项主要用于慢速磁盘。 配置示例: @@ -600,19 +646,31 @@ MergeTree 系列表引擎可以将数据存储在多块设备上。这对某些 0.2 + + + +
+        <small_jbod_with_external_no_merges>
+            <volumes>
+                <main>
+                    <disk>jbod1</disk>
+                </main>
+                <external>
+                    <disk>external</disk>
+                    <prefer_not_to_merge>true</prefer_not_to_merge>
+                </external>
+            </volumes>
+        </small_jbod_with_external_no_merges>
     </policies>
     ...
 </storage_configuration>
``` -在给出的例子中, `hdd_in_order` 策略实现了 [循环制](https://zh.wikipedia.org/wiki/循环制) 方法。因此这个策略只定义了一个卷(`single`),数据片段会以循环的顺序全部存储到它的磁盘上。当有多个类似的磁盘挂载到系统上,但没有配置 RAID 时,这种策略非常有用。请注意一个每个独立的磁盘驱动都并不可靠,你可能需要用 3 或更大的复制因此来补偿它。 +在给出的例子中, `hdd_in_order` 策略实现了 [循环制](https://zh.wikipedia.org/wiki/循环制) 方法。因此这个策略只定义了一个卷(`single`),数据片段会以循环的顺序全部存储到它的磁盘上。当有多个类似的磁盘挂载到系统上,但没有配置 RAID 时,这种策略非常有用。请注意一个每个独立的磁盘驱动都并不可靠,您可能需要用3份或更多的复制份数来补偿它。 如果在系统中有不同类型的磁盘可用,可以使用 `moving_from_ssd_to_hdd`。`hot` 卷由 SSD 磁盘(`fast_ssd`)组成,这个卷上可以存储的数据片段的最大大小为 1GB。所有大于 1GB 的数据片段都会被直接存储到 `cold` 卷上,`cold` 卷包含一个名为 `disk1` 的 HDD 磁盘。 同样,一旦 `fast_ssd` 被填充超过 80%,数据会通过后台进程向 `disk1` 进行转移。 存储策略中卷的枚举顺序是很重要的。因为当一个卷被充满时,数据会向下一个卷转移。磁盘的枚举顺序同样重要,因为数据是依次存储在磁盘上的。 -在创建表时,可以将一个配置好的策略应用到表: +在创建表时,可以应用存储策略: ``` sql CREATE TABLE table_with_non_default_policy ( @@ -626,7 +684,7 @@ PARTITION BY toYYYYMM(EventDate) SETTINGS storage_policy = 'moving_from_ssd_to_hdd' ``` -`default` 存储策略意味着只使用一个卷,这个卷只包含一个在 `` 中定义的磁盘。表创建后,它的存储策略就不能改变了。 +`default` 存储策略意味着只使用一个卷,这个卷只包含一个在 `` 中定义的磁盘。您可以使用[ALTER TABLE ... MODIFY SETTING]来修改存储策略,新的存储策略应该包含所有以前的磁盘和卷,并使用相同的名称。 可以通过 [background_move_pool_size](../../../operations/settings/settings.md#background_move_pool_size) 设置调整执行后台任务的线程数。 @@ -634,24 +692,121 @@ SETTINGS storage_policy = 'moving_from_ssd_to_hdd' 对于 `MergeTree` 表,数据通过以下不同的方式写入到磁盘当中: -- 作为插入(`INSERT`查询)的结果 -- 在后台合并和[数据变异](../../../sql-reference/statements/alter.md#alter-mutations)期间 -- 当从另一个副本下载时 -- 作为 [ALTER TABLE … FREEZE PARTITION](../../../sql-reference/statements/alter.md#alter_freeze-partition) 冻结分区的结果 +- 插入(`INSERT`查询) +- 后台合并和[数据变异](../../../sql-reference/statements/alter.md#alter-mutations) +- 从另一个副本下载 +- [ALTER TABLE … FREEZE PARTITION](../../../sql-reference/statements/alter.md#alter_freeze-partition) 冻结分区 除了数据变异和冻结分区以外的情况下,数据按照以下逻辑存储到卷或磁盘上: -1. 首个卷(按定义顺序)拥有足够的磁盘空间存储数据片段(`unreserved_space > current_part_size`)并且允许存储给定数据片段的大小(`max_data_part_size_bytes > current_part_size`) -2. 在这个数据卷内,紧挨着先前存储数据的那块磁盘之后的磁盘,拥有比数据片段大的剩余空间。(`unreserved_space - keep_free_space_bytes > current_part_size`) +1. 首个卷(按定义顺序)拥有足够的磁盘空间存储数据片段(`unreserved_space > current_part_size`)并且允许存储给定数据片段的大小(`max_data_part_size_bytes > current_part_size`) +2. 
在这个数据卷内,紧挨着先前存储数据的那块磁盘之后的磁盘,拥有比数据片段大的剩余空间。(`unreserved_space - keep_free_space_bytes > current_part_size`) -更进一步,数据变异和分区冻结使用的是 [硬链接](https://en.wikipedia.org/wiki/Hard_link)。不同磁盘之间的硬链接是不支持的,所以在这种情况下数据片段都会被存储到初始化的那一块磁盘上。 +更进一步,数据变异和分区冻结使用的是 [硬链接](https://en.wikipedia.org/wiki/Hard_link)。不同磁盘之间的硬链接是不支持的,所以在这种情况下数据片段都会被存储到原来的那一块磁盘上。 -在后台,数据片段基于剩余空间(`move_factor`参数)根据卷在配置文件中定义的顺序进行转移。数据永远不会从最后一个移出也不会从第一个移入。可以通过系统表 [system.part_log](../../../operations/system-tables/part_log.md#system_tables-part-log) (字段 `type = MOVE_PART`) 和 [system.parts](../../../operations/system-tables/parts.md#system_tables-parts) (字段 `path` 和 `disk`) 来监控后台的移动情况。同时,具体细节可以通过服务器日志查看。 +在后台,数据片段基于剩余空间(`move_factor`参数)根据卷在配置文件中定义的顺序进行转移。数据永远不会从最后一个移出也不会从第一个移入。可以通过系统表 [system.part_log](../../../operations/system-tables/part_log.md#system_tables-part-log) (字段 `type = MOVE_PART`) 和 [system.parts](../../../operations/system-tables/parts.md#system_tables-parts) (字段 `path` 和 `disk`) 来监控后台的移动情况。具体细节可以通过服务器日志查看。 用户可以通过 [ALTER TABLE … MOVE PART\|PARTITION … TO VOLUME\|DISK …](../../../sql-reference/statements/alter.md#alter_move-partition) 强制移动一个数据片段或分区到另外一个卷,所有后台移动的限制都会被考虑在内。这个查询会自行启动,无需等待后台操作完成。如果没有足够的可用空间或任何必须条件没有被满足,用户会收到报错信息。 数据移动不会妨碍到数据复制。也就是说,同一张表的不同副本可以指定不同的存储策略。 -在后台合并和数据变异之后,就的数据片段会在一定时间后被移除 (`old_parts_lifetime`)。在这期间,他们不能被移动到其他的卷或磁盘。也就是说,直到数据片段被完全移除,它们仍然会被磁盘占用空间计算在内。 +在后台合并和数据变异之后,旧的数据片段会在一定时间后被移除 (`old_parts_lifetime`)。在这期间,他们不能被移动到其他的卷或磁盘。也就是说,直到数据片段被完全移除,它们仍然会被磁盘占用空间计算在内。 + +## 使用S3进行数据存储 {#using-s3-data-storage} + +`MergeTree`系列表引擎允许使用[S3](https://aws.amazon.com/s3/)存储数据,需要修改磁盘类型为`S3`。 + +示例配置: + +``` xml + + ... + + + s3 + https://storage.yandexcloud.net/my-bucket/root-path/ + your_access_key_id + your_secret_access_key + + your_base64_encoded_customer_key + + http://proxy1 + http://proxy2 + + 10000 + 5000 + 10 + 4 + 1000 + /var/lib/clickhouse/disks/s3/ + true + /var/lib/clickhouse/disks/s3/cache/ + false + + + ... + +``` + +必须的参数: + +- `endpoint` - S3的结点URL,以`path`或`virtual hosted`[格式](https://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html)书写。 +- `access_key_id` - S3的Access Key ID。 +- `secret_access_key` - S3的Secret Access Key。 + +可选参数: + +- `region` - S3的区域名称 +- `use_environment_credentials` - 从环境变量AWS_ACCESS_KEY_ID、AWS_SECRET_ACCESS_KEY和AWS_SESSION_TOKEN中读取认证参数。默认值为`false`。 +- `use_insecure_imds_request` - 如果设置为`true`,S3客户端在认证时会使用不安全的IMDS请求。默认值为`false`。 +- `proxy` - 访问S3结点URL时代理设置。每一个`uri`项的值都应该是合法的代理URL。 +- `connect_timeout_ms` - Socket连接超时时间,默认值为`10000`,即10秒。 +- `request_timeout_ms` - 请求超时时间,默认值为`5000`,即5秒。 +- `retry_attempts` - 请求失败后的重试次数,默认值为10。 +- `single_read_retries` - 读过程中连接丢失后重试次数,默认值为4。 +- `min_bytes_for_seek` - 使用查找操作,而不是顺序读操作的最小字节数,默认值为1000。 +- `metadata_path` - 本地存放S3元数据文件的路径,默认值为`/var/lib/clickhouse/disks//` +- `cache_enabled` - 是否允许缓存标记和索引文件。默认值为`true`。 +- `cache_path` - 本地缓存标记和索引文件的路径。默认值为`/var/lib/clickhouse/disks//cache/`。 +- `skip_access_check` - 如果为`true`,Clickhouse启动时不检查磁盘是否可用。默认为`false`。 +- `server_side_encryption_customer_key_base64` - 如果指定该项的值,请求时会加上为了访问SSE-C加密数据而必须的头信息。 + +S3磁盘也可以设置冷热存储: +```xml + + ... + + + s3 + https://storage.yandexcloud.net/my-bucket/root-path/ + your_access_key_id + your_secret_access_key + + + + + +
+                <main>
+                    <disk>s3</disk>
+                </main>
+            </volumes>
+        </s3_main>
+        <s3_cold>
+            <volumes>
+                <main>
+                    <disk>default</disk>
+                </main>
+                <external>
+                    <disk>s3</disk>
+                </external>
+            </volumes>
+            <move_factor>0.2</move_factor>
+        </s3_cold>
+    </policies>
+    ...
+</storage_configuration>
+``` + +指定了`cold`选项后,本地磁盘剩余空间如果小于`move_factor * disk_size`,或有TTL设置时,数据就会定时迁移至S3了。 [原始文章](https://clickhouse.tech/docs/en/operations/table_engines/mergetree/) From 46a5dd67013311592dd30065c7bde659851c6e99 Mon Sep 17 00:00:00 2001 From: vdimir Date: Mon, 21 Jun 2021 16:01:02 +0300 Subject: [PATCH 219/931] Fix MergeTreeBaseSelectProcessor::executePrewhereActions --- src/Storages/MergeTree/MergeTreeBaseSelectProcessor.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Storages/MergeTree/MergeTreeBaseSelectProcessor.cpp b/src/Storages/MergeTree/MergeTreeBaseSelectProcessor.cpp index 4ff593ea1c1..5f0a0f298af 100644 --- a/src/Storages/MergeTree/MergeTreeBaseSelectProcessor.cpp +++ b/src/Storages/MergeTree/MergeTreeBaseSelectProcessor.cpp @@ -431,7 +431,7 @@ void MergeTreeBaseSelectProcessor::executePrewhereActions(Block & block, const P block.erase(prewhere_info->prewhere_column_name); else { - WhichDataType which(prewhere_column.type); + WhichDataType which(removeNullable(recursiveRemoveLowCardinality(prewhere_column.type))); if (which.isInt() || which.isUInt()) prewhere_column.column = prewhere_column.type->createColumnConst(block.rows(), 1u)->convertToFullColumnIfConst(); else if (which.isFloat()) From 0adad2425a98c5dba656cb90bebb175d230f4c16 Mon Sep 17 00:00:00 2001 From: Amos Bird Date: Fri, 18 Jun 2021 15:09:04 +0800 Subject: [PATCH 220/931] json extract string or raw --- src/Functions/FunctionsJSON.h | 7 ++++++- tests/queries/0_stateless/00918_json_functions.reference | 4 ++-- .../0_stateless/01915_json_extract_raw_string.reference | 1 + .../queries/0_stateless/01915_json_extract_raw_string.sql | 1 + 4 files changed, 10 insertions(+), 3 deletions(-) create mode 100644 tests/queries/0_stateless/01915_json_extract_raw_string.reference create mode 100644 tests/queries/0_stateless/01915_json_extract_raw_string.sql diff --git a/src/Functions/FunctionsJSON.h b/src/Functions/FunctionsJSON.h index b6bdf1be013..eec0a15c7a2 100644 --- a/src/Functions/FunctionsJSON.h +++ b/src/Functions/FunctionsJSON.h @@ -600,6 +600,8 @@ public: } }; +template +class JSONExtractRawImpl; /// Nodes of the extract tree. We need the extract tree to extract from JSON complex values containing array, tuples or nullables. 
template @@ -630,7 +632,10 @@ struct JSONExtractTree public: bool insertResultToColumn(IColumn & dest, const Element & element) override { - return JSONExtractStringImpl::insertResultToColumn(dest, element, {}); + if (element.isString()) + return JSONExtractStringImpl::insertResultToColumn(dest, element, {}); + else + return JSONExtractRawImpl::insertResultToColumn(dest, element, {}); } }; diff --git a/tests/queries/0_stateless/00918_json_functions.reference b/tests/queries/0_stateless/00918_json_functions.reference index a3beb2967d4..4a971bbad42 100644 --- a/tests/queries/0_stateless/00918_json_functions.reference +++ b/tests/queries/0_stateless/00918_json_functions.reference @@ -58,7 +58,7 @@ Friday (3,5) (3,0) --JSONExtractKeysAndValues-- -[('a','hello')] +[('a','hello'),('b','[-100,200,300]')] [('b',[-100,200,300])] [('a','hello'),('b','world')] [('a',5),('b',7),('c',11)] @@ -160,7 +160,7 @@ Friday (3,5) (3,0) --JSONExtractKeysAndValues-- -[('a','hello')] +[('a','hello'),('b','[-100,200,300]')] [('b',[-100,200,300])] [('a','hello'),('b','world')] [('a',5),('b',7),('c',11)] diff --git a/tests/queries/0_stateless/01915_json_extract_raw_string.reference b/tests/queries/0_stateless/01915_json_extract_raw_string.reference new file mode 100644 index 00000000000..839cb33f5f2 --- /dev/null +++ b/tests/queries/0_stateless/01915_json_extract_raw_string.reference @@ -0,0 +1 @@ +('123','456','[7,8,9]') diff --git a/tests/queries/0_stateless/01915_json_extract_raw_string.sql b/tests/queries/0_stateless/01915_json_extract_raw_string.sql new file mode 100644 index 00000000000..6ba94ac6dfd --- /dev/null +++ b/tests/queries/0_stateless/01915_json_extract_raw_string.sql @@ -0,0 +1 @@ +select JSONExtract('{"a": "123", "b": 456, "c": [7, 8, 9]}', 'Tuple(a String, b String, c String)'); From 76cee4e3cf68ae68755e333c919c787d4c2181a4 Mon Sep 17 00:00:00 2001 From: alesapin Date: Mon, 21 Jun 2021 16:58:39 +0300 Subject: [PATCH 221/931] Debugging --- src/Common/ZooKeeper/ZooKeeperIO.cpp | 16 +++++ src/Common/ZooKeeper/ZooKeeperIO.h | 9 +++ src/Coordination/KeeperStorage.cpp | 15 +++-- src/Coordination/ZooKeeperDataReader.cpp | 2 +- tests/integration/helpers/cluster.py | 2 +- .../test_keeper_zookeeper_converter/test.py | 64 +++++++++---------- 6 files changed, 68 insertions(+), 40 deletions(-) diff --git a/src/Common/ZooKeeper/ZooKeeperIO.cpp b/src/Common/ZooKeeper/ZooKeeperIO.cpp index 55448c9a109..0e0a034c633 100644 --- a/src/Common/ZooKeeper/ZooKeeperIO.cpp +++ b/src/Common/ZooKeeper/ZooKeeperIO.cpp @@ -9,6 +9,14 @@ void write(size_t x, WriteBuffer & out) writeBinary(x, out); } +#ifdef __APPLE__ +void write(uint64_t x, WriteBuffer & out) +{ + x = __builtin_bswap64(x); + writeBinary(x, out); +} +#endif + void write(int64_t x, WriteBuffer & out) { x = __builtin_bswap64(x); @@ -63,6 +71,14 @@ void write(const Error & x, WriteBuffer & out) write(static_cast(x), out); } +#ifdef __APPLE__ +void read(uint64_t & x, ReadBuffer & in) +{ + readBinary(x, in); + x = __builtin_bswap64(x); +} +#endif + void read(size_t & x, ReadBuffer & in) { readBinary(x, in); diff --git a/src/Common/ZooKeeper/ZooKeeperIO.h b/src/Common/ZooKeeper/ZooKeeperIO.h index fd47e324664..1fcb96315a5 100644 --- a/src/Common/ZooKeeper/ZooKeeperIO.h +++ b/src/Common/ZooKeeper/ZooKeeperIO.h @@ -14,6 +14,12 @@ namespace Coordination using namespace DB; void write(size_t x, WriteBuffer & out); + +/// uint64_t != size_t on darwin +#ifdef __APPLE__ +void write(uint64_t x, WriteBuffer & out); +#endif + void write(int64_t x, WriteBuffer & out); void 
write(int32_t x, WriteBuffer & out); void write(OpNum x, WriteBuffer & out); @@ -39,6 +45,9 @@ void write(const std::vector & arr, WriteBuffer & out) } void read(size_t & x, ReadBuffer & in); +#ifdef __APPLE__ +void read(uint64_t & x, ReadBuffer & in); +#endif void read(int64_t & x, ReadBuffer & in); void read(int32_t & x, ReadBuffer & in); void read(OpNum & x, ReadBuffer & in); diff --git a/src/Coordination/KeeperStorage.cpp b/src/Coordination/KeeperStorage.cpp index 5418afb2501..d59af287bab 100644 --- a/src/Coordination/KeeperStorage.cpp +++ b/src/Coordination/KeeperStorage.cpp @@ -57,7 +57,7 @@ static String generateDigest(const String & userdata) { std::vector user_password; boost::split(user_password, userdata, [](char c) { return c == ':'; }); - return user_password[0] + ":" + base64Encode(getSHA1(user_password[1])); + return user_password[0] + ":" + base64Encode(getSHA1(userdata)); } static bool checkACL(int32_t permission, const Coordination::ACLs & node_acls, const std::vector & session_auths) @@ -71,14 +71,19 @@ static bool checkACL(int32_t permission, const Coordination::ACLs & node_acls, c for (const auto & node_acl : node_acls) { + LOG_DEBUG(&Poco::Logger::get("DEBUG"), "NODE ACL PERMISSIONS {} SESSION PERMS {}", node_acl.permissions, permission); if (node_acl.permissions & permission) { if (node_acl.scheme == "world" && node_acl.id == "anyone") return true; for (const auto & session_auth : session_auths) + { + LOG_DEBUG(&Poco::Logger::get("DEBUG"), "NODE ACL SCHEME {} SESSION SCHEME {}", node_acl.scheme, session_auth.scheme); + LOG_DEBUG(&Poco::Logger::get("DEBUG"), "NODE ACL AUTHID {} SESSION AUTHID {}", node_acl.id, session_auth.id); if (node_acl.scheme == session_auth.scheme && node_acl.id == session_auth.id) return true; + } } } @@ -353,16 +358,19 @@ struct KeeperStorageGetRequest final : public KeeperStorageRequest bool checkAuth(KeeperStorage & storage, int64_t session_id) const override { + LOG_DEBUG(&Poco::Logger::get("DEBUG"), "CHECKING ACL FOR PATH {} IN GET", zk_request->getPath()); auto & container = storage.container; auto it = container.find(zk_request->getPath()); if (it == container.end()) return true; const auto & node_acls = storage.acl_map.convertNumber(it->value.acl_id); + LOG_DEBUG(&Poco::Logger::get("DEBUG"), "NODE ACLID {} ACL SIZE {}",it->value.acl_id, node_acls.size()); if (node_acls.empty()) return true; const auto & session_auths = storage.session_and_auth[session_id]; + LOG_DEBUG(&Poco::Logger::get("DEBUG"), "SESSION AUTHS SIZE {}", session_auths.size()); return checkACL(Coordination::ACL::Read, node_acls, session_auths); } @@ -908,15 +916,10 @@ KeeperStorage::ResponsesForSessions KeeperStorage::processRequest(const Coordina KeeperStorage::ResponsesForSessions results; if (new_last_zxid) { - LOG_INFO(&Poco::Logger::get("DEBUG"), "GOT ZXID {}", *new_last_zxid); if (zxid >= *new_last_zxid) throw Exception(ErrorCodes::LOGICAL_ERROR, "Got new ZXID {} smaller or equal than current {}. 
It's a bug", *new_last_zxid, zxid); zxid = *new_last_zxid; } - else - { - LOG_INFO(&Poco::Logger::get("DEBUG"), "NO ZXID PROVIDED"); - } session_expiry_queue.update(session_id, session_and_timeout[session_id]); if (zk_request->getOpNum() == Coordination::OpNum::Close) diff --git a/src/Coordination/ZooKeeperDataReader.cpp b/src/Coordination/ZooKeeperDataReader.cpp index 4a324abe93d..42440250ed8 100644 --- a/src/Coordination/ZooKeeperDataReader.cpp +++ b/src/Coordination/ZooKeeperDataReader.cpp @@ -429,7 +429,7 @@ bool hasErrorsInMultiRequest(Coordination::ZooKeeperRequestPtr request) } -bool deserializeTxn(KeeperStorage & storage, ReadBuffer & in, Poco::Logger * log) +bool deserializeTxn(KeeperStorage & storage, ReadBuffer & in, Poco::Logger * /*log*/) { int64_t checksum; Coordination::read(checksum, in); diff --git a/tests/integration/helpers/cluster.py b/tests/integration/helpers/cluster.py index 59e598ce6ba..54e129fed11 100644 --- a/tests/integration/helpers/cluster.py +++ b/tests/integration/helpers/cluster.py @@ -213,7 +213,7 @@ class ClickHouseCluster: if self.name: instances_dir_name += '_' + self.name - if 'INTEGRATION_TESTS_RUN_ID' in os.environ: + if 'INTEGRATION_TESTS_RUN_ID' in os.environ and os.environ['INTEGRATION_TESTS_RUN_ID']: instances_dir_name += '_' + shlex.quote(os.environ['INTEGRATION_TESTS_RUN_ID']) self.instances_dir = p.join(self.base_dir, instances_dir_name) diff --git a/tests/integration/test_keeper_zookeeper_converter/test.py b/tests/integration/test_keeper_zookeeper_converter/test.py index 5c6ed90eb35..61f4248f2be 100644 --- a/tests/integration/test_keeper_zookeeper_converter/test.py +++ b/tests/integration/test_keeper_zookeeper_converter/test.py @@ -204,35 +204,35 @@ def test_multi_and_failed_requests(started_cluster): compare_states(genuine_connection, fake_connection) -#def test_acls(started_cluster): -# restart_and_clear_zookeeper() -# genuine_connection = get_genuine_zk() -# genuine_connection.add_auth('digest', 'user1:password1') -# genuine_connection.add_auth('digest', 'user2:password2') -# genuine_connection.add_auth('digest', 'user3:password3') -# -# genuine_connection.create("/test_multi_all_acl", b"data", acl=[make_acl("auth", "", all=True)]) -# -# other_connection = get_genuine_zk() -# other_connection.add_auth('digest', 'user1:password1') -# other_connection.set("/test_multi_all_acl", b"X") -# assert other_connection.get("/test_multi_all_acl")[0] == b"X" -# -# yet_other_auth_connection = get_genuine_zk() -# yet_other_auth_connection.add_auth('digest', 'user2:password2') -# -# yet_other_auth_connection.set("/test_multi_all_acl", b"Y") -# -# copy_zookeeper_data() -# -# genuine_connection = get_genuine_zk() -# genuine_connection.add_auth('digest', 'user1:password1') -# genuine_connection.add_auth('digest', 'user2:password2') -# genuine_connection.add_auth('digest', 'user3:password3') -# -# fake_connection = get_fake_zk() -# fake_connection.add_auth('digest', 'user1:password1') -# fake_connection.add_auth('digest', 'user2:password2') -# fake_connection.add_auth('digest', 'user3:password3') -# -# compare_states(genuine_connection, fake_connection) +def test_acls(started_cluster): + restart_and_clear_zookeeper() + genuine_connection = get_genuine_zk() + genuine_connection.add_auth('digest', 'user1:password1') + genuine_connection.add_auth('digest', 'user2:password2') + genuine_connection.add_auth('digest', 'user3:password3') + + genuine_connection.create("/test_multi_all_acl", b"data", acl=[make_acl("auth", "", all=True)]) + + other_connection = 
get_genuine_zk() + other_connection.add_auth('digest', 'user1:password1') + other_connection.set("/test_multi_all_acl", b"X") + assert other_connection.get("/test_multi_all_acl")[0] == b"X" + + yet_other_auth_connection = get_genuine_zk() + yet_other_auth_connection.add_auth('digest', 'user2:password2') + + yet_other_auth_connection.set("/test_multi_all_acl", b"Y") + + copy_zookeeper_data() + + genuine_connection = get_genuine_zk() + genuine_connection.add_auth('digest', 'user1:password1') + genuine_connection.add_auth('digest', 'user2:password2') + genuine_connection.add_auth('digest', 'user3:password3') + + fake_connection = get_fake_zk() + fake_connection.add_auth('digest', 'user1:password1') + fake_connection.add_auth('digest', 'user2:password2') + fake_connection.add_auth('digest', 'user3:password3') + + compare_states(genuine_connection, fake_connection) From 4688f9e038cb0cff49ed2842c82582be68391480 Mon Sep 17 00:00:00 2001 From: kssenii Date: Mon, 21 Jun 2021 13:50:09 +0000 Subject: [PATCH 222/931] hdfs truncate table --- src/Storages/HDFS/StorageHDFS.cpp | 27 ++++++++++++++------- src/Storages/HDFS/StorageHDFS.h | 2 ++ tests/integration/test_storage_hdfs/test.py | 16 ++++++++++-- 3 files changed, 34 insertions(+), 11 deletions(-) diff --git a/src/Storages/HDFS/StorageHDFS.cpp b/src/Storages/HDFS/StorageHDFS.cpp index e3fd287bad8..9de4ca4650f 100644 --- a/src/Storages/HDFS/StorageHDFS.cpp +++ b/src/Storages/HDFS/StorageHDFS.cpp @@ -26,6 +26,7 @@ #include #include #include +#include namespace fs = std::filesystem; @@ -280,15 +281,7 @@ Pipe StorageHDFS::read( size_t max_block_size, unsigned num_streams) { - size_t begin_of_path; - /// This uri is checked for correctness in constructor of StorageHDFS and never modified afterwards - auto two_slash = uri.find("//"); - - if (two_slash == std::string::npos) - begin_of_path = uri.find('/'); - else - begin_of_path = uri.find('/', two_slash + 2); - + const size_t begin_of_path = uri.find('/', uri.find("//") + 2); const String path_from_uri = uri.substr(begin_of_path); const String uri_without_path = uri.substr(0, begin_of_path); @@ -330,6 +323,22 @@ BlockOutputStreamPtr StorageHDFS::write(const ASTPtr & /*query*/, const StorageM chooseCompressionMethod(uri, compression_method)); } +void StorageHDFS::truncate(const ASTPtr & /* query */, const StorageMetadataPtr &, ContextPtr context_, TableExclusiveLockHolder &) +{ + const size_t begin_of_path = uri.find('/', uri.find("//") + 2); + const String path = uri.substr(begin_of_path); + const String url = uri.substr(0, begin_of_path); + + HDFSBuilderWrapper builder = createHDFSBuilder(url + "/", context_->getGlobalContext()->getConfigRef()); + HDFSFSPtr fs = createHDFSFS(builder.get()); + + int wait; + int ret = hdfsTruncate(fs.get(), path.data(), 0, &wait); + if (ret) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Unable to truncate hdfs table: {}", std::string(hdfsGetLastError())); +} + + void registerStorageHDFS(StorageFactory & factory) { factory.registerStorage("HDFS", [](const StorageFactory::Arguments & args) diff --git a/src/Storages/HDFS/StorageHDFS.h b/src/Storages/HDFS/StorageHDFS.h index 397e147e7cd..da77b397adf 100644 --- a/src/Storages/HDFS/StorageHDFS.h +++ b/src/Storages/HDFS/StorageHDFS.h @@ -34,6 +34,8 @@ public: BlockOutputStreamPtr write(const ASTPtr & query, const StorageMetadataPtr & /*metadata_snapshot*/, ContextPtr context) override; + void truncate(const ASTPtr & query, const StorageMetadataPtr & metadata_snapshot, ContextPtr context, TableExclusiveLockHolder &) override; + 
NamesAndTypesList getVirtuals() const override; protected: diff --git a/tests/integration/test_storage_hdfs/test.py b/tests/integration/test_storage_hdfs/test.py index 34ced652a01..2dac7bc19d4 100644 --- a/tests/integration/test_storage_hdfs/test.py +++ b/tests/integration/test_storage_hdfs/test.py @@ -15,7 +15,6 @@ def started_cluster(): finally: cluster.shutdown() - def test_read_write_storage(started_cluster): hdfs_api = started_cluster.hdfs_api @@ -235,7 +234,7 @@ def test_virtual_columns(started_cluster): expected = "1\tfile1\thdfs://hdfs1:9000//file1\n2\tfile2\thdfs://hdfs1:9000//file2\n3\tfile3\thdfs://hdfs1:9000//file3\n" assert node1.query("select id, _file as file_name, _path as file_path from virtual_cols order by id") == expected - + def test_read_files_with_spaces(started_cluster): hdfs_api = started_cluster.hdfs_api @@ -246,6 +245,19 @@ def test_read_files_with_spaces(started_cluster): assert node1.query("select * from test order by id") == "1\n2\n3\n" +def test_truncate_table(started_cluster): + hdfs_api = started_cluster.hdfs_api + node1.query( + "create table test_truncate (id UInt32, name String, weight Float64) ENGINE = HDFS('hdfs://hdfs1:9000/tr', 'TSV')") + node1.query("insert into test_truncate values (1, 'Mark', 72.53)") + assert hdfs_api.read_data("/tr") == "1\tMark\t72.53\n" + assert node1.query("select * from test_truncate") == "1\tMark\t72.53\n" + node1.query("truncate table test_truncate") + assert hdfs_api.read_data("/tr") == "" + assert node1.query("select * from test_truncate") == "" + node1.query("drop table test_truncate") + + if __name__ == '__main__': cluster.start() input("Cluster created, press any key to destroy...") From a173baf1eacdcd28ae3bfc38b6f9529f873d092c Mon Sep 17 00:00:00 2001 From: alesapin Date: Mon, 21 Jun 2021 17:02:41 +0300 Subject: [PATCH 223/931] Add test for concurrent ttl merges --- ...and_normal_merges_zookeeper_long.reference | 1 + ...nt_ttl_and_normal_merges_zookeeper_long.sh | 70 +++++++++++++++++++ 2 files changed, 71 insertions(+) create mode 100644 tests/queries/0_stateless/01921_concurrent_ttl_and_normal_merges_zookeeper_long.reference create mode 100755 tests/queries/0_stateless/01921_concurrent_ttl_and_normal_merges_zookeeper_long.sh diff --git a/tests/queries/0_stateless/01921_concurrent_ttl_and_normal_merges_zookeeper_long.reference b/tests/queries/0_stateless/01921_concurrent_ttl_and_normal_merges_zookeeper_long.reference new file mode 100644 index 00000000000..d00491fd7e5 --- /dev/null +++ b/tests/queries/0_stateless/01921_concurrent_ttl_and_normal_merges_zookeeper_long.reference @@ -0,0 +1 @@ +1 diff --git a/tests/queries/0_stateless/01921_concurrent_ttl_and_normal_merges_zookeeper_long.sh b/tests/queries/0_stateless/01921_concurrent_ttl_and_normal_merges_zookeeper_long.sh new file mode 100755 index 00000000000..62d0d3001fb --- /dev/null +++ b/tests/queries/0_stateless/01921_concurrent_ttl_and_normal_merges_zookeeper_long.sh @@ -0,0 +1,70 @@ +#!/usr/bin/env bash + +CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. 
"$CURDIR"/../shell_config.sh + +NUM_REPLICAS=5 + +for i in $(seq 1 $NUM_REPLICAS); do + $CLICKHOUSE_CLIENT --query "DROP TABLE IF EXISTS ttl_table$i" +done + + +for i in $(seq 1 $NUM_REPLICAS); do + $CLICKHOUSE_CLIENT -n --query "CREATE TABLE ttl_table$i( + key DateTime + ) + ENGINE ReplicatedMergeTree('/test/01921_concurrent_ttl_and_normal_merges/${CLICKHOUSE_DATABASE}/ttl_table', '$i') + ORDER BY tuple() + TTL key + INTERVAL 1 SECOND + SETTINGS merge_with_ttl_timeout=1, max_replicated_merges_with_ttl_in_queue=100, max_number_of_merges_with_ttl_in_pool=100;" +done + +function optimize_thread +{ + while true; do + REPLICA=$(($RANDOM % 5 + 1)) + $CLICKHOUSE_CLIENT --query "OPTIMIZE TABLE ttl_table$REPLICA FINAl" + done +} + +function insert_thread +{ + while true; do + REPLICA=$(($RANDOM % 5 + 1)) + $CLICKHOUSE_CLIENT --optimize_on_insert=0 --query "INSERT INTO ttl_table$REPLICA SELECT now() + rand() % 5 - rand() % 3 FROM numbers(5)" + $CLICKHOUSE_CLIENT --optimize_on_insert=0 --query "INSERT INTO ttl_table$REPLICA SELECT now() + rand() % 5 - rand() % 3 FROM numbers(5)" + $CLICKHOUSE_CLIENT --optimize_on_insert=0 --query "INSERT INTO ttl_table$REPLICA SELECT now() + rand() % 5 - rand() % 3 FROM numbers(5)" + done +} + + +export -f insert_thread; +export -f optimize_thread; + +TIMEOUT=30 + +timeout $TIMEOUT bash -c insert_thread 2> /dev/null & +timeout $TIMEOUT bash -c insert_thread 2> /dev/null & +timeout $TIMEOUT bash -c insert_thread 2> /dev/null & +timeout $TIMEOUT bash -c insert_thread 2> /dev/null & +timeout $TIMEOUT bash -c insert_thread 2> /dev/null & +timeout $TIMEOUT bash -c optimize_thread 2> /dev/null & +timeout $TIMEOUT bash -c optimize_thread 2> /dev/null & +timeout $TIMEOUT bash -c optimize_thread 2> /dev/null & +timeout $TIMEOUT bash -c optimize_thread 2> /dev/null & +timeout $TIMEOUT bash -c optimize_thread 2> /dev/null & + +wait + +for i in $(seq 1 $NUM_REPLICAS); do + $CLICKHOUSE_CLIENT --query "SYSTEM SYNC REPLICA ttl_table$i" +done + +$CLICKHOUSE_CLIENT --query "SELECT * FROM system.replication_queue where table like 'ttl_table%' and database = '${CLICKHOUSE_DATABASE}' and type='MERGE_PARTS' and last_exception != '' FORMAT Vertical" +$CLICKHOUSE_CLIENT --query "SELECT COUNT() > 0 FROM system.part_log where table like 'ttl_table%' and database = '${CLICKHOUSE_DATABASE}'" + +for i in $(seq 1 $NUM_REPLICAS); do + $CLICKHOUSE_CLIENT --query "DROP TABLE IF EXISTS ttl_table$i" +done From 660e824851aafd38d29416c910ea02702a32eac4 Mon Sep 17 00:00:00 2001 From: Nicolae Vartolomei Date: Mon, 21 Jun 2021 15:13:23 +0100 Subject: [PATCH 224/931] Missed one server_died.set() --- tests/clickhouse-test | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/clickhouse-test b/tests/clickhouse-test index dc8c5dbd2f6..c3ca1ec5953 100755 --- a/tests/clickhouse-test +++ b/tests/clickhouse-test @@ -489,7 +489,7 @@ def run_tests_array(all_tests_with_params): if MAX_RETRIES < counter: if args.replicated_database: if DISTRIBUTED_DDL_TIMEOUT_MSG in stderr: - SERVER_DIED = True + server_died.set() break if proc.returncode != 0: From fc9179de1fa676d01aa54cfebb1b4a8652784f1c Mon Sep 17 00:00:00 2001 From: kssenii Date: Mon, 21 Jun 2021 14:20:29 +0000 Subject: [PATCH 225/931] Add test for progress bar --- .../0_stateless/01921_test_progress_bar.py | 20 +++++++++++++++++++ .../01921_test_progress_bar.reference | 0 2 files changed, 20 insertions(+) create mode 100755 tests/queries/0_stateless/01921_test_progress_bar.py create mode 100644 
tests/queries/0_stateless/01921_test_progress_bar.reference diff --git a/tests/queries/0_stateless/01921_test_progress_bar.py b/tests/queries/0_stateless/01921_test_progress_bar.py new file mode 100755 index 00000000000..89806bcfdab --- /dev/null +++ b/tests/queries/0_stateless/01921_test_progress_bar.py @@ -0,0 +1,20 @@ +#!/usr/bin/env python3 +import os +import sys +import signal + +CURDIR = os.path.dirname(os.path.realpath(__file__)) +sys.path.insert(0, os.path.join(CURDIR, 'helpers')) + +from client import client, prompt, end_of_block + +log = None +# uncomment the line below for debugging +#log=sys.stdout + +with client(name='client1>', log=log) as client1, client(name='client2>', log=log) as client2: + client1.expect(prompt) + client1.send('SELECT number FROM numbers(100) FORMAT Null') + client1.expect('Progress: 100\.00 rows, 800\.00 B.*' + end_of_block) + # 0 rows becuase Format Null. + client1.expect('0 rows in set. Elapsed: 0\.[\\w]{3} sec.' + end_of_block) diff --git a/tests/queries/0_stateless/01921_test_progress_bar.reference b/tests/queries/0_stateless/01921_test_progress_bar.reference new file mode 100644 index 00000000000..e69de29bb2d From 77a3a1416deb9b54981087d659ba720d94789119 Mon Sep 17 00:00:00 2001 From: Kseniia Sumarokova <54203879+kssenii@users.noreply.github.com> Date: Mon, 21 Jun 2021 17:26:14 +0300 Subject: [PATCH 226/931] Update fetchPostgreSQLTableStructure.cpp --- src/Databases/PostgreSQL/fetchPostgreSQLTableStructure.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Databases/PostgreSQL/fetchPostgreSQLTableStructure.cpp b/src/Databases/PostgreSQL/fetchPostgreSQLTableStructure.cpp index ff3e4008af0..ea86fe94e8e 100644 --- a/src/Databases/PostgreSQL/fetchPostgreSQLTableStructure.cpp +++ b/src/Databases/PostgreSQL/fetchPostgreSQLTableStructure.cpp @@ -157,7 +157,7 @@ std::shared_ptr fetchPostgreSQLTableStructure( tx.commit(); } - for (auto & i : recheck_arrays_indexes) + for (const auto & i : recheck_arrays_indexes) { const auto & name_and_type = columns[i]; From 662d5b8495b16745f6df36b7538b91f2db1d011a Mon Sep 17 00:00:00 2001 From: Anton Popov Date: Mon, 21 Jun 2021 18:20:18 +0300 Subject: [PATCH 227/931] fix ALTER MODIFY COLUMN of columns, that participates in TTL expressions --- src/Interpreters/MutationsInterpreter.cpp | 7 ++- src/Interpreters/MutationsInterpreter.h | 4 ++ .../MergeTree/MergeTreeDataMergerMutator.cpp | 6 +-- .../MergeTree/MergeTreeDataMergerMutator.h | 3 +- .../01923_ttl_with_modify_column.reference | 2 + .../01923_ttl_with_modify_column.sql | 43 +++++++++++++++++++ 6 files changed, 59 insertions(+), 6 deletions(-) create mode 100644 tests/queries/0_stateless/01923_ttl_with_modify_column.reference create mode 100644 tests/queries/0_stateless/01923_ttl_with_modify_column.sql diff --git a/src/Interpreters/MutationsInterpreter.cpp b/src/Interpreters/MutationsInterpreter.cpp index dbf75baad14..78ac8dc656b 100644 --- a/src/Interpreters/MutationsInterpreter.cpp +++ b/src/Interpreters/MutationsInterpreter.cpp @@ -388,7 +388,6 @@ ASTPtr MutationsInterpreter::prepare(bool dry_run) if (commands.empty()) throw Exception("Empty mutation commands list", ErrorCodes::LOGICAL_ERROR); - const ColumnsDescription & columns_desc = metadata_snapshot->getColumns(); const IndicesDescription & indices_desc = metadata_snapshot->getSecondaryIndices(); const ProjectionsDescription & projections_desc = metadata_snapshot->getProjections(); @@ -426,7 +425,7 @@ ASTPtr MutationsInterpreter::prepare(bool dry_run) } /// Columns, that we need to read 
for calculation of skip indices, projections or TTL expressions. - auto dependencies = getAllColumnDependencies(metadata_snapshot, updated_columns); + dependencies = getAllColumnDependencies(metadata_snapshot, updated_columns); /// First, break a sequence of commands into stages. for (auto & command : commands) @@ -921,6 +920,10 @@ const Block & MutationsInterpreter::getUpdatedHeader() const return *updated_header; } +const ColumnDependencies & MutationsInterpreter::getColumnDependencies() const +{ + return dependencies; +} size_t MutationsInterpreter::evaluateCommandsSize() { diff --git a/src/Interpreters/MutationsInterpreter.h b/src/Interpreters/MutationsInterpreter.h index 0d91da5613c..c9d66b4f13b 100644 --- a/src/Interpreters/MutationsInterpreter.h +++ b/src/Interpreters/MutationsInterpreter.h @@ -56,6 +56,8 @@ public: /// Only changed columns. const Block & getUpdatedHeader() const; + const ColumnDependencies & getColumnDependencies() const; + /// Latest mutation stage affects all columns in storage bool isAffectingAllColumns() const; @@ -148,6 +150,8 @@ private: NameSet materialized_projections; MutationKind mutation_kind; /// Do we meet any index or projection mutation. + + ColumnDependencies dependencies; }; } diff --git a/src/Storages/MergeTree/MergeTreeDataMergerMutator.cpp b/src/Storages/MergeTree/MergeTreeDataMergerMutator.cpp index 846ad7b026d..ee88326608b 100644 --- a/src/Storages/MergeTree/MergeTreeDataMergerMutator.cpp +++ b/src/Storages/MergeTree/MergeTreeDataMergerMutator.cpp @@ -1267,7 +1267,7 @@ MergeTreeData::MutableDataPartPtr MergeTreeDataMergerMutator::mutatePartToTempor bool need_sync = needSyncPart(source_part->rows_count, source_part->getBytesOnDisk(), *data_settings); bool need_remove_expired_values = false; - if (in && shouldExecuteTTL(metadata_snapshot, in->getHeader().getNamesAndTypesList().getNames(), commands_for_part)) + if (in && shouldExecuteTTL(metadata_snapshot, interpreter->getColumnDependencies(), commands_for_part)) need_remove_expired_values = true; /// All columns from part are changed and may be some more that were missing before in part @@ -1956,7 +1956,8 @@ std::set MergeTreeDataMergerMutator::getProjectionsToRec return projections_to_recalc; } -bool MergeTreeDataMergerMutator::shouldExecuteTTL(const StorageMetadataPtr & metadata_snapshot, const Names & columns, const MutationCommands & commands) +bool MergeTreeDataMergerMutator::shouldExecuteTTL( + const StorageMetadataPtr & metadata_snapshot, const ColumnDependencies & dependencies, const MutationCommands & commands) { if (!metadata_snapshot->hasAnyTTL()) return false; @@ -1965,7 +1966,6 @@ bool MergeTreeDataMergerMutator::shouldExecuteTTL(const StorageMetadataPtr & met if (command.type == MutationCommand::MATERIALIZE_TTL) return true; - auto dependencies = metadata_snapshot->getColumnDependencies(NameSet(columns.begin(), columns.end())); for (const auto & dependency : dependencies) if (dependency.kind == ColumnDependency::TTL_EXPRESSION || dependency.kind == ColumnDependency::TTL_TARGET) return true; diff --git a/src/Storages/MergeTree/MergeTreeDataMergerMutator.h b/src/Storages/MergeTree/MergeTreeDataMergerMutator.h index b082d063dcf..ca7376d8f3e 100644 --- a/src/Storages/MergeTree/MergeTreeDataMergerMutator.h +++ b/src/Storages/MergeTree/MergeTreeDataMergerMutator.h @@ -200,7 +200,8 @@ private: const ProjectionsDescription & all_projections, const MutationCommands & commands_for_removes); - static bool shouldExecuteTTL(const StorageMetadataPtr & metadata_snapshot, const Names & columns, 
const MutationCommands & commands); + static bool shouldExecuteTTL( + const StorageMetadataPtr & metadata_snapshot, const ColumnDependencies & dependencies, const MutationCommands & commands); /// Return set of indices which should be recalculated during mutation also /// wraps input stream into additional expression stream diff --git a/tests/queries/0_stateless/01923_ttl_with_modify_column.reference b/tests/queries/0_stateless/01923_ttl_with_modify_column.reference new file mode 100644 index 00000000000..2e55ea564b9 --- /dev/null +++ b/tests/queries/0_stateless/01923_ttl_with_modify_column.reference @@ -0,0 +1,2 @@ +2 ['Int16'] +2 ['Date'] diff --git a/tests/queries/0_stateless/01923_ttl_with_modify_column.sql b/tests/queries/0_stateless/01923_ttl_with_modify_column.sql new file mode 100644 index 00000000000..ed2812d2a39 --- /dev/null +++ b/tests/queries/0_stateless/01923_ttl_with_modify_column.sql @@ -0,0 +1,43 @@ +DROP TABLE IF EXISTS t_ttl_modify_column; + +CREATE TABLE t_ttl_modify_column +( + InsertionDateTime DateTime, + TTLDays Int32 DEFAULT CAST(365, 'Int32') +) +ENGINE = MergeTree +ORDER BY tuple() +TTL InsertionDateTime + toIntervalDay(TTLDays) +SETTINGS min_bytes_for_wide_part = 0; + +INSERT INTO t_ttl_modify_column VALUES (now(), 23); + +SET mutations_sync = 2; + +ALTER TABLE t_ttl_modify_column modify column TTLDays Int16 DEFAULT CAST(365, 'Int16'); + +INSERT INTO t_ttl_modify_column VALUES (now(), 23); + +SELECT sum(rows), groupUniqArray(type) FROM system.parts_columns +WHERE database = currentDatabase() AND table = 't_ttl_modify_column' AND column = 'TTLDays' AND active; + +DROP TABLE IF EXISTS t_ttl_modify_column; + +CREATE TABLE t_ttl_modify_column (InsertionDateTime DateTime) +ENGINE = MergeTree +ORDER BY tuple() +TTL InsertionDateTime + INTERVAL 3 DAY +SETTINGS min_bytes_for_wide_part = 0; + +INSERT INTO t_ttl_modify_column VALUES (now()); + +ALTER TABLE t_ttl_modify_column MODIFY COLUMN InsertionDateTime Date; + +INSERT INTO t_ttl_modify_column VALUES (now()); + +SELECT sum(rows), groupUniqArray(type) FROM system.parts_columns +WHERE database = currentDatabase() AND table = 't_ttl_modify_column' AND column = 'InsertionDateTime' AND active; + +ALTER TABLE t_ttl_modify_column MODIFY COLUMN InsertionDateTime String; -- { serverError 43 } + +DROP TABLE IF EXISTS t_ttl_modify_column; From 880ed245654e4ae2ca41f529a7f103515bb320b6 Mon Sep 17 00:00:00 2001 From: Anton Popov Date: Mon, 21 Jun 2021 18:31:18 +0300 Subject: [PATCH 228/931] move comment --- src/Interpreters/MutationsInterpreter.cpp | 1 - src/Interpreters/MutationsInterpreter.h | 1 + 2 files changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Interpreters/MutationsInterpreter.cpp b/src/Interpreters/MutationsInterpreter.cpp index 78ac8dc656b..03a2a4da1d1 100644 --- a/src/Interpreters/MutationsInterpreter.cpp +++ b/src/Interpreters/MutationsInterpreter.cpp @@ -424,7 +424,6 @@ ASTPtr MutationsInterpreter::prepare(bool dry_run) validateUpdateColumns(storage, metadata_snapshot, updated_columns, column_to_affected_materialized); } - /// Columns, that we need to read for calculation of skip indices, projections or TTL expressions. dependencies = getAllColumnDependencies(metadata_snapshot, updated_columns); /// First, break a sequence of commands into stages. 
diff --git a/src/Interpreters/MutationsInterpreter.h b/src/Interpreters/MutationsInterpreter.h index c9d66b4f13b..65ad027118a 100644 --- a/src/Interpreters/MutationsInterpreter.h +++ b/src/Interpreters/MutationsInterpreter.h @@ -151,6 +151,7 @@ private: MutationKind mutation_kind; /// Do we meet any index or projection mutation. + /// Columns, that we need to read for calculation of skip indices, projections or TTL expressions. ColumnDependencies dependencies; }; From ac0f86cdbf308bdad3e5d76b9fdf38cb9dc597b8 Mon Sep 17 00:00:00 2001 From: kssenii Date: Mon, 21 Jun 2021 15:44:36 +0000 Subject: [PATCH 229/931] Truncate for s3 --- src/Storages/HDFS/StorageHDFS.cpp | 3 +- src/Storages/HDFS/StorageHDFS.h | 2 +- src/Storages/StorageS3.cpp | 26 +++++++++++ src/Storages/StorageS3.h | 2 + tests/integration/test_storage_hdfs/test.py | 1 - tests/integration/test_storage_s3/test.py | 49 ++++++++++++++++----- 6 files changed, 67 insertions(+), 16 deletions(-) diff --git a/src/Storages/HDFS/StorageHDFS.cpp b/src/Storages/HDFS/StorageHDFS.cpp index 9de4ca4650f..c878fd4e1f8 100644 --- a/src/Storages/HDFS/StorageHDFS.cpp +++ b/src/Storages/HDFS/StorageHDFS.cpp @@ -332,8 +332,7 @@ void StorageHDFS::truncate(const ASTPtr & /* query */, const StorageMetadataPtr HDFSBuilderWrapper builder = createHDFSBuilder(url + "/", context_->getGlobalContext()->getConfigRef()); HDFSFSPtr fs = createHDFSFS(builder.get()); - int wait; - int ret = hdfsTruncate(fs.get(), path.data(), 0, &wait); + int ret = hdfsDelete(fs.get(), path.data(), 0); if (ret) throw Exception(ErrorCodes::LOGICAL_ERROR, "Unable to truncate hdfs table: {}", std::string(hdfsGetLastError())); } diff --git a/src/Storages/HDFS/StorageHDFS.h b/src/Storages/HDFS/StorageHDFS.h index da77b397adf..4a6614be2e0 100644 --- a/src/Storages/HDFS/StorageHDFS.h +++ b/src/Storages/HDFS/StorageHDFS.h @@ -34,7 +34,7 @@ public: BlockOutputStreamPtr write(const ASTPtr & query, const StorageMetadataPtr & /*metadata_snapshot*/, ContextPtr context) override; - void truncate(const ASTPtr & query, const StorageMetadataPtr & metadata_snapshot, ContextPtr context, TableExclusiveLockHolder &) override; + void truncate(const ASTPtr & query, const StorageMetadataPtr & metadata_snapshot, ContextPtr context_, TableExclusiveLockHolder &) override; NamesAndTypesList getVirtuals() const override; diff --git a/src/Storages/StorageS3.cpp b/src/Storages/StorageS3.cpp index 290a585128e..12ec405771e 100644 --- a/src/Storages/StorageS3.cpp +++ b/src/Storages/StorageS3.cpp @@ -27,6 +27,8 @@ #include #include #include +#include +#include #include #include @@ -434,6 +436,30 @@ BlockOutputStreamPtr StorageS3::write(const ASTPtr & /*query*/, const StorageMet max_single_part_upload_size); } + +void StorageS3::truncate(const ASTPtr & /* query */, const StorageMetadataPtr &, ContextPtr local_context, TableExclusiveLockHolder &) +{ + updateClientAndAuthSettings(local_context, client_auth); + + Aws::S3::Model::ObjectIdentifier obj; + obj.SetKey(client_auth.uri.key); + + Aws::S3::Model::Delete delkeys; + delkeys.AddObjects(std::move(obj)); + + Aws::S3::Model::DeleteObjectsRequest request; + request.SetBucket(client_auth.uri.bucket); + request.SetDelete(delkeys); + + auto response = client_auth.client->DeleteObjects(request); + if (!response.IsSuccess()) + { + const auto & err = response.GetError(); + throw Exception(std::to_string(static_cast(err.GetErrorType())) + ": " + err.GetMessage(), ErrorCodes::S3_ERROR); + } +} + + void StorageS3::updateClientAndAuthSettings(ContextPtr ctx, 
StorageS3::ClientAuthentificaiton & upd) { auto settings = ctx->getStorageS3Settings().getSettings(upd.uri.uri.toString()); diff --git a/src/Storages/StorageS3.h b/src/Storages/StorageS3.h index 73becc2aa57..240327fba6f 100644 --- a/src/Storages/StorageS3.h +++ b/src/Storages/StorageS3.h @@ -130,6 +130,8 @@ public: BlockOutputStreamPtr write(const ASTPtr & query, const StorageMetadataPtr & /*metadata_snapshot*/, ContextPtr context) override; + void truncate(const ASTPtr & query, const StorageMetadataPtr & metadata_snapshot, ContextPtr local_context, TableExclusiveLockHolder &) override; + NamesAndTypesList getVirtuals() const override; private: diff --git a/tests/integration/test_storage_hdfs/test.py b/tests/integration/test_storage_hdfs/test.py index 2dac7bc19d4..f60dc836608 100644 --- a/tests/integration/test_storage_hdfs/test.py +++ b/tests/integration/test_storage_hdfs/test.py @@ -253,7 +253,6 @@ def test_truncate_table(started_cluster): assert hdfs_api.read_data("/tr") == "1\tMark\t72.53\n" assert node1.query("select * from test_truncate") == "1\tMark\t72.53\n" node1.query("truncate table test_truncate") - assert hdfs_api.read_data("/tr") == "" assert node1.query("select * from test_truncate") == "" node1.query("drop table test_truncate") diff --git a/tests/integration/test_storage_s3/test.py b/tests/integration/test_storage_s3/test.py index 545ca4256f3..3f5254af49a 100644 --- a/tests/integration/test_storage_s3/test.py +++ b/tests/integration/test_storage_s3/test.py @@ -276,28 +276,28 @@ def test_put_get_with_redirect(started_cluster): # Test put with restricted S3 server redirect. def test_put_with_zero_redirect(started_cluster): - # type: (ClickHouseCluster) -> None + # type: (clickhousecluster) -> none bucket = started_cluster.minio_bucket - instance = started_cluster.instances["s3_max_redirects"] # type: ClickHouseInstance - table_format = "column1 UInt32, column2 UInt32, column3 UInt32" + instance = started_cluster.instances["s3_max_redirects"] # type: clickhouseinstance + table_format = "column1 uint32, column2 uint32, column3 uint32" values = "(1, 1, 1), (1, 1, 1), (11, 11, 11)" filename = "test.csv" - # Should work without redirect - query = "insert into table function s3('http://{}:{}/{}/{}', 'CSV', '{}') values {}".format( - started_cluster.minio_ip, MINIO_INTERNAL_PORT, bucket, filename, table_format, values) + # should work without redirect + query = "insert into table function s3('http://{}:{}/{}/{}', 'csv', '{}') values {}".format( + started_cluster.minio_ip, minio_internal_port, bucket, filename, table_format, values) run_query(instance, query) - # Should not work with redirect - query = "insert into table function s3('http://{}:{}/{}/{}', 'CSV', '{}') values {}".format( + # should not work with redirect + query = "insert into table function s3('http://{}:{}/{}/{}', 'csv', '{}') values {}".format( started_cluster.minio_redirect_host, started_cluster.minio_redirect_port, bucket, filename, table_format, values) - exception_raised = False + exception_raised = false try: run_query(instance, query) - except Exception as e: - assert str(e).find("Too many redirects while trying to access") != -1 - exception_raised = True + except exception as e: + assert str(e).find("too many redirects while trying to access") != -1 + exception_raised = true finally: assert exception_raised @@ -645,3 +645,28 @@ def test_storage_s3_put_gzip(started_cluster, extension, method): f = gzip.GzipFile(fileobj=buf, mode="rb") uncompressed_content = f.read().decode() assert sum([ int(i.split(',')[1]) 
for i in uncompressed_content.splitlines() ]) == 708 + + +def test_truncate_table(started_cluster): + bucket = started_cluster.minio_bucket + instance = started_cluster.instances["dummy"] # type: ClickHouseInstance + name = "truncate" + + instance.query("CREATE TABLE {} (id UInt32) ENGINE = S3('http://{}:{}/{}/{}', 'CSV')".format( + name, started_cluster.minio_ip, MINIO_INTERNAL_PORT, bucket, name)) + + instance.query("INSERT INTO {} SELECT number FROM numbers(10)".format(name)) + result = instance.query("SELECT * FROM {}".format(name)) + assert result == instance.query("SELECT number FROM numbers(10)") + instance.query("TRUNCATE TABLE {}".format(name)) + + minio = started_cluster.minio_client + timeout = 30 + while timeout > 0: + if len(list(minio.list_objects(started_cluster.minio_bucket, 'truncate/'))) == 0: + return + timeout -= 1 + time.sleep(1) + assert(len(list(minio.list_objects(started_cluster.minio_bucket, 'truncate/'))) == 0) + assert instance.query("SELECT * FROM {}".format(name)) == "" + From 731edc9a6d13f63c2ba8fa95de761edcbb64cc4a Mon Sep 17 00:00:00 2001 From: alesapin Date: Mon, 21 Jun 2021 18:45:45 +0300 Subject: [PATCH 230/931] Fixes in ACLs --- src/Common/ZooKeeper/ZooKeeperCommon.h | 3 +++ src/Coordination/KeeperStorage.cpp | 18 +++++++----------- src/Coordination/KeeperStorage.h | 2 +- src/Coordination/ZooKeeperDataReader.cpp | 3 ++- .../test_keeper_zookeeper_converter/test.py | 5 +++++ 5 files changed, 18 insertions(+), 13 deletions(-) diff --git a/src/Common/ZooKeeper/ZooKeeperCommon.h b/src/Common/ZooKeeper/ZooKeeperCommon.h index ced154133b5..c50c271c1ec 100644 --- a/src/Common/ZooKeeper/ZooKeeperCommon.h +++ b/src/Common/ZooKeeper/ZooKeeperCommon.h @@ -183,6 +183,9 @@ struct ZooKeeperCreateRequest final : public CreateRequest, ZooKeeperRequest bool isReadRequest() const override { return false; } size_t bytesSize() const override { return CreateRequest::bytesSize() + sizeof(xid) + sizeof(has_watch); } + + /// During recovery from log we don't rehash ACLs + bool need_to_hash_acls = true; }; struct ZooKeeperCreateResponse final : CreateResponse, ZooKeeperResponse diff --git a/src/Coordination/KeeperStorage.cpp b/src/Coordination/KeeperStorage.cpp index d59af287bab..dd0a7dffabb 100644 --- a/src/Coordination/KeeperStorage.cpp +++ b/src/Coordination/KeeperStorage.cpp @@ -71,7 +71,6 @@ static bool checkACL(int32_t permission, const Coordination::ACLs & node_acls, c for (const auto & node_acl : node_acls) { - LOG_DEBUG(&Poco::Logger::get("DEBUG"), "NODE ACL PERMISSIONS {} SESSION PERMS {}", node_acl.permissions, permission); if (node_acl.permissions & permission) { if (node_acl.scheme == "world" && node_acl.id == "anyone") @@ -79,8 +78,6 @@ static bool checkACL(int32_t permission, const Coordination::ACLs & node_acls, c for (const auto & session_auth : session_auths) { - LOG_DEBUG(&Poco::Logger::get("DEBUG"), "NODE ACL SCHEME {} SESSION SCHEME {}", node_acl.scheme, session_auth.scheme); - LOG_DEBUG(&Poco::Logger::get("DEBUG"), "NODE ACL AUTHID {} SESSION AUTHID {}", node_acl.id, session_auth.id); if (node_acl.scheme == session_auth.scheme && node_acl.id == session_auth.id) return true; } @@ -93,7 +90,8 @@ static bool checkACL(int32_t permission, const Coordination::ACLs & node_acls, c static bool fixupACL( const std::vector & request_acls, const std::vector & current_ids, - std::vector & result_acls) + std::vector & result_acls, + bool hash_acls) { if (request_acls.empty()) return true; @@ -126,7 +124,8 @@ static bool fixupACL( return false; valid_found = true; - 
new_acl.id = generateDigest(new_acl.id); + if (hash_acls) + new_acl.id = generateDigest(new_acl.id); result_acls.push_back(new_acl); } } @@ -274,7 +273,7 @@ struct KeeperStorageCreateRequest final : public KeeperStorageRequest KeeperStorage::Node created_node; Coordination::ACLs node_acls; - if (!fixupACL(request.acls, session_auth_ids, node_acls)) + if (!fixupACL(request.acls, session_auth_ids, node_acls, request.need_to_hash_acls)) { response.error = Coordination::Error::ZINVALIDACL; return {response_ptr, {}}; @@ -358,19 +357,16 @@ struct KeeperStorageGetRequest final : public KeeperStorageRequest bool checkAuth(KeeperStorage & storage, int64_t session_id) const override { - LOG_DEBUG(&Poco::Logger::get("DEBUG"), "CHECKING ACL FOR PATH {} IN GET", zk_request->getPath()); auto & container = storage.container; auto it = container.find(zk_request->getPath()); if (it == container.end()) return true; const auto & node_acls = storage.acl_map.convertNumber(it->value.acl_id); - LOG_DEBUG(&Poco::Logger::get("DEBUG"), "NODE ACLID {} ACL SIZE {}",it->value.acl_id, node_acls.size()); if (node_acls.empty()) return true; const auto & session_auths = storage.session_and_auth[session_id]; - LOG_DEBUG(&Poco::Logger::get("DEBUG"), "SESSION AUTHS SIZE {}", session_auths.size()); return checkACL(Coordination::ACL::Read, node_acls, session_auths); } @@ -911,7 +907,7 @@ KeeperWrapperFactory::KeeperWrapperFactory() } -KeeperStorage::ResponsesForSessions KeeperStorage::processRequest(const Coordination::ZooKeeperRequestPtr & zk_request, int64_t session_id, std::optional new_last_zxid) +KeeperStorage::ResponsesForSessions KeeperStorage::processRequest(const Coordination::ZooKeeperRequestPtr & zk_request, int64_t session_id, std::optional new_last_zxid, bool check_acl) { KeeperStorage::ResponsesForSessions results; if (new_last_zxid) @@ -969,7 +965,7 @@ KeeperStorage::ResponsesForSessions KeeperStorage::processRequest(const Coordina KeeperStorageRequestPtr storage_request = KeeperWrapperFactory::instance().get(zk_request); Coordination::ZooKeeperResponsePtr response; - if (!storage_request->checkAuth(*this, session_id)) + if (check_acl && !storage_request->checkAuth(*this, session_id)) { response = zk_request->makeResponse(); /// Original ZooKeeper always throws no auth, even when user provided some credentials diff --git a/src/Coordination/KeeperStorage.h b/src/Coordination/KeeperStorage.h index 7c90a9bd661..e3cb0f59fdc 100644 --- a/src/Coordination/KeeperStorage.h +++ b/src/Coordination/KeeperStorage.h @@ -116,7 +116,7 @@ public: session_expiry_queue.update(session_id, session_timeout_ms); } - ResponsesForSessions processRequest(const Coordination::ZooKeeperRequestPtr & request, int64_t session_id, std::optional new_last_zxid); + ResponsesForSessions processRequest(const Coordination::ZooKeeperRequestPtr & request, int64_t session_id, std::optional new_last_zxid, bool check_acl = true); void finalize(); diff --git a/src/Coordination/ZooKeeperDataReader.cpp b/src/Coordination/ZooKeeperDataReader.cpp index 42440250ed8..10d04ba77f9 100644 --- a/src/Coordination/ZooKeeperDataReader.cpp +++ b/src/Coordination/ZooKeeperDataReader.cpp @@ -289,6 +289,7 @@ Coordination::ZooKeeperRequestPtr deserializeCreateTxn(ReadBuffer & in) Coordination::read(result->data, in); Coordination::read(result->acls, in); Coordination::read(result->is_ephemeral, in); + result->need_to_hash_acls = false; /// How we should use it? 
It should just increment on request execution int32_t parent_c_version; Coordination::read(parent_c_version, in); @@ -476,7 +477,7 @@ bool deserializeTxn(KeeperStorage & storage, ReadBuffer & in, Poco::Logger * /*l if (request->getOpNum() == Coordination::OpNum::Multi && hasErrorsInMultiRequest(request)) return true; - storage.processRequest(request, session_id, zxid); + storage.processRequest(request, session_id, zxid, /* check_acl = */ false); } } diff --git a/tests/integration/test_keeper_zookeeper_converter/test.py b/tests/integration/test_keeper_zookeeper_converter/test.py index 61f4248f2be..fa2178974e9 100644 --- a/tests/integration/test_keeper_zookeeper_converter/test.py +++ b/tests/integration/test_keeper_zookeeper_converter/test.py @@ -223,6 +223,11 @@ def test_acls(started_cluster): yet_other_auth_connection.set("/test_multi_all_acl", b"Y") + no_auth_connection = get_genuine_zk() + + with pytest.raises(Exception): + no_auth_connection.set("/test_multi_all_acl", b"Z") + copy_zookeeper_data() genuine_connection = get_genuine_zk() From 590c82ce9b25916cb215f1c686733beac35e3652 Mon Sep 17 00:00:00 2001 From: alesapin Date: Mon, 21 Jun 2021 18:47:57 +0300 Subject: [PATCH 231/931] Fix style --- .../01921_concurrent_ttl_and_normal_merges_zookeeper_long.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/queries/0_stateless/01921_concurrent_ttl_and_normal_merges_zookeeper_long.sh b/tests/queries/0_stateless/01921_concurrent_ttl_and_normal_merges_zookeeper_long.sh index 62d0d3001fb..f84a69e8eb0 100755 --- a/tests/queries/0_stateless/01921_concurrent_ttl_and_normal_merges_zookeeper_long.sh +++ b/tests/queries/0_stateless/01921_concurrent_ttl_and_normal_merges_zookeeper_long.sh @@ -15,7 +15,7 @@ for i in $(seq 1 $NUM_REPLICAS); do $CLICKHOUSE_CLIENT -n --query "CREATE TABLE ttl_table$i( key DateTime ) - ENGINE ReplicatedMergeTree('/test/01921_concurrent_ttl_and_normal_merges/${CLICKHOUSE_DATABASE}/ttl_table', '$i') + ENGINE ReplicatedMergeTree('/test/01921_concurrent_ttl_and_normal_merges/${CLICKHOUSE_TEST_ZOOKEEPER_PREFIX}/ttl_table', '$i') ORDER BY tuple() TTL key + INTERVAL 1 SECOND SETTINGS merge_with_ttl_timeout=1, max_replicated_merges_with_ttl_in_queue=100, max_number_of_merges_with_ttl_in_pool=100;" From 00f9dfc12a7f49c854883fa074e8b0770623c4c6 Mon Sep 17 00:00:00 2001 From: MyroTk Date: Mon, 21 Jun 2021 17:49:28 +0200 Subject: [PATCH 232/931] Syntax update - changing 'is' to '=='. 
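
In Python, `is` tests object identity while `==` tests value equality. String
comparisons with `is` only pass when CPython happens to reuse the same object,
and Python 3.8+ emits a SyntaxWarning for `is` with a literal, so `==` (or the
`in [...]` form used below) is the correct check. A minimal sketch of the
difference, with hypothetical values not taken from the test suite:

    a = 'roundDown'
    b = ''.join(['round', 'Down'])  # equal value, built as a distinct object
    print(a == b)                   # True: value equality, what these tests need
    print(a is b)                   # False in CPython: identity is not guaranteed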
--- .../extended_precision_data_types/tests/arithmetic.py | 2 +- .../extended_precision_data_types/tests/rounding.py | 8 ++++---- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/tests/testflows/extended_precision_data_types/tests/arithmetic.py b/tests/testflows/extended_precision_data_types/tests/arithmetic.py index 49d7ee1fcb3..c57f3d7d8e1 100644 --- a/tests/testflows/extended_precision_data_types/tests/arithmetic.py +++ b/tests/testflows/extended_precision_data_types/tests/arithmetic.py @@ -141,7 +141,7 @@ def inline_check_dec(self, arithmetic_func, expected_result, node=None): if node is None: node = self.context.node - if arithmetic_func is 'negate' or arithmetic_func is 'abs': + if arithmetic_func in ['negate','abs']: with When(f"I check {arithmetic_func} with toDecimal256"): output = node.query(f"SELECT {arithmetic_func}(toDecimal256(1,0))").output diff --git a/tests/testflows/extended_precision_data_types/tests/rounding.py b/tests/testflows/extended_precision_data_types/tests/rounding.py index f01d6898b32..e32f4e941d3 100644 --- a/tests/testflows/extended_precision_data_types/tests/rounding.py +++ b/tests/testflows/extended_precision_data_types/tests/rounding.py @@ -25,7 +25,7 @@ def round_int_inline(self, func, expected_result, supported, int_type, min, max, if node is None: node = self.context.node - if func is 'roundDown': + if func == 'roundDown': with When(f"I check roundDown with {int_type}"): node.query(f"SELECT roundDown(to{int_type}(1), [0,2]), roundDown(to{int_type}(\'{max}\'), [0,2]), roundDown(to{int_type}(\'{min}\'), [0,2])", @@ -62,7 +62,7 @@ def round_int_table(self, func, expected_result, supported, int_type, min, max, with Given("I have a table"): table(name = table_name, data_type = int_type) - if func is 'roundDown': + if func == 'roundDown': for value in [1,max,min]: @@ -101,7 +101,7 @@ def round_dec_inline(self, func, expected_result, supported, node=None): if node is None: node = self.context.node - if func is 'roundDown': + if func == 'roundDown': with When(f"I check roundDown with Decimal256"): node.query(f"""SELECT roundDown(toDecimal256(1,0), [toDecimal256(0,0),toDecimal256(2,0)]), @@ -142,7 +142,7 @@ def round_dec_table(self, func, expected_result, supported, node=None): with Given("I have a table"): table(name = table_name, data_type = 'Decimal256(0)') - if func is 'roundDown': + if func == 'roundDown': for value in [1, max, min]: From f47dd116c4471f3e6bbf6bcee69136353d1483e5 Mon Sep 17 00:00:00 2001 From: Kseniia Sumarokova <54203879+kssenii@users.noreply.github.com> Date: Mon, 21 Jun 2021 19:07:17 +0300 Subject: [PATCH 233/931] Update test.py --- tests/integration/test_storage_s3/test.py | 24 +++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/tests/integration/test_storage_s3/test.py b/tests/integration/test_storage_s3/test.py index 3f5254af49a..8a5708f5e8e 100644 --- a/tests/integration/test_storage_s3/test.py +++ b/tests/integration/test_storage_s3/test.py @@ -276,28 +276,28 @@ def test_put_get_with_redirect(started_cluster): # Test put with restricted S3 server redirect. 
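# (Added context, not part of the original test: the "s3_max_redirects" instance
# is presumably configured with the `s3_max_redirects` setting at 0, so a PUT
# straight to MinIO succeeds, while a PUT through the redirecting proxy host
# must fail with a "Too many redirects" error; this is what the test asserts.)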
def test_put_with_zero_redirect(started_cluster): - # type: (clickhousecluster) -> none + # type: (ClickHouseCluster) -> None bucket = started_cluster.minio_bucket - instance = started_cluster.instances["s3_max_redirects"] # type: clickhouseinstance - table_format = "column1 uint32, column2 uint32, column3 uint32" + instance = started_cluster.instances["s3_max_redirects"] # type: ClickHouseInstance + table_format = "column1 UInt32, column2 UInt32, column3 UInt32" values = "(1, 1, 1), (1, 1, 1), (11, 11, 11)" filename = "test.csv" - # should work without redirect - query = "insert into table function s3('http://{}:{}/{}/{}', 'csv', '{}') values {}".format( - started_cluster.minio_ip, minio_internal_port, bucket, filename, table_format, values) + # Should work without redirect + query = "insert into table function s3('http://{}:{}/{}/{}', 'CSV', '{}') values {}".format( + started_cluster.minio_ip, MINIO_INTERNAL_PORT, bucket, filename, table_format, values) run_query(instance, query) - # should not work with redirect - query = "insert into table function s3('http://{}:{}/{}/{}', 'csv', '{}') values {}".format( + # Should not work with redirect + query = "insert into table function s3('http://{}:{}/{}/{}', 'CSV', '{}') values {}".format( started_cluster.minio_redirect_host, started_cluster.minio_redirect_port, bucket, filename, table_format, values) - exception_raised = false + exception_raised = False try: run_query(instance, query) - except exception as e: - assert str(e).find("too many redirects while trying to access") != -1 - exception_raised = true + except Exception as e: + assert str(e).find("Too many redirects while trying to access") != -1 + exception_raised = True finally: assert exception_raised From cbca2a6c36585b533ea0a0690f2a10f9e8eb1df3 Mon Sep 17 00:00:00 2001 From: sevirov <72220289+sevirov@users.noreply.github.com> Date: Mon, 21 Jun 2021 20:11:47 +0300 Subject: [PATCH 234/931] Update docs/en/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md Co-authored-by: olgarev <56617294+olgarev@users.noreply.github.com> --- .../external-dictionaries/external-dicts-dict-sources.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/en/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md b/docs/en/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md index 04903cea94c..4791a3d99c9 100644 --- a/docs/en/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md +++ b/docs/en/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md @@ -525,7 +525,7 @@ Setting fields: - `invalidate_query` – Query for checking the dictionary status. Optional parameter. Read more in the section [Updating dictionaries](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-lifetime.md). -- `fail_on_connection_loss` – The configuration parameter that controls unexpected connection loss during query execution. If `true`, then there will be an exception about connection loss straight away if there was no connection when the query was executed. If `false`, then there will be attempts to retry queries three times. By default, `false`. +- `fail_on_connection_loss` – The configuration parameter that controls unexpected connection loss during query execution. If `true`, an exception is thrown immediately if the connection between client and server was lost while the query was executed. 
If `false`, the ClickHouse server retries to execute the query three times before throwing an exception. Note that retrying leads to increased response times. Default value: `false`. MySQL can be connected on a local host via sockets. To do this, set `host` and `socket`. From e9767752688144eec1c948d03533254ad295a24b Mon Sep 17 00:00:00 2001 From: sevirov <72220289+sevirov@users.noreply.github.com> Date: Mon, 21 Jun 2021 20:12:34 +0300 Subject: [PATCH 235/931] Update docs/ru/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md Co-authored-by: olgarev <56617294+olgarev@users.noreply.github.com> --- .../external-dictionaries/external-dicts-dict-sources.md | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/docs/ru/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md b/docs/ru/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md index 82455c62561..7abca09bf9b 100644 --- a/docs/ru/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md +++ b/docs/ru/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md @@ -494,7 +494,7 @@ SOURCE(MYSQL( - `invalidate_query` — запрос для проверки статуса словаря. Необязательный параметр. Читайте подробнее в разделе [Обновление словарей](external-dicts-dict-lifetime.md). -- `fail_on_connection_loss` – Параметр конфигурации, контролирующий неожиданную потерю соединения во время выполнения запроса. Если `true`, то сразу же возникнет исключение о том, что при выполнении запроса соединения не было. Если `false`, то будут попытки повторить запросы три раза. По умолчанию `false`. +- `fail_on_connection_loss` – параметр конфигурации, контролирующий поведение сервера при потере соединения во время выполнения запроса. Если значение `true`, то при потере соединения во время выполнения запроса исключение генерируется сразу же. Если значение `false`, то сервер повторно попытается выполнить запрос три раза прежде чем сгенерировать исключение. Имейте в виду, что повторные попытки могут увеличить время выполнения запроса. Значение по умолчанию: `false`. MySQL можно подключить на локальном хосте через сокеты, для этого необходимо задать `host` и `socket`. @@ -735,4 +735,3 @@ Setting fields: - `where` – Условие выборки. Синтаксис для условий такой же как для `WHERE` выражения в PostgreSQL, для примера, `id > 10 AND id < 20`. Необязательный параметр. - `invalidate_query` – Запрос для проверки условия загрузки словаря. Необязательный параметр. Читайте больше в разделе [Обновление словарей](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-lifetime.md). 
- From 0de6e90a3457130a76a99e79a95a2a86784400cf Mon Sep 17 00:00:00 2001 From: Alexander Tokmakov Date: Mon, 21 Jun 2021 20:29:32 +0300 Subject: [PATCH 236/931] fix tests --- tests/clickhouse-test | 2 +- tests/config/install.sh | 5 +++++ tests/config/users.d/timeouts.xml | 9 +++++++++ ..._zookeeper_mutation_stuck_after_replace_partition.sql | 1 + tests/queries/0_stateless/01154_move_partition_long.sh | 4 ++-- 5 files changed, 18 insertions(+), 3 deletions(-) create mode 100644 tests/config/users.d/timeouts.xml diff --git a/tests/clickhouse-test b/tests/clickhouse-test index e508abab70c..3fa71215b17 100755 --- a/tests/clickhouse-test +++ b/tests/clickhouse-test @@ -44,7 +44,7 @@ MESSAGES_TO_RETRY = [ DISTRIBUTED_DDL_TIMEOUT_MSG # FIXME ] -MAX_RETRIES = 5 +MAX_RETRIES = 3 class Terminated(KeyboardInterrupt): pass diff --git a/tests/config/install.sh b/tests/config/install.sh index 7e01860e241..08add810cbf 100755 --- a/tests/config/install.sh +++ b/tests/config/install.sh @@ -41,6 +41,11 @@ ln -sf $SRC_PATH/users.d/database_atomic_drop_detach_sync.xml $DEST_SERVER_PATH/ ln -sf $SRC_PATH/users.d/opentelemetry.xml $DEST_SERVER_PATH/users.d/ ln -sf $SRC_PATH/users.d/remote_queries.xml $DEST_SERVER_PATH/users.d/ +# FIXME DataPartsExchange may hang for http_send_timeout seconds +# when nobody is going to read from the other side of socket (due to "Fetching of part was cancelled"), +# but socket is owned by HTTPSessionPool, so it's not closed. +ln -sf $SRC_PATH/users.d/timeouts.xml $DEST_SERVER_PATH/users.d/ + ln -sf $SRC_PATH/ints_dictionary.xml $DEST_SERVER_PATH/ ln -sf $SRC_PATH/strings_dictionary.xml $DEST_SERVER_PATH/ ln -sf $SRC_PATH/decimals_dictionary.xml $DEST_SERVER_PATH/ diff --git a/tests/config/users.d/timeouts.xml b/tests/config/users.d/timeouts.xml new file mode 100644 index 00000000000..583caca36a4 --- /dev/null +++ b/tests/config/users.d/timeouts.xml @@ -0,0 +1,9 @@ + + + + + 60 + 60 + + + diff --git a/tests/queries/0_stateless/01149_zookeeper_mutation_stuck_after_replace_partition.sql b/tests/queries/0_stateless/01149_zookeeper_mutation_stuck_after_replace_partition.sql index 178f9b81ead..951bc149533 100644 --- a/tests/queries/0_stateless/01149_zookeeper_mutation_stuck_after_replace_partition.sql +++ b/tests/queries/0_stateless/01149_zookeeper_mutation_stuck_after_replace_partition.sql @@ -15,6 +15,7 @@ alter table rmt update s = 's'||toString(n) where 1; select * from rmt; alter table rmt replace partition '0' from mt; +system sync replica rmt; select table, partition_id, name, rows from system.parts where database=currentDatabase() and table in ('mt', 'rmt') and active=1 order by table, name; diff --git a/tests/queries/0_stateless/01154_move_partition_long.sh b/tests/queries/0_stateless/01154_move_partition_long.sh index 66ebbacee42..1ce40770e46 100755 --- a/tests/queries/0_stateless/01154_move_partition_long.sh +++ b/tests/queries/0_stateless/01154_move_partition_long.sh @@ -12,10 +12,10 @@ engines[2]="ReplicatedMergeTree('/test/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/{shard} for ((i=0; i<16; i++)) do $CLICKHOUSE_CLIENT -q "CREATE TABLE dst_$i (p UInt64, k UInt64, v UInt64) ENGINE=ReplicatedMergeTree('/test/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/dst', '$i') - PARTITION BY p % 10 ORDER BY k" 2>&1| grep -Pv "Retrying createReplica|created by another server at the same moment, will retry" & + PARTITION BY p % 10 ORDER BY k" 2>&1| grep -Pv "Retrying createReplica|created by another server at the same moment, will retry|is already started to be removing" 2>&1 & engine=${engines[$((i % 
${#engines[@]}))]}
     $CLICKHOUSE_CLIENT -q "CREATE TABLE src_$i (p UInt64, k UInt64, v UInt64) ENGINE=$engine
-        PARTITION BY p % 10 ORDER BY k" 2>&1| grep -Pv "Retrying createReplica|created by another server at the same moment, will retry" &
+        PARTITION BY p % 10 ORDER BY k" 2>&1| grep -Pv "Retrying createReplica|created by another server at the same moment, will retry|is already started to be removing" 2>&1 &
 done
 wait

From 6e3b1841de3d978108850f0a1153df6912e9a56c Mon Sep 17 00:00:00 2001
From: Dmitriy
Date: Mon, 21 Jun 2021 20:51:11 +0300
Subject: [PATCH 237/931] Update s3Cluster.md
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Поправил примеры.
---
 docs/en/sql-reference/table-functions/s3Cluster.md | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/docs/en/sql-reference/table-functions/s3Cluster.md b/docs/en/sql-reference/table-functions/s3Cluster.md
index b49da53f01a..4bde49b8cc0 100644
--- a/docs/en/sql-reference/table-functions/s3Cluster.md
+++ b/docs/en/sql-reference/table-functions/s3Cluster.md
@@ -26,13 +26,13 @@ A table with the specified structure for reading or writing data in the specifie

 **Examples**

-Selecting the data from the cluster `cluster_simple` using source `http://minio1:9001/root/data/{clickhouse,database}/*`:
+Selecting the data from all files in the cluster `cluster_simple`:

 ``` sql
 SELECT * from s3Cluster('cluster_simple', 'http://minio1:9001/root/data/{clickhouse,database}/*', 'minio', 'minio123', 'CSV', 'name String, value UInt32, polygon Array(Array(Tuple(Float64, Float64)))') ORDER BY (name, value, polygon);
 ```

-Count the total amount of rows in all files of the cluster `cluster_simple`:
+Count the total number of rows in all files in the cluster `cluster_simple`:

 ``` sql
 SELECT count(*) from s3Cluster('cluster_simple', 'http://minio1:9001/root/data/{clickhouse,database}/*', 'minio', 'minio123', 'CSV', 'name String, value UInt32, polygon Array(Array(Tuple(Float64, Float64)))');
 ```

From 58a66db58843c2f7d2f4f15d5af46633f4b95256 Mon Sep 17 00:00:00 2001
From: Alexey Milovidov
Date: Mon, 21 Jun 2021 21:51:12 +0300
Subject: [PATCH 238/931] Fix flaky test 01249_flush_interactive.sh

---
 .../01249_flush_interactive.reference | 12 -----------
 .../0_stateless/01249_flush_interactive.sh | 21 +++++++++++++++----
 2 files changed, 17 insertions(+), 16 deletions(-)

diff --git a/tests/queries/0_stateless/01249_flush_interactive.reference b/tests/queries/0_stateless/01249_flush_interactive.reference
index 6d6abb2d37f..e69de29bb2d 100644
--- a/tests/queries/0_stateless/01249_flush_interactive.reference
+++ b/tests/queries/0_stateless/01249_flush_interactive.reference
@@ -1,12 +0,0 @@
-0
-1
-2
-3
-4
----
-0
-1
-2
-3
-4
----
diff --git a/tests/queries/0_stateless/01249_flush_interactive.sh b/tests/queries/0_stateless/01249_flush_interactive.sh
index 2af75dbcbe5..89167002ed5 100755
--- a/tests/queries/0_stateless/01249_flush_interactive.sh
+++ b/tests/queries/0_stateless/01249_flush_interactive.sh
@@ -11,7 +11,20 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)

 # unless the program tries to output a thousand more lines, overflowing the pipe buffer and terminating with Broken Pipe.
 # But if the program just outputs 5 (or slightly more) lines and hangs up, the pipeline is not terminated.
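 # A rough standalone illustration of the same pipe semantics (not part of this
 # test): `yes | head -n 5` exits at once, because `yes` keeps writing and is
 # killed by SIGPIPE, while `{ seq 0 4; sleep 1000; } | head -n 10` hangs,
 # because the producer stops writing and `head` never sees EOF.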
-timeout 5 ${CLICKHOUSE_LOCAL} --max_execution_time 10 --query "SELECT DISTINCT number % 5 FROM system.numbers" ||: -echo '---' -timeout 5 ${CLICKHOUSE_CURL} -sS --no-buffer "${CLICKHOUSE_URL}&max_execution_time=10" --data-binary "SELECT DISTINCT number % 5 FROM system.numbers" ||: -echo '---' +function test() +{ + timeout 5 ${CLICKHOUSE_LOCAL} --max_execution_time 10 --query " + SELECT DISTINCT number % 5 FROM system.numbers" ||: + echo '---' + timeout 5 ${CLICKHOUSE_CURL} -sS --no-buffer "${CLICKHOUSE_URL}&max_execution_time=10" --data-binary " + SELECT DISTINCT number % 5 FROM system.numbers" ||: + echo '---' +} + +# The test depends on timeouts. And there is a chance that under high system load the query +# will not be able to finish in 5 seconds (this will lead to test flakiness). +# Let's check that is will be able to show the expected result at least once. +while true; do + [[ $(test) == $(echo -ne "0\n1\n2\n3\n4\n---\n0\n1\n2\n3\n4\n---\n") ]] && break + sleep 1 +done From 3275a537236a80f0b6459fcdec4e6209f825d81e Mon Sep 17 00:00:00 2001 From: Benjamin Naecker Date: Mon, 21 Jun 2021 12:36:32 -0700 Subject: [PATCH 239/931] Resolves the actual port a server binds, in the case the user requests any available port from the OS. --- programs/server/Server.cpp | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/programs/server/Server.cpp b/programs/server/Server.cpp index ad2c83da194..59984773d76 100644 --- a/programs/server/Server.cpp +++ b/programs/server/Server.cpp @@ -324,6 +324,13 @@ Poco::Net::SocketAddress Server::socketBindListen(Poco::Net::ServerSocket & sock socket.bind(address, /* reuseAddress = */ true, /* reusePort = */ config().getBool("listen_reuse_port", false)); #endif + /// If caller requests any available port from the OS, discover it after binding. 
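+    /// (Binding with port 0 asks the kernel for an ephemeral port; the chosen
+    /// port only exists after bind(), so it has to be read back here. Poco's
+    /// ServerSocket::address() is assumed to wrap getsockname() for this.)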
+    if (port == 0)
+    {
+        address = socket.address();
+        LOG_DEBUG(&logger(), "Requested any available port (port == 0), actual port is {:d}", address.port());
+    }
+
     socket.listen(/* backlog = */ config().getUInt("listen_backlog", 64));

     return address;

From 71e9689ba6a65a3461c1a386534c5bad05344241 Mon Sep 17 00:00:00 2001
From: alesapin
Date: Mon, 21 Jun 2021 22:59:19 +0300
Subject: [PATCH 240/931] Fix PVS warning

---
 src/Coordination/ZooKeeperDataReader.cpp | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/src/Coordination/ZooKeeperDataReader.cpp b/src/Coordination/ZooKeeperDataReader.cpp
index 10d04ba77f9..cf28627961f 100644
--- a/src/Coordination/ZooKeeperDataReader.cpp
+++ b/src/Coordination/ZooKeeperDataReader.cpp
@@ -422,6 +422,9 @@ bool isErrorRequest(Coordination::ZooKeeperRequestPtr request)

 bool hasErrorsInMultiRequest(Coordination::ZooKeeperRequestPtr request)
 {
+    if (request == nullptr)
+        return true;
+
     for (const auto & subrequest : dynamic_cast(request.get())->requests)
         if (subrequest == nullptr)
             return true;

From f3b0f11b59acacb8f5ac8304dc590c5000729662 Mon Sep 17 00:00:00 2001
From: Kseniia Sumarokova <54203879+kssenii@users.noreply.github.com>
Date: Mon, 21 Jun 2021 23:04:58 +0300
Subject: [PATCH 241/931] Update StorageHDFS.cpp

---
 src/Storages/HDFS/StorageHDFS.cpp | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/src/Storages/HDFS/StorageHDFS.cpp b/src/Storages/HDFS/StorageHDFS.cpp
index c878fd4e1f8..578da239c20 100644
--- a/src/Storages/HDFS/StorageHDFS.cpp
+++ b/src/Storages/HDFS/StorageHDFS.cpp
@@ -26,7 +26,6 @@
 #include
 #include
 #include
-#include

 namespace fs = std::filesystem;

@@ -35,6 +34,7 @@ namespace DB
 namespace ErrorCodes
 {
     extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH;
+    extern const int ACCESS_DENIED;
 }

 StorageHDFS::StorageHDFS(
@@ -334,7 +334,7 @@ void StorageHDFS::truncate(const ASTPtr & /* query */, const StorageMetadataPtr

     int ret = hdfsDelete(fs.get(), path.data(), 0);
     if (ret)
-        throw Exception(ErrorCodes::LOGICAL_ERROR, "Unable to truncate hdfs table: {}", std::string(hdfsGetLastError()));
+        throw Exception(ErrorCodes::ACCESS_DENIED, "Unable to truncate hdfs table: {}", std::string(hdfsGetLastError()));
 }

From 9415b6e935a4a03d7c296df55cae76496211cd10 Mon Sep 17 00:00:00 2001
From: Kseniia Sumarokova <54203879+kssenii@users.noreply.github.com>
Date: Mon, 21 Jun 2021 23:46:26 +0300
Subject: [PATCH 242/931] Update 01921_test_progress_bar.py

---
 tests/queries/0_stateless/01921_test_progress_bar.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/tests/queries/0_stateless/01921_test_progress_bar.py b/tests/queries/0_stateless/01921_test_progress_bar.py
index 89806bcfdab..8e917d4cc46 100755
--- a/tests/queries/0_stateless/01921_test_progress_bar.py
+++ b/tests/queries/0_stateless/01921_test_progress_bar.py
@@ -17,4 +17,4 @@ with client(name='client1>', log=log) as client1, client(name='client2>', log=lo
     client1.send('SELECT number FROM numbers(100) FORMAT Null')
     client1.expect('Progress: 100\.00 rows, 800\.00 B.*' + end_of_block)
     # 0 rows because Format Null.
-    client1.expect('0 rows in set. Elapsed: 0\.[\\w]{3} sec.' + end_of_block)
+    client1.expect('0 rows in set. Elapsed: [\\w]{1}\.[\\w]{3} sec.' + end_of_block)

From ee28db0dafc447d30604127193f8622804b04269 Mon Sep 17 00:00:00 2001
From: Dmitriy
Date: Tue, 22 Jun 2021 00:23:24 +0300
Subject: [PATCH 243/931] Fix the inaccuracy
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Внес небольшую поправку.
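
For context, the setting discussed in this fix belongs to the MySQL dictionary
source configuration. A minimal sketch of where it lives, assuming the standard
layout of a MySQL source (host, user and table names below are placeholders):

    <source>
        <mysql>
            <host>localhost</host>
            <port>3306</port>
            <user>default</user>
            <password></password>
            <db>test</db>
            <table>dict_source</table>
            <fail_on_connection_loss>true</fail_on_connection_loss>
        </mysql>
    </source>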
--- .../external-dictionaries/external-dicts-dict-sources.md | 2 +- .../external-dictionaries/external-dicts-dict-sources.md | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/en/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md b/docs/en/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md index 4791a3d99c9..ebc04d01de3 100644 --- a/docs/en/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md +++ b/docs/en/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md @@ -525,7 +525,7 @@ Setting fields: - `invalidate_query` – Query for checking the dictionary status. Optional parameter. Read more in the section [Updating dictionaries](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-lifetime.md). -- `fail_on_connection_loss` – The configuration parameter that controls unexpected connection loss during query execution. If `true`, an exception is thrown immediately if the connection between client and server was lost while the query was executed. If `false`, the ClickHouse server retries to execute the query three times before throwing an exception. Note that retrying leads to increased response times. Default value: `false`. +- `fail_on_connection_loss` – The configuration parameter that controls unexpected connection loss before the query is executed. If `true`, an exception is thrown immediately if the connection between client and server was lost. If `false`, the ClickHouse server retries to execute the query three times before throwing an exception. Note that retrying leads to increased response times. Default value: `false`. MySQL can be connected on a local host via sockets. To do this, set `host` and `socket`. diff --git a/docs/ru/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md b/docs/ru/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md index 7abca09bf9b..ac371cc2c16 100644 --- a/docs/ru/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md +++ b/docs/ru/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md @@ -494,7 +494,7 @@ SOURCE(MYSQL( - `invalidate_query` — запрос для проверки статуса словаря. Необязательный параметр. Читайте подробнее в разделе [Обновление словарей](external-dicts-dict-lifetime.md). -- `fail_on_connection_loss` – параметр конфигурации, контролирующий поведение сервера при потере соединения во время выполнения запроса. Если значение `true`, то при потере соединения во время выполнения запроса исключение генерируется сразу же. Если значение `false`, то сервер повторно попытается выполнить запрос три раза прежде чем сгенерировать исключение. Имейте в виду, что повторные попытки могут увеличить время выполнения запроса. Значение по умолчанию: `false`. +- `fail_on_connection_loss` – параметр конфигурации, контролирующий поведение сервера при потере соединения перед выполнением запроса. Если значение `true`, то исключение генерируется сразу же, если соединение между клиентом и сервером было потеряно. Если значение `false`, то сервер повторно попытается выполнить запрос три раза прежде чем сгенерировать исключение. Имейте в виду, что повторные попытки могут увеличить время выполнения запроса. Значение по умолчанию: `false`. MySQL можно подключить на локальном хосте через сокеты, для этого необходимо задать `host` и `socket`. 
From bf0304bc87cf46645e3d79fb433631a6721e9a08 Mon Sep 17 00:00:00 2001 From: yuchuansun Date: Tue, 22 Jun 2021 14:43:28 +0800 Subject: [PATCH 244/931] doc: update cluster.md in chinese --- docs/zh/operations/system-tables/clusters.md | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/docs/zh/operations/system-tables/clusters.md b/docs/zh/operations/system-tables/clusters.md index 1e5935c276e..71ecc4245d3 100644 --- a/docs/zh/operations/system-tables/clusters.md +++ b/docs/zh/operations/system-tables/clusters.md @@ -1,9 +1,4 @@ ---- -machine_translated: true -machine_translated_rev: 5decc73b5dc60054f19087d3690c4eb99446a6c3 ---- - -# 系统。集群 {#system-clusters} +# 系统-集群 {#system-clusters} 包含有关配置文件中可用的集群及其中的服务器的信息。 From 8a70a9ba7c218315f8585f27bf9029561a75d7f6 Mon Sep 17 00:00:00 2001 From: Yatsishin Ilya <2159081+qoega@users.noreply.github.com> Date: Tue, 22 Jun 2021 10:39:27 +0300 Subject: [PATCH 245/931] update cmake --- contrib/h3-cmake/CMakeLists.txt | 19 ++++++++++--------- 1 file changed, 10 insertions(+), 9 deletions(-) diff --git a/contrib/h3-cmake/CMakeLists.txt b/contrib/h3-cmake/CMakeLists.txt index 6b184a175b0..f4c70dc476f 100644 --- a/contrib/h3-cmake/CMakeLists.txt +++ b/contrib/h3-cmake/CMakeLists.txt @@ -3,21 +3,22 @@ set(H3_BINARY_DIR "${ClickHouse_BINARY_DIR}/contrib/h3/src/h3lib") set(SRCS "${H3_SOURCE_DIR}/lib/algos.c" -"${H3_SOURCE_DIR}/lib/baseCells.c" -"${H3_SOURCE_DIR}/lib/bbox.c" "${H3_SOURCE_DIR}/lib/coordijk.c" -"${H3_SOURCE_DIR}/lib/faceijk.c" -"${H3_SOURCE_DIR}/lib/geoCoord.c" -"${H3_SOURCE_DIR}/lib/h3Index.c" -"${H3_SOURCE_DIR}/lib/h3UniEdge.c" -"${H3_SOURCE_DIR}/lib/linkedGeo.c" -"${H3_SOURCE_DIR}/lib/localij.c" -"${H3_SOURCE_DIR}/lib/mathExtensions.c" +"${H3_SOURCE_DIR}/lib/bbox.c" "${H3_SOURCE_DIR}/lib/polygon.c" +"${H3_SOURCE_DIR}/lib/h3Index.c" "${H3_SOURCE_DIR}/lib/vec2d.c" "${H3_SOURCE_DIR}/lib/vec3d.c" "${H3_SOURCE_DIR}/lib/vertex.c" +"${H3_SOURCE_DIR}/lib/linkedGeo.c" +"${H3_SOURCE_DIR}/lib/localij.c" +"${H3_SOURCE_DIR}/lib/latLng.c" +"${H3_SOURCE_DIR}/lib/directedEdge.c" +"${H3_SOURCE_DIR}/lib/mathExtensions.c" +"${H3_SOURCE_DIR}/lib/iterators.c" "${H3_SOURCE_DIR}/lib/vertexGraph.c" +"${H3_SOURCE_DIR}/lib/faceijk.c" +"${H3_SOURCE_DIR}/lib/baseCells.c" ) configure_file("${H3_SOURCE_DIR}/include/h3api.h.in" "${H3_BINARY_DIR}/include/h3api.h") From 730554589f0b876dbec48178408b28b423c3d235 Mon Sep 17 00:00:00 2001 From: alesapin Date: Tue, 22 Jun 2021 11:14:54 +0300 Subject: [PATCH 246/931] fix superdigest --- tests/integration/test_keeper_auth/configs/keeper_config.xml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/integration/test_keeper_auth/configs/keeper_config.xml b/tests/integration/test_keeper_auth/configs/keeper_config.xml index bb3c9a5d94a..bee3ccb0aba 100644 --- a/tests/integration/test_keeper_auth/configs/keeper_config.xml +++ b/tests/integration/test_keeper_auth/configs/keeper_config.xml @@ -4,7 +4,7 @@ 1 /var/lib/clickhouse/coordination/log /var/lib/clickhouse/coordination/snapshots - super:0DPiKuNIrrVmD8IUCuw1hQxNqZc= + super:xQJmxLMiHGwaqBvst5y6rkB6HQs= 5000 From f2486ac8e93978d0b8efa86bacdd6e457c67bc64 Mon Sep 17 00:00:00 2001 From: Yatsishin Ilya <2159081+qoega@users.noreply.github.com> Date: Tue, 22 Jun 2021 12:23:27 +0300 Subject: [PATCH 247/931] changes --- src/Functions/geoToH3.cpp | 15 +++++++++++---- src/Functions/h3EdgeAngle.cpp | 2 +- src/Functions/h3EdgeLengthM.cpp | 2 +- src/Functions/h3GetBaseCell.cpp | 2 +- src/Functions/h3GetResolution.cpp | 2 +- src/Functions/h3HexAreaM2.cpp | 2 +- 
src/Functions/h3IndexesAreNeighbors.cpp | 2 +- src/Functions/h3IsValid.cpp | 2 +- src/Functions/h3ToChildren.cpp | 4 ++-- src/Functions/h3ToParent.cpp | 2 +- src/Functions/h3ToString.cpp | 2 +- src/Functions/h3kRing.cpp | 6 +++--- 12 files changed, 25 insertions(+), 18 deletions(-) diff --git a/src/Functions/geoToH3.cpp b/src/Functions/geoToH3.cpp index 7edb3faf62d..4fa20d0ad62 100644 --- a/src/Functions/geoToH3.cpp +++ b/src/Functions/geoToH3.cpp @@ -21,6 +21,7 @@ namespace DB namespace ErrorCodes { extern const int ILLEGAL_TYPE_OF_ARGUMENT; + extern const int INCORRECT_DATA; } namespace @@ -79,11 +80,17 @@ public: const double lat = col_lat->getFloat64(row); const UInt8 res = col_res->getUInt(row); - GeoCoord coord; - coord.lon = degsToRads(lon); + LatLng coord; + coord.lng = degsToRads(lon); coord.lat = degsToRads(lat); - - H3Index hindex = geoToH3(&coord, res); + + H3Index hindex; + H3Error err = latLngToCell(&coord, res, &hindex); + if (err) { + throw Exception( + "Incorrect coordinates lat:" + std::to_string(coord.lat) + " lng:" + std::to_string(coord.lng) + " err:" + std::to_string(err), + ErrorCodes::INCORRECT_DATA); + } dst_data[row] = hindex; } diff --git a/src/Functions/h3EdgeAngle.cpp b/src/Functions/h3EdgeAngle.cpp index 0fdafff9eed..071581a7c60 100644 --- a/src/Functions/h3EdgeAngle.cpp +++ b/src/Functions/h3EdgeAngle.cpp @@ -66,7 +66,7 @@ public: + " is out of bounds because the maximum resolution in H3 library is " + toString(MAX_H3_RES), ErrorCodes::ARGUMENT_OUT_OF_BOUND); // Numerical constant is 180 degrees / pi / Earth radius, Earth radius is from h3 sources - Float64 res = 8.99320592271288084e-6 * edgeLengthM(resolution); + Float64 res = 8.99320592271288084e-6 * getHexagonEdgeLengthAvgM(resolution); dst_data[row] = res; } diff --git a/src/Functions/h3EdgeLengthM.cpp b/src/Functions/h3EdgeLengthM.cpp index 5ec57510e54..56374e10077 100644 --- a/src/Functions/h3EdgeLengthM.cpp +++ b/src/Functions/h3EdgeLengthM.cpp @@ -70,7 +70,7 @@ public: throw Exception("The argument 'resolution' (" + toString(resolution) + ") of function " + getName() + " is out of bounds because the maximum resolution in H3 library is " + toString(MAX_H3_RES), ErrorCodes::ARGUMENT_OUT_OF_BOUND); - Float64 res = edgeLengthM(resolution); + Float64 res = getHexagonEdgeLengthAvgM(resolution); dst_data[row] = res; } diff --git a/src/Functions/h3GetBaseCell.cpp b/src/Functions/h3GetBaseCell.cpp index 7f3843ed792..b73245f751b 100644 --- a/src/Functions/h3GetBaseCell.cpp +++ b/src/Functions/h3GetBaseCell.cpp @@ -59,7 +59,7 @@ public: { const UInt64 hindex = col_hindex->getUInt(row); - UInt8 res = h3GetBaseCell(hindex); + UInt8 res = getBaseCellNumber(hindex); dst_data[row] = res; } diff --git a/src/Functions/h3GetResolution.cpp b/src/Functions/h3GetResolution.cpp index 074e07e4277..49ade509934 100644 --- a/src/Functions/h3GetResolution.cpp +++ b/src/Functions/h3GetResolution.cpp @@ -59,7 +59,7 @@ public: { const UInt64 hindex = col_hindex->getUInt(row); - UInt8 res = h3GetResolution(hindex); + UInt8 res = getResolution(hindex); dst_data[row] = res; } diff --git a/src/Functions/h3HexAreaM2.cpp b/src/Functions/h3HexAreaM2.cpp index e630fb7bd70..7f41348a14b 100644 --- a/src/Functions/h3HexAreaM2.cpp +++ b/src/Functions/h3HexAreaM2.cpp @@ -65,7 +65,7 @@ public: throw Exception("The argument 'resolution' (" + toString(resolution) + ") of function " + getName() + " is out of bounds because the maximum resolution in H3 library is " + toString(MAX_H3_RES), ErrorCodes::ARGUMENT_OUT_OF_BOUND); - Float64 res =
hexAreaM2(resolution); + Float64 res = getHexagonAreaAvgM2(resolution); dst_data[row] = res; } diff --git a/src/Functions/h3IndexesAreNeighbors.cpp b/src/Functions/h3IndexesAreNeighbors.cpp index 3c03d3d1adb..6507998e24c 100644 --- a/src/Functions/h3IndexesAreNeighbors.cpp +++ b/src/Functions/h3IndexesAreNeighbors.cpp @@ -67,7 +67,7 @@ public: const UInt64 hindex_origin = col_hindex_origin->getUInt(row); const UInt64 hindex_dest = col_hindex_dest->getUInt(row); - UInt8 res = h3IndexesAreNeighbors(hindex_origin, hindex_dest); + UInt8 res = areNeighborCells(hindex_origin, hindex_dest); dst_data[row] = res; } diff --git a/src/Functions/h3IsValid.cpp b/src/Functions/h3IsValid.cpp index d7f5a2c0771..bc140450b71 100644 --- a/src/Functions/h3IsValid.cpp +++ b/src/Functions/h3IsValid.cpp @@ -59,7 +59,7 @@ public: { const UInt64 hindex = col_hindex->getUInt(row); - UInt8 is_valid = h3IsValid(hindex) == 0 ? 0 : 1; + UInt8 is_valid = isValidCell(hindex) == 0 ? 0 : 1; dst_data[row] = is_valid; } diff --git a/src/Functions/h3ToChildren.cpp b/src/Functions/h3ToChildren.cpp index d472c298432..88ac3056e72 100644 --- a/src/Functions/h3ToChildren.cpp +++ b/src/Functions/h3ToChildren.cpp @@ -84,14 +84,14 @@ public: throw Exception("The argument 'resolution' (" + toString(child_resolution) + ") of function " + getName() + " is out of bounds because the maximum resolution in H3 library is " + toString(MAX_H3_RES), ErrorCodes::ARGUMENT_OUT_OF_BOUND); - const size_t vec_size = maxH3ToChildrenSize(parent_hindex, child_resolution); + const size_t vec_size = cellToChildrenSize(parent_hindex, child_resolution); if (vec_size > MAX_ARRAY_SIZE) throw Exception("The result of function" + getName() + " (array of " + toString(vec_size) + " elements) will be too large with resolution argument = " + toString(child_resolution), ErrorCodes::TOO_LARGE_ARRAY_SIZE); hindex_vec.resize(vec_size); - h3ToChildren(parent_hindex, child_resolution, hindex_vec.data()); + cellToChildren(parent_hindex, child_resolution, hindex_vec.data()); dst_data.reserve(dst_data.size() + vec_size); for (auto hindex : hindex_vec) diff --git a/src/Functions/h3ToParent.cpp b/src/Functions/h3ToParent.cpp index 6719d9f3456..9755184d63c 100644 --- a/src/Functions/h3ToParent.cpp +++ b/src/Functions/h3ToParent.cpp @@ -74,7 +74,7 @@ public: throw Exception("The argument 'resolution' (" + toString(resolution) + ") of function " + getName() + " is out of bounds because the maximum resolution in H3 library is " + toString(MAX_H3_RES), ErrorCodes::ARGUMENT_OUT_OF_BOUND); - UInt64 res = h3ToParent(hindex, resolution); + UInt64 res = cellToParent(hindex, resolution); dst_data[row] = res; } diff --git a/src/Functions/h3ToString.cpp b/src/Functions/h3ToString.cpp index dcd0951f67f..8ac97db0621 100644 --- a/src/Functions/h3ToString.cpp +++ b/src/Functions/h3ToString.cpp @@ -66,7 +66,7 @@ public: { const UInt64 hindex = col_hindex->getUInt(i); - if (!h3IsValid(hindex)) + if (!isValidCell(hindex)) { throw Exception("Invalid H3 index: " + std::to_string(hindex), ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT); } diff --git a/src/Functions/h3kRing.cpp b/src/Functions/h3kRing.cpp index b54ed48ef3f..8b91f2fa1c7 100644 --- a/src/Functions/h3kRing.cpp +++ b/src/Functions/h3kRing.cpp @@ -77,7 +77,7 @@ public: const H3Index origin_hindex = col_hindex->getUInt(row); const int k = col_k->getInt(row); - /// Overflow is possible. The function maxKringSize does not check for overflow. + /// Overflow is possible. The function maxGridDiskSize does not check for overflow. 
/// The calculation is similar to square of k but several times more. /// Let's use huge underestimation as the safe bound. We should not allow to generate too large arrays nevertheless. constexpr auto max_k = 10000; @@ -86,9 +86,9 @@ public: if (k < 0) throw Exception(ErrorCodes::PARAMETER_OUT_OF_BOUND, "Argument 'k' for {} function must be non negative", getName()); - const auto vec_size = maxKringSize(k); + const auto vec_size = maxGridDiskSize(k); hindex_vec.resize(vec_size); - kRing(origin_hindex, k, hindex_vec.data()); + gridDisk(origin_hindex, k, hindex_vec.data()); dst_data.reserve(dst_data.size() + vec_size); for (auto hindex : hindex_vec) From 1b0a598c2189e77f1e4dcc89de7281a675d18bd2 Mon Sep 17 00:00:00 2001 From: Mikhail Date: Tue, 22 Jun 2021 12:31:37 +0300 Subject: [PATCH 248/931] =?UTF-8?q?=D0=BE=D0=BF=D0=B5=D1=87=D0=B0=D1=82?= =?UTF-8?q?=D0=BA=D0=B8=20=D0=B8=D1=81=D0=BF=D1=80=D0=B0=D0=B2=D0=BB=D0=B5?= =?UTF-8?q?=D0=BD=D1=8B?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- docs/en/operations/settings/settings.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/docs/en/operations/settings/settings.md b/docs/en/operations/settings/settings.md index 4afedb2c32f..055b394d0a9 100644 --- a/docs/en/operations/settings/settings.md +++ b/docs/en/operations/settings/settings.md @@ -1569,7 +1569,7 @@ Possible values: - 0 — Disabled (final query processing is done on the initiator node). - 1 - Do not merge aggregation states from different servers for distributed query processing (query completelly processed on the shard, initiator only proxy the data), can be used in case it is for certain that there are different keys on different shards. -- 2 - Same as `1` but applies `ORDER BY` and `LIMIT` (it is not possilbe when the query processed completelly on the remote node, like for `distributed_group_by_no_merge=1`) on the initiator (can be used for queries with `ORDER BY` and/or `LIMIT`). +- 2 - Same as `1` but applies `ORDER BY` and `LIMIT` (it is not possible when the query is processed completely on the remote node, like for `distributed_group_by_no_merge=1`) on the initiator (can be used for queries with `ORDER BY` and/or `LIMIT`). **Example** @@ -1621,7 +1621,7 @@ Possible values: Default value: 0 -## optimize_skip_unused_shards_rewrite_in {#optimize-skip-unused-shardslrewrite-in} +## optimize_skip_unused_shards_rewrite_in {#optimize-skip-unused-shards-rewrite-in} Rewrite IN in query for remote shards to exclude values that does not belong to the shard (requires optimize_skip_unused_shards). @@ -2084,7 +2084,7 @@ Default value: 16. ## background_fetches_pool_size {#background_fetches_pool_size} -Sets the number of threads performing background fetches for [replicated](../../engines/table-engines/mergetree-family/replication.md) tables. This setting is applied at the ClickHouse server start and can’t be changed in a user session. For production usage with frequent small insertions or slow ZooKeeper cluster is recomended to use default value. +Sets the number of threads performing background fetches for [replicated](../../engines/table-engines/mergetree-family/replication.md) tables. This setting is applied at the ClickHouse server start and can’t be changed in a user session. For production usage with frequent small insertions or a slow ZooKeeper cluster, it is recommended to use the default value.
Possible values: From 6a79fef8bd4d2a8b252ee02f72792b8826acaeb5 Mon Sep 17 00:00:00 2001 From: Yatsishin Ilya <2159081+qoega@users.noreply.github.com> Date: Tue, 22 Jun 2021 12:35:56 +0300 Subject: [PATCH 249/931] submodule --- contrib/h3 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/contrib/h3 b/contrib/h3 index 9cb6ff75836..c7f46cfd71f 160000 --- a/contrib/h3 +++ b/contrib/h3 @@ -1 +1 @@ -Subproject commit 9cb6ff758365b9cf4cb5d669b664d2d448a14373 +Subproject commit c7f46cfd71fb60e2fefc90e28abe81657deff735 From ea98d9ef8995f19fb087545c3541232c3c741bbb Mon Sep 17 00:00:00 2001 From: tavplubix Date: Tue, 22 Jun 2021 12:51:53 +0300 Subject: [PATCH 250/931] Update 01149_zookeeper_mutation_stuck_after_replace_partition.sql --- ...01149_zookeeper_mutation_stuck_after_replace_partition.sql | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/queries/0_stateless/01149_zookeeper_mutation_stuck_after_replace_partition.sql b/tests/queries/0_stateless/01149_zookeeper_mutation_stuck_after_replace_partition.sql index 951bc149533..790cee9a90b 100644 --- a/tests/queries/0_stateless/01149_zookeeper_mutation_stuck_after_replace_partition.sql +++ b/tests/queries/0_stateless/01149_zookeeper_mutation_stuck_after_replace_partition.sql @@ -4,7 +4,7 @@ drop table if exists rmt sync; create table mt (n UInt64, s String) engine = MergeTree partition by intDiv(n, 10) order by n; insert into mt values (3, '3'), (4, '4'); -create table rmt (n UInt64, s String) engine = ReplicatedMergeTree('/clickhouse/test_01149/rmt', 'r1') partition by intDiv(n, 10) order by n; +create table rmt (n UInt64, s String) engine = ReplicatedMergeTree('/clickhouse/test_01149_{database}/rmt', 'r1') partition by intDiv(n, 10) order by n; insert into rmt values (1,'1'), (2, '2'); select * from rmt; @@ -27,7 +27,7 @@ select * from rmt; drop table rmt sync; set replication_alter_partitions_sync=0; -create table rmt (n UInt64, s String) engine = ReplicatedMergeTree('/clickhouse/test_01149/rmt', 'r1') partition by intDiv(n, 10) order by n; +create table rmt (n UInt64, s String) engine = ReplicatedMergeTree('/clickhouse/test_01149_{database}/rmt', 'r1') partition by intDiv(n, 10) order by n; insert into rmt values (1,'1'), (2, '2'); alter table rmt update s = 's'||toString(n) where 1; From 23486ff86c89805d6f289f82d895a976fce47aa1 Mon Sep 17 00:00:00 2001 From: Yuriy Chernyshov Date: Tue, 22 Jun 2021 12:59:44 +0300 Subject: [PATCH 251/931] Make block length size_t --- contrib/murmurhash/include/MurmurHash2.h | 14 ++++++++------ contrib/murmurhash/include/MurmurHash3.h | 8 +++++--- contrib/murmurhash/src/MurmurHash2.cpp | 18 +++++++++--------- contrib/murmurhash/src/MurmurHash3.cpp | 6 +++--- 4 files changed, 25 insertions(+), 21 deletions(-) diff --git a/contrib/murmurhash/include/MurmurHash2.h b/contrib/murmurhash/include/MurmurHash2.h index 6d289edee29..835ae1e2c13 100644 --- a/contrib/murmurhash/include/MurmurHash2.h +++ b/contrib/murmurhash/include/MurmurHash2.h @@ -5,6 +5,8 @@ #ifndef _MURMURHASH2_H_ #define _MURMURHASH2_H_ +#include + //----------------------------------------------------------------------------- // Platform-specific functions and macros @@ -26,12 +28,12 @@ typedef unsigned __int64 uint64_t; //----------------------------------------------------------------------------- -uint32_t MurmurHash2 ( const void * key, int len, uint32_t seed ); -uint64_t MurmurHash64A ( const void * key, int len, uint64_t seed ); -uint64_t MurmurHash64B ( const void * key, int len, uint64_t seed ); -uint32_t 
MurmurHash2A ( const void * key, int len, uint32_t seed ); -uint32_t MurmurHashNeutral2 ( const void * key, int len, uint32_t seed ); -uint32_t MurmurHashAligned2 ( const void * key, int len, uint32_t seed ); +uint32_t MurmurHash2 ( const void * key, size_t len, uint32_t seed ); +uint64_t MurmurHash64A ( const void * key, size_t len, uint64_t seed ); +uint64_t MurmurHash64B ( const void * key, size_t len, uint64_t seed ); +uint32_t MurmurHash2A ( const void * key, size_t len, uint32_t seed ); +uint32_t MurmurHashNeutral2 ( const void * key, size_t len, uint32_t seed ); +uint32_t MurmurHashAligned2 ( const void * key, size_t len, uint32_t seed ); //----------------------------------------------------------------------------- diff --git a/contrib/murmurhash/include/MurmurHash3.h b/contrib/murmurhash/include/MurmurHash3.h index e1c6d34976c..0c61908887a 100644 --- a/contrib/murmurhash/include/MurmurHash3.h +++ b/contrib/murmurhash/include/MurmurHash3.h @@ -5,6 +5,8 @@ #ifndef _MURMURHASH3_H_ #define _MURMURHASH3_H_ +#include + //----------------------------------------------------------------------------- // Platform-specific functions and macros @@ -26,11 +28,11 @@ typedef unsigned __int64 uint64_t; //----------------------------------------------------------------------------- -void MurmurHash3_x86_32 ( const void * key, int len, uint32_t seed, void * out ); +void MurmurHash3_x86_32 ( const void * key, size_t len, uint32_t seed, void * out ); -void MurmurHash3_x86_128 ( const void * key, int len, uint32_t seed, void * out ); +void MurmurHash3_x86_128 ( const void * key, size_t len, uint32_t seed, void * out ); -void MurmurHash3_x64_128 ( const void * key, int len, uint32_t seed, void * out ); +void MurmurHash3_x64_128 ( const void * key, size_t len, uint32_t seed, void * out ); //----------------------------------------------------------------------------- diff --git a/contrib/murmurhash/src/MurmurHash2.cpp b/contrib/murmurhash/src/MurmurHash2.cpp index cd1e53a9b92..1c4469b0a02 100644 --- a/contrib/murmurhash/src/MurmurHash2.cpp +++ b/contrib/murmurhash/src/MurmurHash2.cpp @@ -34,7 +34,7 @@ //----------------------------------------------------------------------------- -uint32_t MurmurHash2 ( const void * key, int len, uint32_t seed ) +uint32_t MurmurHash2 ( const void * key, size_t len, uint32_t seed ) { // 'm' and 'r' are mixing constants generated offline. // They're not really 'magic', they just happen to work well. 
@@ -93,7 +93,7 @@ uint32_t MurmurHash2 ( const void * key, int len, uint32_t seed ) // 64-bit hash for 64-bit platforms -uint64_t MurmurHash64A ( const void * key, int len, uint64_t seed ) +uint64_t MurmurHash64A ( const void * key, size_t len, uint64_t seed ) { const uint64_t m = BIG_CONSTANT(0xc6a4a7935bd1e995); const int r = 47; @@ -139,7 +139,7 @@ uint64_t MurmurHash64A ( const void * key, int len, uint64_t seed ) // 64-bit hash for 32-bit platforms -uint64_t MurmurHash64B ( const void * key, int len, uint64_t seed ) +uint64_t MurmurHash64B ( const void * key, size_t len, uint64_t seed ) { const uint32_t m = 0x5bd1e995; const int r = 24; @@ -203,7 +203,7 @@ uint64_t MurmurHash64B ( const void * key, int len, uint64_t seed ) #define mmix(h,k) { k *= m; k ^= k >> r; k *= m; h *= m; h ^= k; } -uint32_t MurmurHash2A ( const void * key, int len, uint32_t seed ) +uint32_t MurmurHash2A ( const void * key, size_t len, uint32_t seed ) { const uint32_t m = 0x5bd1e995; const int r = 24; @@ -270,7 +270,7 @@ public: m_size = 0; } - void Add ( const unsigned char * data, int len ) + void Add ( const unsigned char * data, size_t len ) { m_size += len; @@ -306,7 +306,7 @@ private: static const uint32_t m = 0x5bd1e995; static const int r = 24; - void MixTail ( const unsigned char * & data, int & len ) + void MixTail ( const unsigned char * & data, size_t & len ) { while( len && ((len<4) || m_count) ) { @@ -336,7 +336,7 @@ private: // Same as MurmurHash2, but endian- and alignment-neutral. // Half the speed though, alas. -uint32_t MurmurHashNeutral2 ( const void * key, int len, uint32_t seed ) +uint32_t MurmurHashNeutral2 ( const void * key, size_t len, uint32_t seed ) { const uint32_t m = 0x5bd1e995; const int r = 24; @@ -391,7 +391,7 @@ uint32_t MurmurHashNeutral2 ( const void * key, int len, uint32_t seed ) #define MIX(h,k,m) { k *= m; k ^= k >> r; k *= m; h *= m; h ^= k; } -uint32_t MurmurHashAligned2 ( const void * key, int len, uint32_t seed ) +uint32_t MurmurHashAligned2 ( const void * key, size_t len, uint32_t seed ) { const uint32_t m = 0x5bd1e995; const int r = 24; @@ -400,7 +400,7 @@ uint32_t MurmurHashAligned2 ( const void * key, int len, uint32_t seed ) uint32_t h = seed ^ len; - int align = (uint64_t)data & 3; + size_t align = (uint64_t)data & 3; if(align && (len >= 4)) { diff --git a/contrib/murmurhash/src/MurmurHash3.cpp b/contrib/murmurhash/src/MurmurHash3.cpp index aa7982d3eef..05003fe75ae 100644 --- a/contrib/murmurhash/src/MurmurHash3.cpp +++ b/contrib/murmurhash/src/MurmurHash3.cpp @@ -91,7 +91,7 @@ FORCE_INLINE uint64_t fmix64 ( uint64_t k ) //----------------------------------------------------------------------------- -void MurmurHash3_x86_32 ( const void * key, int len, +void MurmurHash3_x86_32 ( const void * key, size_t len, uint32_t seed, void * out ) { const uint8_t * data = (const uint8_t*)key; @@ -147,7 +147,7 @@ void MurmurHash3_x86_32 ( const void * key, int len, //----------------------------------------------------------------------------- -void MurmurHash3_x86_128 ( const void * key, const int len, +void MurmurHash3_x86_128 ( const void * key, const size_t len, uint32_t seed, void * out ) { const uint8_t * data = (const uint8_t*)key; @@ -252,7 +252,7 @@ void MurmurHash3_x86_128 ( const void * key, const int len, //----------------------------------------------------------------------------- -void MurmurHash3_x64_128 ( const void * key, const int len, +void MurmurHash3_x64_128 ( const void * key, const size_t len, const uint32_t seed, void * out ) { const uint8_t * data 
= (const uint8_t*)key; From 1f1c61ce9478ed721950a39c6709603096367f94 Mon Sep 17 00:00:00 2001 From: Yuriy Chernyshov Date: Tue, 22 Jun 2021 13:00:05 +0300 Subject: [PATCH 252/931] Fix undefined behavior due to unaligned read --- contrib/murmurhash/src/MurmurHash3.cpp | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/contrib/murmurhash/src/MurmurHash3.cpp b/contrib/murmurhash/src/MurmurHash3.cpp index 05003fe75ae..cf5158e97ad 100644 --- a/contrib/murmurhash/src/MurmurHash3.cpp +++ b/contrib/murmurhash/src/MurmurHash3.cpp @@ -8,6 +8,7 @@ // non-native version will be less than optimal. #include "MurmurHash3.h" +#include //----------------------------------------------------------------------------- // Platform-specific functions and macros @@ -54,7 +55,9 @@ inline uint64_t rotl64 ( uint64_t x, int8_t r ) FORCE_INLINE uint32_t getblock32 ( const uint32_t * p, int i ) { - return p[i]; + uint32_t res; + memcpy(&res, p + i, sizeof(res)); + return res; } FORCE_INLINE uint64_t getblock64 ( const uint64_t * p, int i ) From 3c76c34d6070f326ca11b7f96d2ae6a23774cd57 Mon Sep 17 00:00:00 2001 From: Yuriy Chernyshov Date: Tue, 22 Jun 2021 13:00:46 +0300 Subject: [PATCH 253/931] Mark functions as extern "C" to allow CGO bindings --- contrib/murmurhash/include/MurmurHash2.h | 8 ++++++++ contrib/murmurhash/include/MurmurHash3.h | 8 ++++++++ 2 files changed, 16 insertions(+) diff --git a/contrib/murmurhash/include/MurmurHash2.h b/contrib/murmurhash/include/MurmurHash2.h index 835ae1e2c13..017475923bb 100644 --- a/contrib/murmurhash/include/MurmurHash2.h +++ b/contrib/murmurhash/include/MurmurHash2.h @@ -28,12 +28,20 @@ typedef unsigned __int64 uint64_t; //----------------------------------------------------------------------------- +#ifdef __cplusplus +extern "C" { +#endif + uint32_t MurmurHash2 ( const void * key, size_t len, uint32_t seed ); uint64_t MurmurHash64A ( const void * key, size_t len, uint64_t seed ); uint64_t MurmurHash64B ( const void * key, size_t len, uint64_t seed ); uint32_t MurmurHash2A ( const void * key, size_t len, uint32_t seed ); uint32_t MurmurHashNeutral2 ( const void * key, size_t len, uint32_t seed ); uint32_t MurmurHashAligned2 ( const void * key, size_t len, uint32_t seed ); + +#ifdef __cplusplus +} +#endif //----------------------------------------------------------------------------- diff --git a/contrib/murmurhash/include/MurmurHash3.h b/contrib/murmurhash/include/MurmurHash3.h index 0c61908887a..920a49da3c2 100644 --- a/contrib/murmurhash/include/MurmurHash3.h +++ b/contrib/murmurhash/include/MurmurHash3.h @@ -28,12 +28,20 @@ typedef unsigned __int64 uint64_t; //----------------------------------------------------------------------------- +#ifdef __cplusplus +extern "C" { +#endif + void MurmurHash3_x86_32 ( const void * key, size_t len, uint32_t seed, void * out ); void MurmurHash3_x86_128 ( const void * key, size_t len, uint32_t seed, void * out ); void MurmurHash3_x64_128 ( const void * key, size_t len, uint32_t seed, void * out ); +#ifdef __cplusplus +} +#endif + //----------------------------------------------------------------------------- #endif // _MURMURHASH3_H_ From 68ffbd2ad37e92940ec1dd772749bdea23f1ab38 Mon Sep 17 00:00:00 2001 From: Anton Popov Date: Tue, 22 Jun 2021 13:14:24 +0300 Subject: [PATCH 254/931] fix docs for function 'initializeAggregation' --- .../reference/initializeAggregation.md | 37 ------ .../functions/other-functions.md | 87 ++++++++++++-- .../reference/initializeAggregation.md | 40 ------- .../functions/other-functions.md 
| 111 +++++++++++++++--- 4 files changed, 171 insertions(+), 104 deletions(-) delete mode 100644 docs/en/sql-reference/aggregate-functions/reference/initializeAggregation.md delete mode 100644 docs/ru/sql-reference/aggregate-functions/reference/initializeAggregation.md diff --git a/docs/en/sql-reference/aggregate-functions/reference/initializeAggregation.md b/docs/en/sql-reference/aggregate-functions/reference/initializeAggregation.md deleted file mode 100644 index c8fb535089b..00000000000 --- a/docs/en/sql-reference/aggregate-functions/reference/initializeAggregation.md +++ /dev/null @@ -1,37 +0,0 @@ ---- -toc_priority: 150 ---- - -## initializeAggregation {#initializeaggregation} - -Initializes aggregation for your input rows. It is intended for the functions with the suffix `State`. -Use it for tests or to process columns of types `AggregateFunction` and `AggregationgMergeTree`. - -**Syntax** - -``` sql -initializeAggregation (aggregate_function, column_1, column_2) -``` - -**Arguments** - -- `aggregate_function` — Name of the aggregation function. The state of this function — the creating one. [String](../../../sql-reference/data-types/string.md#string). -- `column_n` — The column to translate it into the function as it's argument. [String](../../../sql-reference/data-types/string.md#string). - -**Returned value(s)** - -Returns the result of the aggregation for your input rows. The return type will be the same as the return type of function, that `initializeAgregation` takes as first argument. -For example for functions with the suffix `State` the return type will be `AggregateFunction`. - -**Example** - -Query: - -```sql -SELECT uniqMerge(state) FROM (SELECT initializeAggregation('uniqState', number % 3) AS state FROM system.numbers LIMIT 10000); -``` -Result: - -┌─uniqMerge(state)─┐ -│ 3 │ -└──────────────────┘ diff --git a/docs/en/sql-reference/functions/other-functions.md b/docs/en/sql-reference/functions/other-functions.md index 8163650efab..30e2e427158 100644 --- a/docs/en/sql-reference/functions/other-functions.md +++ b/docs/en/sql-reference/functions/other-functions.md @@ -831,7 +831,7 @@ Returns 0 for the first row and the difference from the previous row for each su !!! warning "Warning" It can reach the previous row only inside the currently processed data block. - + The result of the function depends on the affected data blocks and the order of data in the block. The rows order used during the calculation of `runningDifference` can differ from the order of rows returned to the user. @@ -908,7 +908,7 @@ Same as for [runningDifference](./other-functions.md#other_functions-runningdiff ## runningConcurrency {#runningconcurrency} Calculates the number of concurrent events. -Each event has a start time and an end time. The start time is included in the event, while the end time is excluded. Columns with a start time and an end time must be of the same data type. +Each event has a start time and an end time. The start time is included in the event, while the end time is excluded. Columns with a start time and an end time must be of the same data type. The function calculates the total number of active (concurrent) events for each event start time. @@ -1424,11 +1424,83 @@ Result: └───────────┴────────┘ ``` +## initializeAggregation {#initializeaggregation} + +Calculates result of aggregate function based on single value. 
This function is intended for initializing aggregate functions with the combinator [-State](../../sql-reference/aggregate-functions/combinators.md#agg-functions-combinator-state). You can create states of aggregate functions and insert them into columns of type [AggregateFunction](../../sql-reference/data-types/aggregatefunction.md#data-type-aggregatefunction) or use initialized aggregates as default values. + +**Syntax** + +``` sql +initializeAggregation (aggregate_function, arg1, arg2, ..., argN) +``` + +**Arguments** + +- `aggregate_function` — Name of the aggregation function to initialize. [String](../../sql-reference/data-types/string.md). +- `arg` — Arguments of the aggregate function. + +**Returned value(s)** + +- Result of aggregation for every row passed to the function. + +The return type is the same as the return type of the function that `initializeAggregation` takes as its first argument. + +**Example** + +Query: + +```sql +SELECT uniqMerge(state) FROM (SELECT initializeAggregation('uniqState', number % 3) AS state FROM numbers(10000)); +``` +Result: + +```text +┌─uniqMerge(state)─┐ +│ 3 │ +└──────────────────┘ +``` + +Query: + +```sql +SELECT finalizeAggregation(state), toTypeName(state) FROM (SELECT initializeAggregation('sumState', number % 3) AS state FROM numbers(5)); +``` +Result: + +```text +┌─finalizeAggregation(state)─┬─toTypeName(state)─────────────┐ +│ 0 │ AggregateFunction(sum, UInt8) │ +│ 1 │ AggregateFunction(sum, UInt8) │ +│ 2 │ AggregateFunction(sum, UInt8) │ +│ 0 │ AggregateFunction(sum, UInt8) │ +│ 1 │ AggregateFunction(sum, UInt8) │ +└────────────────────────────┴───────────────────────────────┘ +``` + +Example with `AggregatingMergeTree` table engine and `AggregateFunction` column: + +```sql +CREATE TABLE metrics +( + key UInt64, + value AggregateFunction(sum, UInt64) DEFAULT initializeAggregation('sumState', toUInt64(0)) +) +ENGINE = AggregatingMergeTree +ORDER BY key +``` + +```sql +INSERT INTO metrics VALUES (0, initializeAggregation('sumState', toUInt64(42))) +``` + +**See Also** +- [arrayReduce](../../sql-reference/functions/array-functions.md#arrayreduce) + ## finalizeAggregation {#function-finalizeaggregation} Takes state of aggregate function. Returns result of aggregation (or finalized state when using[-State](../../sql-reference/aggregate-functions/combinators.md#agg-functions-combinator-state) combinator). -**Syntax** +**Syntax** ``` sql finalizeAggregation(state) @@ -1442,7 +1514,7 @@ finalizeAggregation(state) - Value/values that was aggregated. -Type: Value of any types that was aggregated. +Type: Value of any types that was aggregated. **Examples** @@ -1474,7 +1546,7 @@ Result: └──────────────────────────────────┘ ``` -Note that `NULL` values are ignored. +Note that `NULL` values are ignored.
Query: @@ -1520,10 +1592,9 @@ Result: └────────┴─────────────┴────────────────┘ ``` -**See Also** - +**See Also** - [arrayReduce](../../sql-reference/functions/array-functions.md#arrayreduce) -- [initializeAggregation](../../sql-reference/aggregate-functions/reference/initializeAggregation.md) +- [initializeAggregation](#initializeaggregation) ## runningAccumulate {#runningaccumulate} diff --git a/docs/ru/sql-reference/aggregate-functions/reference/initializeAggregation.md b/docs/ru/sql-reference/aggregate-functions/reference/initializeAggregation.md deleted file mode 100644 index 3565115d8de..00000000000 --- a/docs/ru/sql-reference/aggregate-functions/reference/initializeAggregation.md +++ /dev/null @@ -1,40 +0,0 @@ ---- -toc_priority: 150 ---- - -## initializeAggregation {#initializeaggregation} - -Инициализирует агрегацию для введеных строчек. Предназначена для функций с суффиксом `State`. -Поможет вам проводить тесты или работать со столбцами типов: `AggregateFunction` и `AggregationgMergeTree`. - -**Синтаксис** - -``` sql -initializeAggregation (aggregate_function, column_1, column_2) -``` - -**Аргументы** - -- `aggregate_function` — название функции агрегации, состояние которой нужно создать. [String](../../../sql-reference/data-types/string.md#string). -- `column_n` — столбец, который передается в функцию агрегации как аргумент. [String](../../../sql-reference/data-types/string.md#string). - -**Возвращаемое значение** - -Возвращает результат агрегации введенной информации. Тип возвращаемого значения такой же, как и для функции, которая становится первым аргументом для `initializeAgregation`. - -Пример: - -Возвращаемый тип функций с суффиксом `State` — `AggregateFunction`. - -**Пример** - -Запрос: - -```sql -SELECT uniqMerge(state) FROM (SELECT initializeAggregation('uniqState', number % 3) AS state FROM system.numbers LIMIT 10000); -``` -Результат: - -┌─uniqMerge(state)─┐ -│ 3 │ -└──────────────────┘ diff --git a/docs/ru/sql-reference/functions/other-functions.md b/docs/ru/sql-reference/functions/other-functions.md index 84bbc6af968..7945abb7a15 100644 --- a/docs/ru/sql-reference/functions/other-functions.md +++ b/docs/ru/sql-reference/functions/other-functions.md @@ -13,7 +13,7 @@ toc_title: "Прочие функции" Возвращает именованное значение из секции [macros](../../operations/server-configuration-parameters/settings.md#macros) конфигурации сервера. -**Синтаксис** +**Синтаксис** ```sql getMacro(name) @@ -854,8 +854,8 @@ WHERE diff != 1 ## runningConcurrency {#runningconcurrency} Подсчитывает количество одновременно идущих событий. -У каждого события есть время начала и время окончания. Считается, что время начала включено в событие, а время окончания исключено из него. Столбцы со временем начала и окончания событий должны иметь одинаковый тип данных. -Функция подсчитывает количество событий, происходящих одновременно на момент начала каждого из событий в выборке. +У каждого события есть время начала и время окончания. Считается, что время начала включено в событие, а время окончания исключено из него. Столбцы со временем начала и окончания событий должны иметь одинаковый тип данных. +Функция подсчитывает количество событий, происходящих одновременно на момент начала каждого из событий в выборке. !!! warning "Предупреждение" События должны быть отсортированы по возрастанию времени начала. Если это требование нарушено, то функция вызывает исключение. 
@@ -1371,11 +1371,84 @@ SELECT formatReadableSize(filesystemCapacity()) AS "Capacity", toTypeName(filesy └───────────┴────────┘ ``` +## initializeAggregation {#initializeaggregation} + +Вычисляет результат агрегатной функции для каждой строки. Предназначена для инициализации агрегатных функций с комбинатором [-State](../../sql-reference/aggregate-functions/combinators.md#state). Может быть полезна для создания состояний агрегатных функций для последующей их вставки в столбцы типа [AggregateFunction](../../sql-reference/data-types/aggregatefunction.md#data-type-aggregatefunction) или использования в качестве значений по умолчанию. + +**Синтаксис** + +``` sql +initializeAggregation (aggregate_function, arg1, arg2, ..., argN) +``` + +**Аргументы** + +- `aggregate_function` — название агрегатной функции, состояние которой нужно создать. [String](../../sql-reference/data-types/string.md#string). +- `arg` — аргументы, которые передаются в агрегатную функцию. + +**Возвращаемое значение** + +- В каждой строке результат агрегатной функции, примененной к аргументам из этой строки. + +Тип возвращаемого значения такой же, как и у функции, переданной первым аргументом. + + +**Пример** + +Запрос: + +```sql +SELECT uniqMerge(state) FROM (SELECT initializeAggregation('uniqState', number % 3) AS state FROM numbers(10000)); +``` +Результат: + +```text +┌─uniqMerge(state)─┐ +│ 3 │ +└──────────────────┘ +``` + +Запрос: + +```sql +SELECT finalizeAggregation(state), toTypeName(state) FROM (SELECT initializeAggregation('sumState', number % 3) AS state FROM numbers(5)); +``` +Результат: + +```text +┌─finalizeAggregation(state)─┬─toTypeName(state)─────────────┐ +│ 0 │ AggregateFunction(sum, UInt8) │ +│ 1 │ AggregateFunction(sum, UInt8) │ +│ 2 │ AggregateFunction(sum, UInt8) │ +│ 0 │ AggregateFunction(sum, UInt8) │ +│ 1 │ AggregateFunction(sum, UInt8) │ +└────────────────────────────┴───────────────────────────────┘ +``` + +Пример с движком таблиц `AggregatingMergeTree` и столбцом типа `AggregateFunction`: + +```sql +CREATE TABLE metrics +( + key UInt64, + value AggregateFunction(sum, UInt64) DEFAULT initializeAggregation('sumState', toUInt64(0)) +) +ENGINE = AggregatingMergeTree +ORDER BY key +``` + +```sql +INSERT INTO metrics VALUES (0, initializeAggregation('sumState', toUInt64(42))) +``` + +**Смотрите также** +- [arrayReduce](../../sql-reference/functions/array-functions.md#arrayreduce) + ## finalizeAggregation {#function-finalizeaggregation} Принимает состояние агрегатной функции. Возвращает результат агрегирования (или конечное состояние при использовании комбинатора [-State](../../sql-reference/aggregate-functions/combinators.md#state)). -**Синтаксис** +**Синтаксис** ``` sql finalizeAggregation(state) @@ -1421,7 +1494,7 @@ SELECT finalizeAggregation(( SELECT sumState(number) FROM numbers(10))); └──────────────────────────────────┘ ``` -Обратите внимание, что значения `NULL` игнорируются. +Обратите внимание, что значения `NULL` игнорируются.
Запрос: @@ -1470,7 +1543,7 @@ FROM numbers(10); **Смотрите также** - [arrayReduce](../../sql-reference/functions/array-functions.md#arrayreduce) -- [initializeAggregation](../../sql-reference/aggregate-functions/reference/initializeAggregation.md) +- [initializeAggregation](#initializeaggregation) ## runningAccumulate {#runningaccumulate} @@ -1537,13 +1610,13 @@ SELECT k, runningAccumulate(sum_k) AS res FROM (SELECT number as k, sumState(k) Запрос: ```sql -SELECT +SELECT grouping, item, runningAccumulate(state, grouping) AS res -FROM +FROM ( - SELECT + SELECT toInt8(number / 4) AS grouping, number AS item, sumState(number) AS state @@ -1732,7 +1805,7 @@ SELECT number, randomPrintableASCII(30) as str, length(str) FROM system.numbers randomString(length) ``` -**Аргументы** +**Аргументы** - `length` — длина строки. Положительное целое число. @@ -1831,13 +1904,13 @@ randomStringUTF8(length) Запрос: -```sql +```sql SELECT randomStringUTF8(13) ``` Результат: -```text +```text ┌─randomStringUTF8(13)─┐ │ 𘤗𙉝д兠庇󡅴󱱎󦐪􂕌𔊹𓰛 │ └──────────────────────┘ @@ -1848,13 +1921,13 @@ SELECT randomStringUTF8(13) Возвращает текущее значение [пользовательской настройки](../../operations/settings/index.md#custom_settings). -**Синтаксис** +**Синтаксис** ```sql getSetting('custom_setting') ``` -**Параметр** +**Параметр** - `custom_setting` — название настройки. [String](../../sql-reference/data-types/string.md). @@ -1866,7 +1939,7 @@ getSetting('custom_setting') ```sql SET custom_a = 123; -SELECT getSetting('custom_a'); +SELECT getSetting('custom_a'); ``` **Результат** @@ -1875,7 +1948,7 @@ SELECT getSetting('custom_a'); 123 ``` -**См. также** +**См. также** - [Пользовательские настройки](../../operations/settings/index.md#custom_settings) @@ -1889,10 +1962,10 @@ SELECT getSetting('custom_a'); isDecimalOverflow(d, [p]) ``` -**Аргументы** +**Аргументы** - `d` — число. [Decimal](../../sql-reference/data-types/decimal.md). -- `p` — точность. Необязательный параметр. Если опущен, используется исходная точность первого аргумента. Использование этого параметра может быть полезно для извлечения данных в другую СУБД или файл. [UInt8](../../sql-reference/data-types/int-uint.md#uint-ranges). +- `p` — точность. Необязательный параметр. Если опущен, используется исходная точность первого аргумента. Использование этого параметра может быть полезно для извлечения данных в другую СУБД или файл. [UInt8](../../sql-reference/data-types/int-uint.md#uint-ranges). **Возвращаемое значение** @@ -1926,7 +1999,7 @@ SELECT isDecimalOverflow(toDecimal32(1000000000, 0), 9), countDigits(x) ``` -**Аргументы** +**Аргументы** - `x` — [целое](../../sql-reference/data-types/int-uint.md#uint8-uint16-uint32-uint64-int8-int16-int32-int64) или [дробное](../../sql-reference/data-types/decimal.md) число. 
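The documentation patch above ends with the `INSERT`; for completeness, here is a small self-contained sketch of how such states are read back. The `metrics_demo` table name, the second insert, and the final query are illustrative additions (not part of the patch), but they only use behavior the patch documents: `initializeAggregation` as a column default and the `-Merge` combinator for reading `AggregateFunction` columns.

```sql
-- Same shape as the doc example: the state column defaults to an empty (zero) sum state.
CREATE TABLE metrics_demo
(
    key UInt64,
    value AggregateFunction(sum, UInt64) DEFAULT initializeAggregation('sumState', toUInt64(0))
)
ENGINE = AggregatingMergeTree
ORDER BY key;

-- key 0 gets an explicit state; key 1 falls back to the default created by initializeAggregation.
INSERT INTO metrics_demo (key, value) VALUES (0, initializeAggregation('sumState', toUInt64(42)));
INSERT INTO metrics_demo (key) VALUES (1);

-- sumMerge finalizes the partial states: expected output is 42 for key 0 and 0 for key 1.
SELECT key, sumMerge(value) AS total FROM metrics_demo GROUP BY key ORDER BY key;
```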
From bf0a4864ac74b498eec0d522f778ad8464c4116c Mon Sep 17 00:00:00 2001 From: alesapin Date: Tue, 22 Jun 2021 13:49:35 +0300 Subject: [PATCH 255/931] Add support for set/get ACL commands --- src/Common/ZooKeeper/ZooKeeperCommon.cpp | 51 +++++++++ src/Common/ZooKeeper/ZooKeeperCommon.h | 42 +++++++ src/Common/ZooKeeper/ZooKeeperConstants.cpp | 6 + src/Common/ZooKeeper/ZooKeeperConstants.h | 2 + src/Coordination/KeeperStorage.cpp | 107 ++++++++++++++++++ src/Coordination/ZooKeeperDataReader.cpp | 17 +++ tests/integration/test_keeper_auth/test.py | 46 +++++++- .../test_keeper_zookeeper_converter/test.py | 15 +++ 8 files changed, 285 insertions(+), 1 deletion(-) diff --git a/src/Common/ZooKeeper/ZooKeeperCommon.cpp b/src/Common/ZooKeeper/ZooKeeperCommon.cpp index 50bdc6c77ba..1560d7a25da 100644 --- a/src/Common/ZooKeeper/ZooKeeperCommon.cpp +++ b/src/Common/ZooKeeper/ZooKeeperCommon.cpp @@ -239,6 +239,53 @@ void ZooKeeperListResponse::writeImpl(WriteBuffer & out) const Coordination::write(stat, out); } + +void ZooKeeperSetACLRequest::writeImpl(WriteBuffer & out) const +{ + Coordination::write(path, out); + Coordination::write(acls, out); + Coordination::write(version, out); +} + +void ZooKeeperSetACLRequest::readImpl(ReadBuffer & in) +{ + Coordination::read(path, in); + Coordination::read(acls, in); + Coordination::read(version, in); +} + +void ZooKeeperSetACLResponse::writeImpl(WriteBuffer & out) const +{ + Coordination::write(stat, out); +} + +void ZooKeeperSetACLResponse::readImpl(ReadBuffer & in) +{ + Coordination::read(stat, in); +} + +void ZooKeeperGetACLRequest::readImpl(ReadBuffer & in) +{ + Coordination::read(path, in); +} + +void ZooKeeperGetACLRequest::writeImpl(WriteBuffer & out) const +{ + Coordination::write(path, out); +} + +void ZooKeeperGetACLResponse::writeImpl(WriteBuffer & out) const +{ + Coordination::write(acl, out); + Coordination::write(stat, out); +} + +void ZooKeeperGetACLResponse::readImpl(ReadBuffer & in) +{ + Coordination::read(acl, in); + Coordination::read(stat, in); +} + void ZooKeeperCheckRequest::writeImpl(WriteBuffer & out) const { Coordination::write(path, out); @@ -454,6 +501,8 @@ ZooKeeperResponsePtr ZooKeeperListRequest::makeResponse() const { return std::ma ZooKeeperResponsePtr ZooKeeperCheckRequest::makeResponse() const { return std::make_shared(); } ZooKeeperResponsePtr ZooKeeperMultiRequest::makeResponse() const { return std::make_shared(requests); } ZooKeeperResponsePtr ZooKeeperCloseRequest::makeResponse() const { return std::make_shared(); } +ZooKeeperResponsePtr ZooKeeperSetACLRequest::makeResponse() const { return std::make_shared(); } +ZooKeeperResponsePtr ZooKeeperGetACLRequest::makeResponse() const { return std::make_shared(); } void ZooKeeperSessionIDRequest::writeImpl(WriteBuffer & out) const { @@ -545,6 +594,8 @@ ZooKeeperRequestFactory::ZooKeeperRequestFactory() registerZooKeeperRequest(*this); registerZooKeeperRequest(*this); registerZooKeeperRequest(*this); + registerZooKeeperRequest(*this); + registerZooKeeperRequest(*this); } } diff --git a/src/Common/ZooKeeper/ZooKeeperCommon.h b/src/Common/ZooKeeper/ZooKeeperCommon.h index c50c271c1ec..a816c1eb8bb 100644 --- a/src/Common/ZooKeeper/ZooKeeperCommon.h +++ b/src/Common/ZooKeeper/ZooKeeperCommon.h @@ -353,6 +353,48 @@ struct ZooKeeperErrorResponse final : ErrorResponse, ZooKeeperResponse size_t bytesSize() const override { return ErrorResponse::bytesSize() + sizeof(xid) + sizeof(zxid); } }; +struct ZooKeeperSetACLRequest final : SetACLRequest, ZooKeeperRequest +{ + OpNum getOpNum() const 
override { return OpNum::SetACL; } + void writeImpl(WriteBuffer & out) const override; + void readImpl(ReadBuffer & in) override; + ZooKeeperResponsePtr makeResponse() const override; + bool isReadRequest() const override { return false; } + + size_t bytesSize() const override { return SetACLRequest::bytesSize() + sizeof(xid); } + + bool need_to_hash_acls = true; +}; + +struct ZooKeeperSetACLResponse final : SetACLResponse, ZooKeeperResponse +{ + void readImpl(ReadBuffer & in) override; + void writeImpl(WriteBuffer & out) const override; + OpNum getOpNum() const override { return OpNum::SetACL; } + + size_t bytesSize() const override { return SetACLResponse::bytesSize() + sizeof(xid) + sizeof(zxid); } +}; + +struct ZooKeeperGetACLRequest final : GetACLRequest, ZooKeeperRequest +{ + OpNum getOpNum() const override { return OpNum::GetACL; } + void writeImpl(WriteBuffer & out) const override; + void readImpl(ReadBuffer & in) override; + ZooKeeperResponsePtr makeResponse() const override; + bool isReadRequest() const override { return true; } + + size_t bytesSize() const override { return GetACLRequest::bytesSize() + sizeof(xid); } +}; + +struct ZooKeeperGetACLResponse final : GetACLResponse, ZooKeeperResponse +{ + void readImpl(ReadBuffer & in) override; + void writeImpl(WriteBuffer & out) const override; + OpNum getOpNum() const override { return OpNum::GetACL; } + + size_t bytesSize() const override { return GetACLResponse::bytesSize() + sizeof(xid) + sizeof(zxid); } +}; + struct ZooKeeperMultiRequest final : MultiRequest, ZooKeeperRequest { OpNum getOpNum() const override { return OpNum::Multi; } diff --git a/src/Common/ZooKeeper/ZooKeeperConstants.cpp b/src/Common/ZooKeeper/ZooKeeperConstants.cpp index d2dde4c4cdd..3f480fb6b2b 100644 --- a/src/Common/ZooKeeper/ZooKeeperConstants.cpp +++ b/src/Common/ZooKeeper/ZooKeeperConstants.cpp @@ -22,6 +22,8 @@ static const std::unordered_set VALID_OPERATIONS = static_cast(OpNum::Multi), static_cast(OpNum::Auth), static_cast(OpNum::SessionID), + static_cast(OpNum::SetACL), + static_cast(OpNum::GetACL), }; std::string toString(OpNum op_num) @@ -58,6 +60,10 @@ std::string toString(OpNum op_num) return "Auth"; case OpNum::SessionID: return "SessionID"; + case OpNum::SetACL: + return "SetACL"; + case OpNum::GetACL: + return "GetACL"; } int32_t raw_op = static_cast(op_num); throw Exception("Operation " + std::to_string(raw_op) + " is unknown", Error::ZUNIMPLEMENTED); diff --git a/src/Common/ZooKeeper/ZooKeeperConstants.h b/src/Common/ZooKeeper/ZooKeeperConstants.h index f91204693a0..ed7afd83628 100644 --- a/src/Common/ZooKeeper/ZooKeeperConstants.h +++ b/src/Common/ZooKeeper/ZooKeeperConstants.h @@ -23,6 +23,8 @@ enum class OpNum : int32_t Exists = 3, Get = 4, Set = 5, + GetACL = 6, + SetACL = 7, SimpleList = 8, Sync = 9, Heartbeat = 11, diff --git a/src/Coordination/KeeperStorage.cpp b/src/Coordination/KeeperStorage.cpp index dd0a7dffabb..97c78e04f05 100644 --- a/src/Coordination/KeeperStorage.cpp +++ b/src/Coordination/KeeperStorage.cpp @@ -678,6 +678,111 @@ struct KeeperStorageCheckRequest final : public KeeperStorageRequest } }; + +struct KeeperStorageSetACLRequest final : public KeeperStorageRequest +{ + bool checkAuth(KeeperStorage & storage, int64_t session_id) const override + { + auto & container = storage.container; + auto it = container.find(zk_request->getPath()); + if (it == container.end()) + return true; + + const auto & node_acls = storage.acl_map.convertNumber(it->value.acl_id); + if (node_acls.empty()) + return true; + + const auto & 
session_auths = storage.session_and_auth[session_id]; + return checkACL(Coordination::ACL::Admin, node_acls, session_auths); + } + + using KeeperStorageRequest::KeeperStorageRequest; + + std::pair process(KeeperStorage & storage, int64_t /*zxid*/, int64_t session_id) const override + { + auto & container = storage.container; + + Coordination::ZooKeeperResponsePtr response_ptr = zk_request->makeResponse(); + Coordination::ZooKeeperSetACLResponse & response = dynamic_cast<Coordination::ZooKeeperSetACLResponse &>(*response_ptr); + Coordination::ZooKeeperSetACLRequest & request = dynamic_cast<Coordination::ZooKeeperSetACLRequest &>(*zk_request); + auto it = container.find(request.path); + if (it == container.end()) + { + response.error = Coordination::Error::ZNONODE; + } + else if (request.version != -1 && request.version != it->value.stat.aversion) + { + response.error = Coordination::Error::ZBADVERSION; + } + else + { + auto & session_auth_ids = storage.session_and_auth[session_id]; + Coordination::ACLs node_acls; + + if (!fixupACL(request.acls, session_auth_ids, node_acls, request.need_to_hash_acls)) + { + response.error = Coordination::Error::ZINVALIDACL; + return {response_ptr, {}}; + } + + uint64_t acl_id = storage.acl_map.convertACLs(node_acls); + storage.acl_map.addUsage(acl_id); + + storage.container.updateValue(request.path, [acl_id] (KeeperStorage::Node & node) + { + node.acl_id = acl_id; + ++node.stat.aversion; + }); + + response.stat = it->value.stat; + response.error = Coordination::Error::ZOK; + } + + /// It cannot be used inside a multitransaction? + return { response_ptr, {} }; + } +}; + +struct KeeperStorageGetACLRequest final : public KeeperStorageRequest +{ + bool checkAuth(KeeperStorage & storage, int64_t session_id) const override + { + auto & container = storage.container; + auto it = container.find(zk_request->getPath()); + if (it == container.end()) + return true; + + const auto & node_acls = storage.acl_map.convertNumber(it->value.acl_id); + if (node_acls.empty()) + return true; + + const auto & session_auths = storage.session_and_auth[session_id]; + /// Curiously, GetACL requires more permissions than SetACL... + return checkACL(Coordination::ACL::Admin | Coordination::ACL::Read, node_acls, session_auths); + } + using KeeperStorageRequest::KeeperStorageRequest; + + std::pair process(KeeperStorage & storage, int64_t /*zxid*/, int64_t /*session_id*/) const override + { + Coordination::ZooKeeperResponsePtr response_ptr = zk_request->makeResponse(); + Coordination::ZooKeeperGetACLResponse & response = dynamic_cast<Coordination::ZooKeeperGetACLResponse &>(*response_ptr); + Coordination::ZooKeeperGetACLRequest & request = dynamic_cast<Coordination::ZooKeeperGetACLRequest &>(*zk_request); + auto & container = storage.container; + auto it = container.find(request.path); + if (it == container.end()) + { + response.error = Coordination::Error::ZNONODE; + } + else + { + response.stat = it->value.stat; + response.acl = storage.acl_map.convertNumber(it->value.acl_id); + } + + return {response_ptr, {}}; + } +}; + struct KeeperStorageMultiRequest final : public KeeperStorageRequest { bool checkAuth(KeeperStorage & storage, int64_t session_id) const override @@ -904,6 +1009,8 @@ KeeperWrapperFactory::KeeperWrapperFactory() registerKeeperRequestWrapper(*this); registerKeeperRequestWrapper(*this); registerKeeperRequestWrapper(*this); + registerKeeperRequestWrapper<Coordination::OpNum::SetACL, KeeperStorageSetACLRequest>(*this); + registerKeeperRequestWrapper<Coordination::OpNum::GetACL, KeeperStorageGetACLRequest>(*this); } diff --git a/src/Coordination/ZooKeeperDataReader.cpp b/src/Coordination/ZooKeeperDataReader.cpp index cf28627961f..51965b499a2 100644 --- a/src/Coordination/ZooKeeperDataReader.cpp +++ b/src/Coordination/ZooKeeperDataReader.cpp @@ -347,6 +347,20 @@ Coordination::ZooKeeperRequestPtr deserializeErrorTxn(ReadBuffer & in) return nullptr; } +Coordination::ZooKeeperRequestPtr deserializeSetACLTxn(ReadBuffer & in) +{ + std::shared_ptr<Coordination::ZooKeeperSetACLRequest> result = std::make_shared<Coordination::ZooKeeperSetACLRequest>(); + + Coordination::read(result->path, in); + Coordination::read(result->acls, in); + Coordination::read(result->version, in); + /// The log stores version + 1 (the resulting version, not the one from the request) + result->version -= 1; + result->need_to_hash_acls = false; + + return result; +} + Coordination::ZooKeeperRequestPtr deserializeMultiTxn(ReadBuffer & in); Coordination::ZooKeeperRequestPtr deserializeTxnImpl(ReadBuffer & in, bool subtxn) @@ -371,6 +385,9 @@ Coordination::ZooKeeperRequestPtr deserializeTxnImpl(ReadBuffer & in, bool subtx case 5: result = deserializeSetTxn(in); break; + case 7: + result = deserializeSetACLTxn(in); + break; case 13: result = deserializeCheckVersionTxn(in); break; diff --git a/tests/integration/test_keeper_auth/test.py b/tests/integration/test_keeper_auth/test.py index 5f60d5b8bdb..721ccd6fddb 100644 --- a/tests/integration/test_keeper_auth/test.py +++ b/tests/integration/test_keeper_auth/test.py @@ -1,4 +1,4 @@ -#!/usr/bin/env python3 + import pytest from helpers.cluster import ClickHouseCluster from kazoo.client import KazooClient, KazooState @@ -300,3 +300,47 @@ def test_auth_snapshot(started_cluster): with pytest.raises(NoAuthError): connection2.get("/test_snapshot_acl1") + + +@pytest.mark.parametrize( + ('get_zk'), + [ + get_genuine_zk, + get_fake_zk + ] +) +def test_get_set_acl(started_cluster, get_zk): + auth_connection = get_zk() + auth_connection.add_auth('digest', 'username1:secret1') + auth_connection.add_auth('digest', 'username2:secret2') + + auth_connection.create("/test_set_get_acl", b"data", acl=[make_acl("auth", "", all=True)]) + + acls, stat = auth_connection.get_acls("/test_set_get_acl") + + assert stat.aversion == 0 + assert len(acls) == 2 + for acl in acls: + assert acl.acl_list == ['ALL'] + assert acl.id.scheme == 'digest' + assert acl.perms == 31 + assert acl.id.id in
('username1:eGncMdBgOfGS/TCojt51xWsWv/Y=', 'username2:qgSSumukVlhftkVycylbHNvxhFU=') + + + other_auth_connection = get_zk() + other_auth_connection.add_auth('digest', 'username1:secret1') + other_auth_connection.add_auth('digest', 'username3:secret3') + other_auth_connection.set_acls("/test_set_get_acl", acls=[make_acl("auth", "", read=True, write=False, create=True, delete=True, admin=True)]) + + acls, stat = other_auth_connection.get_acls("/test_set_get_acl") + + assert stat.aversion == 1 + assert len(acls) == 2 + for acl in acls: + assert acl.acl_list == ['READ', 'CREATE', 'DELETE', 'ADMIN'] + assert acl.id.scheme == 'digest' + assert acl.perms == 29 + assert acl.id.id in ('username1:eGncMdBgOfGS/TCojt51xWsWv/Y=', 'username3:CvWITOxxTwk+u6S5PoGlQ4hNoWI=') + + with pytest.raises(KazooException): + other_auth_connection.set_acls("/test_set_get_acl", acls=[make_acl("auth", "", all=True)], version=0) diff --git a/tests/integration/test_keeper_zookeeper_converter/test.py b/tests/integration/test_keeper_zookeeper_converter/test.py index fa2178974e9..816faebe63d 100644 --- a/tests/integration/test_keeper_zookeeper_converter/test.py +++ b/tests/integration/test_keeper_zookeeper_converter/test.py @@ -223,6 +223,11 @@ def test_acls(started_cluster): yet_other_auth_connection.set("/test_multi_all_acl", b"Y") + genuine_connection.add_auth('digest', 'user3:password3') + + # just to check that we are able to deserialize it + genuine_connection.set_acls("/test_multi_all_acl", acls=[make_acl("auth", "", read=True, write=False, create=True, delete=True, admin=True)]) + no_auth_connection = get_genuine_zk() with pytest.raises(Exception): @@ -241,3 +246,13 @@ def test_acls(started_cluster): fake_connection.add_auth('digest', 'user3:password3') compare_states(genuine_connection, fake_connection) + + for connection in [genuine_connection, fake_connection]: + acls, stat = connection.get_acls("/test_multi_all_acl") + assert stat.aversion == 1 + assert len(acls) == 3 + for acl in acls: + assert acl.acl_list == ['READ', 'CREATE', 'DELETE', 'ADMIN'] + assert acl.id.scheme == 'digest' + assert acl.perms == 29 + assert acl.id.id in ('user1:XDkd2dsEuhc9ImU3q8pa8UOdtpI=', 'user2:lo/iTtNMP+gEZlpUNaCqLYO3i5U=', 'user3:wr5Y0kEs9nFX3bKrTMKxrlcFeWo=') From babe87d5a536a77a670b3f39d448f5850746dd9c Mon Sep 17 00:00:00 2001 From: Yuriy Chernyshov Date: Tue, 22 Jun 2021 14:13:19 +0300 Subject: [PATCH 256/931] Make -Wreserved-id-macro happy --- contrib/murmurhash/include/MurmurHash2.h | 6 +++--- contrib/murmurhash/include/MurmurHash3.h | 4 ++-- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/contrib/murmurhash/include/MurmurHash2.h b/contrib/murmurhash/include/MurmurHash2.h index 017475923bb..22e47f5c8e9 100644 --- a/contrib/murmurhash/include/MurmurHash2.h +++ b/contrib/murmurhash/include/MurmurHash2.h @@ -2,8 +2,8 @@ // MurmurHash2 was written by Austin Appleby, and is placed in the public // domain. The author hereby disclaims copyright to this source code. 
-#ifndef _MURMURHASH2_H_ -#define _MURMURHASH2_H_ +#ifndef MURMURHASH2_H +#define MURMURHASH2_H #include @@ -38,7 +38,7 @@ uint64_t MurmurHash64B ( const void * key, size_t len, uint64_t seed ); uint32_t MurmurHash2A ( const void * key, size_t len, uint32_t seed ); uint32_t MurmurHashNeutral2 ( const void * key, size_t len, uint32_t seed ); uint32_t MurmurHashAligned2 ( const void * key, size_t len, uint32_t seed ); - + #ifdef __cplusplus } #endif diff --git a/contrib/murmurhash/include/MurmurHash3.h b/contrib/murmurhash/include/MurmurHash3.h index 920a49da3c2..e9db8f1e878 100644 --- a/contrib/murmurhash/include/MurmurHash3.h +++ b/contrib/murmurhash/include/MurmurHash3.h @@ -2,8 +2,8 @@ // MurmurHash3 was written by Austin Appleby, and is placed in the public // domain. The author hereby disclaims copyright to this source code. -#ifndef _MURMURHASH3_H_ -#define _MURMURHASH3_H_ +#ifndef MURMURHASH3_H +#define MURMURHASH3_H #include From 63db58710d8ddfac43bbe20b41e4909471b0ec79 Mon Sep 17 00:00:00 2001 From: Zijie Lu Date: Tue, 22 Jun 2021 19:25:14 +0800 Subject: [PATCH 257/931] Support for DISTINCT ON (columns) Signed-off-by: Zijie Lu --- src/Common/ErrorCodes.cpp | 1 + src/Parsers/ParserSelectQuery.cpp | 15 +++++++++++++++ .../0_stateless/01917_distinct_on.reference | 3 +++ tests/queries/0_stateless/01917_distinct_on.sql | 9 +++++++++ 4 files changed, 28 insertions(+) create mode 100644 tests/queries/0_stateless/01917_distinct_on.reference create mode 100644 tests/queries/0_stateless/01917_distinct_on.sql diff --git a/src/Common/ErrorCodes.cpp b/src/Common/ErrorCodes.cpp index d840830bf28..5afba23657d 100644 --- a/src/Common/ErrorCodes.cpp +++ b/src/Common/ErrorCodes.cpp @@ -554,6 +554,7 @@ M(584, PROJECTION_NOT_USED) \ M(585, CANNOT_PARSE_YAML) \ M(586, CANNOT_CREATE_FILE) \ + M(587, DISTINCT_ON_AND_LIMIT_BY_TOGETHER) \ \ M(998, POSTGRESQL_CONNECTION_FAILURE) \ M(999, KEEPER_EXCEPTION) \ diff --git a/src/Parsers/ParserSelectQuery.cpp b/src/Parsers/ParserSelectQuery.cpp index 548ec8879bd..12e83486af8 100644 --- a/src/Parsers/ParserSelectQuery.cpp +++ b/src/Parsers/ParserSelectQuery.cpp @@ -1,4 +1,5 @@ #include +#include #include #include #include @@ -21,6 +22,7 @@ namespace ErrorCodes extern const int LIMIT_BY_WITH_TIES_IS_NOT_SUPPORTED; extern const int ROW_AND_ROWS_TOGETHER; extern const int FIRST_AND_NEXT_TOGETHER; + extern const int DISTINCT_ON_AND_LIMIT_BY_TOGETHER; } @@ -32,6 +34,7 @@ bool ParserSelectQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expected) ParserKeyword s_select("SELECT"); ParserKeyword s_all("ALL"); ParserKeyword s_distinct("DISTINCT"); + ParserKeyword s_distinct_on("DISTINCT ON"); ParserKeyword s_from("FROM"); ParserKeyword s_prewhere("PREWHERE"); ParserKeyword s_where("WHERE"); @@ -94,6 +97,8 @@ bool ParserSelectQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expected) } } + bool has_distinct_on = false; + /// SELECT [ALL/DISTINCT] [TOP N [WITH TIES]] expr list { bool has_all = false; @@ -103,6 +108,13 @@ bool ParserSelectQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expected) if (s_all.ignore(pos, expected)) has_all = true; + if (s_distinct_on.ignore(pos, expected)) { + has_distinct_on = true; + if (!exp_list.parse(pos, limit_by_expression_list, expected)) + return false; + limit_by_length = std::make_shared(Field{UInt8(1)}); + } + if (s_distinct.ignore(pos, expected)) select_query->distinct = true; @@ -264,6 +276,9 @@ bool ParserSelectQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expected) if (limit_with_ties_occured) throw 
Exception("Can not use WITH TIES alongside LIMIT BY", ErrorCodes::LIMIT_BY_WITH_TIES_IS_NOT_SUPPORTED); + if (has_distinct_on) + throw Exception("Can not use distinct on alongside LIMIT BY", ErrorCodes::DISTINCT_ON_AND_LIMIT_BY_TOGETHER); + limit_by_length = limit_length; limit_by_offset = limit_offset; limit_length = nullptr; diff --git a/tests/queries/0_stateless/01917_distinct_on.reference b/tests/queries/0_stateless/01917_distinct_on.reference new file mode 100644 index 00000000000..09e5879c7f6 --- /dev/null +++ b/tests/queries/0_stateless/01917_distinct_on.reference @@ -0,0 +1,3 @@ +1 1 1 +2 2 2 +1 2 2 diff --git a/tests/queries/0_stateless/01917_distinct_on.sql b/tests/queries/0_stateless/01917_distinct_on.sql new file mode 100644 index 00000000000..0940d8566bd --- /dev/null +++ b/tests/queries/0_stateless/01917_distinct_on.sql @@ -0,0 +1,9 @@ +DROP TABLE IF EXISTS t1; + +CREATE TABLE t1 (`a` UInt32, `b` UInt32, `c` UInt32 ) ENGINE = Memory; +INSERT INTO t1 VALUES (1, 1, 1), (1, 1, 2), (2, 2, 2), (1, 2, 2); + +SELECT DISTINCT ON (a, b) a, b, c FROM t1; + +DROP TABLE IF EXISTS t1; + From 788e61f80e32987ca2685474b93963fc0ae30138 Mon Sep 17 00:00:00 2001 From: alesapin Date: Tue, 22 Jun 2021 14:43:26 +0300 Subject: [PATCH 258/931] Snapshots test --- .../test_keeper_zookeeper_converter/test.py | 50 ++++++++++++++----- 1 file changed, 38 insertions(+), 12 deletions(-) diff --git a/tests/integration/test_keeper_zookeeper_converter/test.py b/tests/integration/test_keeper_zookeeper_converter/test.py index 816faebe63d..eac2b4c45c5 100644 --- a/tests/integration/test_keeper_zookeeper_converter/test.py +++ b/tests/integration/test_keeper_zookeeper_converter/test.py @@ -37,12 +37,16 @@ def stop_clickhouse(): def start_clickhouse(): node.start_clickhouse() -def copy_zookeeper_data(): +def copy_zookeeper_data(make_zk_snapshots): stop_zookeeper() + + if make_zk_snapshots: # force zookeeper to create snapshot + start_zookeeper() + stop_zookeeper() + stop_clickhouse() clear_clickhouse_data() convert_zookeeper_data() - print(node.exec_in_container) start_zookeeper() start_clickhouse() @@ -97,7 +101,13 @@ def compare_states(zk1, zk2, path="/"): print("Checking child", os.path.join(path, children)) compare_states(zk1, zk2, os.path.join(path, children)) -def test_smoke(started_cluster): +@pytest.mark.parametrize( + ('create_snapshots'), + [ + True, False + ] +) +def test_smoke(started_cluster, create_snapshots): restart_and_clear_zookeeper() genuine_connection = get_genuine_zk() @@ -105,7 +115,7 @@ def test_smoke(started_cluster): assert genuine_connection.get("/test")[0] == b"data" - copy_zookeeper_data() + copy_zookeeper_data(create_snapshots) genuine_connection = get_genuine_zk() fake_connection = get_fake_zk() @@ -115,7 +125,13 @@ def test_smoke(started_cluster): def get_bytes(s): return s.encode() -def test_simple_crud_requests(started_cluster): +@pytest.mark.parametrize( + ('create_snapshots'), + [ + True, False + ] +) +def test_simple_crud_requests(started_cluster, create_snapshots): restart_and_clear_zookeeper() genuine_connection = get_genuine_zk() @@ -144,7 +160,7 @@ def test_simple_crud_requests(started_cluster): for i in range(10): genuine_connection.create("/test_ephemeral/" + str(i), get_bytes("dataX" + str(i)), ephemeral=True) - copy_zookeeper_data() + copy_zookeeper_data(create_snapshots) genuine_connection = get_genuine_zk() fake_connection = get_fake_zk() @@ -159,8 +175,13 @@ def test_simple_crud_requests(started_cluster): second_children = 
list(sorted(fake_connection.get_children("/test_sequential"))) assert first_children == second_children, "Childrens are not equal on path " + path - -def test_multi_and_failed_requests(started_cluster): +@pytest.mark.parametrize( + ('create_snapshots'), + [ + True, False + ] +) +def test_multi_and_failed_requests(started_cluster, create_snapshots): restart_and_clear_zookeeper() genuine_connection = get_genuine_zk() @@ -196,15 +217,20 @@ def test_multi_and_failed_requests(started_cluster): assert genuine_connection.exists('/test_bad_transaction2') is None assert genuine_connection.exists('/test_multitransactions/freddy0') is not None - copy_zookeeper_data() + copy_zookeeper_data(create_snapshots) genuine_connection = get_genuine_zk() fake_connection = get_fake_zk() compare_states(genuine_connection, fake_connection) - -def test_acls(started_cluster): +@pytest.mark.parametrize( + ('create_snapshots'), + [ + True, False + ] +) +def test_acls(started_cluster, create_snapshots): restart_and_clear_zookeeper() genuine_connection = get_genuine_zk() genuine_connection.add_auth('digest', 'user1:password1') @@ -233,7 +259,7 @@ def test_acls(started_cluster): with pytest.raises(Exception): no_auth_connection.set("/test_multi_all_acl", b"Z") - copy_zookeeper_data() + copy_zookeeper_data(create_snapshots) genuine_connection = get_genuine_zk() genuine_connection.add_auth('digest', 'user1:password1') From 92ea82eac92886f838b910a3a4f1b23fddf8a6d9 Mon Sep 17 00:00:00 2001 From: Alexander Tokmakov Date: Tue, 22 Jun 2021 14:50:09 +0300 Subject: [PATCH 259/931] fix test --- tests/clickhouse-test | 16 ++++++--- .../0_stateless/00505_distributed_secure.data | 35 ++++++++++--------- .../0_stateless/00505_secure.reference | 1 - 3 files changed, 29 insertions(+), 23 deletions(-) diff --git a/tests/clickhouse-test b/tests/clickhouse-test index 3fa71215b17..14d7f0dab4e 100755 --- a/tests/clickhouse-test +++ b/tests/clickhouse-test @@ -822,12 +822,18 @@ def main(args): else: args.shard = False - if args.database and args.database != "test": - clickhouse_proc_create = Popen(shlex.split(args.client), stdin=PIPE, stdout=PIPE, stderr=None, universal_newlines=True) - clickhouse_proc_create.communicate(("CREATE DATABASE IF NOT EXISTS " + args.database + get_db_engine(args, args.database))) + def create_common_database(args, db_name): + create_database_retries = 0 + while create_database_retries < MAX_RETRIES: + clickhouse_proc_create = Popen(shlex.split(args.client), stdin=PIPE, stdout=PIPE, stderr=PIPE, universal_newlines=True) + (_, stderr) = clickhouse_proc_create.communicate(("CREATE DATABASE IF NOT EXISTS " + db_name + get_db_engine(args, db_name))) + if not need_retry(stderr): + break + create_database_retries += 1 - clickhouse_proc_create = Popen(shlex.split(args.client), stdin=PIPE, stdout=PIPE, stderr=None, universal_newlines=True) - clickhouse_proc_create.communicate(("CREATE DATABASE IF NOT EXISTS test" + get_db_engine(args, 'test'))) + if args.database and args.database != "test": + create_common_database(args, args.database) + create_common_database(args, "test") def is_test_from_dir(suite_dir, case): case_file = os.path.join(suite_dir, case) diff --git a/tests/queries/0_stateless/00505_distributed_secure.data b/tests/queries/0_stateless/00505_distributed_secure.data index dc2d37dc5df..96a96ef4b68 100644 --- a/tests/queries/0_stateless/00505_distributed_secure.data +++ b/tests/queries/0_stateless/00505_distributed_secure.data @@ -1,22 +1,23 @@ -DROP TABLE IF EXISTS test.secure1; -DROP TABLE IF EXISTS 
test.secure2; -DROP TABLE IF EXISTS test.secure3; +DROP TABLE IF EXISTS secure1; +DROP TABLE IF EXISTS secure2; +DROP TABLE IF EXISTS secure3; -CREATE TABLE test.secure1 ( date Date, a Int32, b Int32, c Int32, d Int32) ENGINE = MergeTree(date, (a, date), 8192); -CREATE TABLE test.secure2 ( date Date, a Int32, b Int32, c Int32, d Int32) ENGINE = Distributed(test_shard_localhost_secure, 'test', 'secure1'); -CREATE TABLE test.secure3 ( date Date, a Int32, b Int32, c Int32, d Int32) ENGINE = Distributed(test_shard_localhost_secure, 'test', 'secure2'); +CREATE TABLE secure1 ( date Date, a Int32, b Int32, c Int32, d Int32) ENGINE = MergeTree(date, (a, date), 8192); +CREATE TABLE secure2 ( date Date, a Int32, b Int32, c Int32, d Int32) ENGINE = Distributed(test_shard_localhost_secure, currentDatabase(), 'secure1'); +CREATE TABLE secure3 ( date Date, a Int32, b Int32, c Int32, d Int32) ENGINE = Distributed(test_shard_localhost_secure, currentDatabase(), 'secure2'); -INSERT INTO test.secure1 VALUES (1, 2, 3, 4, 5); -INSERT INTO test.secure1 VALUES (11,12,13,14,15); -INSERT INTO test.secure2 VALUES (21,22,23,24,25); -INSERT INTO test.secure3 VALUES (31,32,33,34,35); +INSERT INTO secure1 VALUES (1, 2, 3, 4, 5); +INSERT INTO secure1 VALUES (11,12,13,14,15); +INSERT INTO secure2 VALUES (21,22,23,24,25); +INSERT INTO secure3 VALUES (31,32,33,34,35); -SELECT 'sleep', sleep(1); +SYSTEM FLUSH DISTRIBUTED secure2; +SYSTEM FLUSH DISTRIBUTED secure3; -SELECT * FROM test.secure1 ORDER BY a; -SELECT * FROM test.secure2 ORDER BY a; -SELECT * FROM test.secure3 ORDER BY a; +SELECT * FROM secure1 ORDER BY a; +SELECT * FROM secure2 ORDER BY a; +SELECT * FROM secure3 ORDER BY a; -DROP TABLE test.secure1; -DROP TABLE test.secure2; -DROP TABLE test.secure3; +DROP TABLE secure1; +DROP TABLE secure2; +DROP TABLE secure3; diff --git a/tests/queries/0_stateless/00505_secure.reference b/tests/queries/0_stateless/00505_secure.reference index c925bdd13bf..73bdbdbafbe 100644 --- a/tests/queries/0_stateless/00505_secure.reference +++ b/tests/queries/0_stateless/00505_secure.reference @@ -2,7 +2,6 @@ 2 3 4 -sleep 0 1970-01-02 2 3 4 5 1970-01-12 12 13 14 15 1970-01-22 22 23 24 25 From 447fef702d46c29f1922bce2e5576634dd7db726 Mon Sep 17 00:00:00 2001 From: Anton Popov Date: Tue, 22 Jun 2021 16:08:12 +0300 Subject: [PATCH 260/931] fix fuzzer in query with 'WITH FILL' and 'WITH TOTALS' --- src/Processors/QueryPlan/FillingStep.cpp | 6 ++--- .../Transforms/FillingTransform.cpp | 11 ++++++++-- src/Processors/Transforms/FillingTransform.h | 4 +++- .../01921_with_fill_with_totals.reference | 22 ++++++++++++++----- .../01921_with_fill_with_totals.sql | 11 +++++++++- 5 files changed, 41 insertions(+), 13 deletions(-) diff --git a/src/Processors/QueryPlan/FillingStep.cpp b/src/Processors/QueryPlan/FillingStep.cpp index 5393f1f5133..ba3588efa72 100644 --- a/src/Processors/QueryPlan/FillingStep.cpp +++ b/src/Processors/QueryPlan/FillingStep.cpp @@ -40,10 +40,8 @@ void FillingStep::transformPipeline(QueryPipeline & pipeline, const BuildQueryPi { pipeline.addSimpleTransform([&](const Block & header, QueryPipeline::StreamType stream_type) -> ProcessorPtr { - if (stream_type == QueryPipeline::StreamType::Totals) - return nullptr; - - return std::make_shared(header, sort_description); + bool on_totals = stream_type == QueryPipeline::StreamType::Totals; + return std::make_shared(header, sort_description, on_totals); }); } diff --git a/src/Processors/Transforms/FillingTransform.cpp b/src/Processors/Transforms/FillingTransform.cpp index 
8419daf9186..45e46649b3a 100644 --- a/src/Processors/Transforms/FillingTransform.cpp +++ b/src/Processors/Transforms/FillingTransform.cpp @@ -30,12 +30,16 @@ Block FillingTransform::transformHeader(Block header, const SortDescription & so } FillingTransform::FillingTransform( - const Block & header_, const SortDescription & sort_description_) + const Block & header_, const SortDescription & sort_description_, bool on_totals_) : ISimpleTransform(header_, transformHeader(header_, sort_description_), true) , sort_description(sort_description_) + , on_totals(on_totals_) , filling_row(sort_description_) , next_row(sort_description_) { + if (on_totals) + return; + auto try_convert_fields = [](auto & descr, const auto & type) { auto max_type = Field::Types::Null; @@ -106,7 +110,7 @@ FillingTransform::FillingTransform( IProcessor::Status FillingTransform::prepare() { - if (input.isFinished() && !output.isFinished() && !has_input && !generate_suffix) + if (!on_totals && input.isFinished() && !output.isFinished() && !has_input && !generate_suffix) { should_insert_first = next_row < filling_row; @@ -126,6 +130,9 @@ IProcessor::Status FillingTransform::prepare() void FillingTransform::transform(Chunk & chunk) { + if (on_totals) + return; + Columns old_fill_columns; Columns old_other_columns; MutableColumns res_fill_columns; diff --git a/src/Processors/Transforms/FillingTransform.h b/src/Processors/Transforms/FillingTransform.h index 33717b079a0..7ccebadfb6d 100644 --- a/src/Processors/Transforms/FillingTransform.h +++ b/src/Processors/Transforms/FillingTransform.h @@ -13,7 +13,7 @@ namespace DB class FillingTransform : public ISimpleTransform { public: - FillingTransform(const Block & header_, const SortDescription & sort_description_); + FillingTransform(const Block & header_, const SortDescription & sort_description_, bool on_totals_); String getName() const override { return "FillingTransform"; } @@ -28,6 +28,8 @@ private: void setResultColumns(Chunk & chunk, MutableColumns & fill_columns, MutableColumns & other_columns) const; const SortDescription sort_description; /// Contains only rows with WITH FILL. + const bool on_totals; /// FillingTransform does nothing on totals. + FillingRow filling_row; /// Current row, which is used to fill gaps. FillingRow next_row; /// Row to which we need to generate filling rows. 
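For context: the crux of this fix is that a FillingTransform is now always constructed, including for the totals stream, where it simply forwards data untouched. Previously no transform was created for totals at all, which could leave that stream's header inconsistent with the transformed main stream. A minimal query exercising this path (it is essentially the first case in the test updated just below) is:

``` sql
SELECT number, sum(number)
FROM numbers(10)
WHERE number % 3 = 1
GROUP BY number
    WITH TOTALS
ORDER BY number DESC WITH FILL FROM 15;
```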
diff --git a/tests/queries/0_stateless/01921_with_fill_with_totals.reference b/tests/queries/0_stateless/01921_with_fill_with_totals.reference index 47c8c60e3c3..1f209c7db3d 100644 --- a/tests/queries/0_stateless/01921_with_fill_with_totals.reference +++ b/tests/queries/0_stateless/01921_with_fill_with_totals.reference @@ -1,8 +1,20 @@ -20 0 -19 0 -18 0 -17 0 -16 0 +15 0 +14 0 +13 0 +12 0 +11 0 +10 0 +9 0 +8 0 +7 7 +6 0 +5 0 +4 4 +3 0 +2 0 +1 1 + +0 12 15 0 14 0 13 0 diff --git a/tests/queries/0_stateless/01921_with_fill_with_totals.sql b/tests/queries/0_stateless/01921_with_fill_with_totals.sql index 9d201848141..1821e5b2413 100644 --- a/tests/queries/0_stateless/01921_with_fill_with_totals.sql +++ b/tests/queries/0_stateless/01921_with_fill_with_totals.sql @@ -5,4 +5,13 @@ FROM numbers(10) WHERE number % 3 = 1 GROUP BY number WITH TOTALS -ORDER BY number DESC WITH FILL FROM 20; +ORDER BY number DESC WITH FILL FROM 15; + +SELECT + number, + sum(number) +FROM numbers(10) +WHERE number % 3 = 1 +GROUP BY number + WITH TOTALS +ORDER BY 10, number DESC WITH FILL FROM 15; From 47b29092b4d0adff8f734f5b6b847d660cf2e38f Mon Sep 17 00:00:00 2001 From: alesapin Date: Tue, 22 Jun 2021 16:47:42 +0300 Subject: [PATCH 261/931] Fix bug in MergerMutator parts selector --- .../MergeTree/MergeTreeDataMergerMutator.cpp | 23 +++++++++++++++---- ...nt_ttl_and_normal_merges_zookeeper_long.sh | 2 +- 2 files changed, 19 insertions(+), 6 deletions(-) diff --git a/src/Storages/MergeTree/MergeTreeDataMergerMutator.cpp b/src/Storages/MergeTree/MergeTreeDataMergerMutator.cpp index 846ad7b026d..766d988500d 100644 --- a/src/Storages/MergeTree/MergeTreeDataMergerMutator.cpp +++ b/src/Storages/MergeTree/MergeTreeDataMergerMutator.cpp @@ -264,6 +264,10 @@ SelectPartsDecision MergeTreeDataMergerMutator::selectPartsToMerge( if (!can_merge_callback(nullptr, part, nullptr)) continue; + /// This part can be merged only with next parts (no prev part exists), so start + /// new interval if previous was not empty. + if (!parts_ranges.back().empty()) + parts_ranges.emplace_back(); } else { @@ -271,12 +275,21 @@ SelectPartsDecision MergeTreeDataMergerMutator::selectPartsToMerge( /// interval (in the same partition) if (!can_merge_callback(*prev_part, part, nullptr)) { - /// Starting new interval in the same partition - assert(!parts_ranges.back().empty()); - parts_ranges.emplace_back(); - - /// Now we have no previous part, but it affects only logging + /// Now we have no previous part prev_part = nullptr; + + /// Mustn't be empty + assert(!parts_ranges.back().empty()); + + /// Some parts cannot be merged with previous parts and also cannot be merged with themselves, + /// for example, merge is already assigned for such parts, or they participate in quorum inserts + /// and so on. 
+ /// Also we don't start new interval here (maybe all next parts cannot be merged and we don't want to have empty interval) + if (!can_merge_callback(nullptr, part, nullptr)) + continue; + + /// Starting new interval in the same partition + parts_ranges.emplace_back(); } } diff --git a/tests/queries/0_stateless/01921_concurrent_ttl_and_normal_merges_zookeeper_long.sh b/tests/queries/0_stateless/01921_concurrent_ttl_and_normal_merges_zookeeper_long.sh index f84a69e8eb0..80e7d6b4c00 100755 --- a/tests/queries/0_stateless/01921_concurrent_ttl_and_normal_merges_zookeeper_long.sh +++ b/tests/queries/0_stateless/01921_concurrent_ttl_and_normal_merges_zookeeper_long.sh @@ -15,7 +15,7 @@ for i in $(seq 1 $NUM_REPLICAS); do $CLICKHOUSE_CLIENT -n --query "CREATE TABLE ttl_table$i( key DateTime ) - ENGINE ReplicatedMergeTree('/test/01921_concurrent_ttl_and_normal_merges/${CLICKHOUSE_TEST_ZOOKEEPER_PREFIX}/ttl_table', '$i') + ENGINE ReplicatedMergeTree('/test/01921_concurrent_ttl_and_normal_merges/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/ttl_table', '$i') ORDER BY tuple() TTL key + INTERVAL 1 SECOND SETTINGS merge_with_ttl_timeout=1, max_replicated_merges_with_ttl_in_queue=100, max_number_of_merges_with_ttl_in_pool=100;" From 9e42833947c06783bbdf4371d19f11b7de8fcb75 Mon Sep 17 00:00:00 2001 From: George Date: Tue, 22 Jun 2021 17:14:00 +0300 Subject: [PATCH 262/931] Added translation --- .../reference/quantiles.md | 1 - .../reference/quantileexact.md | 98 +++++++++++++++++ .../reference/quantiles.md | 104 +++++++++++++++++- 3 files changed, 199 insertions(+), 4 deletions(-) diff --git a/docs/en/sql-reference/aggregate-functions/reference/quantiles.md b/docs/en/sql-reference/aggregate-functions/reference/quantiles.md index 6fcc7f2d0fe..d8320067dd1 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/quantiles.md +++ b/docs/en/sql-reference/aggregate-functions/reference/quantiles.md @@ -89,7 +89,6 @@ Type of array values: - [Date](../../../sql-reference/data-types/date.md) if input values have the `Date` type. - [DateTime](../../../sql-reference/data-types/datetime.md) if input values have the `DateTime` type. - **Example** Query: diff --git a/docs/ru/sql-reference/aggregate-functions/reference/quantileexact.md b/docs/ru/sql-reference/aggregate-functions/reference/quantileexact.md index 82ebae1c14e..f5c33bdd79b 100644 --- a/docs/ru/sql-reference/aggregate-functions/reference/quantileexact.md +++ b/docs/ru/sql-reference/aggregate-functions/reference/quantileexact.md @@ -161,6 +161,104 @@ SELECT quantileExactHigh(number) FROM numbers(10) └───────────────────────────┘ ``` +## quantileExactExclusive {#quantileexactexclusive} + +Точно вычисляет [квантиль](https://ru.wikipedia.org/wiki/Квантиль) числовой последовательности. + +Чтобы получить точный результат, все переданные значения собираются в массив, который затем частично сортируется. Таким образом, функция потребляет объем памяти `O(n)`, где `n` — количество переданных значений. Для небольшого числа значений эта функция эффективна. + +Эта функция эквивалентна Excel функции [PERCENTILE.EXC](https://support.microsoft.com/en-us/office/percentile-exc-function-bbaa7204-e9e1-4010-85bf-c31dc5dce4ba), [тип R6](https://en.wikipedia.org/wiki/Quantile#Estimating_quantiles_from_a_sample). + +Внутренние состояния функций `quantileExactExclusive` не объединяются, если они используются в одном запросе. 
Если вам необходимо вычислить квантили нескольких уровней, используйте функцию [quantilesExactExclusive](../../../sql-reference/aggregate-functions/reference/quantiles.md#quantilesexactexclusive), это повысит эффективность запроса.
+
+**Синтаксис**
+
+``` sql
+quantileExactExclusive(level)(expr)
+```
+
+**Аргументы**
+
+- `level` — уровень квантиля. Необязательный параметр. Возможные значения: (0, 1). Значения по умолчанию: 0.5. При `level=0.5` функция вычисляет [медиану](https://ru.wikipedia.org/wiki/Медиана_(статистика)). [Float](../../../float.md).
+- `expr` — выражение, зависящее от значений столбцов, возвращающее данные [числовых типов](../../../sql-reference/data-types/index.md#data_types), [Date](../../../sql-reference/data-types/date.md) или [DateTime](../../../sql-reference/data-types/datetime.md).
+
+**Возвращаемое значение**
+
+- Квантиль заданного уровня.
+
+Тип:
+
+- [Float64](../../../sql-reference/data-types/float.md) для входных данных числового типа.
+- [Date](../../../sql-reference/data-types/date.md), если входные значения имеют тип `Date`.
+- [DateTime](../../../sql-reference/data-types/datetime.md), если входные значения имеют тип `DateTime`.
+
+**Пример**
+
+Запрос:
+
+``` sql
+CREATE TABLE num AS numbers(1000);
+
+SELECT quantileExactExclusive(0.6)(x) FROM (SELECT number AS x FROM num);
+```
+
+Результат:
+
+``` text
+┌─quantileExactExclusive(0.6)(x)─┐
+│                          599.6 │
+└────────────────────────────────┘
+```
+
+## quantileExactInclusive {#quantileexactinclusive}
+
+Точно вычисляет [квантиль](https://ru.wikipedia.org/wiki/Квантиль) числовой последовательности.
+
+Чтобы получить точный результат, все переданные значения собираются в массив, который затем частично сортируется. Таким образом, функция потребляет объем памяти `O(n)`, где `n` — количество переданных значений. Для небольшого числа значений эта функция эффективна.
+
+Эта функция эквивалентна Excel функции [PERCENTILE.INC](https://support.microsoft.com/en-us/office/percentile-inc-function-680f9539-45eb-410b-9a5e-c1355e5fe2ed), [тип R7](https://en.wikipedia.org/wiki/Quantile#Estimating_quantiles_from_a_sample).
+
+Внутренние состояния функций `quantileExactInclusive` не объединяются, если они используются в одном запросе. Если вам необходимо вычислить квантили нескольких уровней, используйте функцию [quantilesExactInclusive](../../../sql-reference/aggregate-functions/reference/quantiles.md#quantilesexactinclusive), это повысит эффективность запроса.
+
+**Синтаксис**
+
+``` sql
+quantileExactInclusive(level)(expr)
+```
+
+**Аргументы**
+
+- `level` — уровень квантиля. Необязательный параметр. Возможные значения: [0, 1]. Значения по умолчанию: 0.5. При `level=0.5` функция вычисляет [медиану](https://ru.wikipedia.org/wiki/Медиана_(статистика)). [Float](../../../float.md).
+- `expr` — выражение, зависящее от значений столбцов, возвращающее данные [числовых типов](../../../sql-reference/data-types/index.md#data_types), [Date](../../../sql-reference/data-types/date.md) или [DateTime](../../../sql-reference/data-types/datetime.md).
+
+**Возвращаемые значения**
+
+- Квантиль заданного уровня.
+
+Тип:
+
+- [Float64](../../../sql-reference/data-types/float.md) для входных данных числового типа.
+- [Date](../../../sql-reference/data-types/date.md), если входные значения имеют тип `Date`.
+- [DateTime](../../../sql-reference/data-types/datetime.md), если входные значения имеют тип `DateTime`.
+
+**Пример**
+
+Запрос:
+
+``` sql
+CREATE TABLE num AS numbers(1000);
+
+SELECT quantileExactInclusive(0.6)(x) FROM (SELECT number AS x FROM num);
+```
+
+Результат:
+
+``` text
+┌─quantileExactInclusive(0.6)(x)─┐
+│                          599.4 │
+└────────────────────────────────┘
+```
+
 **Смотрите также**

 - [median](../../../sql-reference/aggregate-functions/reference/median.md#median)
diff --git a/docs/ru/sql-reference/aggregate-functions/reference/quantiles.md b/docs/ru/sql-reference/aggregate-functions/reference/quantiles.md
index 671cbc1fc4d..1ed705c5bac 100644
--- a/docs/ru/sql-reference/aggregate-functions/reference/quantiles.md
+++ b/docs/ru/sql-reference/aggregate-functions/reference/quantiles.md
@@ -2,9 +2,107 @@
 toc_priority: 201
 ---

-# quantiles {#quantiles}
+# quantiles Functions {#quantiles-functions}
+## quantiles {#quantiles}

-Syntax: `quantiles(level1, level2, …)(x)`
+Синтаксис: `quantiles(level1, level2, …)(x)`

-All the quantile functions also have corresponding quantiles functions: `quantiles`, `quantilesDeterministic`, `quantilesTiming`, `quantilesTimingWeighted`, `quantilesExact`, `quantilesExactWeighted`, `quantilesTDigest`. These functions calculate all the quantiles of the listed levels in one pass, and return an array of the resulting values.
+Все функции для вычисления квантилей имеют соответствующие функции для вычисления нескольких квантилей: `quantiles`, `quantilesDeterministic`, `quantilesTiming`, `quantilesTimingWeighted`, `quantilesExact`, `quantilesExactWeighted`, `quantilesTDigest`. Эти функции вычисляют все квантили указанных уровней в один проход и возвращают массив с вычисленными значениями.
+## quantilesExactExclusive {#quantilesexactexclusive}
+
+Точно вычисляет [квантили](https://ru.wikipedia.org/wiki/Квантиль) числовой последовательности.
+
+Чтобы получить точный результат, все переданные значения собираются в массив, который затем частично сортируется. Таким образом, функция потребляет объем памяти `O(n)`, где `n` — количество переданных значений. Для небольшого числа значений эта функция эффективна.
+
+Эта функция эквивалентна Excel функции [PERCENTILE.EXC](https://support.microsoft.com/en-us/office/percentile-exc-function-bbaa7204-e9e1-4010-85bf-c31dc5dce4ba), [тип R6](https://en.wikipedia.org/wiki/Quantile#Estimating_quantiles_from_a_sample).
+
+Работает более эффективно с наборами уровней, чем [quantileExactExclusive](../../../sql-reference/aggregate-functions/reference/quantileexact.md#quantileexactexclusive).
+
+**Синтаксис**
+
+``` sql
+quantilesExactExclusive(level1, level2, ...)(expr)
+```
+
+**Аргументы**
+
+- `level` — уровень квантилей. Возможные значения: (0, 1).
+- `expr` — выражение, зависящее от значений столбцов, возвращающее данные [числовых типов](../../../sql-reference/data-types/index.md#data_types), [Date](../../../sql-reference/data-types/date.md) или [DateTime](../../../sql-reference/data-types/datetime.md).
+
+**Возвращаемые значения**
+
+- [Массив](../../../sql-reference/data-types/array.md) квантилей указанных уровней.
+
+Тип значений массива:
+
+- [Float64](../../../sql-reference/data-types/float.md) для входных данных числового типа.
+- [Date](../../../sql-reference/data-types/date.md), если входные значения имеют тип `Date`.
+- [DateTime](../../../sql-reference/data-types/datetime.md), если входные значения имеют тип `DateTime`.
+
+**Пример**
+
+Запрос:
+
+``` sql
+CREATE TABLE num AS numbers(1000);
+
+SELECT quantilesExactExclusive(0.25, 0.5, 0.75, 0.9, 0.95, 0.99, 0.999)(x) FROM (SELECT number AS x FROM num);
+```
+
+Результат:
+
+``` text
+┌─quantilesExactExclusive(0.25, 0.5, 0.75, 0.9, 0.95, 0.99, 0.999)(x)─┐
+│ [249.25,499.5,749.75,899.9,949.9499999999999,989.99,998.999]        │
+└─────────────────────────────────────────────────────────────────────┘
+```
+
+## quantilesExactInclusive {#quantilesexactinclusive}
+
+Точно вычисляет [квантили](https://ru.wikipedia.org/wiki/Квантиль) числовой последовательности.
+
+Чтобы получить точный результат, все переданные значения собираются в массив, который затем частично сортируется. Таким образом, функция потребляет объем памяти `O(n)`, где `n` — количество переданных значений. Для небольшого числа значений эта функция эффективна.
+
+Эта функция эквивалентна Excel функции [PERCENTILE.INC](https://support.microsoft.com/en-us/office/percentile-inc-function-680f9539-45eb-410b-9a5e-c1355e5fe2ed), [тип R7](https://en.wikipedia.org/wiki/Quantile#Estimating_quantiles_from_a_sample).
+
+Работает более эффективно с наборами уровней, чем [quantileExactInclusive](../../../sql-reference/aggregate-functions/reference/quantileexact.md#quantileexactinclusive).
+
+**Синтаксис**
+
+``` sql
+quantilesExactInclusive(level1, level2, ...)(expr)
+```
+
+**Аргументы**
+
+- `level` — уровень квантилей. Возможные значения: [0, 1].
+- `expr` — выражение, зависящее от значений столбцов, возвращающее данные [числовых типов](../../../sql-reference/data-types/index.md#data_types), [Date](../../../sql-reference/data-types/date.md) или [DateTime](../../../sql-reference/data-types/datetime.md).
+
+**Возвращаемые значения**
+
+- [Массив](../../../sql-reference/data-types/array.md) квантилей указанных уровней.
+
+Тип значений массива:
+
+- [Float64](../../../sql-reference/data-types/float.md) для входных данных числового типа.
+- [Date](../../../sql-reference/data-types/date.md), если входные значения имеют тип `Date`.
+- [DateTime](../../../sql-reference/data-types/datetime.md), если входные значения имеют тип `DateTime`.
+ +**Пример** + +Запрос: + +``` sql +CREATE TABLE num AS numbers(1000); + +SELECT quantilesExactInclusive(0.25, 0.5, 0.75, 0.9, 0.95, 0.99, 0.999)(x) FROM (SELECT number AS x FROM num); +``` + +Результат: + +``` text +┌─quantilesExactInclusive(0.25, 0.5, 0.75, 0.9, 0.95, 0.99, 0.999)(x)─┐ +│ [249.75,499.5,749.25,899.1,949.05,989.01,998.001] │ +└─────────────────────────────────────────────────────────────────────┘ +``` From 54d1bef0876c8886b3679bc7d4625a6019b3840b Mon Sep 17 00:00:00 2001 From: George Date: Tue, 22 Jun 2021 17:17:31 +0300 Subject: [PATCH 263/931] Small update --- .../aggregate-functions/reference/quantiles.md | 1 + .../aggregate-functions/reference/quantileexact.md | 8 +++++--- .../aggregate-functions/reference/quantiles.md | 3 ++- 3 files changed, 8 insertions(+), 4 deletions(-) diff --git a/docs/en/sql-reference/aggregate-functions/reference/quantiles.md b/docs/en/sql-reference/aggregate-functions/reference/quantiles.md index d8320067dd1..06bee5ed038 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/quantiles.md +++ b/docs/en/sql-reference/aggregate-functions/reference/quantiles.md @@ -3,6 +3,7 @@ toc_priority: 201 --- # quantiles Functions {#quantiles-functions} + ## quantiles {#quantiles} Syntax: `quantiles(level1, level2, …)(x)` diff --git a/docs/ru/sql-reference/aggregate-functions/reference/quantileexact.md b/docs/ru/sql-reference/aggregate-functions/reference/quantileexact.md index f5c33bdd79b..eada2a16a8f 100644 --- a/docs/ru/sql-reference/aggregate-functions/reference/quantileexact.md +++ b/docs/ru/sql-reference/aggregate-functions/reference/quantileexact.md @@ -2,7 +2,9 @@ toc_priority: 202 --- -# quantileExact {#quantileexact} +# Функции quantileExact {#quantileexact-functions} + +## quantileExact {#quantileexact} Точно вычисляет [квантиль](https://ru.wikipedia.org/wiki/Квантиль) числовой последовательности. @@ -50,7 +52,7 @@ SELECT quantileExact(number) FROM numbers(10) └───────────────────────┘ ``` -# quantileExactLow {#quantileexactlow} +## quantileExactLow {#quantileexactlow} Как и `quantileExact`, эта функция вычисляет точный [квантиль](https://en.wikipedia.org/wiki/Quantile) числовой последовательности данных. @@ -109,7 +111,7 @@ SELECT quantileExactLow(number) FROM numbers(10) │ 4 │ └──────────────────────────┘ ``` -# quantileExactHigh {#quantileexacthigh} +## quantileExactHigh {#quantileexacthigh} Как и `quantileExact`, эта функция вычисляет точный [квантиль](https://en.wikipedia.org/wiki/Quantile) числовой последовательности данных. 
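A quick way to see the difference between the exclusive and inclusive variants documented above is to run both at the same level. Per the documented examples (which use a `num` table filled from `numbers(1000)`), the R6 and R7 estimates differ slightly:

``` sql
SELECT
    quantileExactExclusive(0.6)(x) AS r6,  -- 599.6 per the example above
    quantileExactInclusive(0.6)(x) AS r7   -- 599.4 per the example above
FROM (SELECT number AS x FROM numbers(1000));
```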
diff --git a/docs/ru/sql-reference/aggregate-functions/reference/quantiles.md b/docs/ru/sql-reference/aggregate-functions/reference/quantiles.md
index 1ed705c5bac..36fc436c56c 100644
--- a/docs/ru/sql-reference/aggregate-functions/reference/quantiles.md
+++ b/docs/ru/sql-reference/aggregate-functions/reference/quantiles.md
@@ -2,7 +2,8 @@
 toc_priority: 201
 ---

-# quantiles Functions {#quantiles-functions}
+# Функции для нескольких квантилей {#quantiles-functions}
+
 ## quantiles {#quantiles}

 Синтаксис: `quantiles(level1, level2, …)(x)`

From 4af3e38b52a7d9029e6fbf16d3114bcd80c36fb2 Mon Sep 17 00:00:00 2001
From: George
Date: Tue, 22 Jun 2021 17:25:48 +0300
Subject: [PATCH 264/931] Fixed links

---
 .../aggregate-functions/reference/quantileexact.md           | 4 ++--
 .../sql-reference/aggregate-functions/reference/quantiles.md | 4 ++--
 .../aggregate-functions/reference/quantileexact.md           | 4 ++--
 .../sql-reference/aggregate-functions/reference/quantiles.md | 4 ++--
 4 files changed, 8 insertions(+), 8 deletions(-)

diff --git a/docs/en/sql-reference/aggregate-functions/reference/quantileexact.md b/docs/en/sql-reference/aggregate-functions/reference/quantileexact.md
index e7890f231bb..bb1906f3a8c 100644
--- a/docs/en/sql-reference/aggregate-functions/reference/quantileexact.md
+++ b/docs/en/sql-reference/aggregate-functions/reference/quantileexact.md
@@ -178,7 +178,7 @@ quantileExactExclusive(level)(expr)

 **Arguments**

-- `level` — Level of quantile. Optional. Possible values: (0, 1). Default value: 0.5. At `level=0.5` the function calculates [median](https://en.wikipedia.org/wiki/Median). [Float](../../../float.md).
+- `level` — Level of quantile. Optional. Possible values: (0, 1). Default value: 0.5. At `level=0.5` the function calculates [median](https://en.wikipedia.org/wiki/Median). [Float](../../../sql-reference/data-types/float.md).
 - `expr` — Expression over the column values resulting in numeric [data types](../../../sql-reference/data-types/index.md#data_types), [Date](../../../sql-reference/data-types/date.md) or [DateTime](../../../sql-reference/data-types/datetime.md).

 **Returned value**
@@ -227,7 +227,7 @@ quantileExactInclusive(level)(expr)

 **Arguments**

-- `level` — Level of quantile. Optional. Possible values: [0, 1]. Default value: 0.5. At `level=0.5` the function calculates [median](https://en.wikipedia.org/wiki/Median). [Float](../../../float.md).
+- `level` — Level of quantile. Optional. Possible values: [0, 1]. Default value: 0.5. At `level=0.5` the function calculates [median](https://en.wikipedia.org/wiki/Median). [Float](../../../sql-reference/data-types/float.md).
 - `expr` — Expression over the column values resulting in numeric [data types](../../../sql-reference/data-types/index.md#data_types), [Date](../../../sql-reference/data-types/date.md) or [DateTime](../../../sql-reference/data-types/datetime.md).

 **Returned value**
diff --git a/docs/en/sql-reference/aggregate-functions/reference/quantiles.md b/docs/en/sql-reference/aggregate-functions/reference/quantiles.md
index 06bee5ed038..60ad80abae1 100644
--- a/docs/en/sql-reference/aggregate-functions/reference/quantiles.md
+++ b/docs/en/sql-reference/aggregate-functions/reference/quantiles.md
@@ -28,7 +28,7 @@ quantilesExactExclusive(level1, level2, ...)(expr)

 **Arguments**

-- `level` — Leveles of quantiles. Possible values: (0, 1).
+- `level` — Levels of quantiles. Possible values: (0, 1). [Float](../../../sql-reference/data-types/float.md).
 - `expr` — Expression over the column values resulting in numeric [data types](../../../sql-reference/data-types/index.md#data_types), [Date](../../../sql-reference/data-types/date.md) or [DateTime](../../../sql-reference/data-types/datetime.md).

 **Returned value**
@@ -77,7 +77,7 @@ quantilesExactInclusive(level1, level2, ...)(expr)

 **Arguments**

-- `level` — Leveles of quantiles. Possible values: [0, 1].
+- `level` — Levels of quantiles. Possible values: [0, 1]. [Float](../../../sql-reference/data-types/float.md).
 - `expr` — Expression over the column values resulting in numeric [data types](../../../sql-reference/data-types/index.md#data_types), [Date](../../../sql-reference/data-types/date.md) or [DateTime](../../../sql-reference/data-types/datetime.md).

 **Returned value**
diff --git a/docs/ru/sql-reference/aggregate-functions/reference/quantileexact.md b/docs/ru/sql-reference/aggregate-functions/reference/quantileexact.md
index eada2a16a8f..7f5c0d50213 100644
--- a/docs/ru/sql-reference/aggregate-functions/reference/quantileexact.md
+++ b/docs/ru/sql-reference/aggregate-functions/reference/quantileexact.md
@@ -181,7 +181,7 @@ quantileExactExclusive(level)(expr)

 **Аргументы**

-- `level` — уровень квантиля. Необязательный параметр. Возможные значения: (0, 1). Значения по умолчанию: 0.5. При `level=0.5` функция вычисляет [медиану](https://ru.wikipedia.org/wiki/Медиана_(статистика)). [Float](../../../float.md).
+- `level` — уровень квантиля. Необязательный параметр. Возможные значения: (0, 1). Значения по умолчанию: 0.5. При `level=0.5` функция вычисляет [медиану](https://ru.wikipedia.org/wiki/Медиана_(статистика)). [Float](../../../sql-reference/data-types/float.md).
 - `expr` — выражение, зависящее от значений столбцов, возвращающее данные [числовых типов](../../../sql-reference/data-types/index.md#data_types), [Date](../../../sql-reference/data-types/date.md) или [DateTime](../../../sql-reference/data-types/datetime.md).

 **Возвращаемое значение**
@@ -230,7 +230,7 @@ quantileExactInclusive(level)(expr)

 **Аргументы**

-- `level` — уровень квантиля. Необязательный параметр. Возможные значения: [0, 1]. Значения по умолчанию: 0.5. При `level=0.5` функция вычисляет [медиану](https://ru.wikipedia.org/wiki/Медиана_(статистика)). [Float](../../../float.md).
+- `level` — уровень квантиля. Необязательный параметр. Возможные значения: [0, 1]. Значения по умолчанию: 0.5. При `level=0.5` функция вычисляет [медиану](https://ru.wikipedia.org/wiki/Медиана_(статистика)). [Float](../../../sql-reference/data-types/float.md).
 - `expr` — выражение, зависящее от значений столбцов, возвращающее данные [числовых типов](../../../sql-reference/data-types/index.md#data_types), [Date](../../../sql-reference/data-types/date.md) или [DateTime](../../../sql-reference/data-types/datetime.md).

 **Возвращаемые значения**
diff --git a/docs/ru/sql-reference/aggregate-functions/reference/quantiles.md b/docs/ru/sql-reference/aggregate-functions/reference/quantiles.md
index 36fc436c56c..ed9e124a3ce 100644
--- a/docs/ru/sql-reference/aggregate-functions/reference/quantiles.md
+++ b/docs/ru/sql-reference/aggregate-functions/reference/quantiles.md
@@ -28,7 +28,7 @@ quantilesExactExclusive(level1, level2, ...)(expr)

 **Аргументы**

-- `level` — уровень квантилей. Возможные значения: (0, 1).
+- `level` — уровень квантилей. Возможные значения: (0, 1). [Float](../../../sql-reference/data-types/float.md).
- `expr` — выражение, зависящее от значений столбцов, возвращающее данные [числовых типов](../../../sql-reference/data-types/index.md#data_types), [Date](../../../sql-reference/data-types/date.md) или [DateTime](../../../sql-reference/data-types/datetime.md). **Возвращаемые значения** @@ -77,7 +77,7 @@ quantilesExactInclusive(level1, level2, ...)(expr) **Аргументы** -- `level` — уровень квантилей. Возможные значения: [0, 1]. +- `level` — уровень квантилей. Возможные значения: [0, 1]. [Float](../../../sql-reference/data-types/float.md). - `expr` — выражение, зависящее от значений столбцов, возвращающее данные [числовых типов](../../../sql-reference/data-types/index.md#data_types), [Date](../../../sql-reference/data-types/date.md) или [DateTime](../../../sql-reference/data-types/datetime.md). **Возвращаемые значения** From 6147ad643220b95c790a89da4206deb36e2dbbbc Mon Sep 17 00:00:00 2001 From: Pavel Kruglov Date: Tue, 22 Jun 2021 17:27:03 +0300 Subject: [PATCH 265/931] Fix tests --- tests/queries/0_stateless/01273_arrow.reference | 2 -- tests/queries/0_stateless/01273_arrow.sh | 7 ------- .../0_stateless/01273_arrow_decimal.reference | 2 ++ tests/queries/0_stateless/01273_arrow_decimal.sh | 14 ++++++++++++++ 4 files changed, 16 insertions(+), 9 deletions(-) create mode 100644 tests/queries/0_stateless/01273_arrow_decimal.reference create mode 100755 tests/queries/0_stateless/01273_arrow_decimal.sh diff --git a/tests/queries/0_stateless/01273_arrow.reference b/tests/queries/0_stateless/01273_arrow.reference index 9f74ab344e5..0dc503f65e4 100644 --- a/tests/queries/0_stateless/01273_arrow.reference +++ b/tests/queries/0_stateless/01273_arrow.reference @@ -58,5 +58,3 @@ dest from null: -108 108 -1016 1116 -1032 1132 -1064 1164 -1.032 -1.064 string-0 fixedstring\0\0\0\0 2001-02-03 2002-02-03 04:05:06 127 255 32767 65535 2147483647 4294967295 9223372036854775807 9223372036854775807 -1.032 -1.064 string-2 fixedstring-2\0\0 2004-06-07 2004-02-03 04:05:06 \N \N \N \N \N \N \N \N \N \N \N \N \N \N -0.1230 0.12312312 0.1231231231230000 0.12312312312312312300000000000000 -0.1230 0.12312312 0.1231231231230000 0.12312312312312312300000000000000 diff --git a/tests/queries/0_stateless/01273_arrow.sh b/tests/queries/0_stateless/01273_arrow.sh index bd6e3089859..554d75700a6 100755 --- a/tests/queries/0_stateless/01273_arrow.sh +++ b/tests/queries/0_stateless/01273_arrow.sh @@ -102,10 +102,3 @@ ${CLICKHOUSE_CLIENT} --query="DROP TABLE arrow_types1" ${CLICKHOUSE_CLIENT} --query="DROP TABLE arrow_types2" ${CLICKHOUSE_CLIENT} --query="DROP TABLE arrow_types3" ${CLICKHOUSE_CLIENT} --query="DROP TABLE arrow_types4" - -${CLICKHOUSE_CLIENT} --query="DROP TABLE IF EXISTS arrow_decimal" -${CLICKHOUSE_CLIENT} --query="CREATE TABLE arrow_decimal (d1 Decimal32(4), d2 Decimal64(8), d3 Decimal128(16), d4 Decimal256(32)) ENGINE = Memory" -${CLICKHOUSE_CLIENT} --query="INSERT INTO TABLE arrow_decimal VALUES (0.123, 0.123123123, 0.123123123123, 0.123123123123123123)" -${CLICKHOUSE_CLIENT} --query="SELECT * FROM arrow_decimal FORMAT Arrow" | ${CLICKHOUSE_CLIENT} --query="INSERT INTO arrow_decimal FORMAT Arrow" -${CLICKHOUSE_CLIENT} --query="SELECT * FROM arrow_decimal" -${CLICKHOUSE_CLIENT} --query="DROP TABLE arrow_decimal" diff --git a/tests/queries/0_stateless/01273_arrow_decimal.reference b/tests/queries/0_stateless/01273_arrow_decimal.reference new file mode 100644 index 00000000000..a512796de07 --- /dev/null +++ b/tests/queries/0_stateless/01273_arrow_decimal.reference @@ -0,0 +1,2 @@ +0.1230 0.12312312 
0.1231231231230000 0.12312312312312312300000000000000 +0.1230 0.12312312 0.1231231231230000 0.12312312312312312300000000000000 diff --git a/tests/queries/0_stateless/01273_arrow_decimal.sh b/tests/queries/0_stateless/01273_arrow_decimal.sh new file mode 100755 index 00000000000..296df040e7a --- /dev/null +++ b/tests/queries/0_stateless/01273_arrow_decimal.sh @@ -0,0 +1,14 @@ +#!/usr/bin/env bash + +set -e + +CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. "$CUR_DIR"/../shell_config.sh + +${CLICKHOUSE_CLIENT} --query="DROP TABLE IF EXISTS arrow_decimal" +${CLICKHOUSE_CLIENT} --query="CREATE TABLE arrow_decimal (d1 Decimal32(4), d2 Decimal64(8), d3 Decimal128(16), d4 Decimal256(32)) ENGINE = Memory" +${CLICKHOUSE_CLIENT} --query="INSERT INTO TABLE arrow_decimal VALUES (0.123, 0.123123123, 0.123123123123, 0.123123123123123123)" +${CLICKHOUSE_CLIENT} --query="SELECT * FROM arrow_decimal FORMAT Arrow" | ${CLICKHOUSE_CLIENT} --query="INSERT INTO arrow_decimal FORMAT Arrow" +${CLICKHOUSE_CLIENT} --query="SELECT * FROM arrow_decimal" +${CLICKHOUSE_CLIENT} --query="DROP TABLE arrow_decimal" From 7b5f34cb558db2d43d0c546b1df532b687f4e4bc Mon Sep 17 00:00:00 2001 From: tavplubix Date: Tue, 22 Jun 2021 17:28:12 +0300 Subject: [PATCH 266/931] Update 01149_zookeeper_mutation_stuck_after_replace_partition.sql --- .../01149_zookeeper_mutation_stuck_after_replace_partition.sql | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/queries/0_stateless/01149_zookeeper_mutation_stuck_after_replace_partition.sql b/tests/queries/0_stateless/01149_zookeeper_mutation_stuck_after_replace_partition.sql index 790cee9a90b..fd3f1f3fcfe 100644 --- a/tests/queries/0_stateless/01149_zookeeper_mutation_stuck_after_replace_partition.sql +++ b/tests/queries/0_stateless/01149_zookeeper_mutation_stuck_after_replace_partition.sql @@ -15,6 +15,7 @@ alter table rmt update s = 's'||toString(n) where 1; select * from rmt; alter table rmt replace partition '0' from mt; + system sync replica rmt; select table, partition_id, name, rows from system.parts where database=currentDatabase() and table in ('mt', 'rmt') and active=1 order by table, name; From 80a001ab0cf7bd7bdfc0b57086d3209e188ff913 Mon Sep 17 00:00:00 2001 From: vdimir Date: Tue, 22 Jun 2021 17:32:02 +0300 Subject: [PATCH 267/931] Use settings from global context in StorageView --- src/Storages/StorageView.cpp | 65 +++---------------- src/Storages/StorageView.h | 3 +- src/TableFunctions/TableFunctionView.cpp | 2 +- .../00599_create_view_with_subquery.reference | 2 +- .../00916_create_or_replace_view.reference | 4 +- ...76_predicate_optimizer_with_view.reference | 4 -- .../01602_show_create_view.reference | 10 +-- .../01866_view_persist_settings.reference | 44 +++++-------- .../01866_view_persist_settings.sql | 36 ++++++---- 9 files changed, 55 insertions(+), 115 deletions(-) diff --git a/src/Storages/StorageView.cpp b/src/Storages/StorageView.cpp index 671ee52d36f..5119c5b121d 100644 --- a/src/Storages/StorageView.cpp +++ b/src/Storages/StorageView.cpp @@ -29,64 +29,12 @@ namespace ErrorCodes extern const int LOGICAL_ERROR; } -namespace -{ - -void addSettingsChanges(ASTPtr ast, const Settings & settings) -{ - auto * settings_ast = ast->as(); - if (!settings_ast) - throw DB::Exception(DB::ErrorCodes::LOGICAL_ERROR, "ASTSetQuery expected"); - - settings_ast->is_standalone = false; - if (settings_ast->changes.tryGet("join_use_nulls") == nullptr) - settings_ast->changes.emplace_back("join_use_nulls", 
Field(settings.join_use_nulls)); -} - -/// Save to AST settings from context that affects view behaviour. -void saveSettingsToAst(ASTSelectWithUnionQuery * select, const Settings & settings) -{ - /// Check SETTINGS section on the top level - if (select->settings_ast) - { - addSettingsChanges(select->settings_ast, settings); - return; - } - - /// We cannot add SETTINGS on the top level because it will clash with section from inner SELECT - /// and will got query: SELECT ... SETTINGS ... SETTINGS ... - - /// Process every select in ast and add SETTINGS section to each - for (const auto & child : select->list_of_selects->children) - { - auto * child_select = child->as(); - if (!child_select) - continue; - - ASTPtr ast_set_query = child_select->settings(); - if (ast_set_query) - { - /// Modify existing SETTINGS section - addSettingsChanges(ast_set_query, settings); - } - else - { - /// Add SETTINGS section to query - ast_set_query = std::make_shared(); - addSettingsChanges(ast_set_query, settings); - child_select->setExpression(ASTSelectQuery::Expression::SETTINGS, std::move(ast_set_query)); - } - } -} - -} StorageView::StorageView( const StorageID & table_id_, const ASTCreateQuery & query, const ColumnsDescription & columns_, - const String & comment, - const Settings & settings) + const String & comment) : IStorage(table_id_) { StorageInMemoryMetadata storage_metadata; @@ -95,8 +43,6 @@ StorageView::StorageView( if (!query.select) throw Exception("SELECT query is not specified for " + getName(), ErrorCodes::INCORRECT_QUERY); - - saveSettingsToAst(query.select, settings); SelectQueryDescription description; description.inner_query = query.select->ptr(); @@ -140,7 +86,12 @@ void StorageView::read( current_inner_query = query_info.view_query->clone(); } - InterpreterSelectWithUnionQuery interpreter(current_inner_query, context, {}, column_names); + auto modified_context = Context::createCopy(context); + /// Use settings from global context, + /// because difference between settings set on VIEW creation and query execution can break queries + modified_context->setSettings(context->getGlobalContext()->getSettingsRef()); + + InterpreterSelectWithUnionQuery interpreter(current_inner_query, modified_context, {}, column_names); interpreter.buildQueryPlan(query_plan); /// It's expected that the columns read from storage are not constant. 
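The user-visible consequence, as the updated 01866_view_persist_settings test below demonstrates: a view no longer runs with whatever session settings happened to be active when it was created, nor with the current session's SET overrides; its inner query now uses the server's global settings, unless the view's SELECT carries an explicit SETTINGS clause. A sketch of the behavior (the view name `v` is illustrative, and this assumes the server-wide default join_use_nulls = 0):

``` sql
SET join_use_nulls = 1;

CREATE VIEW v AS
SELECT * FROM ( SELECT 1 AS a, 11 AS b ) AS t1
FULL JOIN ( SELECT 2 AS a, 22 AS c ) AS t2 USING a;

SELECT * FROM v;
-- after this patch the inner FULL JOIN uses the global default (0),
-- so non-matched columns come back as 0 rather than NULL;
-- adding "SETTINGS join_use_nulls = 1" inside the view's SELECT
-- still forces NULLs, as the view_nulls_set case in the test shows
```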
@@ -228,7 +179,7 @@ void registerStorageView(StorageFactory & factory) if (args.query.storage) throw Exception("Specifying ENGINE is not allowed for a View", ErrorCodes::INCORRECT_QUERY); - return StorageView::create(args.table_id, args.query, args.columns, args.comment, args.getLocalContext()->getSettingsRef()); + return StorageView::create(args.table_id, args.query, args.columns, args.comment); }); } diff --git a/src/Storages/StorageView.h b/src/Storages/StorageView.h index 1fbe98807ea..a59328cd471 100644 --- a/src/Storages/StorageView.h +++ b/src/Storages/StorageView.h @@ -53,8 +53,7 @@ protected: const StorageID & table_id_, const ASTCreateQuery & query, const ColumnsDescription & columns_, - const String & comment, - const Settings & settings); + const String & comment); }; } diff --git a/src/TableFunctions/TableFunctionView.cpp b/src/TableFunctions/TableFunctionView.cpp index 7d61aef8a19..2cab8aeca25 100644 --- a/src/TableFunctions/TableFunctionView.cpp +++ b/src/TableFunctions/TableFunctionView.cpp @@ -42,7 +42,7 @@ StoragePtr TableFunctionView::executeImpl( const ASTPtr & /*ast_function*/, ContextPtr context, const std::string & table_name, ColumnsDescription /*cached_columns*/) const { auto columns = getActualTableStructure(context); - auto res = StorageView::create(StorageID(getDatabaseName(), table_name), create, columns, String{}, context->getSettingsRef()); + auto res = StorageView::create(StorageID(getDatabaseName(), table_name), create, columns, ""); res->startup(); return res; } diff --git a/tests/queries/0_stateless/00599_create_view_with_subquery.reference b/tests/queries/0_stateless/00599_create_view_with_subquery.reference index ff9dc540532..0458f650fd0 100644 --- a/tests/queries/0_stateless/00599_create_view_with_subquery.reference +++ b/tests/queries/0_stateless/00599_create_view_with_subquery.reference @@ -1 +1 @@ -CREATE VIEW default.test_view_00599\n(\n `id` UInt64\n) AS\nSELECT *\nFROM default.test_00599\nWHERE id = (\n SELECT 1\n)\nSETTINGS join_use_nulls = 0 +CREATE VIEW default.test_view_00599\n(\n `id` UInt64\n) AS\nSELECT *\nFROM default.test_00599\nWHERE id = (\n SELECT 1\n) diff --git a/tests/queries/0_stateless/00916_create_or_replace_view.reference b/tests/queries/0_stateless/00916_create_or_replace_view.reference index 31b08b602f8..50323e47556 100644 --- a/tests/queries/0_stateless/00916_create_or_replace_view.reference +++ b/tests/queries/0_stateless/00916_create_or_replace_view.reference @@ -1,2 +1,2 @@ -CREATE VIEW default.t\n(\n `number` UInt64\n) AS\nSELECT number\nFROM system.numbers\nSETTINGS join_use_nulls = 0 -CREATE VIEW default.t\n(\n `next_number` UInt64\n) AS\nSELECT number + 1 AS next_number\nFROM system.numbers\nSETTINGS join_use_nulls = 0 +CREATE VIEW default.t\n(\n `number` UInt64\n) AS\nSELECT number\nFROM system.numbers +CREATE VIEW default.t\n(\n `next_number` UInt64\n) AS\nSELECT number + 1 AS next_number\nFROM system.numbers diff --git a/tests/queries/0_stateless/01076_predicate_optimizer_with_view.reference b/tests/queries/0_stateless/01076_predicate_optimizer_with_view.reference index fb7a1676cb4..620c5c7c8d1 100644 --- a/tests/queries/0_stateless/01076_predicate_optimizer_with_view.reference +++ b/tests/queries/0_stateless/01076_predicate_optimizer_with_view.reference @@ -8,7 +8,6 @@ FROM SELECT * FROM default.test HAVING id = 1 - SETTINGS join_use_nulls = 0 ) AS test_view WHERE id = 1 SELECT @@ -21,7 +20,6 @@ FROM SELECT * FROM default.test HAVING id = 2 - SETTINGS join_use_nulls = 0 ) AS test_view WHERE id = 2 SELECT id @@ 
-30,7 +28,6 @@ FROM SELECT * FROM default.test HAVING id = 1 - SETTINGS join_use_nulls = 0 ) AS test_view WHERE id = 1 SELECT id @@ -39,6 +36,5 @@ FROM SELECT * FROM default.test HAVING id = 1 - SETTINGS join_use_nulls = 0 ) AS s WHERE id = 1 diff --git a/tests/queries/0_stateless/01602_show_create_view.reference b/tests/queries/0_stateless/01602_show_create_view.reference index 2130834910c..5d4bd2cd972 100644 --- a/tests/queries/0_stateless/01602_show_create_view.reference +++ b/tests/queries/0_stateless/01602_show_create_view.reference @@ -1,7 +1,7 @@ -CREATE VIEW test_1602.v\n(\n `EventDate` DateTime,\n `CounterID` UInt32,\n `UserID` UInt32\n) AS\nSELECT *\nFROM test_1602.tbl\nSETTINGS join_use_nulls = 0 +CREATE VIEW test_1602.v\n(\n `EventDate` DateTime,\n `CounterID` UInt32,\n `UserID` UInt32\n) AS\nSELECT *\nFROM test_1602.tbl CREATE MATERIALIZED VIEW test_1602.vv\n(\n `EventDate` DateTime,\n `CounterID` UInt32,\n `UserID` UInt32\n)\nENGINE = MergeTree\nPARTITION BY toYYYYMM(EventDate)\nORDER BY (CounterID, EventDate, intHash32(UserID))\nSETTINGS index_granularity = 8192 AS\nSELECT *\nFROM test_1602.tbl CREATE LIVE VIEW test_1602.vvv\n(\n `EventDate` DateTime,\n `CounterID` UInt32,\n `UserID` UInt32\n) AS\nSELECT *\nFROM test_1602.tbl -CREATE VIEW test_1602.VIEW\n(\n `EventDate` DateTime,\n `CounterID` UInt32,\n `UserID` UInt32\n) AS\nSELECT *\nFROM test_1602.tbl\nSETTINGS join_use_nulls = 0 -CREATE VIEW test_1602.DATABASE\n(\n `EventDate` DateTime,\n `CounterID` UInt32,\n `UserID` UInt32\n) AS\nSELECT *\nFROM test_1602.tbl\nSETTINGS join_use_nulls = 0 -CREATE VIEW test_1602.DICTIONARY\n(\n `EventDate` DateTime,\n `CounterID` UInt32,\n `UserID` UInt32\n) AS\nSELECT *\nFROM test_1602.tbl\nSETTINGS join_use_nulls = 0 -CREATE VIEW test_1602.TABLE\n(\n `EventDate` DateTime,\n `CounterID` UInt32,\n `UserID` UInt32\n) AS\nSELECT *\nFROM test_1602.tbl\nSETTINGS join_use_nulls = 0 +CREATE VIEW test_1602.VIEW\n(\n `EventDate` DateTime,\n `CounterID` UInt32,\n `UserID` UInt32\n) AS\nSELECT *\nFROM test_1602.tbl +CREATE VIEW test_1602.DATABASE\n(\n `EventDate` DateTime,\n `CounterID` UInt32,\n `UserID` UInt32\n) AS\nSELECT *\nFROM test_1602.tbl +CREATE VIEW test_1602.DICTIONARY\n(\n `EventDate` DateTime,\n `CounterID` UInt32,\n `UserID` UInt32\n) AS\nSELECT *\nFROM test_1602.tbl +CREATE VIEW test_1602.TABLE\n(\n `EventDate` DateTime,\n `CounterID` UInt32,\n `UserID` UInt32\n) AS\nSELECT *\nFROM test_1602.tbl diff --git a/tests/queries/0_stateless/01866_view_persist_settings.reference b/tests/queries/0_stateless/01866_view_persist_settings.reference index 529b62a4024..07c96e76875 100644 --- a/tests/queries/0_stateless/01866_view_persist_settings.reference +++ b/tests/queries/0_stateless/01866_view_persist_settings.reference @@ -1,48 +1,34 @@ -SELECT - a, - b, - c -FROM -( - SELECT * - FROM - ( - SELECT - number + 1 AS a, - number + 11 AS b - FROM numbers(2) - ) AS t1 - FULL OUTER JOIN - ( - SELECT - number + 2 AS a, - number + 22 AS c - FROM numbers(2) - ) AS t2 USING (a) - ORDER BY a ASC - SETTINGS max_block_size = 666, join_use_nulls = 0 -) AS view_no_nulls +join_use_nulls = 1 +- 1 11 0 2 12 22 3 0 23 +- 1 11 0 2 12 22 3 0 23 +- 1 11 \N 2 12 22 3 \N 23 -1 11 \N -2 12 22 -3 \N 23 +- 1 11 0 2 12 22 3 0 23 +join_use_nulls = 0 +- 1 11 0 2 12 22 3 0 23 +- +1 11 0 +2 12 22 +3 0 23 +- 1 11 \N 2 12 22 3 \N 23 -1 11 \N +- +1 11 0 2 12 22 -3 \N 23 +3 0 23 diff --git a/tests/queries/0_stateless/01866_view_persist_settings.sql b/tests/queries/0_stateless/01866_view_persist_settings.sql index 
93dcb725179..71d6c856b9b 100644 --- a/tests/queries/0_stateless/01866_view_persist_settings.sql +++ b/tests/queries/0_stateless/01866_view_persist_settings.sql @@ -8,17 +8,13 @@ SET join_use_nulls = 0; CREATE OR REPLACE VIEW view_no_nulls AS SELECT * FROM ( SELECT number + 1 AS a, number + 11 AS b FROM numbers(2) ) AS t1 FULL JOIN ( SELECT number + 2 AS a, number + 22 AS c FROM numbers(2) ) AS t2 -USING a ORDER BY a -SETTINGS max_block_size = 666; - --- check that max_block_size not rewriten -EXPLAIN SYNTAX SELECT * FROM view_no_nulls; +USING a ORDER BY a; CREATE OR REPLACE VIEW view_nulls_set AS SELECT * FROM ( SELECT number + 1 AS a, number + 11 AS b FROM numbers(2) ) AS t1 FULL JOIN ( SELECT number + 2 AS a, number + 22 AS c FROM numbers(2) ) AS t2 USING a ORDER BY a -SETTINGS join_use_nulls = 1, max_block_size = 666; +SETTINGS join_use_nulls = 1; SET join_use_nulls = 1; @@ -35,17 +31,29 @@ SETTINGS join_use_nulls = 0; SET join_use_nulls = 1; -SELECT * from view_no_nulls; -SELECT * from view_no_nulls_set; -SELECT * from view_nulls_set; -SELECT * from view_nulls; +SELECT 'join_use_nulls = 1'; + +SELECT '-'; +SELECT * FROM view_no_nulls; +SELECT '-'; +SELECT * FROM view_no_nulls_set; +SELECT '-'; +SELECT * FROM view_nulls_set; +SELECT '-'; +SELECT * FROM view_nulls; SET join_use_nulls = 0; -SELECT * from view_no_nulls; -SELECT * from view_no_nulls_set; -SELECT * from view_nulls_set; -SELECT * from view_nulls; +SELECT 'join_use_nulls = 0'; + +SELECT '-'; +SELECT * FROM view_no_nulls; +SELECT '-'; +SELECT * FROM view_no_nulls_set; +SELECT '-'; +SELECT * FROM view_nulls_set; +SELECT '-'; +SELECT * FROM view_nulls; DROP TABLE IF EXISTS view_no_nulls; DROP TABLE IF EXISTS view_no_nulls_set; From 3578a79e08f627254446b53bebf65b889303390f Mon Sep 17 00:00:00 2001 From: Yatsishin Ilya <2159081+qoega@users.noreply.github.com> Date: Tue, 22 Jun 2021 17:41:20 +0300 Subject: [PATCH 268/931] fix style --- src/Functions/geoToH3.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/Functions/geoToH3.cpp b/src/Functions/geoToH3.cpp index 4fa20d0ad62..90e29248d32 100644 --- a/src/Functions/geoToH3.cpp +++ b/src/Functions/geoToH3.cpp @@ -83,12 +83,12 @@ public: LatLng coord; coord.lng = degsToRads(lon); coord.lat = degsToRads(lat); - + H3Index hindex; H3Error err = latLngToCell(&coord, res, &hindex); if (err) { throw Exception( - "Incorrect coorinates lat:" + std::to_string(coord.lat) + " lng:" + std::to_string(coord.lng) + " err:" + std::to_string(err), + "Incorrect coordinates lat:" + std::to_string(coord.lat) + " lng:" + std::to_string(coord.lng) + " err:" + std::to_string(err), ErrorCodes::INCORRECT_DATA); } From 8c367eecc68929e65246d009593df9c854473451 Mon Sep 17 00:00:00 2001 From: George Date: Tue, 22 Jun 2021 17:47:40 +0300 Subject: [PATCH 269/931] fix --- .../aggregate-functions/reference/quantileexact.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/ru/sql-reference/aggregate-functions/reference/quantileexact.md b/docs/ru/sql-reference/aggregate-functions/reference/quantileexact.md index 7f5c0d50213..2a9ac46298b 100644 --- a/docs/ru/sql-reference/aggregate-functions/reference/quantileexact.md +++ b/docs/ru/sql-reference/aggregate-functions/reference/quantileexact.md @@ -233,7 +233,7 @@ quantileExactInclusive(level)(expr) - `level` — уровень квантиля. Необязательный параметр. Возможные значения: [0, 1]. Значения по умолчанию: 0.5. При `level=0.5` функция вычисляет [медиану](https://ru.wikipedia.org/wiki/Медиана_(статистика)). 
[Float](../../../sql-reference/data-types/float.md).
- `expr` — выражение, зависящее от значений столбцов, возвращающее данные [числовых типов](../../../sql-reference/data-types/index.md#data_types), [Date](../../../sql-reference/data-types/date.md) или [DateTime](../../../sql-reference/data-types/datetime.md).

-**Возвращаемые значения**
+**Возвращаемое значение**

- Квантиль заданного уровня.

From 0a937e7377d71689b77ce3d28c95394012c90fe7 Mon Sep 17 00:00:00 2001
From: Ildus Kurbangaliev
Date: Tue, 22 Jun 2021 17:02:42 +0200
Subject: [PATCH 270/931] Support Map type in mapAdd and mapSubtract

---
 .../functions/tuple-map-functions.md          |  17 +-
 src/Functions/array/mapOp.cpp                 | 397 ++++++++++++------
 .../01318_map_add_map_subtract.sql            |   4 +-
 ...map_add_map_subtract_on_map_type.reference |  55 +++
 ...01318_map_add_map_subtract_on_map_type.sql |  46 ++
 5 files changed, 376 insertions(+), 143 deletions(-)
 create mode 100644 tests/queries/0_stateless/01318_map_add_map_subtract_on_map_type.reference
 create mode 100644 tests/queries/0_stateless/01318_map_add_map_subtract_on_map_type.sql

diff --git a/docs/en/sql-reference/functions/tuple-map-functions.md b/docs/en/sql-reference/functions/tuple-map-functions.md
index 8b0710c0182..ff2f11322a4 100644
--- a/docs/en/sql-reference/functions/tuple-map-functions.md
+++ b/docs/en/sql-reference/functions/tuple-map-functions.md
@@ -70,23 +70,23 @@ Result:

 Collect all the keys and sum corresponding values.

-**Syntax**
+**Syntax**

``` sql
-mapAdd(Tuple(Array, Array), Tuple(Array, Array) [, ...])
+mapAdd(arg1, arg2 [, ...])
```

-**Arguments**
+**Arguments**

-Arguments are [tuples](../../sql-reference/data-types/tuple.md#tuplet1-t2) of two [arrays](../../sql-reference/data-types/array.md#data-type-array), where items in the first array represent keys, and the second array contains values for the each key. All key arrays should have same type, and all value arrays should contain items which are promote to the one type ([Int64](../../sql-reference/data-types/int-uint.md#int-ranges), [UInt64](../../sql-reference/data-types/int-uint.md#uint-ranges) or [Float64](../../sql-reference/data-types/float.md#float32-float64)). The common promoted type is used as a type for the result array.
+Arguments are [maps](../../sql-reference/data-types/map.md) or [tuples](../../sql-reference/data-types/tuple.md#tuplet1-t2) of two [arrays](../../sql-reference/data-types/array.md#data-type-array), where items in the first array represent keys, and the second array contains values for each key. All key arrays should have the same type, and all value arrays should contain items which can be promoted to one type ([Int64](../../sql-reference/data-types/int-uint.md#int-ranges), [UInt64](../../sql-reference/data-types/int-uint.md#uint-ranges) or [Float64](../../sql-reference/data-types/float.md#float32-float64)). The common promoted type is used as a type for the result array.

 **Returned value**

-- Returns one [tuple](../../sql-reference/data-types/tuple.md#tuplet1-t2), where the first array contains the sorted keys and the second array contains values.
+- Depending on the arguments, returns one [map](../../sql-reference/data-types/map.md) or [tuple](../../sql-reference/data-types/tuple.md#tuplet1-t2), where the first array contains the sorted keys and the second array contains values.
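A minimal illustration of the promotion rule described in the added paragraph above; the query and output are taken verbatim from the `01318_map_add_map_subtract_on_map_type` test added later in this patch:

``` sql
-- UInt8 keys stay UInt8, while the UInt8 values are promoted to UInt64 in the result.
SELECT mapAdd(map(toUInt8(1), 1, 2, 1), map(toUInt8(1), 1, 2, 1)) AS res, toTypeName(res);
-- Result: {1:2,2:2}    Map(UInt8,UInt64)
```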
**Example**

-Query:
+Query with a tuple map:

``` sql
SELECT mapAdd(([toUInt8(1), 2], [1, 1]), ([toUInt8(1), 2], [1, 1])) as res, toTypeName(res) as type;
@@ -100,6 +100,12 @@ Result:
└───────────────┴────────────────────────────────────┘
```

+Query with `Map` type:
+
+``` sql
+SELECT mapAdd(map(1, 1), map(1, 1)) AS res, toTypeName(res) AS type;
+```
+
 ## mapSubtract {#function-mapsubtract}

 Collect all the keys and subtract corresponding values.

diff --git a/src/Functions/array/mapOp.cpp b/src/Functions/array/mapOp.cpp
index 1a19ee41d2f..da394c47f80 100644
--- a/src/Functions/array/mapOp.cpp
+++ b/src/Functions/array/mapOp.cpp
@@ -1,13 +1,18 @@
 #include
-#include
+#include
+#include
 #include
+#include
+#include
+#include
 #include
 #include
 #include
 #include
 #include
 #include
-#include
+#include "Columns/ColumnMap.h"
+#include "DataTypes/DataTypeMap.h"

 namespace DB
@@ -24,8 +29,8 @@ namespace
 struct TupArg
 {
-    const IColumn & key_column;
-    const IColumn & val_column;
+    const ColumnPtr & key_column;
+    const ColumnPtr & val_column;
     const IColumn::Offsets & key_offsets;
     const IColumn::Offsets & val_offsets;
     bool is_const;
@@ -52,17 +57,39 @@ private:
    bool isVariadic() const override { return true; }
    bool useDefaultImplementationForConstants() const override { return true; }

-    DataTypePtr getReturnTypeImpl(const DataTypes & arguments) const override
+    void checkTypes(
+        DataTypePtr & key_type, DataTypePtr & promoted_val_type, const DataTypePtr & check_key_type, DataTypePtr & check_val_type) const
+    {
+        if (!(check_key_type->equals(*key_type)))
+            throw Exception(
+                "Expected same " + key_type->getName() + " type for all keys in " + getName(), ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT);
+
+        WhichDataType which_val(promoted_val_type);
+        WhichDataType which_ch_val(check_val_type);
+
+        if (which_ch_val.isFloat() != which_val.isFloat())
+            throw Exception(
+                "All value types in " + getName() + " should be either float or integer", ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT);
+
+        if (!(check_val_type->equals(*promoted_val_type)))
+        {
+            throw Exception(
+                "All value types in " + getName() + " should be promotable to " + promoted_val_type->getName() + ", got "
+                    + check_val_type->getName(),
+                ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT);
+        }
+    }
+
+    DataTypePtr getReturnTypeForTuples(const DataTypes & arguments) const
     {
-        bool is_float = false;
         DataTypePtr key_type, val_type, res;

-        if (arguments.size() < 2)
-            throw Exception{getName() + " accepts at least two map tuples", ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH};
-
-        for (const auto & tup_arg : arguments)
+        for (const auto & arg : arguments)
         {
-            const DataTypeTuple * tup = checkAndGetDataType<DataTypeTuple>(tup_arg.get());
+            const DataTypeArray * k;
+            const DataTypeArray * v;
+
+            const DataTypeTuple * tup = checkAndGetDataType<DataTypeTuple>(arg.get());
             if (!tup)
                 throw Exception{getName() + " accepts at least two map tuples", ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH};
@@ -71,8 +98,8 @@ private:
                 throw Exception(
                     "Each tuple in " + getName() + " arguments should consist of two arrays", ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT);

-            const DataTypeArray * k = checkAndGetDataType<DataTypeArray>(elems[0].get());
-            const DataTypeArray * v = checkAndGetDataType<DataTypeArray>(elems[1].get());
+            k = checkAndGetDataType<DataTypeArray>(elems[0].get());
+            v = checkAndGetDataType<DataTypeArray>(elems[1].get());

             if (!k || !v)
                 throw Exception(
@@ -80,62 +107,100 @@
             auto result_type = v->getNestedType();
             if (!result_type->canBePromoted())
-                throw Exception{"Values to be summed are expected to be Numeric, Float or Decimal.",
-                    ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT};
+                throw Exception{
+                    "Values to be summed are expected to be
Numeric, Float or Decimal.", ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT}; - WhichDataType which_val(result_type); - - auto promoted_type = result_type->promoteNumericType(); + auto promoted_val_type = result_type->promoteNumericType(); if (!key_type) { key_type = k->getNestedType(); - val_type = promoted_type; - is_float = which_val.isFloat(); + val_type = promoted_val_type; + res = std::make_shared( + DataTypes{std::make_shared(k->getNestedType()), std::make_shared(promoted_val_type)}); } else - { - if (!(k->getNestedType()->equals(*key_type))) - throw Exception( - "All key types in " + getName() + " should be same: " + key_type->getName(), - ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT); - - if (is_float != which_val.isFloat()) - throw Exception( - "All value types in " + getName() + " should be or float or integer", ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT); - - if (!(promoted_type->equals(*val_type))) - { - throw Exception( - "All value types in " + getName() + " should be promotable to " + val_type->getName() + ", got " - + promoted_type->getName(), - ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT); - } - } - - if (!res) - { - res = std::make_shared( - DataTypes{std::make_shared(k->getNestedType()), std::make_shared(promoted_type)}); - } + checkTypes(key_type, val_type, k->getNestedType(), promoted_val_type); } return res; } - template - ColumnPtr execute2(size_t row_count, TupleMaps & args, const DataTypeTuple & res_type) const + DataTypePtr getReturnTypeForMaps(const DataTypes & arguments) const { - MutableColumnPtr res_tuple = res_type.createColumn(); + DataTypePtr key_type, val_type, res; - auto * to_tuple = assert_cast(res_tuple.get()); - auto & to_keys_arr = assert_cast(to_tuple->getColumn(0)); - auto & to_keys_data = to_keys_arr.getData(); - auto & to_keys_offset = to_keys_arr.getOffsets(); + for (const auto & arg : arguments) + { + const auto * map = checkAndGetDataType(arg.get()); + if (!map) + throw Exception{getName() + " accepts at least two maps", ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH}; - auto & to_vals_arr = assert_cast(to_tuple->getColumn(1)); - auto & to_vals_data = to_vals_arr.getData(); + const auto & v = map->getValueType(); + + if (!v->canBePromoted()) + throw Exception{ + "Values to be summed are expected to be Numeric, Float or Decimal.", ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT}; + + auto promoted_val_type = v->promoteNumericType(); + if (!key_type) + { + key_type = map->getKeyType(); + val_type = promoted_val_type; + res = std::make_shared(DataTypes({key_type, promoted_val_type})); + } + else + checkTypes(key_type, val_type, map->getKeyType(), promoted_val_type); + } + + return res; + } + + DataTypePtr getReturnTypeImpl(const DataTypes & arguments) const override + { + if (arguments.size() < 2) + throw Exception{getName() + " accepts at least two maps", ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH}; + + if (arguments[0]->getTypeId() == TypeIndex::Tuple) + return getReturnTypeForTuples(arguments); + else if (arguments[0]->getTypeId() == TypeIndex::Map) + return getReturnTypeForMaps(arguments); + else + throw Exception{getName() + " only accepts maps", ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT}; + } + + template + ColumnPtr execute2(size_t row_count, TupleMaps & args, const DataTypePtr res_type) const + { + MutableColumnPtr res_column = res_type->createColumn(); + IColumn *to_keys_data, *to_vals_data; + ColumnArray::Offsets * to_keys_offset; + ColumnArray::Offsets * to_vals_offset = nullptr; + + // prepare output destinations + if (res_type->getTypeId() == TypeIndex::Tuple) + { + auto * 
to_tuple = assert_cast(res_column.get()); + auto & to_keys_arr = assert_cast(to_tuple->getColumn(0)); + to_keys_data = &to_keys_arr.getData(); + to_keys_offset = &to_keys_arr.getOffsets(); + + auto & to_vals_arr = assert_cast(to_tuple->getColumn(1)); + to_vals_data = &to_vals_arr.getData(); + to_vals_offset = &to_vals_arr.getOffsets(); + } + else + { + assert(res_type->getTypeId() == TypeIndex::Map); + + auto * to_map = assert_cast(res_column.get()); + auto & to_wrapper_arr = to_map->getNestedColumn(); + to_keys_offset = &to_wrapper_arr.getOffsets(); + + auto & to_map_tuple = to_map->getNestedData(); + to_keys_data = &to_map_tuple.getColumn(0); + to_vals_data = &to_map_tuple.getColumn(1); + } - size_t res_offset = 0; std::map summing_map; for (size_t i = 0; i < row_count; i++) @@ -147,7 +212,7 @@ private: if (!arg.is_const) { - offset = i > 0 ? arg.key_offsets[i - 1] : 0; + offset = arg.key_offsets[i - 1]; len = arg.key_offsets[i] - offset; if (arg.val_offsets[i] != arg.key_offsets[i]) @@ -155,20 +220,30 @@ private: "Key and value array should have same amount of elements", ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH); } + Field temp_val; for (size_t j = 0; j < len; j++) { KeyType key; - if constexpr (is_str_key) + if constexpr (std::is_same::value) { - // have to use Field to get strings - key = arg.key_column[offset + j].get(); + if (const auto * col_fixed = checkAndGetColumn(arg.key_column.get())) + key = col_fixed->getDataAt(offset + j).toString(); + else if (const auto * col_str = checkAndGetColumn(arg.key_column.get())) + key = col_str->getDataAt(offset + j).toString(); + else + // should not happen + throw Exception( + "Expected String or FixedString, got " + std::string(getTypeName(arg.key_column->getDataType())) + + " in " + getName(), + ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT); } else { - key = assert_cast &>(arg.key_column).getData()[offset + j]; + key = assert_cast *>(arg.key_column.get())->getData()[offset + j]; } - ValType value = arg.val_column[offset + j].get(); + arg.val_column->get(offset + j, temp_val); + ValType value = temp_val.get(); if constexpr (op_type == OpTypes::ADD) { @@ -190,132 +265,184 @@ private: for (const auto & elem : summing_map) { - res_offset++; - to_keys_data.insert(elem.first); - to_vals_data.insert(elem.second); + to_keys_data->insert(elem.first); + to_vals_data->insert(elem.second); } - to_keys_offset.push_back(res_offset); + to_keys_offset->push_back(to_keys_data->size()); summing_map.clear(); } - // same offsets as in keys - to_vals_arr.getOffsets().insert(to_keys_offset.begin(), to_keys_offset.end()); + if (to_vals_offset) + { + // same offsets as in keys + to_vals_offset->insert(to_keys_offset->begin(), to_keys_offset->end()); + } - return res_tuple; + return res_column; } - template - ColumnPtr execute1(size_t row_count, const DataTypeTuple & res_type, TupleMaps & args) const + template + ColumnPtr execute1(size_t row_count, const DataTypePtr res_type, const DataTypePtr res_value_type, TupleMaps & args) const { - const auto & promoted_type = (assert_cast(res_type.getElements()[1].get()))->getNestedType(); -#define MATCH_EXECUTE(is_str) \ - switch (promoted_type->getTypeId()) \ - { \ - case TypeIndex::Int64: return execute2(row_count, args, res_type); \ - case TypeIndex::UInt64: return execute2(row_count, args, res_type); \ - case TypeIndex::Float64: return execute2(row_count, args, res_type); \ - default: \ - throw Exception{"Illegal columns in arguments of function " + getName(), ErrorCodes::ILLEGAL_COLUMN}; \ - } - - if constexpr (is_str_key) 
+ switch (res_value_type->getTypeId()) { - MATCH_EXECUTE(true) + case TypeIndex::Int64: + return execute2(row_count, args, res_type); + case TypeIndex::Int128: + return execute2(row_count, args, res_type); + case TypeIndex::Int256: + return execute2(row_count, args, res_type); + case TypeIndex::UInt64: + return execute2(row_count, args, res_type); + case TypeIndex::UInt128: + return execute2(row_count, args, res_type); + case TypeIndex::UInt256: + return execute2(row_count, args, res_type); + case TypeIndex::Float64: + return execute2(row_count, args, res_type); + default: + throw Exception{ + "Illegal column type " + res_value_type->getName() + " for values in arguments of function " + getName(), + ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT}; } - else - { - MATCH_EXECUTE(false) - } -#undef MATCH_EXECUTE } ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t) const override { + DataTypePtr key_type; + size_t row_count; const DataTypeTuple * tup_type = checkAndGetDataType((arguments[0]).type.get()); - const DataTypeArray * key_array_type = checkAndGetDataType(tup_type->getElements()[0].get()); - const DataTypeArray * val_array_type = checkAndGetDataType(tup_type->getElements()[1].get()); - - /* determine output type */ - const DataTypeTuple & res_type - = DataTypeTuple(DataTypes{std::make_shared(key_array_type->getNestedType()), - std::make_shared(val_array_type->getNestedType()->promoteNumericType())}); - + DataTypePtr res_type; + DataTypePtr res_value_type; TupleMaps args{}; args.reserve(arguments.size()); //prepare columns, extract data columns for direct access and put them to the vector - for (const auto & col : arguments) + if (tup_type) { - const ColumnTuple * tup; - bool is_const = isColumnConst(*col.column); - if (is_const) + const DataTypeArray * key_array_type = checkAndGetDataType(tup_type->getElements()[0].get()); + const DataTypeArray * val_array_type = checkAndGetDataType(tup_type->getElements()[1].get()); + + /* determine output type */ + res_value_type = val_array_type->getNestedType()->promoteNumericType(); + res_type = std::make_shared(DataTypes{ + std::make_shared(key_array_type->getNestedType()), std::make_shared(res_value_type)}); + + for (const auto & col : arguments) { - const auto * c = assert_cast(col.column.get()); - tup = assert_cast(c->getDataColumnPtr().get()); + const ColumnTuple * tup; + bool is_const = isColumnConst(*col.column); + if (is_const) + { + const auto * c = assert_cast(col.column.get()); + tup = assert_cast(c->getDataColumnPtr().get()); + } + else + tup = assert_cast(col.column.get()); + + const auto & arr1 = assert_cast(tup->getColumn(0)); + const auto & arr2 = assert_cast(tup->getColumn(1)); + + const auto & key_offsets = arr1.getOffsets(); + const auto & key_column = arr1.getDataPtr(); + + const auto & val_offsets = arr2.getOffsets(); + const auto & val_column = arr2.getDataPtr(); + + args.push_back({key_column, val_column, key_offsets, val_offsets, is_const}); + } + + key_type = key_array_type->getNestedType(); + } + else + { + const DataTypeMap * map_type = checkAndGetDataType((arguments[0]).type.get()); + if (map_type) + { + key_type = map_type->getKeyType(); + res_value_type = map_type->getValueType()->promoteNumericType(); + res_type = std::make_shared(DataTypes{map_type->getKeyType(), res_value_type}); + + for (const auto & col : arguments) + { + const ColumnMap * map; + bool is_const = isColumnConst(*col.column); + if (is_const) + { + const auto * c = assert_cast(col.column.get()); + map = 
assert_cast(c->getDataColumnPtr().get()); + } + else + map = assert_cast(col.column.get()); + + const auto & map_arr = map->getNestedColumn(); + const auto & key_offsets = map_arr.getOffsets(); + const auto & val_offsets = key_offsets; + + const auto & map_tup = map->getNestedData(); + const auto & key_column = map_tup.getColumnPtr(0); + const auto & val_column = map_tup.getColumnPtr(1); + + args.push_back({key_column, val_column, key_offsets, val_offsets, is_const}); + } } else - tup = assert_cast(col.column.get()); + throw Exception{ + "Illegal column type " + key_type->getName() + " in arguments of function " + getName(), + ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT}; + } - const auto & arr1 = assert_cast(tup->getColumn(0)); - const auto & arr2 = assert_cast(tup->getColumn(1)); - - const auto & key_offsets = arr1.getOffsets(); - const auto & key_column = arr1.getData(); - - const auto & val_offsets = arr2.getOffsets(); - const auto & val_column = arr2.getData(); - - // we can check const columns before any processing - if (is_const) + // we can check const columns before any processing + for (auto & arg : args) + { + if (arg.is_const) { - if (val_offsets[0] != key_offsets[0]) + if (arg.val_offsets[0] != arg.key_offsets[0]) throw Exception( "Key and value array should have same amount of elements", ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH); } - - args.push_back({key_column, val_column, key_offsets, val_offsets, is_const}); } - size_t row_count = arguments[0].column->size(); - auto key_type_id = key_array_type->getNestedType()->getTypeId(); - - switch (key_type_id) + row_count = arguments[0].column->size(); + switch (key_type->getTypeId()) { case TypeIndex::Enum8: case TypeIndex::Int8: - return execute1(row_count, res_type, args); + return execute1(row_count, res_type, res_value_type, args); case TypeIndex::Enum16: case TypeIndex::Int16: - return execute1(row_count, res_type, args); + return execute1(row_count, res_type, res_value_type, args); case TypeIndex::Int32: - return execute1(row_count, res_type, args); + return execute1(row_count, res_type, res_value_type, args); case TypeIndex::Int64: - return execute1(row_count, res_type, args); + return execute1(row_count, res_type, res_value_type, args); case TypeIndex::Int128: - return execute1(row_count, res_type, args); + return execute1(row_count, res_type, res_value_type, args); case TypeIndex::Int256: - return execute1(row_count, res_type, args); + return execute1(row_count, res_type, res_value_type, args); case TypeIndex::UInt8: - return execute1(row_count, res_type, args); + return execute1(row_count, res_type, res_value_type, args); case TypeIndex::Date: case TypeIndex::UInt16: - return execute1(row_count, res_type, args); + return execute1(row_count, res_type, res_value_type, args); case TypeIndex::DateTime: case TypeIndex::UInt32: - return execute1(row_count, res_type, args); + return execute1(row_count, res_type, res_value_type, args); case TypeIndex::UInt64: - return execute1(row_count, res_type, args); + return execute1(row_count, res_type, res_value_type, args); case TypeIndex::UInt128: - return execute1(row_count, res_type, args); + return execute1(row_count, res_type, res_value_type, args); case TypeIndex::UInt256: - return execute1(row_count, res_type, args); + return execute1(row_count, res_type, res_value_type, args); case TypeIndex::UUID: - return execute1(row_count, res_type, args); + return execute1(row_count, res_type, res_value_type, args); case TypeIndex::FixedString: case TypeIndex::String: - return 
execute1(row_count, res_type, args); + return execute1(row_count, res_type, res_value_type, args); default: - throw Exception{"Illegal columns in arguments of function " + getName(), ErrorCodes::ILLEGAL_COLUMN}; + throw Exception{ + "Illegal column type " + key_type->getName() + " for keys in arguments of function " + getName(), + ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT}; } } }; diff --git a/tests/queries/0_stateless/01318_map_add_map_subtract.sql b/tests/queries/0_stateless/01318_map_add_map_subtract.sql index 40c08e0a147..6ead7a2db46 100644 --- a/tests/queries/0_stateless/01318_map_add_map_subtract.sql +++ b/tests/queries/0_stateless/01318_map_add_map_subtract.sql @@ -2,7 +2,7 @@ drop table if exists map_test; create table map_test engine=TinyLog() as (select ([1, number], [toInt32(2),2]) as map from numbers(1, 10)); -- mapAdd -select mapAdd([1], [1]); -- { serverError 42 } +select mapAdd([1], [1]); -- { serverError 43 } select mapAdd(([1], [1])); -- { serverError 42 } select mapAdd(([1], [1]), map) from map_test; -- { serverError 43 } select mapAdd(([toUInt64(1)], [1]), map) from map_test; -- { serverError 43 } @@ -27,7 +27,7 @@ select mapAdd(([toInt64(1), 2], [toInt64(1), 1]), ([toInt64(1), 2], [toInt64(1), select mapAdd(([1, 2], [toFloat32(1.1), 1]), ([1, 2], [2.2, 1])) as res, toTypeName(res); select mapAdd(([1, 2], [toFloat64(1.1), 1]), ([1, 2], [2.2, 1])) as res, toTypeName(res); -select mapAdd(([toFloat32(1), 2], [toFloat64(1.1), 1]), ([toFloat32(1), 2], [2.2, 1])) as res, toTypeName(res); -- { serverError 44 } +select mapAdd(([toFloat32(1), 2], [toFloat64(1.1), 1]), ([toFloat32(1), 2], [2.2, 1])) as res, toTypeName(res); -- { serverError 43 } select mapAdd(([1, 2], [toFloat64(1.1), 1]), ([1, 2], [1, 1])) as res, toTypeName(res); -- { serverError 43 } select mapAdd((['a', 'b'], [1, 1]), ([key], [1])) from values('key String', ('b'), ('c'), ('d')); select mapAdd((cast(['a', 'b'], 'Array(FixedString(1))'), [1, 1]), ([key], [1])) as res, toTypeName(res) from values('key FixedString(1)', ('b'), ('c'), ('d')); diff --git a/tests/queries/0_stateless/01318_map_add_map_subtract_on_map_type.reference b/tests/queries/0_stateless/01318_map_add_map_subtract_on_map_type.reference new file mode 100644 index 00000000000..96bafc2c79c --- /dev/null +++ b/tests/queries/0_stateless/01318_map_add_map_subtract_on_map_type.reference @@ -0,0 +1,55 @@ +{1:5} +{1:3,2:2} +{1:3,3:2} +{1:3,4:2} +{1:3,5:2} +{1:3,6:2} +{1:3,7:2} +{1:3,8:2} +{1:3,9:2} +{1:3,10:2} +{1:5,2:2} +{1:3,2:4} +{1:3,2:2,3:2} +{1:3,2:2,4:2} +{1:3,2:2,5:2} +{1:3,2:2,6:2} +{1:3,2:2,7:2} +{1:3,2:2,8:2} +{1:3,2:2,9:2} +{1:3,2:2,10:2} +{1:2,2:2} Map(UInt8,UInt64) +{1:2,2:2} Map(UInt16,UInt64) +{1:2,2:2} Map(UInt32,UInt64) +{1:2,2:2} Map(UInt64,UInt64) +{1:2,2:2} Map(UInt128,UInt128) +{1:2,2:2} Map(UInt256,UInt256) +{1:2,2:2} Map(Int16,UInt64) +{1:2,2:2} Map(Int16,Int64) +{1:2,2:2} Map(Int32,Int64) +{1:2,2:2} Map(Int64,Int64) +{1:2,2:2} Map(Int128,Int128) +{1:2,2:2} Map(Int256,Int256) +{1:3.300000023841858,2:2} Map(UInt8,Float64) +{1:3.3000000000000003,2:2} Map(UInt8,Float64) +{'a':1,'b':2} +{'a':1,'b':1,'c':1} +{'a':1,'b':1,'d':1} +{'a':1,'b':2} Map(String,UInt64) +{'a':1,'b':1,'c':1} Map(String,UInt64) +{'a':1,'b':1,'d':1} Map(String,UInt64) +{'a':1,'b':2} +{'a':1,'b':1,'c':1} +{'a':1,'b':1,'d':1} +{'a':2} Map(Enum16(\'a\' = 1, \'b\' = 2),Int64) +{'b':2} Map(Enum16(\'a\' = 1, \'b\' = 2),Int64) +{'a':2} Map(Enum8(\'a\' = 1, \'b\' = 2),Int64) +{'b':2} Map(Enum8(\'a\' = 1, \'b\' = 2),Int64) +{'00000000-89ab-cdef-0123-456789abcdef':2} 
Map(UUID,Int64) +{'11111111-89ab-cdef-0123-456789abcdef':4} Map(UUID,Int64) +{1:0,2:0} Map(UInt8,UInt64) +{1:18446744073709551615,2:18446744073709551615} Map(UInt8,UInt64) +{1:-1,2:-1} Map(UInt8,Int64) +{1:-1.0999999761581423,2:0} Map(UInt8,Float64) +{1:-1,2:-1} Map(UInt8,Int64) +{1:-2,2:-2,3:1} Map(UInt8,Int64) diff --git a/tests/queries/0_stateless/01318_map_add_map_subtract_on_map_type.sql b/tests/queries/0_stateless/01318_map_add_map_subtract_on_map_type.sql new file mode 100644 index 00000000000..9f0f1cb0489 --- /dev/null +++ b/tests/queries/0_stateless/01318_map_add_map_subtract_on_map_type.sql @@ -0,0 +1,46 @@ +drop table if exists mapop_test; +set allow_experimental_map_type = 1; +create table mapop_test engine=TinyLog() as (select map(1, toInt32(2), number, 2) as m from numbers(1, 10)); + +-- mapAdd +select mapAdd(map(1, 1)); -- { serverError 42 } +select mapAdd(map(1, 1), m) from mapop_test; -- { serverError 43 } + +select mapAdd(map(toUInt64(1), toInt32(1)), m) from mapop_test; +select mapAdd(cast(m, 'Map(UInt8, UInt8)'), map(1, 1), map(2,2)) from mapop_test; + +-- cleanup +drop table mapop_test; + +-- check types +select mapAdd(map(toUInt8(1), 1, 2, 1), map(toUInt8(1), 1, 2, 1)) as res, toTypeName(res); +select mapAdd(map(toUInt16(1), toUInt16(1), 2, 1), map(toUInt16(1), toUInt16(1), 2, 1)) as res, toTypeName(res); +select mapAdd(map(toUInt32(1), toUInt32(1), 2, 1), map(toUInt32(1), toUInt32(1), 2, 1)) as res, toTypeName(res); +select mapAdd(map(toUInt64(1), toUInt64(1), 2, 1), map(toUInt64(1), toUInt64(1), 2, 1)) as res, toTypeName(res); +select mapAdd(map(toUInt128(1), toUInt128(1), 2, 1), map(toUInt128(1), toUInt128(1), 2, 1)) as res, toTypeName(res); +select mapAdd(map(toUInt256(1), toUInt256(1), 2, 1), map(toUInt256(1), toUInt256(1), 2, 1)) as res, toTypeName(res); + +select mapAdd(map(toInt8(1), 1, 2, 1), map(toInt8(1), 1, 2, 1)) as res, toTypeName(res); +select mapAdd(map(toInt16(1), toInt16(1), 2, 1), map(toInt16(1), toInt16(1), 2, 1)) as res, toTypeName(res); +select mapAdd(map(toInt32(1), toInt32(1), 2, 1), map(toInt32(1), toInt32(1), 2, 1)) as res, toTypeName(res); +select mapAdd(map(toInt64(1), toInt64(1), 2, 1), map(toInt64(1), toInt64(1), 2, 1)) as res, toTypeName(res); +select mapAdd(map(toInt128(1), toInt128(1), 2, 1), map(toInt128(1), toInt128(1), 2, 1)) as res, toTypeName(res); +select mapAdd(map(toInt256(1), toInt256(1), 2, 1), map(toInt256(1), toInt256(1), 2, 1)) as res, toTypeName(res); + +select mapAdd(map(1, toFloat32(1.1), 2, 1), map(1, 2.2, 2, 1)) as res, toTypeName(res); +select mapAdd(map(1, toFloat64(1.1), 2, 1), map(1, 2.2, 2, 1)) as res, toTypeName(res); +select mapAdd(map(1, toFloat64(1.1), 2, 1), map(1, 1, 2, 1)) as res, toTypeName(res); -- { serverError 43 } +select mapAdd(map('a', 1, 'b', 1), map(key, 1)) from values('key String', ('b'), ('c'), ('d')); +select mapAdd(map(cast('a', 'FixedString(1)'), 1, 'b', 1), map(key, 1)) as res, toTypeName(res) from values('key String', ('b'), ('c'), ('d')); +select mapAdd(map(cast('a', 'LowCardinality(String)'), 1, 'b', 1), map(key, 1)) from values('key String', ('b'), ('c'), ('d')); +select mapAdd(map(key, val), map(key, val)) as res, toTypeName(res) from values ('key Enum16(\'a\'=1, \'b\'=2), val Int16', ('a', 1), ('b', 1)); +select mapAdd(map(key, val), map(key, val)) as res, toTypeName(res) from values ('key Enum8(\'a\'=1, \'b\'=2), val Int16', ('a', 1), ('b', 1)); +select mapAdd(map(key, val), map(key, val)) as res, toTypeName(res) from values ('key UUID, val Int32', 
('00000000-89ab-cdef-0123-456789abcdef', 1), ('11111111-89ab-cdef-0123-456789abcdef', 2)); + +-- mapSubtract, same rules as mapAdd +select mapSubtract(map(toUInt8(1), 1, 2, 1), map(toUInt8(1), 1, 2, 1)) as res, toTypeName(res); +select mapSubtract(map(toUInt8(1), 1, 2, 1), map(toUInt8(1), 2, 2, 2)) as res, toTypeName(res); -- overflow +select mapSubtract(map(toUInt8(1), toInt32(1), 2, 1), map(toUInt8(1), toInt16(2), 2, 2)) as res, toTypeName(res); +select mapSubtract(map(1, toFloat32(1.1), 2, 1), map(1, 2.2, 2, 1)) as res, toTypeName(res); +select mapSubtract(map(toUInt8(1), toInt32(1), 2, 1), map(toUInt8(1), toInt16(2), 2, 2)) as res, toTypeName(res); +select mapSubtract(map(toUInt8(3), toInt32(1)), map(toUInt8(1), toInt32(2), 2, 2)) as res, toTypeName(res); From fb8374811a72beb6873c2efc33084ced6aba4df0 Mon Sep 17 00:00:00 2001 From: Pavel Kruglov Date: Tue, 22 Jun 2021 18:03:08 +0300 Subject: [PATCH 271/931] Add new line at the end of files --- tests/queries/0_stateless/01273_arrow.sh | 1 + tests/queries/0_stateless/01273_arrow_decimal.sh | 1 + 2 files changed, 2 insertions(+) diff --git a/tests/queries/0_stateless/01273_arrow.sh b/tests/queries/0_stateless/01273_arrow.sh index 554d75700a6..ad8a6f0fdb9 100755 --- a/tests/queries/0_stateless/01273_arrow.sh +++ b/tests/queries/0_stateless/01273_arrow.sh @@ -102,3 +102,4 @@ ${CLICKHOUSE_CLIENT} --query="DROP TABLE arrow_types1" ${CLICKHOUSE_CLIENT} --query="DROP TABLE arrow_types2" ${CLICKHOUSE_CLIENT} --query="DROP TABLE arrow_types3" ${CLICKHOUSE_CLIENT} --query="DROP TABLE arrow_types4" + diff --git a/tests/queries/0_stateless/01273_arrow_decimal.sh b/tests/queries/0_stateless/01273_arrow_decimal.sh index 296df040e7a..22496035ea9 100755 --- a/tests/queries/0_stateless/01273_arrow_decimal.sh +++ b/tests/queries/0_stateless/01273_arrow_decimal.sh @@ -12,3 +12,4 @@ ${CLICKHOUSE_CLIENT} --query="INSERT INTO TABLE arrow_decimal VALUES (0.123, 0.1 ${CLICKHOUSE_CLIENT} --query="SELECT * FROM arrow_decimal FORMAT Arrow" | ${CLICKHOUSE_CLIENT} --query="INSERT INTO arrow_decimal FORMAT Arrow" ${CLICKHOUSE_CLIENT} --query="SELECT * FROM arrow_decimal" ${CLICKHOUSE_CLIENT} --query="DROP TABLE arrow_decimal" + From 8091bd382de6eae0090d0fbd962d0843cea3450b Mon Sep 17 00:00:00 2001 From: Dmitriy Date: Tue, 22 Jun 2021 19:15:19 +0300 Subject: [PATCH 272/931] The small fix MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Внес небольшую поправку. --- .../external-dictionaries/external-dicts-dict-sources.md | 2 +- .../external-dictionaries/external-dicts-dict-sources.md | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/en/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md b/docs/en/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md index ebc04d01de3..5111f72ea80 100644 --- a/docs/en/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md +++ b/docs/en/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md @@ -525,7 +525,7 @@ Setting fields: - `invalidate_query` – Query for checking the dictionary status. Optional parameter. Read more in the section [Updating dictionaries](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-lifetime.md). -- `fail_on_connection_loss` – The configuration parameter that controls unexpected connection loss before the query is executed. If `true`, an exception is thrown immediately if the connection between client and server was lost. 
If `false`, the ClickHouse server retries to execute the query three times before throwing an exception. Note that retrying leads to increased response times. Default value: `false`. +- `fail_on_connection_loss` – The configuration parameter that controls behavior of the server on connection loss. If `true`, an exception is thrown immediately if the connection between client and server was lost. If `false`, the ClickHouse server retries to execute the query three times before throwing an exception. Note that retrying leads to increased response times. Default value: `false`. MySQL can be connected on a local host via sockets. To do this, set `host` and `socket`. diff --git a/docs/ru/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md b/docs/ru/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md index ac371cc2c16..5e34aaa10e3 100644 --- a/docs/ru/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md +++ b/docs/ru/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md @@ -494,7 +494,7 @@ SOURCE(MYSQL( - `invalidate_query` — запрос для проверки статуса словаря. Необязательный параметр. Читайте подробнее в разделе [Обновление словарей](external-dicts-dict-lifetime.md). -- `fail_on_connection_loss` – параметр конфигурации, контролирующий поведение сервера при потере соединения перед выполнением запроса. Если значение `true`, то исключение генерируется сразу же, если соединение между клиентом и сервером было потеряно. Если значение `false`, то сервер повторно попытается выполнить запрос три раза прежде чем сгенерировать исключение. Имейте в виду, что повторные попытки могут увеличить время выполнения запроса. Значение по умолчанию: `false`. +- `fail_on_connection_loss` – параметр конфигурации, контролирующий поведение сервера при потере соединения. Если значение `true`, то исключение генерируется сразу же, если соединение между клиентом и сервером было потеряно. Если значение `false`, то сервер повторно попытается выполнить запрос три раза прежде чем сгенерировать исключение. Имейте в виду, что повторные попытки могут увеличить время выполнения запроса. Значение по умолчанию: `false`. MySQL можно подключить на локальном хосте через сокеты, для этого необходимо задать `host` и `socket`. From e0f330331bbc4423c682d6c6b5665dd5b7c513c6 Mon Sep 17 00:00:00 2001 From: kssenii Date: Tue, 22 Jun 2021 18:56:47 +0300 Subject: [PATCH 273/931] Fix exception --- programs/client/Client.cpp | 13 ++++++++++++- .../queries/0_stateless/01921_test_progress_bar.py | 3 +-- 2 files changed, 13 insertions(+), 3 deletions(-) diff --git a/programs/client/Client.cpp b/programs/client/Client.cpp index c6748b16723..a3419003e2b 100644 --- a/programs/client/Client.cpp +++ b/programs/client/Client.cpp @@ -577,7 +577,18 @@ private: } if (!history_file.empty() && !fs::exists(history_file)) - FS::createFile(history_file); + { + /// Avoid TOCTOU issue. 
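+        /// (The fs::exists() check above and the createFile() call below are not
+        /// atomic: another process may create the file in between, which is why
+        /// EEXIST is tolerated by the catch block below.)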
+ try + { + FS::createFile(history_file); + } + catch (const ErrnoException & e) + { + if (e.getErrno() != EEXIST) + throw; + } + } LineReader::Patterns query_extenders = {"\\"}; LineReader::Patterns query_delimiters = {";", "\\G"}; diff --git a/tests/queries/0_stateless/01921_test_progress_bar.py b/tests/queries/0_stateless/01921_test_progress_bar.py index 8e917d4cc46..a95d5994607 100755 --- a/tests/queries/0_stateless/01921_test_progress_bar.py +++ b/tests/queries/0_stateless/01921_test_progress_bar.py @@ -12,9 +12,8 @@ log = None # uncomment the line below for debugging #log=sys.stdout -with client(name='client1>', log=log) as client1, client(name='client2>', log=log) as client2: +with client(name='client1>', log=log) as client1: client1.expect(prompt) client1.send('SELECT number FROM numbers(100) FORMAT Null') client1.expect('Progress: 100\.00 rows, 800\.00 B.*' + end_of_block) - # 0 rows becuase Format Null. client1.expect('0 rows in set. Elapsed: [\\w]{1}\.[\\w]{3} sec.' + end_of_block) From 9d084510c78ab47e8abc5481729457d559847017 Mon Sep 17 00:00:00 2001 From: sevirov <72220289+sevirov@users.noreply.github.com> Date: Tue, 22 Jun 2021 19:24:58 +0300 Subject: [PATCH 274/931] Update docs/en/sql-reference/table-functions/s3Cluster.md Co-authored-by: olgarev <56617294+olgarev@users.noreply.github.com> --- docs/en/sql-reference/table-functions/s3Cluster.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/en/sql-reference/table-functions/s3Cluster.md b/docs/en/sql-reference/table-functions/s3Cluster.md index 4bde49b8cc0..c79d0d3dba2 100644 --- a/docs/en/sql-reference/table-functions/s3Cluster.md +++ b/docs/en/sql-reference/table-functions/s3Cluster.md @@ -3,7 +3,7 @@ toc_priority: 55 toc_title: s3Cluster --- -# S3Cluster Table Function {#s3Cluster-table-function} +# s3Cluster Table Function {#s3Cluster-table-function} Allows processing files from [Amazon S3](https://aws.amazon.com/s3/) in parallel from many nodes in a specified cluster. On initiator, it creates a connection to all nodes in the cluster, discloses asterics in S3 file path, and dispatch each file dynamically. On the worker node, it asks the initiator about the next task to process, processes it. This is repeated until the tasks are finished. From 30d730ab5061e987eed4b18453db09ad891f2b99 Mon Sep 17 00:00:00 2001 From: sevirov <72220289+sevirov@users.noreply.github.com> Date: Tue, 22 Jun 2021 19:27:23 +0300 Subject: [PATCH 275/931] Update docs/en/sql-reference/table-functions/s3Cluster.md Co-authored-by: olgarev <56617294+olgarev@users.noreply.github.com> --- docs/en/sql-reference/table-functions/s3Cluster.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/en/sql-reference/table-functions/s3Cluster.md b/docs/en/sql-reference/table-functions/s3Cluster.md index c79d0d3dba2..07e053cf962 100644 --- a/docs/en/sql-reference/table-functions/s3Cluster.md +++ b/docs/en/sql-reference/table-functions/s3Cluster.md @@ -5,7 +5,7 @@ toc_title: s3Cluster # s3Cluster Table Function {#s3Cluster-table-function} -Allows processing files from [Amazon S3](https://aws.amazon.com/s3/) in parallel from many nodes in a specified cluster. On initiator, it creates a connection to all nodes in the cluster, discloses asterics in S3 file path, and dispatch each file dynamically. On the worker node, it asks the initiator about the next task to process, processes it. This is repeated until the tasks are finished. 
+Allows processing files from [Amazon S3](https://aws.amazon.com/s3/) in parallel from many nodes in a specified cluster. On the initiator it creates a connection to all nodes in the cluster, expands asterisks in the S3 file path, and dispatches each file dynamically. On the worker node it asks the initiator about the next task to process and processes it. This is repeated until all tasks are finished.

**Syntax**

From 5edc97cd93feb06f78e12d322e18e56813a5015c Mon Sep 17 00:00:00 2001
From: sevirov <72220289+sevirov@users.noreply.github.com>
Date: Tue, 22 Jun 2021 19:28:25 +0300
Subject: [PATCH 276/931] Update docs/en/sql-reference/table-functions/s3Cluster.md

Co-authored-by: olgarev <56617294+olgarev@users.noreply.github.com>
---
 docs/en/sql-reference/table-functions/s3Cluster.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/docs/en/sql-reference/table-functions/s3Cluster.md b/docs/en/sql-reference/table-functions/s3Cluster.md
index 07e053cf962..f16fdf053c9 100644
--- a/docs/en/sql-reference/table-functions/s3Cluster.md
+++ b/docs/en/sql-reference/table-functions/s3Cluster.md
@@ -16,7 +16,7 @@ s3Cluster(cluster_name, source, [access_key_id, secret_access_key,] format, stru
 **Arguments**

 - `cluster_name` — Name of a cluster that is used to build a set of addresses and connection parameters to remote and local servers.
-- `source` — Bucket url with path to file. Supports following wildcards in readonly mode: `*`, `?`, `{abc,def}` and `{N..M}` where `N`, `M` — numbers, `'abc'`, `'def'` — strings. For more information see [here](../../engines/table-engines/integrations/s3.md#wildcards-in-path).
+- `source` — Bucket url with path to file. Supports following wildcards in readonly mode: `*`, `?`, `{abc,def}` and `{N..M}` where `N`, `M` — numbers, `abc`, `def` — strings. For more information see [Wildcards In Path](../../engines/table-engines/integrations/s3.md#wildcards-in-path).
 - `format` — The [format](../../interfaces/formats.md#formats) of the file.
 - `structure` — Structure of the table. Format `'column1_name column1_type, column2_name column2_type, ...'`.
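A quick sketch of the `{N..M}` wildcard syntax documented in the `source` argument above; the cluster name and credentials follow the MinIO examples used in these docs, while the file names themselves are hypothetical:

``` sql
-- The three matched files are dispatched dynamically across the nodes of the cluster.
SELECT count(*)
FROM s3Cluster(
    'cluster_simple',
    'http://minio1:9001/root/data/data-{1..3}.csv',
    'minio', 'minio123',
    'CSV', 'name String, value UInt32');
```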
From 39fef21d673fdb059ac4cb0ba87380dc21623cfd Mon Sep 17 00:00:00 2001 From: sevirov <72220289+sevirov@users.noreply.github.com> Date: Tue, 22 Jun 2021 19:28:36 +0300 Subject: [PATCH 277/931] Update docs/en/sql-reference/table-functions/s3Cluster.md Co-authored-by: olgarev <56617294+olgarev@users.noreply.github.com> --- docs/en/sql-reference/table-functions/s3Cluster.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/en/sql-reference/table-functions/s3Cluster.md b/docs/en/sql-reference/table-functions/s3Cluster.md index f16fdf053c9..51a55028ddc 100644 --- a/docs/en/sql-reference/table-functions/s3Cluster.md +++ b/docs/en/sql-reference/table-functions/s3Cluster.md @@ -29,7 +29,7 @@ A table with the specified structure for reading or writing data in the specifie Selecting the data from all files in the cluster `cluster_simple`: ``` sql -SELECT * from s3Cluster('cluster_simple', 'http://minio1:9001/root/data/{clickhouse,database}/*', 'minio', 'minio123', 'CSV', 'name String, value UInt32, polygon Array(Array(Tuple(Float64, Float64)))') ORDER BY (name, value, polygon); +SELECT * FROM s3Cluster('cluster_simple', 'http://minio1:9001/root/data/{clickhouse,database}/*', 'minio', 'minio123', 'CSV', 'name String, value UInt32, polygon Array(Array(Tuple(Float64, Float64)))') ORDER BY (name, value, polygon); ``` Count the total amount of rows in all files in the cluster `cluster_simple`: From a870dec0ef5668f29bd82c0a43bde0d7a74a9785 Mon Sep 17 00:00:00 2001 From: sevirov <72220289+sevirov@users.noreply.github.com> Date: Tue, 22 Jun 2021 19:28:44 +0300 Subject: [PATCH 278/931] Update docs/en/sql-reference/table-functions/s3Cluster.md Co-authored-by: olgarev <56617294+olgarev@users.noreply.github.com> --- docs/en/sql-reference/table-functions/s3Cluster.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/en/sql-reference/table-functions/s3Cluster.md b/docs/en/sql-reference/table-functions/s3Cluster.md index 51a55028ddc..f8ebd93c8a7 100644 --- a/docs/en/sql-reference/table-functions/s3Cluster.md +++ b/docs/en/sql-reference/table-functions/s3Cluster.md @@ -26,7 +26,7 @@ A table with the specified structure for reading or writing data in the specifie **Examples** -Selecting the data from all files in the cluster `cluster_simple`: +Select the data from all files in the cluster `cluster_simple`: ``` sql SELECT * FROM s3Cluster('cluster_simple', 'http://minio1:9001/root/data/{clickhouse,database}/*', 'minio', 'minio123', 'CSV', 'name String, value UInt32, polygon Array(Array(Tuple(Float64, Float64)))') ORDER BY (name, value, polygon); From 1230efb03bfc18584ebe42adb64de792dc1b4533 Mon Sep 17 00:00:00 2001 From: sevirov <72220289+sevirov@users.noreply.github.com> Date: Tue, 22 Jun 2021 19:28:53 +0300 Subject: [PATCH 279/931] Update docs/en/sql-reference/table-functions/s3Cluster.md Co-authored-by: olgarev <56617294+olgarev@users.noreply.github.com> --- docs/en/sql-reference/table-functions/s3Cluster.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/en/sql-reference/table-functions/s3Cluster.md b/docs/en/sql-reference/table-functions/s3Cluster.md index f8ebd93c8a7..4ef797c9734 100644 --- a/docs/en/sql-reference/table-functions/s3Cluster.md +++ b/docs/en/sql-reference/table-functions/s3Cluster.md @@ -35,7 +35,7 @@ SELECT * FROM s3Cluster('cluster_simple', 'http://minio1:9001/root/data/{clickho Count the total amount of rows in all files in the cluster `cluster_simple`: ``` sql -SELECT count(*) from s3Cluster('cluster_simple', 
'http://minio1:9001/root/data/{clickhouse,database}/*', 'minio', 'minio123', 'CSV', 'name String, value UInt32, polygon Array(Array(Tuple(Float64, Float64)))'); +SELECT count(*) FROM s3Cluster('cluster_simple', 'http://minio1:9001/root/data/{clickhouse,database}/*', 'minio', 'minio123', 'CSV', 'name String, value UInt32, polygon Array(Array(Tuple(Float64, Float64)))'); ``` !!! warning "Warning" From d172509c7792a742241f149c10e5a2bf40b8ba05 Mon Sep 17 00:00:00 2001 From: sevirov <72220289+sevirov@users.noreply.github.com> Date: Tue, 22 Jun 2021 19:29:07 +0300 Subject: [PATCH 280/931] Update docs/en/sql-reference/table-functions/s3Cluster.md Co-authored-by: olgarev <56617294+olgarev@users.noreply.github.com> --- docs/en/sql-reference/table-functions/s3Cluster.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/en/sql-reference/table-functions/s3Cluster.md b/docs/en/sql-reference/table-functions/s3Cluster.md index 4ef797c9734..b5901a94974 100644 --- a/docs/en/sql-reference/table-functions/s3Cluster.md +++ b/docs/en/sql-reference/table-functions/s3Cluster.md @@ -44,4 +44,4 @@ SELECT count(*) FROM s3Cluster('cluster_simple', 'http://minio1:9001/root/data/{ **See Also** - [S3 engine](../../engines/table-engines/integrations/s3.md) -- [S3 table function](../../sql-reference/table-functions/s3.md) +- [s3 table function](../../sql-reference/table-functions/s3.md) From e0199d8006a3c1510eafb4dc707096948af3e04f Mon Sep 17 00:00:00 2001 From: Anton Ivashkin Date: Tue, 22 Jun 2021 20:05:20 +0300 Subject: [PATCH 281/931] Fix DiskS3 restore --- src/Disks/S3/DiskS3.cpp | 1 + 1 file changed, 1 insertion(+) diff --git a/src/Disks/S3/DiskS3.cpp b/src/Disks/S3/DiskS3.cpp index 1a50f6de74b..e52a19de99a 100644 --- a/src/Disks/S3/DiskS3.cpp +++ b/src/Disks/S3/DiskS3.cpp @@ -883,6 +883,7 @@ void DiskS3::restoreFileOperations(const RestoreInformation & restore_informatio to_path /= from_path.parent_path().filename(); else to_path /= from_path.filename(); + fs::create_directories(to_path); fs::copy(from_path, to_path, fs::copy_options::recursive | fs::copy_options::overwrite_existing); fs::remove_all(from_path); } From a1e65ae260d529269d48eb46e8a974c7d171aa1e Mon Sep 17 00:00:00 2001 From: Dmitriy Date: Tue, 22 Jun 2021 21:50:51 +0300 Subject: [PATCH 282/931] Translate to Russian MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Выполнил перевод на русский язык. --- .../table-functions/s3Cluster.md | 1 + .../table-functions/s3Cluster.md | 48 +++++++++++++++++++ 2 files changed, 49 insertions(+) create mode 100644 docs/ru/sql-reference/table-functions/s3Cluster.md diff --git a/docs/en/sql-reference/table-functions/s3Cluster.md b/docs/en/sql-reference/table-functions/s3Cluster.md index b5901a94974..9e2291a346d 100644 --- a/docs/en/sql-reference/table-functions/s3Cluster.md +++ b/docs/en/sql-reference/table-functions/s3Cluster.md @@ -17,6 +17,7 @@ s3Cluster(cluster_name, source, [access_key_id, secret_access_key,] format, stru - `cluster_name` — Name of a cluster that is used to build a set of addresses and connection parameters to remote and local servers. - `source` — Bucket url with path to file. Supports following wildcards in readonly mode: `*`, `?`, `{abc,def}` and `{N..M}` where `N`, `M` — numbers, `abc`, `def` — strings. For more information see [Wildcards In Path](../../engines/table-engines/integrations/s3.md#wildcards-in-path). +- `access_key_id` and `secret_access_key` — Keys that specify credentials to use with given endpoint. Optional. 
- `format` — The [format](../../interfaces/formats.md#formats) of the file. - `structure` — Structure of the table. Format `'column1_name column1_type, column2_name column2_type, ...'`. diff --git a/docs/ru/sql-reference/table-functions/s3Cluster.md b/docs/ru/sql-reference/table-functions/s3Cluster.md new file mode 100644 index 00000000000..0f3c8f68c9c --- /dev/null +++ b/docs/ru/sql-reference/table-functions/s3Cluster.md @@ -0,0 +1,48 @@ +--- +toc_priority: 55 +toc_title: s3Cluster +--- + +# Табличная Функция s3Cluster {#s3Cluster-table-function} + +Позволяет обрабатывать файлы из [Amazon S3](https://aws.amazon.com/s3/) параллельно из многих узлов в указанном кластере. На узле-инициаторе функция создает соединение со всеми узлами в кластере, раскрывает звездочки в пути к файлу S3 и динамически отправляет каждый файл. На рабочем узле функция запрашивает у инициатора следующую задачу для обработки и обрабатывает ее. Это повторяется до тех пор, пока все задачи не будут завершены. + +**Синтаксис** + +``` sql +s3Cluster(cluster_name, source, [access_key_id, secret_access_key,] format, structure) +``` + +**Аргументы** + +- `cluster_name` — имя кластера, используемое для создания набора адресов и параметров подключения к удаленным и локальным серверам. +- `source` — URL-адрес бакета с указанием пути к файлу. Поддерживает следующие подстановочные символы в режиме "только чтение": `*`, `?`, `{abc,def}` и `{N..M}`, где `N`, `M` — числа, `abc`, `def` — строки. Подробнее смотрите в разделе [Символы подстановки](../../engines/table-engines/integrations/s3.md#wildcards-in-path). +- `access_key_id` и `secret_access_key` — ключи, указывающие на учетные данные для использования с точкой приема запроса. Необязательные параметры. +- `format` — [формат](../../interfaces/formats.md#formats) файла. +- `structure` — структура таблицы. Формат `'column1_name column1_type, column2_name column2_type, ...'`. + +**Возвращаемое значение** + +Таблица с указанной структурой для чтения или записи данных в указанный файл. + +**Примеры** + +Выведем данные из всех файлов кластера `cluster_simple`: + +``` sql +SELECT * FROM s3Cluster('cluster_simple', 'http://minio1:9001/root/data/{clickhouse,database}/*', 'minio', 'minio123', 'CSV', 'name String, value UInt32, polygon Array(Array(Tuple(Float64, Float64)))') ORDER BY (name, value, polygon); +``` + +Подсчитаем общее количество строк во всех файлах кластера `cluster_simple`: + +``` sql +SELECT count(*) FROM s3Cluster('cluster_simple', 'http://minio1:9001/root/data/{clickhouse,database}/*', 'minio', 'minio123', 'CSV', 'name String, value UInt32, polygon Array(Array(Tuple(Float64, Float64)))'); +``` + +!!! warning "Внимание" + Если список файлов содержит диапазоны чисел с ведущими нулями, используйте конструкцию с фигурными скобками для каждой цифры отдельно или используйте `?`. 
+ +**Смотрите также** + +- [Движок таблиц S3](../../engines/table-engines/integrations/s3.md) +- [Табличная функция s3](../../sql-reference/table-functions/s3.md) From 99e08b7406c5161ba21b78f4904bfdc32d99877a Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Tue, 22 Jun 2021 23:22:13 +0300 Subject: [PATCH 283/931] Make network receive time metric to correctly include the time spent waiting for data from the client to INSERT #9958 --- src/IO/ReadBufferFromPocoSocket.cpp | 8 +++++++- ...network_receive_time_metric_insert.reference | 1 + .../01923_network_receive_time_metric_insert.sh | 17 +++++++++++++++++ 3 files changed, 25 insertions(+), 1 deletion(-) create mode 100644 tests/queries/0_stateless/01923_network_receive_time_metric_insert.reference create mode 100755 tests/queries/0_stateless/01923_network_receive_time_metric_insert.sh diff --git a/src/IO/ReadBufferFromPocoSocket.cpp b/src/IO/ReadBufferFromPocoSocket.cpp index d1ceaaf6a35..e043764d280 100644 --- a/src/IO/ReadBufferFromPocoSocket.cpp +++ b/src/IO/ReadBufferFromPocoSocket.cpp @@ -76,7 +76,13 @@ ReadBufferFromPocoSocket::ReadBufferFromPocoSocket(Poco::Net::Socket & socket_, bool ReadBufferFromPocoSocket::poll(size_t timeout_microseconds) const { - return available() || socket.poll(timeout_microseconds, Poco::Net::Socket::SELECT_READ | Poco::Net::Socket::SELECT_ERROR); + if (available()) + return true; + + Stopwatch watch; + bool res = socket.poll(timeout_microseconds, Poco::Net::Socket::SELECT_READ | Poco::Net::Socket::SELECT_ERROR); + ProfileEvents::increment(ProfileEvents::NetworkReceiveElapsedMicroseconds, watch.elapsedMicroseconds()); + return res; } } diff --git a/tests/queries/0_stateless/01923_network_receive_time_metric_insert.reference b/tests/queries/0_stateless/01923_network_receive_time_metric_insert.reference new file mode 100644 index 00000000000..d00491fd7e5 --- /dev/null +++ b/tests/queries/0_stateless/01923_network_receive_time_metric_insert.reference @@ -0,0 +1 @@ +1 diff --git a/tests/queries/0_stateless/01923_network_receive_time_metric_insert.sh b/tests/queries/0_stateless/01923_network_receive_time_metric_insert.sh new file mode 100755 index 00000000000..cd3202e94c9 --- /dev/null +++ b/tests/queries/0_stateless/01923_network_receive_time_metric_insert.sh @@ -0,0 +1,17 @@ +#!/usr/bin/env bash + +CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. "$CURDIR"/../shell_config.sh + +${CLICKHOUSE_CLIENT} --multiquery --query "DROP TABLE IF EXISTS t; CREATE TABLE t (x UInt64) ENGINE = Memory;" + +# Rate limit is chosen for operation to spent about one second. +seq 1 1000 | pv --quiet --rate-limit 3893 | ${CLICKHOUSE_CLIENT} --query "INSERT INTO t FORMAT TSV" + +# We check that the value of NetworkReceiveElapsedMicroseconds is correctly include the time spent waiting data from the client. 
+${CLICKHOUSE_CLIENT} --multiquery --query "SYSTEM FLUSH LOGS; + SELECT ProfileEvents.Values[indexOf(ProfileEvents.Names, 'NetworkReceiveElapsedMicroseconds')] >= 1000000 FROM system.query_log + WHERE current_database = currentDatabase() AND query_kind = 'Insert' AND event_date >= yesterday() AND type = 2 ORDER BY event_time DESC LIMIT 1;" + +${CLICKHOUSE_CLIENT} --query "DROP TABLE t" From a92bd49229ea4abc49880054ebe02b02b68d5183 Mon Sep 17 00:00:00 2001 From: alesapin Date: Tue, 22 Jun 2021 18:50:25 +0300 Subject: [PATCH 284/931] Supress PVS --- src/Coordination/ZooKeeperDataReader.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Coordination/ZooKeeperDataReader.cpp b/src/Coordination/ZooKeeperDataReader.cpp index 51965b499a2..8bcce25cfee 100644 --- a/src/Coordination/ZooKeeperDataReader.cpp +++ b/src/Coordination/ZooKeeperDataReader.cpp @@ -442,7 +442,7 @@ bool hasErrorsInMultiRequest(Coordination::ZooKeeperRequestPtr request) if (request == nullptr) return true; - for (const auto & subrequest : dynamic_cast(request.get())->requests) + for (const auto & subrequest : dynamic_cast(request.get())->requests) //-V522 if (subrequest == nullptr) return true; return false; From bb41ba6929a1bbe3d5b8ecbce8c7e11585409db2 Mon Sep 17 00:00:00 2001 From: alexey-milovidov Date: Tue, 22 Jun 2021 23:24:13 +0300 Subject: [PATCH 285/931] Update 01923_network_receive_time_metric_insert.sh --- .../0_stateless/01923_network_receive_time_metric_insert.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/queries/0_stateless/01923_network_receive_time_metric_insert.sh b/tests/queries/0_stateless/01923_network_receive_time_metric_insert.sh index cd3202e94c9..8d66cfddb3e 100755 --- a/tests/queries/0_stateless/01923_network_receive_time_metric_insert.sh +++ b/tests/queries/0_stateless/01923_network_receive_time_metric_insert.sh @@ -9,7 +9,7 @@ ${CLICKHOUSE_CLIENT} --multiquery --query "DROP TABLE IF EXISTS t; CREATE TABLE # Rate limit is chosen for operation to spent about one second. seq 1 1000 | pv --quiet --rate-limit 3893 | ${CLICKHOUSE_CLIENT} --query "INSERT INTO t FORMAT TSV" -# We check that the value of NetworkReceiveElapsedMicroseconds is correctly include the time spent waiting data from the client. +# We check that the value of NetworkReceiveElapsedMicroseconds correctly includes the time spent waiting data from the client. 
${CLICKHOUSE_CLIENT} --multiquery --query "SYSTEM FLUSH LOGS; SELECT ProfileEvents.Values[indexOf(ProfileEvents.Names, 'NetworkReceiveElapsedMicroseconds')] >= 1000000 FROM system.query_log WHERE current_database = currentDatabase() AND query_kind = 'Insert' AND event_date >= yesterday() AND type = 2 ORDER BY event_time DESC LIMIT 1;" From 779b5df1a25dfe57e6ef8f02ae5d999ac89af403 Mon Sep 17 00:00:00 2001 From: gyuton <40863448+gyuton@users.noreply.github.com> Date: Wed, 23 Jun 2021 00:10:53 +0300 Subject: [PATCH 286/931] Apply suggestions from code review Co-authored-by: olgarev <56617294+olgarev@users.noreply.github.com> --- .../reference/quantileexact.md | 6 +++--- .../reference/quantiles.md | 4 ++-- .../reference/quantileexact.md | 20 +++++++++++-------- .../reference/quantiles.md | 12 +++++------ 4 files changed, 23 insertions(+), 19 deletions(-) diff --git a/docs/en/sql-reference/aggregate-functions/reference/quantileexact.md b/docs/en/sql-reference/aggregate-functions/reference/quantileexact.md index bb1906f3a8c..5091b023f67 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/quantileexact.md +++ b/docs/en/sql-reference/aggregate-functions/reference/quantileexact.md @@ -178,7 +178,7 @@ quantileExactExclusive(level)(expr) **Arguments** -- `level` — Level of quantile. Optional. Possible values: (0, 1). Default value: 0.5. At `level=0.5` the function calculates [median](https://en.wikipedia.org/wiki/Median). [Float](../../../sql-reference/data-types/float.md). +- `level` — Level of quantile. Optional. Possible values: (0, 1) — bounds not included. Default value: 0.5. At `level=0.5` the function calculates [median](https://en.wikipedia.org/wiki/Median). [Float](../../../sql-reference/data-types/float.md). - `expr` — Expression over the column values resulting in numeric [data types](../../../sql-reference/data-types/index.md#data_types), [Date](../../../sql-reference/data-types/date.md) or [DateTime](../../../sql-reference/data-types/datetime.md). **Returned value** @@ -217,7 +217,7 @@ To get exact value, all the passed values ​​are combined into an array, whic This function is equivalent to [PERCENTILE.INC](https://support.microsoft.com/en-us/office/percentile-inc-function-680f9539-45eb-410b-9a5e-c1355e5fe2ed) Excel function, ([type R7](https://en.wikipedia.org/wiki/Quantile#Estimating_quantiles_from_a_sample)). -When using multiple `quantileExactInclusive` functions with different levels in a query, the internal states are not combined (that is, the query works less efficiently than it could). In this case, use the [quantilesExactInclusive](../../../sql-reference/aggregate-functions/reference/quantiles.md#quantilesexactexclusive) function. +When using multiple `quantileExactInclusive` functions with different levels in a query, the internal states are not combined (that is, the query works less efficiently than it could). In this case, use the [quantilesExactInclusive](../../../sql-reference/aggregate-functions/reference/quantiles.md#quantilesexactinclusive) function. **Syntax** @@ -227,7 +227,7 @@ quantileExactInclusive(level)(expr) **Arguments** -- `level` — Level of quantile. Optional. Possible values: [0, 1]. Default value: 0.5. At `level=0.5` the function calculates [median](https://en.wikipedia.org/wiki/Median). [Float](../../../sql-reference/data-types/float.md). +- `level` — Level of quantile. Optional. Possible values: [0, 1] — bounds included. Default value: 0.5. At `level=0.5` the function calculates [median](https://en.wikipedia.org/wiki/Median). 
[Float](../../../sql-reference/data-types/float.md). - `expr` — Expression over the column values resulting in numeric [data types](../../../sql-reference/data-types/index.md#data_types), [Date](../../../sql-reference/data-types/date.md) or [DateTime](../../../sql-reference/data-types/datetime.md). **Returned value** diff --git a/docs/en/sql-reference/aggregate-functions/reference/quantiles.md b/docs/en/sql-reference/aggregate-functions/reference/quantiles.md index 60ad80abae1..200a3f44dd3 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/quantiles.md +++ b/docs/en/sql-reference/aggregate-functions/reference/quantiles.md @@ -28,7 +28,7 @@ quantilesExactExclusive(level1, level2, ...)(expr) **Arguments** -- `level` — Leveles of quantiles. Possible values: (0, 1). [Float](../../../sql-reference/data-types/float.md). +- `level` — Leveles of quantiles. Possible values: (0, 1) — bounds not included. [Float](../../../sql-reference/data-types/float.md). - `expr` — Expression over the column values resulting in numeric [data types](../../../sql-reference/data-types/index.md#data_types), [Date](../../../sql-reference/data-types/date.md) or [DateTime](../../../sql-reference/data-types/datetime.md). **Returned value** @@ -77,7 +77,7 @@ quantilesExactInclusive(level1, level2, ...)(expr) **Arguments** -- `level` — Leveles of quantiles. Possible values: [0, 1]. [Float](../../../sql-reference/data-types/float.md). +- `level` — Leveles of quantiles. Possible values: [0, 1] — bounds included. [Float](../../../sql-reference/data-types/float.md). - `expr` — Expression over the column values resulting in numeric [data types](../../../sql-reference/data-types/index.md#data_types), [Date](../../../sql-reference/data-types/date.md) or [DateTime](../../../sql-reference/data-types/datetime.md). **Returned value** diff --git a/docs/ru/sql-reference/aggregate-functions/reference/quantileexact.md b/docs/ru/sql-reference/aggregate-functions/reference/quantileexact.md index 2a9ac46298b..9a6fca678c5 100644 --- a/docs/ru/sql-reference/aggregate-functions/reference/quantileexact.md +++ b/docs/ru/sql-reference/aggregate-functions/reference/quantileexact.md @@ -171,9 +171,10 @@ SELECT quantileExactHigh(number) FROM numbers(10) Эта функция эквивалентна Excel функции [PERCENTILE.EXC](https://support.microsoft.com/en-us/office/percentile-exc-function-bbaa7204-e9e1-4010-85bf-c31dc5dce4ba), [тип R6](https://en.wikipedia.org/wiki/Quantile#Estimating_quantiles_from_a_sample). -Внутренние состояния функций `quantileExactExclusive` не объединяются, если они используются в одном запросе. Если вам необходимо вычислить квантили нескольких уровней, используйте функцию [quantilesExactExclusive](../../../sql-reference/aggregate-functions/reference/quantiles.md#quantilesexactexclusive), это повысит эффективность запроса. +Если в одном запросе вызывается несколько функций `quantileExactExclusive` с разными значениями `level`, эти функции вычисляются независимо друг от друга. В таких случаях используйте функцию [quantilesExactExclusive](../../../sql-reference/aggregate-functions/reference/quantiles.md#quantilesexactexclusive), запрос будет выполняться эффективнее. + +**Синтаксис** -**Синтакс** ``` sql quantileExactExclusive(level)(expr) @@ -181,8 +182,10 @@ quantileExactExclusive(level)(expr) **Аргументы** -- `level` — уровень квантиля. Необязательный параметр. Возможные значения: (0, 1). Значения по умолчанию: 0.5. При `level=0.5` функция вычисляет [медиану](https://ru.wikipedia.org/wiki/Медиана_(статистика)). 
[Float](../../../sql-reference/data-types/float.md). -- `expr` — выражение, зависящее от значений столбцов, возвращающее данные [числовых типов](../../../sql-reference/data-types/index.md#data_types), [Date](../../../sql-reference/data-types/date.md) или [DateTime](../../../sql-reference/data-types/datetime.md). +- `level` — уровень квантиля. Необязательный параметр. Возможные значения: (0, 1) — граничные значения не учитываются. Значение по умолчанию: 0.5. При `level=0.5` функция вычисляет [медиану](https://ru.wikipedia.org/wiki/Медиана_(статистика)). [Float](../../../sql-reference/data-types/float.md). + +- `expr` — выражение, зависящее от значений столбцов. Возвращает данные [числовых типов](../../../sql-reference/data-types/index.md#data_types), [Date](../../../sql-reference/data-types/date.md) или [DateTime](../../../sql-reference/data-types/datetime.md). + **Возвращаемое значение** @@ -220,9 +223,10 @@ SELECT quantileExactExclusive(0.6)(x) FROM (SELECT number AS x FROM num); Эта функция эквивалентна Excel функции [PERCENTILE.INC](https://support.microsoft.com/en-us/office/percentile-inc-function-680f9539-45eb-410b-9a5e-c1355e5fe2ed), [тип R7](https://en.wikipedia.org/wiki/Quantile#Estimating_quantiles_from_a_sample). -Внутренние состояния функций `quantileExactInclusive` не объединяются, если они используются в одном запросе. Если вам необходимо вычислить квантили нескольких уровней, используйте функцию [quantilesExactInclusive](../../../sql-reference/aggregate-functions/reference/quantiles.md#quantilesexactexclusive), это повысит эффективность запроса. +Если в одном запросе вызывается несколько функций `quantileExactInclusive` с разными значениями `level`, эти функции вычисляются независимо друг от друга. В таких случаях используйте функцию [quantilesExactInclusive](../../../sql-reference/aggregate-functions/reference/quantiles.md#quantilesexactinclusive), запрос будет выполняться эффективнее. + +**Синтаксис** -**Синтакс** ``` sql quantileExactInclusive(level)(expr) @@ -230,7 +234,8 @@ quantileExactInclusive(level)(expr) **Аргументы** -- `level` — уровень квантиля. Необязательный параметр. Возможные значения: [0, 1]. Значения по умолчанию: 0.5. При `level=0.5` функция вычисляет [медиану](https://ru.wikipedia.org/wiki/Медиана_(статистика)). [Float](../../../sql-reference/data-types/float.md). +- `level` — уровень квантиля. Необязательный параметр. Возможные значения: [0, 1] — граничные значения учитываются. Значение по умолчанию: 0.5. При `level=0.5` функция вычисляет [медиану](https://ru.wikipedia.org/wiki/Медиана_(статистика)). [Float](../../../sql-reference/data-types/float.md). + - `expr` — выражение, зависящее от значений столбцов, возвращающее данные [числовых типов](../../../sql-reference/data-types/index.md#data_types), [Date](../../../sql-reference/data-types/date.md) или [DateTime](../../../sql-reference/data-types/datetime.md). 
**Возвращаемое значение** @@ -265,4 +270,3 @@ SELECT quantileExactInclusive(0.6)(x) FROM (SELECT number AS x FROM num); - [median](../../../sql-reference/aggregate-functions/reference/median.md#median) - [quantiles](../../../sql-reference/aggregate-functions/reference/quantiles.md#quantiles) - diff --git a/docs/ru/sql-reference/aggregate-functions/reference/quantiles.md b/docs/ru/sql-reference/aggregate-functions/reference/quantiles.md index ed9e124a3ce..37857790971 100644 --- a/docs/ru/sql-reference/aggregate-functions/reference/quantiles.md +++ b/docs/ru/sql-reference/aggregate-functions/reference/quantiles.md @@ -18,9 +18,9 @@ toc_priority: 201 Эта функция эквивалентна Excel функции [PERCENTILE.EXC](https://support.microsoft.com/en-us/office/percentile-exc-function-bbaa7204-e9e1-4010-85bf-c31dc5dce4ba), [тип R6](https://en.wikipedia.org/wiki/Quantile#Estimating_quantiles_from_a_sample). -Работает более эффективно с наборами уровней, чем [quantilesExactExclusive](../../../sql-reference/aggregate-functions/reference/quantileexact.md#quantileexactexclusive). +С наборами уровней работает эффективнее, чем [quantilesExactExclusive](../../../sql-reference/aggregate-functions/reference/quantileexact.md#quantileexactexclusive). -**Синтакс** +**Синтаксис** ``` sql quantilesExactExclusive(level1, level2, ...)(expr) @@ -28,8 +28,8 @@ quantilesExactExclusive(level1, level2, ...)(expr) **Аргументы** -- `level` — уровень квантилей. Возможные значения: (0, 1). [Float](../../../sql-reference/data-types/float.md). -- `expr` — выражение, зависящее от значений столбцов, возвращающее данные [числовых типов](../../../sql-reference/data-types/index.md#data_types), [Date](../../../sql-reference/data-types/date.md) или [DateTime](../../../sql-reference/data-types/datetime.md). +- `level` — уровень квантилей. Возможные значения: (0, 1) — граничные значения не учитываются. [Float](../../../sql-reference/data-types/float.md). +- `expr` — выражение, зависящее от значений столбцов. Возвращает данные [числовых типов](../../../sql-reference/data-types/index.md#data_types), [Date](../../../sql-reference/data-types/date.md) или [DateTime](../../../sql-reference/data-types/datetime.md). **Возвращаемые значения** @@ -67,7 +67,7 @@ SELECT quantilesExactExclusive(0.25, 0.5, 0.75, 0.9, 0.95, 0.99, 0.999)(x) FROM Эта функция эквивалентна Excel функции [PERCENTILE.INC](https://support.microsoft.com/en-us/office/percentile-inc-function-680f9539-45eb-410b-9a5e-c1355e5fe2ed), [тип R7](https://en.wikipedia.org/wiki/Quantile#Estimating_quantiles_from_a_sample). -Работает более эффективно с наборами уровней, чем [quantilesExactInclusive](../../../sql-reference/aggregate-functions/reference/quantileexact.md#quantilesexactinclusive). +С наборами уровней работает эффективнее, чем [quantilesExactInclusive](../../../sql-reference/aggregate-functions/reference/quantileexact.md#quantilesexactinclusive). **Синтаксис** @@ -77,7 +77,7 @@ quantilesExactInclusive(level1, level2, ...)(expr) **Аргументы** -- `level` — уровень квантилей. Возможные значения: [0, 1]. [Float](../../../sql-reference/data-types/float.md). +- `level` — уровень квантилей. Возможные значения: [0, 1] — граничные значения учитываются. [Float](../../../sql-reference/data-types/float.md). - `expr` — выражение, зависящее от значений столбцов, возвращающее данные [числовых типов](../../../sql-reference/data-types/index.md#data_types), [Date](../../../sql-reference/data-types/date.md) или [DateTime](../../../sql-reference/data-types/datetime.md). 
**Возвращаемые значения**

From 57f6811ba0de1dfdbf1ac24e47c0cb6a5c1ee1c8 Mon Sep 17 00:00:00 2001
From: George
Date: Wed, 23 Jun 2021 00:13:24 +0300
Subject: [PATCH 287/931] Draft

---
 docs/en/operations/settings/settings.md | 13 +++++++++++++
 1 file changed, 13 insertions(+)

diff --git a/docs/en/operations/settings/settings.md b/docs/en/operations/settings/settings.md
index 2bde3b03048..de7f734e8e0 100644
--- a/docs/en/operations/settings/settings.md
+++ b/docs/en/operations/settings/settings.md
@@ -1727,6 +1727,19 @@ Possible values:

 Default value: 0.

+## optimize_functions_to_subcolumns {#optimize-functions-to-subcolumns}
+
+Optimizes functions (if possible) to subcolumns to reduce the amount of data read.
+
+- 
+
+Possible values:
+
+- 0 — Disabled.
+- 1 — Enabled.
+
+Default value: `value`.
+
 ## distributed_replica_error_half_life {#settings-distributed_replica_error_half_life}

 - Type: seconds

From afeb56274471510c04109af391753266840b5f1e Mon Sep 17 00:00:00 2001
From: Alexey Milovidov
Date: Wed, 23 Jun 2021 00:29:20 +0300
Subject: [PATCH 288/931] Small change in Roaring Bitmaps

---
 src/AggregateFunctions/AggregateFunctionGroupBitmapData.h | 7 ++++---
 1 file changed, 4 insertions(+), 3 deletions(-)

diff --git a/src/AggregateFunctions/AggregateFunctionGroupBitmapData.h b/src/AggregateFunctions/AggregateFunctionGroupBitmapData.h
index 63acc2a5f0c..067daf6dc3a 100644
--- a/src/AggregateFunctions/AggregateFunctionGroupBitmapData.h
+++ b/src/AggregateFunctions/AggregateFunctionGroupBitmapData.h
@@ -1,6 +1,7 @@
 #pragma once

 #include 
+#include 
 #include 
 #include 
 #include 
@@ -43,7 +44,7 @@ private:

     void toLarge()
     {
-        rb = std::make_shared<RoaringBitmap>();
+        rb = std::make_unique<RoaringBitmap>();
         for (const auto & x : small)
             rb->add(static_cast<Value>(x.getValue()));
         small.clear();
@@ -113,7 +114,7 @@ public:
             readVarUInt(size, in);
             std::unique_ptr<char[]> buf(new char[size]);
             in.readStrict(buf.get(), size);
-            rb = std::make_shared<RoaringBitmap>(RoaringBitmap::read(buf.get()));
+            rb = std::make_unique<RoaringBitmap>(RoaringBitmap::read(buf.get()));
         }
     }

@@ -140,7 +141,7 @@ public:
      */
     std::shared_ptr<RoaringBitmap> getNewRoaringBitmapFromSmall() const
     {
-        std::shared_ptr<RoaringBitmap> ret = std::make_shared<RoaringBitmap>();
+        std::shared_ptr<RoaringBitmap> ret = std::make_unique<RoaringBitmap>();
         for (const auto & x : small)
             ret->add(static_cast<Value>(x.getValue()));
         return ret;

From d3d3c4b686ba0c7305770502420d1adf29983cbf Mon Sep 17 00:00:00 2001
From: Alexey Milovidov
Date: Wed, 23 Jun 2021 00:31:40 +0300
Subject: [PATCH 289/931] Adjust fast test

---
 docker/test/fasttest/run.sh | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/docker/test/fasttest/run.sh b/docker/test/fasttest/run.sh
index cc6aeff357f..098384d6e61 100755
--- a/docker/test/fasttest/run.sh
+++ b/docker/test/fasttest/run.sh
@@ -381,6 +381,9 @@ function run_tests

     # needs psql
     01889_postgresql_protocol_null_fields
+
+    # needs pv
+    01923_network_receive_time_metric_insert
 )

 time clickhouse-test --hung-check -j 8 --order=random --use-skip-list \

From a0206dd438d478bdbc1f1de9e2a28cfc93fb761a Mon Sep 17 00:00:00 2001
From: George
Date: Wed, 23 Jun 2021 00:32:41 +0300
Subject: [PATCH 290/931] various fixes

---
 .../reference/quantileexact.md                 | 12 +++++++++---
 .../aggregate-functions/reference/quantiles.md | 10 ++++++++--
 .../reference/quantileexact.md                 | 18 ++++++++----------
 .../aggregate-functions/reference/quantiles.md | 10 ++++++++--
 4 files changed, 33 insertions(+), 17 deletions(-)

diff --git a/docs/en/sql-reference/aggregate-functions/reference/quantileexact.md b/docs/en/sql-reference/aggregate-functions/reference/quantileexact.md
index 5091b023f67..47164cec86d 100644
---
a/docs/en/sql-reference/aggregate-functions/reference/quantileexact.md +++ b/docs/en/sql-reference/aggregate-functions/reference/quantileexact.md @@ -68,7 +68,7 @@ SELECT quantileExactLow(0.1)(number) FROM numbers(10) │ 1 │ └───────────────────────────────┘ ``` - + When using multiple `quantile*` functions with different levels in a query, the internal states are not combined (that is, the query works less efficiently than it could). In this case, use the [quantiles](../../../sql-reference/aggregate-functions/reference/quantiles.md#quantiles) function. **Syntax** @@ -178,9 +178,12 @@ quantileExactExclusive(level)(expr) **Arguments** -- `level` — Level of quantile. Optional. Possible values: (0, 1) — bounds not included. Default value: 0.5. At `level=0.5` the function calculates [median](https://en.wikipedia.org/wiki/Median). [Float](../../../sql-reference/data-types/float.md). - `expr` — Expression over the column values resulting in numeric [data types](../../../sql-reference/data-types/index.md#data_types), [Date](../../../sql-reference/data-types/date.md) or [DateTime](../../../sql-reference/data-types/datetime.md). +**Parameters** + +- `level` — Level of quantile. Optional. Possible values: (0, 1) — bounds not included. Default value: 0.5. At `level=0.5` the function calculates [median](https://en.wikipedia.org/wiki/Median). [Float](../../../sql-reference/data-types/float.md). + **Returned value** - Quantile of the specified level. @@ -227,9 +230,12 @@ quantileExactInclusive(level)(expr) **Arguments** -- `level` — Level of quantile. Optional. Possible values: [0, 1] — bounds included. Default value: 0.5. At `level=0.5` the function calculates [median](https://en.wikipedia.org/wiki/Median). [Float](../../../sql-reference/data-types/float.md). - `expr` — Expression over the column values resulting in numeric [data types](../../../sql-reference/data-types/index.md#data_types), [Date](../../../sql-reference/data-types/date.md) or [DateTime](../../../sql-reference/data-types/datetime.md). +**Parameters** + +- `level` — Level of quantile. Optional. Possible values: [0, 1] — bounds included. Default value: 0.5. At `level=0.5` the function calculates [median](https://en.wikipedia.org/wiki/Median). [Float](../../../sql-reference/data-types/float.md). + **Returned value** - Quantile of the specified level. diff --git a/docs/en/sql-reference/aggregate-functions/reference/quantiles.md b/docs/en/sql-reference/aggregate-functions/reference/quantiles.md index 200a3f44dd3..c3601f91350 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/quantiles.md +++ b/docs/en/sql-reference/aggregate-functions/reference/quantiles.md @@ -28,9 +28,12 @@ quantilesExactExclusive(level1, level2, ...)(expr) **Arguments** -- `level` — Leveles of quantiles. Possible values: (0, 1) — bounds not included. [Float](../../../sql-reference/data-types/float.md). - `expr` — Expression over the column values resulting in numeric [data types](../../../sql-reference/data-types/index.md#data_types), [Date](../../../sql-reference/data-types/date.md) or [DateTime](../../../sql-reference/data-types/datetime.md). +**Parameters** + +- `level` — Leveles of quantiles. Possible values: (0, 1) — bounds not included. [Float](../../../sql-reference/data-types/float.md). + **Returned value** - [Array](../../../sql-reference/data-types/array.md) of quantiles of the specified levels. @@ -77,9 +80,12 @@ quantilesExactInclusive(level1, level2, ...)(expr) **Arguments** -- `level` — Leveles of quantiles. 
Possible values: [0, 1] — bounds included. [Float](../../../sql-reference/data-types/float.md). - `expr` — Expression over the column values resulting in numeric [data types](../../../sql-reference/data-types/index.md#data_types), [Date](../../../sql-reference/data-types/date.md) or [DateTime](../../../sql-reference/data-types/datetime.md). +**Parameters** + +- `level` — Leveles of quantiles. Possible values: [0, 1] — bounds included. [Float](../../../sql-reference/data-types/float.md). + **Returned value** - [Array](../../../sql-reference/data-types/array.md) of quantiles of the specified levels. diff --git a/docs/ru/sql-reference/aggregate-functions/reference/quantileexact.md b/docs/ru/sql-reference/aggregate-functions/reference/quantileexact.md index 9a6fca678c5..2f1e879eaa1 100644 --- a/docs/ru/sql-reference/aggregate-functions/reference/quantileexact.md +++ b/docs/ru/sql-reference/aggregate-functions/reference/quantileexact.md @@ -25,7 +25,6 @@ quantileExact(level)(expr) - `level` — уровень квантили. Опционально. Константное значение с плавающей запятой от 0 до 1. Мы рекомендуем использовать значение `level` из диапазона `[0.01, 0.99]`. Значение по умолчанию: 0.5. При `level=0.5` функция вычисляет [медиану](https://ru.wikipedia.org/wiki/Медиана_(статистика)). - `expr` — выражение, зависящее от значений столбцов, возвращающее данные [числовых типов](../../../sql-reference/data-types/index.md#data_types) или типов [Date](../../../sql-reference/data-types/date.md), [DateTime](../../../sql-reference/data-types/datetime.md). - **Возвращаемое значение** - Квантиль заданного уровня. @@ -69,7 +68,7 @@ SELECT quantileExactLow(0.1)(number) FROM numbers(10) │ 1 │ └───────────────────────────────┘ ``` - + При использовании в запросе нескольких функций `quantile*` с разными уровнями, внутренние состояния не объединяются (то есть запрос работает менее эффективно). В этом случае используйте функцию [quantiles](../../../sql-reference/aggregate-functions/reference/quantiles.md#quantiles). **Синтаксис** @@ -85,7 +84,6 @@ quantileExact(level)(expr) - `level` — уровень квантили. Опциональный параметр. Константное занчение с плавающей запятой от 0 до 1. Мы рекомендуем использовать значение `level` из диапазона `[0.01, 0.99]`. Значение по умолчанию: 0.5. При `level=0.5` функция вычисляет [медиану](https://en.wikipedia.org/wiki/Median). - `expr` — выражение, зависящее от значений столбцов, возвращающее данные [числовых типов](../../../sql-reference/data-types/index.md#data_types), [Date](../../../sql-reference/data-types/date.md) или [DateTime](../../../sql-reference/data-types/datetime.md). - **Возвращаемое значение** - Квантиль заданного уровня. @@ -136,7 +134,6 @@ quantileExactHigh(level)(expr) - `level` — уровень квантили. Опциональный параметр. Константное занчение с плавающей запятой от 0 до 1. Мы рекомендуем использовать значение `level` из диапазона `[0.01, 0.99]`. Значение по умолчанию: 0.5. При `level=0.5` функция вычисляет [медиану](https://en.wikipedia.org/wiki/Median). - `expr` — выражение, зависящее от значений столбцов, возвращающее данные [числовых типов](../../../sql-reference/data-types/index.md#data_types), [Date](../../../sql-reference/data-types/date.md) или [DateTime](../../../sql-reference/data-types/datetime.md). - **Возвращаемое значение** - Квантиль заданного уровня. @@ -175,17 +172,17 @@ SELECT quantileExactHigh(number) FROM numbers(10) **Синтаксис** - ``` sql quantileExactExclusive(level)(expr) ``` **Аргументы** -- `level` — уровень квантиля. Необязательный параметр. 
Возможные значения: (0, 1) — граничные значения не учитываются. Значение по умолчанию: 0.5. При `level=0.5` функция вычисляет [медиану](https://ru.wikipedia.org/wiki/Медиана_(статистика)). [Float](../../../sql-reference/data-types/float.md). - - `expr` — выражение, зависящее от значений столбцов. Возвращает данные [числовых типов](../../../sql-reference/data-types/index.md#data_types), [Date](../../../sql-reference/data-types/date.md) или [DateTime](../../../sql-reference/data-types/datetime.md). +**Параметры** + +- `level` — уровень квантиля. Необязательный параметр. Возможные значения: (0, 1) — граничные значения не учитываются. Значение по умолчанию: 0.5. При `level=0.5` функция вычисляет [медиану](https://ru.wikipedia.org/wiki/Медиана_(статистика)). [Float](../../../sql-reference/data-types/float.md). **Возвращаемое значение** @@ -227,16 +224,17 @@ SELECT quantileExactExclusive(0.6)(x) FROM (SELECT number AS x FROM num); **Синтаксис** - ``` sql quantileExactInclusive(level)(expr) ``` **Аргументы** -- `level` — уровень квантиля. Необязательный параметр. Возможные значения: [0, 1] — граничные значения учитываются. Значение по умолчанию: 0.5. При `level=0.5` функция вычисляет [медиану](https://ru.wikipedia.org/wiki/Медиана_(статистика)). [Float](../../../sql-reference/data-types/float.md). +- `expr` — выражение, зависящее от значений столбцов. Возвращает данные [числовых типов](../../../sql-reference/data-types/index.md#data_types), [Date](../../../sql-reference/data-types/date.md) или [DateTime](../../../sql-reference/data-types/datetime.md). -- `expr` — выражение, зависящее от значений столбцов, возвращающее данные [числовых типов](../../../sql-reference/data-types/index.md#data_types), [Date](../../../sql-reference/data-types/date.md) или [DateTime](../../../sql-reference/data-types/datetime.md). +**Параметры** + +- `level` — уровень квантиля. Необязательный параметр. Возможные значения: [0, 1] — граничные значения учитываются. Значение по умолчанию: 0.5. При `level=0.5` функция вычисляет [медиану](https://ru.wikipedia.org/wiki/Медиана_(статистика)). [Float](../../../sql-reference/data-types/float.md). **Возвращаемое значение** diff --git a/docs/ru/sql-reference/aggregate-functions/reference/quantiles.md b/docs/ru/sql-reference/aggregate-functions/reference/quantiles.md index 37857790971..d2e7003e4e7 100644 --- a/docs/ru/sql-reference/aggregate-functions/reference/quantiles.md +++ b/docs/ru/sql-reference/aggregate-functions/reference/quantiles.md @@ -28,9 +28,12 @@ quantilesExactExclusive(level1, level2, ...)(expr) **Аргументы** -- `level` — уровень квантилей. Возможные значения: (0, 1) — граничные значения не учитываются. [Float](../../../sql-reference/data-types/float.md). - `expr` — выражение, зависящее от значений столбцов. Возвращает данные [числовых типов](../../../sql-reference/data-types/index.md#data_types), [Date](../../../sql-reference/data-types/date.md) или [DateTime](../../../sql-reference/data-types/datetime.md). +**Параметры** + +- `level` — уровень квантилей. Возможные значения: (0, 1) — граничные значения не учитываются. [Float](../../../sql-reference/data-types/float.md). + **Возвращаемые значения** - [Массив](../../../sql-reference/data-types/array.md) квантилей указанных уровней. @@ -77,8 +80,11 @@ quantilesExactInclusive(level1, level2, ...)(expr) **Аргументы** +- `expr` — выражение, зависящее от значений столбцов. 
Возвращает данные [числовых типов](../../../sql-reference/data-types/index.md#data_types), [Date](../../../sql-reference/data-types/date.md) или [DateTime](../../../sql-reference/data-types/datetime.md). + +**Параметры** + - `level` — уровень квантилей. Возможные значения: [0, 1] — граничные значения учитываются. [Float](../../../sql-reference/data-types/float.md). -- `expr` — выражение, зависящее от значений столбцов, возвращающее данные [числовых типов](../../../sql-reference/data-types/index.md#data_types), [Date](../../../sql-reference/data-types/date.md) или [DateTime](../../../sql-reference/data-types/datetime.md). **Возвращаемые значения** From 7418d1c75727057297d0f6f28a7c3c3951d9440a Mon Sep 17 00:00:00 2001 From: gyuton <40863448+gyuton@users.noreply.github.com> Date: Wed, 23 Jun 2021 01:08:20 +0300 Subject: [PATCH 291/931] Apply suggestions from code review Co-authored-by: olgarev <56617294+olgarev@users.noreply.github.com> --- .../sql-reference/aggregate-functions/reference/quantiles.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/en/sql-reference/aggregate-functions/reference/quantiles.md b/docs/en/sql-reference/aggregate-functions/reference/quantiles.md index c3601f91350..73939f16db3 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/quantiles.md +++ b/docs/en/sql-reference/aggregate-functions/reference/quantiles.md @@ -32,7 +32,7 @@ quantilesExactExclusive(level1, level2, ...)(expr) **Parameters** -- `level` — Leveles of quantiles. Possible values: (0, 1) — bounds not included. [Float](../../../sql-reference/data-types/float.md). +- `level` — Levels of quantiles. Possible values: (0, 1) — bounds not included. [Float](../../../sql-reference/data-types/float.md). **Returned value** @@ -84,7 +84,7 @@ quantilesExactInclusive(level1, level2, ...)(expr) **Parameters** -- `level` — Leveles of quantiles. Possible values: [0, 1] — bounds included. [Float](../../../sql-reference/data-types/float.md). +- `level` — Levels of quantiles. Possible values: [0, 1] — bounds included. [Float](../../../sql-reference/data-types/float.md). 
**Returned value** From c66a3b22b5568754cc06d58e59524701d294b32b Mon Sep 17 00:00:00 2001 From: Nikita Mikhaylov Date: Mon, 21 Jun 2021 13:36:21 +0000 Subject: [PATCH 292/931] done --- .../MergeTree/BackgroundJobsExecutor.cpp | 107 +++++++++--------- .../MergeTree/BackgroundJobsExecutor.h | 14 ++- src/Storages/MergeTree/MergeTreeData.cpp | 11 +- src/Storages/MergeTree/MergeTreeData.h | 9 +- src/Storages/StorageMergeTree.cpp | 35 +++--- src/Storages/StorageMergeTree.h | 2 +- src/Storages/StorageReplicatedMergeTree.cpp | 29 +++-- src/Storages/StorageReplicatedMergeTree.h | 4 +- 8 files changed, 113 insertions(+), 98 deletions(-) diff --git a/src/Storages/MergeTree/BackgroundJobsExecutor.cpp b/src/Storages/MergeTree/BackgroundJobsExecutor.cpp index c42ea46847c..9b70979e9e4 100644 --- a/src/Storages/MergeTree/BackgroundJobsExecutor.cpp +++ b/src/Storages/MergeTree/BackgroundJobsExecutor.cpp @@ -82,67 +82,58 @@ bool incrementMetricIfLessThanMax(std::atomic & atomic_value, Int64 max_v } -void IBackgroundJobExecutor::jobExecutingTask() +void IBackgroundJobExecutor::execute(JobAndPool job_and_pool) try { - auto job_and_pool = getBackgroundJob(); - if (job_and_pool) /// If we have job, then try to assign into background pool + auto & pool_config = pools_configs[job_and_pool.pool_type]; + /// If corresponding pool is not full increment metric and assign new job + if (incrementMetricIfLessThanMax(CurrentMetrics::values[pool_config.tasks_metric], pool_config.max_pool_size)) { - auto & pool_config = pools_configs[job_and_pool->pool_type]; - /// If corresponding pool is not full increment metric and assign new job - if (incrementMetricIfLessThanMax(CurrentMetrics::values[pool_config.tasks_metric], pool_config.max_pool_size)) + try /// this try required because we have to manually decrement metric { - try /// this try required because we have to manually decrement metric + pools[job_and_pool.pool_type].scheduleOrThrowOnError([this, pool_config, job{std::move(job_and_pool.job)}] () { - pools[job_and_pool->pool_type].scheduleOrThrowOnError([this, pool_config, job{std::move(job_and_pool->job)}] () + try /// We don't want exceptions in background pool { - try /// We don't want exceptions in background pool + bool job_success = job(); + /// Job done, decrement metric and reset no_work counter + CurrentMetrics::values[pool_config.tasks_metric]--; + + if (job_success) { - bool job_success = job(); - /// Job done, decrement metric and reset no_work counter - CurrentMetrics::values[pool_config.tasks_metric]--; - - if (job_success) - { - /// Job done, new empty space in pool, schedule background task - runTaskWithoutDelay(); - } - else - { - /// Job done, but failed, schedule with backoff - scheduleTask(/* with_backoff = */ true); - } - + /// Job done, new empty space in pool, schedule background task + runTaskWithoutDelay(); } - catch (...) + else { - CurrentMetrics::values[pool_config.tasks_metric]--; - tryLogCurrentException(__PRETTY_FUNCTION__); + /// Job done, but failed, schedule with backoff scheduleTask(/* with_backoff = */ true); } - }); - /// We've scheduled task in the background pool and when it will finish we will be triggered again. But this task can be - /// extremely long and we may have a lot of other small tasks to do, so we schedule ourselves here. - runTaskWithoutDelay(); - } - catch (...) 
- { - /// With our Pool settings scheduleOrThrowOnError shouldn't throw exceptions, but for safety catch added here - CurrentMetrics::values[pool_config.tasks_metric]--; - tryLogCurrentException(__PRETTY_FUNCTION__); - scheduleTask(/* with_backoff = */ true); - } - } - else /// Pool is full and we have some work to do - { - scheduleTask(/* with_backoff = */ false); - } - } - else /// Nothing to do, no jobs - { - scheduleTask(/* with_backoff = */ true); - } + } + catch (...) + { + CurrentMetrics::values[pool_config.tasks_metric]--; + tryLogCurrentException(__PRETTY_FUNCTION__); + scheduleTask(/* with_backoff = */ true); + } + }); + /// We've scheduled task in the background pool and when it will finish we will be triggered again. But this task can be + /// extremely long and we may have a lot of other small tasks to do, so we schedule ourselves here. + runTaskWithoutDelay(); + } + catch (...) + { + /// With our Pool settings scheduleOrThrowOnError shouldn't throw exceptions, but for safety catch added here + CurrentMetrics::values[pool_config.tasks_metric]--; + tryLogCurrentException(__PRETTY_FUNCTION__); + scheduleTask(/* with_backoff = */ true); + } + } + else /// Pool is full and we have some work to do + { + scheduleTask(/* with_backoff = */ false); + } } catch (...) /// Exception while we looking for a task, reschedule { @@ -156,7 +147,7 @@ void IBackgroundJobExecutor::start() if (!scheduling_task) { scheduling_task = getContext()->getSchedulePool().createTask( - getBackgroundTaskName(), [this]{ jobExecutingTask(); }); + getBackgroundTaskName(), [this]{ backgroundTaskFunction(); }); } scheduling_task->activateAndSchedule(); @@ -180,6 +171,14 @@ void IBackgroundJobExecutor::triggerTask() scheduling_task->schedule(); } +void IBackgroundJobExecutor::backgroundTaskFunction() +{ + if (scheduleJob()) + scheduleTask(/* with_backoff = */ false); + else + scheduleTask(/* with_backoff = */ true); +} + IBackgroundJobExecutor::~IBackgroundJobExecutor() { finish(); @@ -202,9 +201,9 @@ String BackgroundJobsExecutor::getBackgroundTaskName() const return data.getStorageID().getFullTableName() + " (dataProcessingTask)"; } -std::optional BackgroundJobsExecutor::getBackgroundJob() +bool BackgroundJobsExecutor::scheduleJob() { - return data.getDataProcessingJob(); + return data.scheduleDataProcessingJob(*this); } BackgroundMovesExecutor::BackgroundMovesExecutor( @@ -223,9 +222,9 @@ String BackgroundMovesExecutor::getBackgroundTaskName() const return data.getStorageID().getFullTableName() + " (dataMovingTask)"; } -std::optional BackgroundMovesExecutor::getBackgroundJob() +bool BackgroundMovesExecutor::scheduleJob() { - return data.getDataMovingJob(); + return data.scheduleDataMovingJob(*this); } } diff --git a/src/Storages/MergeTree/BackgroundJobsExecutor.h b/src/Storages/MergeTree/BackgroundJobsExecutor.h index e9cefc7a6b0..4376ea0abc8 100644 --- a/src/Storages/MergeTree/BackgroundJobsExecutor.h +++ b/src/Storages/MergeTree/BackgroundJobsExecutor.h @@ -99,6 +99,9 @@ public: /// Finish execution: deactivate background task and wait already scheduled jobs void finish(); + /// Executes job in a nested pool + void execute(JobAndPool job_and_pool); + /// Just call finish virtual ~IBackgroundJobExecutor(); @@ -110,12 +113,13 @@ protected: /// Name for task in background schedule pool virtual String getBackgroundTaskName() const = 0; - /// Get job for background execution - virtual std::optional getBackgroundJob() = 0; + + /// Schedules a job in a nested pool in this class. 
+    virtual bool scheduleJob() = 0;

 private:
     /// Function that executes in background scheduling pool
-    void jobExecutingTask();
+    void backgroundTaskFunction();
     /// Recalculate timeouts when we have to check for a new job
     void scheduleTask(bool with_backoff);
     /// Run background task as fast as possible and reset errors counter
@@ -136,7 +140,7 @@ public:

 protected:
     String getBackgroundTaskName() const override;
-    std::optional<JobAndPool> getBackgroundJob() override;
+    bool scheduleJob() override;
 };

 /// Move jobs executor, move parts between disks in the background
@@ -152,7 +156,7 @@ public:

 protected:
     String getBackgroundTaskName() const override;
-    std::optional<JobAndPool> getBackgroundJob() override;
+    bool scheduleJob() override;
 };

 }
diff --git a/src/Storages/MergeTree/MergeTreeData.cpp b/src/Storages/MergeTree/MergeTreeData.cpp
index abc37f52ff9..57b442f77c1 100644
--- a/src/Storages/MergeTree/MergeTreeData.cpp
+++ b/src/Storages/MergeTree/MergeTreeData.cpp
@@ -4619,19 +4619,20 @@ MergeTreeData::CurrentlyMovingPartsTagger::~CurrentlyMovingPartsTagger()
     }
 }

-std::optional<JobAndPool> MergeTreeData::getDataMovingJob()
+bool MergeTreeData::scheduleDataMovingJob(IBackgroundJobExecutor & executor)
 {
     if (parts_mover.moves_blocker.isCancelled())
-        return {};
+        return false;

     auto moving_tagger = selectPartsForMove();
     if (moving_tagger->parts_to_move.empty())
-        return {};
+        return false;

-    return JobAndPool{[this, moving_tagger] () mutable
+    executor.execute({[this, moving_tagger] () mutable
     {
         return moveParts(moving_tagger);
-    }, PoolType::MOVE};
+    }, PoolType::MOVE});
+    return true;
 }

 bool MergeTreeData::areBackgroundMovesNeeded() const
diff --git a/src/Storages/MergeTree/MergeTreeData.h b/src/Storages/MergeTree/MergeTreeData.h
index 65d875aa9cf..da8c7fcbb65 100644
--- a/src/Storages/MergeTree/MergeTreeData.h
+++ b/src/Storages/MergeTree/MergeTreeData.h
@@ -57,6 +57,7 @@ class ExpressionActions;
 using ExpressionActionsPtr = std::shared_ptr<ExpressionActions>;
 using ManyExpressionActions = std::vector<ExpressionActionsPtr>;
 class MergeTreeDeduplicationLog;
+class IBackgroundJobExecutor;

 namespace ErrorCodes
 {
@@ -807,10 +808,10 @@ public:

     PinnedPartUUIDsPtr getPinnedPartUUIDs() const;

-    /// Return main processing background job, like merge/mutate/fetch and so on
-    virtual std::optional<JobAndPool> getDataProcessingJob() = 0;
-    /// Return job to move parts between disks/volumes and so on.
-    std::optional<JobAndPool> getDataMovingJob();
+    /// Schedules a background job (merge/mutate/fetch and so on) on the executor
+    virtual bool scheduleDataProcessingJob(IBackgroundJobExecutor & executor) = 0;
+    /// Schedules job to move parts between disks/volumes and so on.
+ bool scheduleDataMovingJob(IBackgroundJobExecutor & executor); bool areBackgroundMovesNeeded() const; /// Lock part in zookeeper for use common S3 data in several nodes diff --git a/src/Storages/StorageMergeTree.cpp b/src/Storages/StorageMergeTree.cpp index 584f9345423..82e51282b6e 100644 --- a/src/Storages/StorageMergeTree.cpp +++ b/src/Storages/StorageMergeTree.cpp @@ -1001,13 +1001,13 @@ bool StorageMergeTree::mutateSelectedPart(const StorageMetadataPtr & metadata_sn return true; } -std::optional StorageMergeTree::getDataProcessingJob() //-V657 +bool StorageMergeTree::scheduleDataProcessingJob(IBackgroundJobExecutor & executor) //-V657 { if (shutdown_called) - return {}; + return false; if (merger_mutator.merges_blocker.isCancelled()) - return {}; + return false; auto metadata_snapshot = getInMemoryMetadataPtr(); std::shared_ptr merge_entry, mutate_entry; @@ -1017,21 +1017,25 @@ std::optional StorageMergeTree::getDataProcessingJob() //-V657 if (!merge_entry) mutate_entry = selectPartsToMutate(metadata_snapshot, nullptr, share_lock); - if (merge_entry || mutate_entry) + if (merge_entry) { - return JobAndPool{[this, metadata_snapshot, merge_entry, mutate_entry, share_lock] () mutable + executor.execute({[this, metadata_snapshot, merge_entry, mutate_entry, share_lock] () mutable { - if (merge_entry) - return mergeSelectedParts(metadata_snapshot, false, {}, *merge_entry, share_lock); - else if (mutate_entry) - return mutateSelectedPart(metadata_snapshot, *mutate_entry, share_lock); - - __builtin_unreachable(); - }, PoolType::MERGE_MUTATE}; + return mergeSelectedParts(metadata_snapshot, false, {}, *merge_entry, share_lock); + }, PoolType::MERGE_MUTATE}); + return true; + } + if (mutate_entry) + { + executor.execute({[this, metadata_snapshot, merge_entry, mutate_entry, share_lock] () mutable + { + return mutateSelectedPart(metadata_snapshot, *mutate_entry, share_lock); + }, PoolType::MERGE_MUTATE}); + return true; } else if (auto lock = time_after_previous_cleanup.compareAndRestartDeferred(1)) { - return JobAndPool{[this, share_lock] () + executor.execute({[this, share_lock] () { /// All use relative_data_path which changes during rename /// so execute under share lock. 
@@ -1041,9 +1045,10 @@ std::optional StorageMergeTree::getDataProcessingJob() //-V657 clearOldMutations(); clearEmptyParts(); return true; - }, PoolType::MERGE_MUTATE}; + }, PoolType::MERGE_MUTATE}); + return true; } - return {}; + return false; } Int64 StorageMergeTree::getCurrentMutationVersion( diff --git a/src/Storages/StorageMergeTree.h b/src/Storages/StorageMergeTree.h index 45ea68e8a6f..6678ae06b53 100644 --- a/src/Storages/StorageMergeTree.h +++ b/src/Storages/StorageMergeTree.h @@ -94,7 +94,7 @@ public: CheckResults checkData(const ASTPtr & query, ContextPtr context) override; - std::optional getDataProcessingJob() override; + bool scheduleDataProcessingJob(IBackgroundJobExecutor & executor) override; MergeTreeDeduplicationLog * getDeduplicationLog() { return deduplication_log.get(); } private: diff --git a/src/Storages/StorageReplicatedMergeTree.cpp b/src/Storages/StorageReplicatedMergeTree.cpp index 47f6bbd0ccc..43228dc5db9 100644 --- a/src/Storages/StorageReplicatedMergeTree.cpp +++ b/src/Storages/StorageReplicatedMergeTree.cpp @@ -3155,30 +3155,35 @@ bool StorageReplicatedMergeTree::processQueueEntry(ReplicatedMergeTreeQueue::Sel }); } -std::optional StorageReplicatedMergeTree::getDataProcessingJob() +bool StorageReplicatedMergeTree::scheduleDataProcessingJob(IBackgroundJobExecutor & executor) { /// If replication queue is stopped exit immediately as we successfully executed the task if (queue.actions_blocker.isCancelled()) - return {}; + return false; /// This object will mark the element of the queue as running. ReplicatedMergeTreeQueue::SelectedEntryPtr selected_entry = selectQueueEntry(); if (!selected_entry) - return {}; - - PoolType pool_type; + return false; /// Depending on entry type execute in fetches (small) pool or big merge_mutate pool if (selected_entry->log_entry->type == LogEntry::GET_PART) - pool_type = PoolType::FETCH; - else - pool_type = PoolType::MERGE_MUTATE; - - return JobAndPool{[this, selected_entry] () mutable { - return processQueueEntry(selected_entry); - }, pool_type}; + executor.execute({[this, selected_entry] () mutable + { + return processQueueEntry(selected_entry); + }, PoolType::FETCH}); + return true; + } + else + { + executor.execute({[this, selected_entry] () mutable + { + return processQueueEntry(selected_entry); + }, PoolType::MERGE_MUTATE}); + return true; + } } diff --git a/src/Storages/StorageReplicatedMergeTree.h b/src/Storages/StorageReplicatedMergeTree.h index 396ec7a1741..b8a78e51240 100644 --- a/src/Storages/StorageReplicatedMergeTree.h +++ b/src/Storages/StorageReplicatedMergeTree.h @@ -215,8 +215,8 @@ public: static bool removeTableNodesFromZooKeeper(zkutil::ZooKeeperPtr zookeeper, const String & zookeeper_path, const zkutil::EphemeralNodeHolder::Ptr & metadata_drop_lock, Poco::Logger * logger); - /// Get job to execute in background pool (merge, mutate, drop range and so on) - std::optional getDataProcessingJob() override; + /// Schedules job to execute in background pool (merge, mutate, drop range and so on) + bool scheduleDataProcessingJob(IBackgroundJobExecutor & executor) override; /// Checks that fetches are not disabled with action blocker and pool for fetches /// is not overloaded From a39bca0acf5bece1ca86294942194973b2e55f07 Mon Sep 17 00:00:00 2001 From: Nikita Mikhaylov Date: Mon, 21 Jun 2021 14:16:15 +0000 Subject: [PATCH 293/931] better --- src/Storages/MergeTree/BackgroundJobsExecutor.cpp | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/src/Storages/MergeTree/BackgroundJobsExecutor.cpp 
b/src/Storages/MergeTree/BackgroundJobsExecutor.cpp index 9b70979e9e4..269b00021fb 100644 --- a/src/Storages/MergeTree/BackgroundJobsExecutor.cpp +++ b/src/Storages/MergeTree/BackgroundJobsExecutor.cpp @@ -173,9 +173,7 @@ void IBackgroundJobExecutor::triggerTask() void IBackgroundJobExecutor::backgroundTaskFunction() { - if (scheduleJob()) - scheduleTask(/* with_backoff = */ false); - else + if (!scheduleJob()) scheduleTask(/* with_backoff = */ true); } From f4d6a6e1282fc75e82d3e8ee8a8e0a9bfacdaaf7 Mon Sep 17 00:00:00 2001 From: Nikita Mikhaylov Date: Mon, 21 Jun 2021 15:18:43 +0000 Subject: [PATCH 294/931] support dynamic reloading of server config --- .../MergeTree/BackgroundJobsExecutor.cpp | 34 ++++++++++++++++--- .../MergeTree/BackgroundJobsExecutor.h | 2 +- 2 files changed, 30 insertions(+), 6 deletions(-) diff --git a/src/Storages/MergeTree/BackgroundJobsExecutor.cpp b/src/Storages/MergeTree/BackgroundJobsExecutor.cpp index 269b00021fb..871c4a05c0f 100644 --- a/src/Storages/MergeTree/BackgroundJobsExecutor.cpp +++ b/src/Storages/MergeTree/BackgroundJobsExecutor.cpp @@ -25,7 +25,8 @@ IBackgroundJobExecutor::IBackgroundJobExecutor( { for (const auto & pool_config : pools_configs_) { - pools.try_emplace(pool_config.pool_type, pool_config.max_pool_size, 0, pool_config.max_pool_size, false); + const auto max_pool_size = pool_config.get_max_pool_size(); + pools.try_emplace(pool_config.pool_type, max_pool_size, 0, max_pool_size, false); pools_configs.emplace(pool_config.pool_type, pool_config); } } @@ -86,11 +87,17 @@ void IBackgroundJobExecutor::execute(JobAndPool job_and_pool) try { auto & pool_config = pools_configs[job_and_pool.pool_type]; + const auto max_pool_size = pool_config.get_max_pool_size(); + /// If corresponding pool is not full increment metric and assign new job - if (incrementMetricIfLessThanMax(CurrentMetrics::values[pool_config.tasks_metric], pool_config.max_pool_size)) + if (incrementMetricIfLessThanMax(CurrentMetrics::values[pool_config.tasks_metric], max_pool_size)) { try /// this try required because we have to manually decrement metric { + /// Synchronize pool size, because config could be reloaded + pools[job_and_pool.pool_type].setMaxThreads(max_pool_size); + pools[job_and_pool.pool_type].setQueueSize(max_pool_size); + pools[job_and_pool.pool_type].scheduleOrThrowOnError([this, pool_config, job{std::move(job_and_pool.job)}] () { try /// We don't want exceptions in background pool @@ -188,8 +195,19 @@ BackgroundJobsExecutor::BackgroundJobsExecutor( : IBackgroundJobExecutor( global_context_, global_context_->getBackgroundProcessingTaskSchedulingSettings(), - {PoolConfig{PoolType::MERGE_MUTATE, global_context_->getSettingsRef().background_pool_size, CurrentMetrics::BackgroundPoolTask}, - PoolConfig{PoolType::FETCH, global_context_->getSettingsRef().background_fetches_pool_size, CurrentMetrics::BackgroundFetchesPoolTask}}) + {PoolConfig + { + .pool_type = PoolType::MERGE_MUTATE, + .get_max_pool_size = [global_context_] () { return global_context_->getSettingsRef().background_pool_size; }, + .tasks_metric = CurrentMetrics::BackgroundPoolTask + }, + PoolConfig + { + .pool_type = PoolType::FETCH, + .get_max_pool_size = [global_context_] () { return global_context_->getSettingsRef().background_fetches_pool_size; }, + .tasks_metric = CurrentMetrics::BackgroundFetchesPoolTask + } + }) , data(data_) { } @@ -210,7 +228,13 @@ BackgroundMovesExecutor::BackgroundMovesExecutor( : IBackgroundJobExecutor( global_context_, global_context_->getBackgroundMoveTaskSchedulingSettings(), 
- {PoolConfig{PoolType::MOVE, global_context_->getSettingsRef().background_move_pool_size, CurrentMetrics::BackgroundMovePoolTask}}) + {PoolConfig + { + .pool_type = PoolType::MOVE, + .get_max_pool_size = [global_context_] () { return global_context_->getSettingsRef().background_move_pool_size; }, + .tasks_metric = CurrentMetrics::BackgroundMovePoolTask + } + }) , data(data_) { } diff --git a/src/Storages/MergeTree/BackgroundJobsExecutor.h b/src/Storages/MergeTree/BackgroundJobsExecutor.h index 4376ea0abc8..0d9856a2f40 100644 --- a/src/Storages/MergeTree/BackgroundJobsExecutor.h +++ b/src/Storages/MergeTree/BackgroundJobsExecutor.h @@ -59,7 +59,7 @@ protected: /// This pool type PoolType pool_type; /// Max pool size in threads - size_t max_pool_size; + const std::function get_max_pool_size; /// Metric that we have to increment when we execute task in this pool CurrentMetrics::Metric tasks_metric; }; From be4aff0c1948345bc63f87a47ab363552501eaf3 Mon Sep 17 00:00:00 2001 From: Nikita Mikhaylov Date: Mon, 21 Jun 2021 22:43:50 +0000 Subject: [PATCH 295/931] delete test --- .../tests/gtest_background_executor.cpp | 62 ------------------- 1 file changed, 62 deletions(-) delete mode 100644 src/Storages/tests/gtest_background_executor.cpp diff --git a/src/Storages/tests/gtest_background_executor.cpp b/src/Storages/tests/gtest_background_executor.cpp deleted file mode 100644 index 283cdf3fbf8..00000000000 --- a/src/Storages/tests/gtest_background_executor.cpp +++ /dev/null @@ -1,62 +0,0 @@ -#include -#include -#include -#include -#include -#include -using namespace std::chrono_literals; -namespace CurrentMetrics -{ - extern const Metric BackgroundPoolTask; -} - -using namespace DB; - -static std::atomic counter{0}; - -class TestJobExecutor : public IBackgroundJobExecutor -{ -public: - explicit TestJobExecutor(ContextPtr local_context) - :IBackgroundJobExecutor( - local_context, - BackgroundTaskSchedulingSettings{}, - {PoolConfig{PoolType::MERGE_MUTATE, 4, CurrentMetrics::BackgroundPoolTask}}) - {} - -protected: - String getBackgroundTaskName() const override - { - return "TestTask"; - } - - std::optional getBackgroundJob() override - { - return JobAndPool{[] { std::this_thread::sleep_for(1s); counter++; return true; }, PoolType::MERGE_MUTATE}; - } -}; - -using TestExecutorPtr = std::unique_ptr; - -TEST(BackgroundExecutor, TestMetric) -{ - const auto & context_holder = getContext(); - std::vector executors; - for (size_t i = 0; i < 100; ++i) - executors.emplace_back(std::make_unique(context_holder.context)); - - for (size_t i = 0; i < 100; ++i) - executors[i]->start(); - - for (size_t i = 0; i < 100; ++i) - { - EXPECT_TRUE(CurrentMetrics::values[CurrentMetrics::BackgroundPoolTask].load() <= 4); - std::this_thread::sleep_for(200ms); - } - - for (size_t i = 0; i < 100; ++i) - executors[i]->finish(); - - /// Sanity check - EXPECT_TRUE(counter > 50); -} From 000c937ebe47bfa38e13fda6eeba8c9b06adf404 Mon Sep 17 00:00:00 2001 From: Nikita Mikhaylov Date: Tue, 22 Jun 2021 23:24:37 +0000 Subject: [PATCH 296/931] fix pvs check --- src/Storages/StorageMergeTree.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Storages/StorageMergeTree.cpp b/src/Storages/StorageMergeTree.cpp index 82e51282b6e..6f8b69ba419 100644 --- a/src/Storages/StorageMergeTree.cpp +++ b/src/Storages/StorageMergeTree.cpp @@ -1019,7 +1019,7 @@ bool StorageMergeTree::scheduleDataProcessingJob(IBackgroundJobExecutor & execut if (merge_entry) { - executor.execute({[this, metadata_snapshot, merge_entry, mutate_entry, 
share_lock] () mutable
+        executor.execute({[this, metadata_snapshot, merge_entry, share_lock] () mutable
         {
             return mergeSelectedParts(metadata_snapshot, false, {}, *merge_entry, share_lock);
         }, PoolType::MERGE_MUTATE});

From e148ef739d66e4eeb7129a21b26ed7d62459d2f1 Mon Sep 17 00:00:00 2001
From: Azat Khuzhin
Date: Sun, 20 Jun 2021 16:50:01 +0300
Subject: [PATCH 297/931] Drop replicas from dirname for
 internal_replication=true

Under use_compact_format_in_distributed_parts_names=1 and
internal_replication=true the server encodes all replicas into the
directory name for async INSERT into Distributed, and the directory name
looks like:

    shard1_replica1,shard1_replica2,shard1_replica3

This is required for creating connections (to specific replicas only),
but in case of internal_replication=true this can be avoided, since such
a path will always include all replicas.

This patch replaces all replicas with the "_all_replicas" marker.

Note that the initial problem was that this path may overflow NAME_MAX
if you have more than 15 replicas, and the server will fail to create
the directory.

Also note that the changed directory name should not be a problem, since:
- empty directories will be removed since #16729
- replicas encoded in the directory name are also supported anyway.

---
 src/Interpreters/Cluster.cpp                  | 41 +++++++----------
 src/Interpreters/Cluster.h                    |  6 +--
 src/Storages/Distributed/DirectoryMonitor.cpp | 38 +++++++++++++---
 .../configs/remote_servers.xml                |  9 ++--
 .../test_distributed_format/test.py           | 44 ++++++++++++++-----
 5 files changed, 87 insertions(+), 51 deletions(-)

diff --git a/src/Interpreters/Cluster.cpp b/src/Interpreters/Cluster.cpp
index 42d4b9d05a1..2fb5d7afcbd 100644
--- a/src/Interpreters/Cluster.cpp
+++ b/src/Interpreters/Cluster.cpp
@@ -179,7 +179,7 @@ String Cluster::Address::toFullString(bool use_compact_format) const
             // shard_num/replica_num like in system.clusters table
             throw Exception("shard_num/replica_num cannot be zero", ErrorCodes::LOGICAL_ERROR);

-        return "shard" + std::to_string(shard_index) + "_replica" + std::to_string(replica_index);
+        return fmt::format("shard{}_replica{}", shard_index, replica_index);
     }
     else
     {
@@ -199,7 +199,7 @@ Cluster::Address Cluster::Address::fromFullString(const String & full_string)

     const char * user_pw_end = strchr(full_string.data(), '@');

-    /// parsing with the new [shard{shard_index}[_replica{replica_index}]] format
+    /// parsing with the new shard{shard_index}[_replica{replica_index}] format
     if (!user_pw_end && startsWith(full_string, "shard"))
     {
         const char * underscore = strchr(full_string.data(), '_');
@@ -401,6 +401,9 @@ Cluster::Cluster(const Poco::Util::AbstractConfiguration & config,
             bool internal_replication = config.getBool(partial_prefix + ".internal_replication", false);

             ShardInfoInsertPathForInternalReplication insert_paths;
+            /// "_all_replicas" is a marker that will be replaced with all replicas
+            /// (for creating connections in the Distributed engine)
+            insert_paths.compact = fmt::format("shard{}_all_replicas", current_shard_num);

             for (const auto & replica_key : replica_keys)
             {
@@ -419,20 +422,10 @@ Cluster::Cluster(const Poco::Util::AbstractConfiguration & config,

                     if (internal_replication)
                     {
-                        /// use_compact_format=0
-                        {
-                            auto dir_name = replica_addresses.back().toFullString(false /* use_compact_format */);
-                            if (!replica_addresses.back().is_local)
-                                concatInsertPath(insert_paths.prefer_localhost_replica, dir_name);
-                            concatInsertPath(insert_paths.no_prefer_localhost_replica, dir_name);
-                        }
-                        /// use_compact_format=1
-                        {
-                            auto dir_name = replica_addresses.back().toFullString(true /* use_compact_format */);
-                            if (!replica_addresses.back().is_local)
-                                concatInsertPath(insert_paths.prefer_localhost_replica_compact, dir_name);
-                            concatInsertPath(insert_paths.no_prefer_localhost_replica_compact, dir_name);
-                        }
+                        auto dir_name = replica_addresses.back().toFullString(/* use_compact_format= */ false);
+                        if (!replica_addresses.back().is_local)
+                            concatInsertPath(insert_paths.prefer_localhost_replica, dir_name);
+                        concatInsertPath(insert_paths.no_prefer_localhost_replica, dir_name);
                     }
                 }
                 else
@@ -660,17 +653,17 @@ const std::string & Cluster::ShardInfo::insertPathForInternalReplication(bool pr
     const auto & paths = insert_path_for_internal_replication;
     if (!use_compact_format)
     {
-        if (prefer_localhost_replica)
-            return paths.prefer_localhost_replica;
-        else
-            return paths.no_prefer_localhost_replica;
+        const auto & path = prefer_localhost_replica ? paths.prefer_localhost_replica : paths.no_prefer_localhost_replica;
+        if (path.size() > NAME_MAX)
+        {
+            throw Exception(ErrorCodes::LOGICAL_ERROR,
+                "Path '{}' for async distributed INSERT is too long (exceed {} limit)", path, NAME_MAX);
+        }
+        return path;
     }
     else
     {
-        if (prefer_localhost_replica)
-            return paths.prefer_localhost_replica_compact;
-        else
-            return paths.no_prefer_localhost_replica_compact;
+        return paths.compact;
     }
 }

diff --git a/src/Interpreters/Cluster.h b/src/Interpreters/Cluster.h
index 4ca5cbaa9f2..0afc43b85b2 100644
--- a/src/Interpreters/Cluster.h
+++ b/src/Interpreters/Cluster.h
@@ -166,10 +166,8 @@ public:
         std::string prefer_localhost_replica;
         /// prefer_localhost_replica == 0 && use_compact_format_in_distributed_parts_names=0
         std::string no_prefer_localhost_replica;
-        /// prefer_localhost_replica == 1 && use_compact_format_in_distributed_parts_names=1
-        std::string prefer_localhost_replica_compact;
-        /// prefer_localhost_replica == 0 && use_compact_format_in_distributed_parts_names=1
-        std::string no_prefer_localhost_replica_compact;
+        /// use_compact_format_in_distributed_parts_names=1
+        std::string compact;
     };

     struct ShardInfo

diff --git a/src/Storages/Distributed/DirectoryMonitor.cpp b/src/Storages/Distributed/DirectoryMonitor.cpp
index e8835132f8f..fe2e11c7b65 100644
--- a/src/Storages/Distributed/DirectoryMonitor.cpp
+++ b/src/Storages/Distributed/DirectoryMonitor.cpp
@@ -60,13 +60,12 @@ namespace
     constexpr const std::chrono::minutes decrease_error_count_period{5};

     template <typename PoolFactory>
-    ConnectionPoolPtrs createPoolsForAddresses(const std::string & name, PoolFactory && factory, Poco::Logger * log)
+    ConnectionPoolPtrs createPoolsForAddresses(const std::string & name, PoolFactory && factory, const Cluster::ShardsInfo & shards_info, Poco::Logger * log)
     {
         ConnectionPoolPtrs pools;

-        for (auto it = boost::make_split_iterator(name, boost::first_finder(",")); it != decltype(it){}; ++it)
+        auto make_connection = [&](const Cluster::Address & address)
         {
-            Cluster::Address address = Cluster::Address::fromFullString(boost::copy_range<std::string>(*it));
             try
             {
                 pools.emplace_back(factory(address));
@@ -76,10 +75,35 @@ namespace
                 if (e.code() == ErrorCodes::INCORRECT_FILE_NAME)
                 {
                     tryLogCurrentException(log);
-                    continue;
+                    return;
                 }
                 throw;
             }
+        };
+
+        for (auto it = boost::make_split_iterator(name, boost::first_finder(",")); it != decltype(it){}; ++it)
+        {
+            const std::string & dirname = boost::copy_range<std::string>(*it);
+            Cluster::Address address = Cluster::Address::fromFullString(dirname);
+            if (address.shard_index && dirname.ends_with("_all_replicas"))
+            {
+                if (address.shard_index > shards_info.size())
+                {
+                    LOG_ERROR(log, "No shard with shard_index={} ({})", address.shard_index, name);
+                    continue;
+                }
+
+                const auto & shard_info = shards_info[address.shard_index - 1];
+                size_t replicas = shard_info.per_replica_pools.size();
+
+                for (size_t replica_index = 1; replica_index <= replicas; ++replica_index)
+                {
+                    address.replica_index = replica_index;
+                    make_connection(address);
+                }
+            }
+            else
+                make_connection(address);
         }

         return pools;
@@ -420,13 +444,13 @@ ConnectionPoolPtr StorageDistributedDirectoryMonitor::createPool(const std::stri
     const auto & shards_info = cluster->getShardsInfo();
     const auto & shards_addresses = cluster->getShardsAddresses();

-    /// check new format shard{shard_index}_number{replica_index}
+    /// check new format shard{shard_index}_replica{replica_index}
     /// (shard_index and replica_index starts from 1)
     if (address.shard_index != 0)
     {
         if (!address.replica_index)
             throw Exception(ErrorCodes::INCORRECT_FILE_NAME,
-                "Wrong replica_index ({})", address.replica_index, name);
+                "Wrong replica_index={} ({})", address.replica_index, name);

         if (address.shard_index > shards_info.size())
             throw Exception(ErrorCodes::INCORRECT_FILE_NAME,
@@ -475,7 +499,7 @@ ConnectionPoolPtr StorageDistributedDirectoryMonitor::createPool(const std::stri
             address.secure);
     };

-    auto pools = createPoolsForAddresses(name, pool_factory, storage.log);
+    auto pools = createPoolsForAddresses(name, pool_factory, storage.getCluster()->getShardsInfo(), storage.log);

     const auto settings = storage.getContext()->getSettings();
     return pools.size() == 1 ? pools.front() : std::make_shared<ConnectionPoolWithFailover>(pools,

diff --git a/tests/integration/test_distributed_format/configs/remote_servers.xml b/tests/integration/test_distributed_format/configs/remote_servers.xml
index 5c86713bd78..87eaea50a8b 100644
--- a/tests/integration/test_distributed_format/configs/remote_servers.xml
+++ b/tests/integration/test_distributed_format/configs/remote_servers.xml
@@ -1,19 +1,20 @@
 <yandex>
     <remote_servers>
-        <test_cluster>
+        <test_cluster_internal_replication>
             <shard>
+                <internal_replication>true</internal_replication>
                 <replica>
                     <host>not_existing</host>
                     <port>9000</port>
                 </replica>
             </shard>
-        </test_cluster>
+        </test_cluster_internal_replication>
-        <test_cluster_2>
+        <test_cluster_no_internal_replication>
             <shard>
                 <replica>
                     <host>not_existing</host>
                     <port>9000</port>
                 </replica>
             </shard>
-        </test_cluster_2>
+        </test_cluster_no_internal_replication>
     </remote_servers>
diff --git a/tests/integration/test_distributed_format/test.py b/tests/integration/test_distributed_format/test.py
index 22054077544..d6e1cc03fa8 100644
--- a/tests/integration/test_distributed_format/test.py
+++ b/tests/integration/test_distributed_format/test.py
@@ -1,16 +1,27 @@
-import pytest
+# pylint: disable=redefined-outer-name
+# pylint: disable=unused-argument
+# pylint: disable=line-too-long

+import pytest
 from helpers.cluster import ClickHouseCluster

 cluster = ClickHouseCluster(__file__)
 node = cluster.add_instance('node', main_configs=['configs/remote_servers.xml'])

 cluster_param = pytest.mark.parametrize("cluster", [
-    ('test_cluster'),
-    ('test_cluster_2'),
+    ('test_cluster_internal_replication'),
+    ('test_cluster_no_internal_replication'),
 ])

+
+def get_dist_path(cluster, table, dist_format):
+    if dist_format == 0:
+        return f'/var/lib/clickhouse/data/test/{table}/default@not_existing:9000'
+    if cluster == 'test_cluster_internal_replication':
+        return f'/var/lib/clickhouse/data/test/{table}/shard1_all_replicas'
+    return f'/var/lib/clickhouse/data/test/{table}/shard1_replica1'
+
+
 @pytest.fixture(scope="module")
 def started_cluster():
     try:
@@ -29,13 +40,16 @@ def test_single_file(started_cluster, cluster):
     node.query("insert into test.distr_1 values (1, 'a'), (2, 'bb'), (3, 'ccc')",
                settings={"use_compact_format_in_distributed_parts_names": "1"})

-    query = "select * from file('/var/lib/clickhouse/data/test/distr_1/shard1_replica1/1.bin', 'Distributed')"
+    path = 
get_dist_path(cluster, 'distr_1', 1) + query = f"select * from file('{path}/1.bin', 'Distributed')" out = node.exec_in_container(['/usr/bin/clickhouse', 'local', '--stacktrace', '-q', query]) assert out == '1\ta\n2\tbb\n3\tccc\n' - query = "create table t (x UInt64, s String) engine = File('Distributed', '/var/lib/clickhouse/data/test/distr_1/shard1_replica1/1.bin');" \ - "select * from t" + query = f""" + create table t (x UInt64, s String) engine = File('Distributed', '{path}/1.bin'); + select * from t; + """ out = node.exec_in_container(['/usr/bin/clickhouse', 'local', '--stacktrace', '-q', query]) assert out == '1\ta\n2\tbb\n3\tccc\n' @@ -54,13 +68,16 @@ def test_two_files(started_cluster, cluster): "use_compact_format_in_distributed_parts_names": "1", }) - query = "select * from file('/var/lib/clickhouse/data/test/distr_2/shard1_replica1/{1,2,3,4}.bin', 'Distributed') order by x" + path = get_dist_path(cluster, 'distr_2', 1) + query = f"select * from file('{path}/{{1,2,3,4}}.bin', 'Distributed') order by x" out = node.exec_in_container(['/usr/bin/clickhouse', 'local', '--stacktrace', '-q', query]) assert out == '0\t_\n1\ta\n2\tbb\n3\tccc\n' - query = "create table t (x UInt64, s String) engine = File('Distributed', '/var/lib/clickhouse/data/test/distr_2/shard1_replica1/{1,2,3,4}.bin');" \ - "select * from t order by x" + query = f""" + create table t (x UInt64, s String) engine = File('Distributed', '{path}/{{1,2,3,4}}.bin'); + select * from t order by x; + """ out = node.exec_in_container(['/usr/bin/clickhouse', 'local', '--stacktrace', '-q', query]) assert out == '0\t_\n1\ta\n2\tbb\n3\tccc\n' @@ -76,13 +93,16 @@ def test_single_file_old(started_cluster, cluster): "use_compact_format_in_distributed_parts_names": "0", }) - query = "select * from file('/var/lib/clickhouse/data/test/distr_3/default@not_existing:9000/1.bin', 'Distributed')" + path = get_dist_path(cluster, 'distr_3', 0) + query = f"select * from file('{path}/1.bin', 'Distributed')" out = node.exec_in_container(['/usr/bin/clickhouse', 'local', '--stacktrace', '-q', query]) assert out == '1\ta\n2\tbb\n3\tccc\n' - query = "create table t (x UInt64, s String) engine = File('Distributed', '/var/lib/clickhouse/data/test/distr_3/default@not_existing:9000/1.bin');" \ - "select * from t" + query = f""" + create table t (x UInt64, s String) engine = File('Distributed', '{path}/1.bin'); + select * from t; + """ out = node.exec_in_container(['/usr/bin/clickhouse', 'local', '--stacktrace', '-q', query]) assert out == '1\ta\n2\tbb\n3\tccc\n' From a0209178cc198fd06ff5814d9f4d11e0775aa387 Mon Sep 17 00:00:00 2001 From: Azat Khuzhin Date: Mon, 3 May 2021 10:52:45 +0300 Subject: [PATCH 298/931] Add ability to split distributed batch on failures Add distributed_directory_monitor_split_batch_on_failure setting (OFF by default), that will split the batch and send files one by one in case of retriable errors. 
v2: more error codes --- docs/en/operations/settings/settings.md | 21 +++ src/Common/ErrorCodes.cpp | 1 + src/Core/Settings.h | 1 + src/Storages/Distributed/DirectoryMonitor.cpp | 122 ++++++++++++++---- src/Storages/Distributed/DirectoryMonitor.h | 1 + .../__init__.py | 0 .../configs/overrides_1.xml | 15 +++ .../configs/overrides_2.xml | 15 +++ .../configs/remote_servers.xml | 18 +++ .../test.py | 60 +++++++++ 10 files changed, 227 insertions(+), 27 deletions(-) create mode 100644 tests/integration/test_distributed_directory_monitor_split_batch_on_failure/__init__.py create mode 100644 tests/integration/test_distributed_directory_monitor_split_batch_on_failure/configs/overrides_1.xml create mode 100644 tests/integration/test_distributed_directory_monitor_split_batch_on_failure/configs/overrides_2.xml create mode 100644 tests/integration/test_distributed_directory_monitor_split_batch_on_failure/configs/remote_servers.xml create mode 100644 tests/integration/test_distributed_directory_monitor_split_batch_on_failure/test.py diff --git a/docs/en/operations/settings/settings.md b/docs/en/operations/settings/settings.md index 2bde3b03048..29a6948567d 100644 --- a/docs/en/operations/settings/settings.md +++ b/docs/en/operations/settings/settings.md @@ -1802,6 +1802,27 @@ Possible values: Default value: 0. +## distributed_directory_monitor_split_batch_on_failure {#distributed_directory_monitor_split_batch_on_failure} + +Enables/disables splitting batches on failures. + +Sometimes sending a particular batch to the remote shard may fail because of a complex pipeline downstream (e.g. a `MATERIALIZED VIEW` with a `GROUP BY`) hitting `Memory limit exceeded` or similar errors. In this case, retrying will not help (and it will stall distributed sends for the table), but sending the files from that batch one by one may allow the INSERT to succeed. + +Setting this to `1` disables batching for such failed batches (i.e. it temporarily disables `distributed_directory_monitor_batch_inserts` for them). + +Possible values: + +- 1 — Enabled. +- 0 — Disabled. + +Default value: 0. + +!!! note "Note" + This setting also affects broken batches (which may appear after abnormal server (machine) termination when `fsync_after_insert`/`fsync_directories` are disabled for the [Distributed](../../engines/table-engines/special/distributed.md) table engine). + +!!! warning "Warning" + You should not rely on automatic batch splitting, since this may hurt performance. + ## os_thread_priority {#setting-os-thread-priority} Sets the priority ([nice](https://en.wikipedia.org/wiki/Nice_(Unix))) for threads that execute queries. The OS scheduler considers this priority when choosing the next thread to run on each available CPU core.
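For orientation, the behavior documented above can be exercised end to end. A minimal sketch, assuming a hypothetical Distributed table named `dist`; only the setting name and the `SYSTEM FLUSH DISTRIBUTED` statement are taken from this patch and its tests:

```sql
-- distributed_directory_monitor_split_batch_on_failure must be enabled in the
-- user profile (as the integration test configs below do), because the
-- directory monitor reads it from the server context rather than per query.
-- Force delivery of pending batches; a batch that fails with a retriable
-- error such as "Memory limit exceeded" on the remote shard is then split
-- and its files are sent one by one.
SYSTEM FLUSH DISTRIBUTED dist;
```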
diff --git a/src/Common/ErrorCodes.cpp b/src/Common/ErrorCodes.cpp index be26997d8ff..2b3df9ea96a 100644 --- a/src/Common/ErrorCodes.cpp +++ b/src/Common/ErrorCodes.cpp @@ -555,6 +555,7 @@ M(585, CANNOT_PARSE_YAML) \ M(586, CANNOT_CREATE_FILE) \ M(587, CONCURRENT_ACCESS_NOT_SUPPORTED) \ + M(588, DISTRIBUTED_BROKEN_BATCH_INFO) \ \ M(998, POSTGRESQL_CONNECTION_FAILURE) \ M(999, KEEPER_EXCEPTION) \ diff --git a/src/Core/Settings.h b/src/Core/Settings.h index 84e7500b064..e660dc7caea 100644 --- a/src/Core/Settings.h +++ b/src/Core/Settings.h @@ -90,6 +90,7 @@ class IColumn; M(Milliseconds, distributed_directory_monitor_max_sleep_time_ms, 30000, "Maximum sleep time for StorageDistributed DirectoryMonitors, it limits exponential growth too.", 0) \ \ M(Bool, distributed_directory_monitor_batch_inserts, false, "Should StorageDistributed DirectoryMonitors try to batch individual inserts into bigger ones.", 0) \ + M(Bool, distributed_directory_monitor_split_batch_on_failure, false, "Should StorageDistributed DirectoryMonitors try to split batch into smaller in case of failures.", 0) \ \ M(Bool, optimize_move_to_prewhere, true, "Allows disabling WHERE to PREWHERE optimization in SELECT queries from MergeTree.", 0) \ \ diff --git a/src/Storages/Distributed/DirectoryMonitor.cpp b/src/Storages/Distributed/DirectoryMonitor.cpp index e8835132f8f..15a097a5ab9 100644 --- a/src/Storages/Distributed/DirectoryMonitor.cpp +++ b/src/Storages/Distributed/DirectoryMonitor.cpp @@ -52,6 +52,14 @@ namespace ErrorCodes extern const int ATTEMPT_TO_READ_AFTER_EOF; extern const int EMPTY_DATA_PASSED; extern const int INCORRECT_FILE_NAME; + extern const int MEMORY_LIMIT_EXCEEDED; + extern const int DISTRIBUTED_BROKEN_BATCH_INFO; + extern const int TOO_MANY_PARTS; + extern const int TOO_MANY_BYTES; + extern const int TOO_MANY_ROWS_OR_BYTES; + extern const int TOO_MANY_PARTITIONS; + extern const int DISTRIBUTED_TOO_MANY_PENDING_BYTES; + extern const int ARGUMENT_OUT_OF_BOUND; } @@ -203,9 +211,25 @@ namespace || code == ErrorCodes::CANNOT_READ_ALL_DATA || code == ErrorCodes::UNKNOWN_CODEC || code == ErrorCodes::CANNOT_DECOMPRESS + || code == ErrorCodes::DISTRIBUTED_BROKEN_BATCH_INFO || (!remote_error && code == ErrorCodes::ATTEMPT_TO_READ_AFTER_EOF); } + /// Can the batch be split and send files from batch one-by-one instead? 
+ bool isSplittableErrorCode(int code) + { + return code == ErrorCodes::MEMORY_LIMIT_EXCEEDED + /// FunctionRange::max_elements and similar + || code == ErrorCodes::ARGUMENT_OUT_OF_BOUND + || code == ErrorCodes::TOO_MANY_PARTS + || code == ErrorCodes::TOO_MANY_BYTES + || code == ErrorCodes::TOO_MANY_ROWS_OR_BYTES + || code == ErrorCodes::TOO_MANY_PARTITIONS + || code == ErrorCodes::DISTRIBUTED_TOO_MANY_PENDING_BYTES + || code == ErrorCodes::DISTRIBUTED_BROKEN_BATCH_INFO + ; + } + SyncGuardPtr getDirectorySyncGuard(bool dir_fsync, const DiskPtr & disk, const String & path) { if (dir_fsync) @@ -295,6 +319,7 @@ StorageDistributedDirectoryMonitor::StorageDistributedDirectoryMonitor( , relative_path(relative_path_) , path(fs::path(disk->getPath()) / relative_path / "") , should_batch_inserts(storage.getContext()->getSettingsRef().distributed_directory_monitor_batch_inserts) + , split_batch_on_failure(storage.getContext()->getSettingsRef().distributed_directory_monitor_split_batch_on_failure) , dir_fsync(storage.getDistributedSettingsRef().fsync_directories) , min_batched_block_size_rows(storage.getContext()->getSettingsRef().min_insert_block_size_rows) , min_batched_block_size_bytes(storage.getContext()->getSettingsRef().min_insert_block_size_bytes) @@ -618,6 +643,7 @@ struct StorageDistributedDirectoryMonitor::Batch StorageDistributedDirectoryMonitor & parent; const std::map & file_index_to_path; + bool split_batch_on_failure = true; bool fsync = false; bool dir_fsync = false; @@ -626,6 +652,7 @@ struct StorageDistributedDirectoryMonitor::Batch const std::map & file_index_to_path_) : parent(parent_) , file_index_to_path(file_index_to_path_) + , split_batch_on_failure(parent.split_batch_on_failure) , fsync(parent.storage.getDistributedSettingsRef().fsync_after_insert) , dir_fsync(parent.dir_fsync) {} @@ -681,35 +708,20 @@ struct StorageDistributedDirectoryMonitor::Batch bool batch_broken = false; try { - std::unique_ptr remote; - - for (UInt64 file_idx : file_indices) + try { - auto file_path = file_index_to_path.find(file_idx); - if (file_path == file_index_to_path.end()) - { - LOG_ERROR(parent.log, "Failed to send batch: file with index {} is absent", file_idx); - batch_broken = true; - break; - } - - ReadBufferFromFile in(file_path->second); - const auto & distributed_header = readDistributedHeader(in, parent.log); - - if (!remote) - { - remote = std::make_unique(*connection, timeouts, - distributed_header.insert_query, - distributed_header.insert_settings, - distributed_header.client_info); - remote->writePrefix(); - } - bool compression_expected = connection->getCompression() == Protocol::Compression::Enable; - writeRemoteConvert(distributed_header, *remote, compression_expected, in, parent.log); + sendBatch(*connection, timeouts); + } + catch (const Exception & e) + { + if (split_batch_on_failure && isSplittableErrorCode(e.code())) + { + tryLogCurrentException(parent.log, "Trying to split batch due to"); + sendSeparateFiles(*connection, timeouts); + } + else + throw; } - - if (remote) - remote->writeSuffix(); } catch (Exception & e) { @@ -773,6 +785,62 @@ struct StorageDistributedDirectoryMonitor::Batch } recovered = true; } + +private: + void sendBatch(Connection & connection, const ConnectionTimeouts & timeouts) + { + std::unique_ptr remote; + + for (UInt64 file_idx : file_indices) + { + auto file_path = file_index_to_path.find(file_idx); + if (file_path == file_index_to_path.end()) + throw Exception(ErrorCodes::DISTRIBUTED_BROKEN_BATCH_INFO, + "Failed to send batch: file with index {} 
is absent", file_idx); + + ReadBufferFromFile in(file_path->second); + const auto & distributed_header = readDistributedHeader(in, parent.log); + + if (!remote) + { + remote = std::make_unique(connection, timeouts, + distributed_header.insert_query, + distributed_header.insert_settings, + distributed_header.client_info); + remote->writePrefix(); + } + bool compression_expected = connection.getCompression() == Protocol::Compression::Enable; + writeRemoteConvert(distributed_header, *remote, compression_expected, in, parent.log); + } + + if (remote) + remote->writeSuffix(); + } + + void sendSeparateFiles(Connection & connection, const ConnectionTimeouts & timeouts) + { + for (UInt64 file_idx : file_indices) + { + auto file_path = file_index_to_path.find(file_idx); + if (file_path == file_index_to_path.end()) + { + LOG_ERROR(parent.log, "Failed to send one file from batch: file with index {} is absent", file_idx); + continue; + } + + ReadBufferFromFile in(file_path->second); + const auto & distributed_header = readDistributedHeader(in, parent.log); + + RemoteBlockOutputStream remote(connection, timeouts, + distributed_header.insert_query, + distributed_header.insert_settings, + distributed_header.client_info); + remote.writePrefix(); + bool compression_expected = connection.getCompression() == Protocol::Compression::Enable; + writeRemoteConvert(distributed_header, remote, compression_expected, in, parent.log); + remote.writeSuffix(); + } + } }; class DirectoryMonitorBlockInputStream : public IBlockInputStream diff --git a/src/Storages/Distributed/DirectoryMonitor.h b/src/Storages/Distributed/DirectoryMonitor.h index ab9b8592294..c04c49f3b9b 100644 --- a/src/Storages/Distributed/DirectoryMonitor.h +++ b/src/Storages/Distributed/DirectoryMonitor.h @@ -92,6 +92,7 @@ private: std::string path; const bool should_batch_inserts = false; + const bool split_batch_on_failure = true; const bool dir_fsync = false; const size_t min_batched_block_size_rows = 0; const size_t min_batched_block_size_bytes = 0; diff --git a/tests/integration/test_distributed_directory_monitor_split_batch_on_failure/__init__.py b/tests/integration/test_distributed_directory_monitor_split_batch_on_failure/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/integration/test_distributed_directory_monitor_split_batch_on_failure/configs/overrides_1.xml b/tests/integration/test_distributed_directory_monitor_split_batch_on_failure/configs/overrides_1.xml new file mode 100644 index 00000000000..4e4ccf75323 --- /dev/null +++ b/tests/integration/test_distributed_directory_monitor_split_batch_on_failure/configs/overrides_1.xml @@ -0,0 +1,15 @@ + + + + + 0 + + 1 + + 1 + + 86400 + 86400 + + + diff --git a/tests/integration/test_distributed_directory_monitor_split_batch_on_failure/configs/overrides_2.xml b/tests/integration/test_distributed_directory_monitor_split_batch_on_failure/configs/overrides_2.xml new file mode 100644 index 00000000000..d7c69c4a9ac --- /dev/null +++ b/tests/integration/test_distributed_directory_monitor_split_batch_on_failure/configs/overrides_2.xml @@ -0,0 +1,15 @@ + + + + + 0 + + 1 + + 0 + + 86400 + 86400 + + + diff --git a/tests/integration/test_distributed_directory_monitor_split_batch_on_failure/configs/remote_servers.xml b/tests/integration/test_distributed_directory_monitor_split_batch_on_failure/configs/remote_servers.xml new file mode 100644 index 00000000000..ebce4697529 --- /dev/null +++ 
b/tests/integration/test_distributed_directory_monitor_split_batch_on_failure/configs/remote_servers.xml @@ -0,0 +1,18 @@ + + + + + + node1 + 9000 + + + + + node2 + 9000 + + + + + diff --git a/tests/integration/test_distributed_directory_monitor_split_batch_on_failure/test.py b/tests/integration/test_distributed_directory_monitor_split_batch_on_failure/test.py new file mode 100644 index 00000000000..9cbf8771ee5 --- /dev/null +++ b/tests/integration/test_distributed_directory_monitor_split_batch_on_failure/test.py @@ -0,0 +1,60 @@ +import pytest +from helpers.client import QueryRuntimeException +from helpers.cluster import ClickHouseCluster + +cluster = ClickHouseCluster(__file__) + +# node1 -- distributed_directory_monitor_split_batch_on_failure=on +node1 = cluster.add_instance('node1', + main_configs=['configs/remote_servers.xml'], + user_configs=['configs/overrides_1.xml'], +) +# node2 -- distributed_directory_monitor_split_batch_on_failure=off +node2 = cluster.add_instance('node2', + main_configs=['configs/remote_servers.xml'], + user_configs=['configs/overrides_2.xml'], +) + +@pytest.fixture(scope='module') +def started_cluster(): + try: + cluster.start() + + for _, node in cluster.instances.items(): + node.query(""" + create table null_ (key Int, value Int) engine=Null(); + create table dist as null_ engine=Distributed(test_cluster, currentDatabase(), null_, key); + create table data (key Int, uniq_values Int) engine=Memory(); + create materialized view mv to data as select key, uniqExact(value) uniq_values from null_ group by key; + system stop distributed sends dist; + + create table dist_data as data engine=Distributed(test_cluster, currentDatabase(), data); + """) + + yield cluster + finally: + cluster.shutdown() + +def test_distributed_directory_monitor_split_batch_on_failure_OFF(started_cluster): + for i in range(0, 100): + limit = 100e3 + node2.query(f'insert into dist select number/100, number from system.numbers limit {limit} offset {limit*i}', settings={ + # max_memory_usage is the limit for the batch on the remote node + # (local query should not be affected since 30MB is enough for 100K rows) + 'max_memory_usage': '30Mi', + }) + # "Received from" is mandatory, since the exception should be thrown on the remote node. + with pytest.raises(QueryRuntimeException, match=r'DB::Exception: Received from.*Memory limit \(for query\) exceeded: .*while pushing to view default\.mv'): + node2.query('system flush distributed dist') + assert int(node2.query('select count() from dist_data')) == 0 + +def test_distributed_directory_monitor_split_batch_on_failure_ON(started_cluster): + for i in range(0, 100): + limit = 100e3 + node1.query(f'insert into dist select number/100, number from system.numbers limit {limit} offset {limit*i}', settings={ + # max_memory_usage is the limit for the batch on the remote node + # (local query should not be affected since 30MB is enough for 100K rows) + 'max_memory_usage': '30Mi', + }) + node1.query('system flush distributed dist') + assert int(node1.query('select count() from dist_data')) == 100000 From 3bd53c68f98e6b4c4fb75275289a55b9b072f864 Mon Sep 17 00:00:00 2001 From: Azat Khuzhin Date: Mon, 3 May 2021 10:52:51 +0300 Subject: [PATCH 299/931] Try to split the batch in case of broken batch too Broken batches may be because of abnormal server shutdown (and lack of fsync), and ignoring the whole batch is not great in this case, so apply the same split logic here too. 
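The fsync remark above has a concrete shape on the table side. A minimal sketch, assuming that the `fsync_after_insert`/`fsync_directories` knobs referenced by the code are exposed as Distributed engine settings; the cluster and table names are hypothetical:

```sql
-- With fsync enabled, queued batch files survive abnormal server
-- termination, so broken batches become unlikely in the first place.
CREATE TABLE dist_durable AS local_table
ENGINE = Distributed(some_cluster, currentDatabase(), local_table, rand())
SETTINGS fsync_after_insert = 1, fsync_directories = 1;
```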
v2: rename exception v3: catch missing exception v4: fix marking the file as broken multiple times (fixes test_insert_distributed_async_send with setting enabled) --- src/Common/ErrorCodes.cpp | 1 + src/Storages/Distributed/DirectoryMonitor.cpp | 48 ++++++++++++++----- 2 files changed, 36 insertions(+), 13 deletions(-) diff --git a/src/Common/ErrorCodes.cpp b/src/Common/ErrorCodes.cpp index 2b3df9ea96a..f4ceef2896a 100644 --- a/src/Common/ErrorCodes.cpp +++ b/src/Common/ErrorCodes.cpp @@ -556,6 +556,7 @@ M(586, CANNOT_CREATE_FILE) \ M(587, CONCURRENT_ACCESS_NOT_SUPPORTED) \ M(588, DISTRIBUTED_BROKEN_BATCH_INFO) \ + M(589, DISTRIBUTED_BROKEN_BATCH_FILES) \ \ M(998, POSTGRESQL_CONNECTION_FAILURE) \ M(999, KEEPER_EXCEPTION) \ diff --git a/src/Storages/Distributed/DirectoryMonitor.cpp b/src/Storages/Distributed/DirectoryMonitor.cpp index 15a097a5ab9..b40b73f45cd 100644 --- a/src/Storages/Distributed/DirectoryMonitor.cpp +++ b/src/Storages/Distributed/DirectoryMonitor.cpp @@ -54,6 +54,7 @@ namespace ErrorCodes extern const int INCORRECT_FILE_NAME; extern const int MEMORY_LIMIT_EXCEEDED; extern const int DISTRIBUTED_BROKEN_BATCH_INFO; + extern const int DISTRIBUTED_BROKEN_BATCH_FILES; extern const int TOO_MANY_PARTS; extern const int TOO_MANY_BYTES; extern const int TOO_MANY_ROWS_OR_BYTES; @@ -212,11 +213,12 @@ namespace || code == ErrorCodes::UNKNOWN_CODEC || code == ErrorCodes::CANNOT_DECOMPRESS || code == ErrorCodes::DISTRIBUTED_BROKEN_BATCH_INFO + || code == ErrorCodes::DISTRIBUTED_BROKEN_BATCH_FILES || (!remote_error && code == ErrorCodes::ATTEMPT_TO_READ_AFTER_EOF); } /// Can the batch be split and send files from batch one-by-one instead? - bool isSplittableErrorCode(int code) + bool isSplittableErrorCode(int code, bool remote) { return code == ErrorCodes::MEMORY_LIMIT_EXCEEDED /// FunctionRange::max_elements and similar @@ -227,6 +229,7 @@ namespace || code == ErrorCodes::TOO_MANY_PARTITIONS || code == ErrorCodes::DISTRIBUTED_TOO_MANY_PENDING_BYTES || code == ErrorCodes::DISTRIBUTED_BROKEN_BATCH_INFO + || isFileBrokenErrorCode(code, remote) ; } @@ -706,6 +709,7 @@ struct StorageDistributedDirectoryMonitor::Batch auto connection = parent.pool->get(timeouts); bool batch_broken = false; + bool batch_marked_as_broken = false; try { try @@ -714,7 +718,7 @@ struct StorageDistributedDirectoryMonitor::Batch } catch (const Exception & e) { - if (split_batch_on_failure && isSplittableErrorCode(e.code())) + if (split_batch_on_failure && isSplittableErrorCode(e.code(), e.isRemoteException())) { tryLogCurrentException(parent.log, "Trying to split batch due to"); sendSeparateFiles(*connection, timeouts); @@ -729,6 +733,8 @@ struct StorageDistributedDirectoryMonitor::Batch { tryLogCurrentException(parent.log, "Failed to send batch due to"); batch_broken = true; + if (!e.isRemoteException() && e.code() == ErrorCodes::DISTRIBUTED_BROKEN_BATCH_FILES) + batch_marked_as_broken = true; } else { @@ -749,7 +755,7 @@ struct StorageDistributedDirectoryMonitor::Batch for (UInt64 file_index : file_indices) parent.markAsSend(file_index_to_path.at(file_index)); } - else + else if (!batch_marked_as_broken) { LOG_ERROR(parent.log, "Marking a batch of {} files as broken.", file_indices.size()); @@ -819,27 +825,43 @@ private: void sendSeparateFiles(Connection & connection, const ConnectionTimeouts & timeouts) { + size_t broken_files = 0; + for (UInt64 file_idx : file_indices) { auto file_path = file_index_to_path.find(file_idx); if (file_path == file_index_to_path.end()) { LOG_ERROR(parent.log, "Failed to send one file 
from batch: file with index {} is absent", file_idx); + ++broken_files; continue; } - ReadBufferFromFile in(file_path->second); - const auto & distributed_header = readDistributedHeader(in, parent.log); + try + { + ReadBufferFromFile in(file_path->second); + const auto & distributed_header = readDistributedHeader(in, parent.log); - RemoteBlockOutputStream remote(connection, timeouts, - distributed_header.insert_query, - distributed_header.insert_settings, - distributed_header.client_info); - remote.writePrefix(); - bool compression_expected = connection.getCompression() == Protocol::Compression::Enable; - writeRemoteConvert(distributed_header, remote, compression_expected, in, parent.log); - remote.writeSuffix(); + RemoteBlockOutputStream remote(connection, timeouts, + distributed_header.insert_query, + distributed_header.insert_settings, + distributed_header.client_info); + remote.writePrefix(); + bool compression_expected = connection.getCompression() == Protocol::Compression::Enable; + writeRemoteConvert(distributed_header, remote, compression_expected, in, parent.log); + remote.writeSuffix(); + } + catch (Exception & e) + { + e.addMessage(fmt::format("While sending {}", file_path->second)); + parent.maybeMarkAsBroken(file_path->second, e); + ++broken_files; + } } + + if (broken_files) + throw Exception(ErrorCodes::DISTRIBUTED_BROKEN_BATCH_FILES, + "Failed to send {} files", broken_files); } }; From 353a770a870b5604f6c15e07270f4789f705fedb Mon Sep 17 00:00:00 2001 From: Azat Khuzhin Date: Tue, 4 May 2021 08:59:09 +0300 Subject: [PATCH 300/931] Extend test_insert_distributed_async_send for distributed_directory_monitor_split_batch_on_failure --- .../configs/remote_servers_split.xml | 32 ++++++++++++ .../configs/users.d/split.xml | 7 +++ .../test.py | 50 ++++++++++++++----- 3 files changed, 77 insertions(+), 12 deletions(-) create mode 100644 tests/integration/test_insert_distributed_async_send/configs/remote_servers_split.xml create mode 100644 tests/integration/test_insert_distributed_async_send/configs/users.d/split.xml diff --git a/tests/integration/test_insert_distributed_async_send/configs/remote_servers_split.xml b/tests/integration/test_insert_distributed_async_send/configs/remote_servers_split.xml new file mode 100644 index 00000000000..e2757bbc18c --- /dev/null +++ b/tests/integration/test_insert_distributed_async_send/configs/remote_servers_split.xml @@ -0,0 +1,32 @@ + + + + + false + + n3 + 9000 + + + n4 + 9000 + + + + + + + n3 + 9000 + + + + + n4 + 9000 + + + + + + diff --git a/tests/integration/test_insert_distributed_async_send/configs/users.d/split.xml b/tests/integration/test_insert_distributed_async_send/configs/users.d/split.xml new file mode 100644 index 00000000000..bf826629685 --- /dev/null +++ b/tests/integration/test_insert_distributed_async_send/configs/users.d/split.xml @@ -0,0 +1,7 @@ + + + + 1 + + + diff --git a/tests/integration/test_insert_distributed_async_send/test.py b/tests/integration/test_insert_distributed_async_send/test.py index b469da4e2e1..a9bf9801f4c 100644 --- a/tests/integration/test_insert_distributed_async_send/test.py +++ b/tests/integration/test_insert_distributed_async_send/test.py @@ -17,11 +17,29 @@ n1 = cluster.add_instance('n1', main_configs=['configs/remote_servers.xml'], use # n2 -- distributed_directory_monitor_batch_inserts=0 n2 = cluster.add_instance('n2', main_configs=['configs/remote_servers.xml'], user_configs=['configs/users.d/no_batch.xml']) +# n3 -- 
distributed_directory_monitor_batch_inserts=1/distributed_directory_monitor_split_batch_on_failure=1 +n3 = cluster.add_instance('n3', main_configs=['configs/remote_servers_split.xml'], user_configs=[ + 'configs/users.d/batch.xml', + 'configs/users.d/split.xml', +]) +# n4 -- distributed_directory_monitor_batch_inserts=0/distributed_directory_monitor_split_batch_on_failure=1 +n4 = cluster.add_instance('n4', main_configs=['configs/remote_servers_split.xml'], user_configs=[ + 'configs/users.d/no_batch.xml', + 'configs/users.d/split.xml', +]) + batch_params = pytest.mark.parametrize('batch', [ (1), (0), ]) +batch_and_split_params = pytest.mark.parametrize('batch,split', [ + (1, 0), + (0, 0), + (1, 1), + (0, 1), +]) + @pytest.fixture(scope='module', autouse=True) def start_cluster(): try: @@ -62,15 +80,19 @@ def insert_data(node): assert size > 1<<16 return size -def get_node(batch): +def get_node(batch, split=None): + if split: + if batch: + return n3 + return n4 if batch: return n1 return n2 -def bootstrap(batch): +def bootstrap(batch, split=None): drop_tables() create_tables('insert_distributed_async_send_cluster_two_replicas') - return insert_data(get_node(batch)) + return insert_data(get_node(batch, split)) def get_path_to_dist_batch(file='2.bin'): # There are: @@ -80,8 +102,8 @@ def get_path_to_dist_batch(file='2.bin'): # @return the file for the n2 shard return f'/var/lib/clickhouse/data/default/dist/shard1_replica2/{file}' -def check_dist_after_corruption(truncate, batch): - node = get_node(batch) +def check_dist_after_corruption(truncate, batch, split=None): + node = get_node(batch, split) if batch: # In batch mode errors are ignored @@ -102,8 +124,12 @@ def check_dist_after_corruption(truncate, batch): broken = get_path_to_dist_batch('broken') node.exec_in_container(['bash', '-c', f'ls {broken}/2.bin']) - assert int(n1.query('SELECT count() FROM data')) == 10000 - assert int(n2.query('SELECT count() FROM data')) == 0 + if split: + assert int(n3.query('SELECT count() FROM data')) == 10000 + assert int(n4.query('SELECT count() FROM data')) == 0 + else: + assert int(n1.query('SELECT count() FROM data')) == 10000 + assert int(n2.query('SELECT count() FROM data')) == 0 @batch_params @@ -114,17 +140,17 @@ def test_insert_distributed_async_send_success(batch): assert int(n1.query('SELECT count() FROM data')) == 10000 assert int(n2.query('SELECT count() FROM data')) == 10000 -@batch_params -def test_insert_distributed_async_send_truncated_1(batch): - size = bootstrap(batch) +@batch_and_split_params +def test_insert_distributed_async_send_truncated_1(batch, split): + size = bootstrap(batch, split) path = get_path_to_dist_batch() - node = get_node(batch) + node = get_node(batch, split) new_size = size - 10 # we cannot use truncate, due to hardlinks node.exec_in_container(['bash', '-c', f'mv {path} /tmp/bin && head -c {new_size} /tmp/bin > {path}']) - check_dist_after_corruption(True, batch) + check_dist_after_corruption(True, batch, split) @batch_params def test_insert_distributed_async_send_truncated_2(batch): From ecb766a5c647da8c68fbfe93579fb75179fdb7fd Mon Sep 17 00:00:00 2001 From: George Date: Wed, 23 Jun 2021 03:15:11 +0300 Subject: [PATCH 301/931] En docs --- docs/en/operations/settings/settings.md | 15 ++++-- .../aggregate-functions/reference/count.md | 2 + docs/en/sql-reference/data-types/map.md | 48 +++++++++++++++++++ .../functions/array-functions.md | 6 +++ .../functions/tuple-map-functions.md | 4 ++ docs/en/sql-reference/operators/index.md | 4 ++ 6 files changed, 76 insertions(+), 
3 deletions(-) diff --git a/docs/en/operations/settings/settings.md b/docs/en/operations/settings/settings.md index de7f734e8e0..1fd951383c8 100644 --- a/docs/en/operations/settings/settings.md +++ b/docs/en/operations/settings/settings.md @@ -1729,16 +1729,25 @@ Default value: 0. ## optimize_functions_to_subcolumns {#optimize-functions-to-subcolumns} -Optimizes functions (if possible) to subcolumns to reduce amount of read data. +Optimizes functions (if possible) by transforming them to read the subcolumns. This reduces the amount of read data. -- +These functions can be transformed: + +- [length](../../sql-reference/functions/array-functions.md#array_functions-length) to read subcolumn [size0](../../sql-reference/data-types/array.md#array-size). +- [empty](../../sql-reference/functions/array-functions.md#function-empty) to read subcolumn [size0](../../sql-reference/data-types/array.md#array-size). +- [notEmpty](../../sql-reference/functions/array-functions.md#function-notempty) to read subcolumn [size0](../../sql-reference/data-types/array.md#array-size). +- [isNull](../../sql-reference/operators/index.md#operator-is-null) to read subcolumn [null](../../sql-reference/data-types/nullable.md#finding-null). +- [isNotNull](../../sql-reference/operators.md#is-not-null#is-not-null) to read subcolumn [null](../../sql-reference/data-types/nullable.md#finding-null). +- [count](../../sql-reference/aggregate-functions/reference/count.md) to read subcolumn [null](../../sql-reference/data-types/nullable.md#finding-null). +- [mapKeys](../../sql-reference/functions/tuple-map-functions.md#mapkeys) to read subcolumn [keys](../../sql-reference/data-types/map.md#subcolumn-keys). +- [mapValues](../../sql-reference/functions/tuple-map-functions.md#mapvalues) to read subcolumn [values](../../sql-reference/data-types/map.md#subcolumn-values). Possible values: - 0 — Disabled. - 1 — Enabled. -Default value: `value`. +Default value: `0`. ## distributed_replica_error_half_life {#settings-distributed_replica_error_half_life} diff --git a/docs/en/sql-reference/aggregate-functions/reference/count.md b/docs/en/sql-reference/aggregate-functions/reference/count.md index 48c6f3f8c05..a3d1fcdbf5c 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/count.md +++ b/docs/en/sql-reference/aggregate-functions/reference/count.md @@ -31,6 +31,8 @@ ClickHouse supports the `COUNT(DISTINCT ...)` syntax. The behavior of this const The `SELECT count() FROM table` query is not optimized, because the number of entries in the table is not stored separately. It chooses a small column from the table and counts the number of values in it. +Can be optimized by setting [optimize_functions_to_subcolumns](../../operations/settings/settings.md#optimize-functions-to-subcolumns). With `optimize_functions_to_subcolumns = 1` the function reads subcolumn [null](../../sql-reference/data-types/nullable.md#finding-null). + **Examples** Example 1: diff --git a/docs/en/sql-reference/data-types/map.md b/docs/en/sql-reference/data-types/map.md index 58634e5b669..10074f07cab 100644 --- a/docs/en/sql-reference/data-types/map.md +++ b/docs/en/sql-reference/data-types/map.md @@ -75,6 +75,54 @@ SELECT CAST(([1, 2, 3], ['Ready', 'Steady', 'Go']), 'Map(UInt8, String)') AS map └───────────────────────────────┘ ``` +## Subcolumn Map.keys {#subcolumn-keys} + +To read all keys of a `Map` you can use the subcolumn `keys`, which doesn't read the whole column.
+ +**Example** + +Query: + +``` sql +CREATE TABLE t_map (`a` Map(String, UInt64)) ENGINE = Memory; + +INSERT INTO t_map VALUES (map('key1', 1, 'key2', 2, 'key3', 3)); + +SELECT a.keys FROM t_map; +``` + +Result: + +``` text +┌─a.keys─────────────────┐ +│ ['key1','key2','key3'] │ +└────────────────────────┘ +``` + +## Subcolumn Map.values {#subcolumn-values} + +To read all values of a `Map` you can use the subcolumn `values`, which doesn't read the whole column. + +**Example** + +Query: + +``` sql +CREATE TABLE t_map (`a` Map(String, UInt64)) ENGINE = Memory; + +INSERT INTO t_map VALUES (map('key1', 1, 'key2', 2, 'key3', 3)); + +SELECT a.values FROM t_map; +``` + +Result: + +``` text +┌─a.values─┐ +│ [1,2,3] │ +└──────────┘ +``` + **See Also** - [map()](../../sql-reference/functions/tuple-map-functions.md#function-map) function diff --git a/docs/en/sql-reference/functions/array-functions.md b/docs/en/sql-reference/functions/array-functions.md index 6495a26a426..822600dd52f 100644 --- a/docs/en/sql-reference/functions/array-functions.md +++ b/docs/en/sql-reference/functions/array-functions.md @@ -11,18 +11,24 @@ Returns 1 for an empty array, or 0 for a non-empty array. The result type is UInt8. The function also works for strings. +Can be optimized by setting [optimize_functions_to_subcolumns](../../operations/settings/settings.md#optimize-functions-to-subcolumns). With `optimize_functions_to_subcolumns = 1` the function reads subcolumn [size0](../../sql-reference/data-types/array.md#array-size). + ## notEmpty {#function-notempty} Returns 0 for an empty array, or 1 for a non-empty array. The result type is UInt8. The function also works for strings. +Can be optimized by setting [optimize_functions_to_subcolumns](../../operations/settings/settings.md#optimize-functions-to-subcolumns). With `optimize_functions_to_subcolumns = 1` the function reads subcolumn [size0](../../sql-reference/data-types/array.md#array-size). + ## length {#array_functions-length} Returns the number of items in the array. The result type is UInt64. The function also works for strings. +Can be optimized by setting [optimize_functions_to_subcolumns](../../operations/settings/settings.md#optimize-functions-to-subcolumns). With `optimize_functions_to_subcolumns = 1` the function reads subcolumn [size0](../../sql-reference/data-types/array.md#array-size). + ## emptyArrayUInt8, emptyArrayUInt16, emptyArrayUInt32, emptyArrayUInt64 {#emptyarrayuint8-emptyarrayuint16-emptyarrayuint32-emptyarrayuint64} ## emptyArrayInt8, emptyArrayInt16, emptyArrayInt32, emptyArrayInt64 {#emptyarrayint8-emptyarrayint16-emptyarrayint32-emptyarrayint64} diff --git a/docs/en/sql-reference/functions/tuple-map-functions.md b/docs/en/sql-reference/functions/tuple-map-functions.md index 8b0710c0182..efede833e7a 100644 --- a/docs/en/sql-reference/functions/tuple-map-functions.md +++ b/docs/en/sql-reference/functions/tuple-map-functions.md @@ -220,6 +220,8 @@ Result: Returns all keys from the `map` parameter. +Can be optimized by setting [optimize_functions_to_subcolumns](../../operations/settings/settings.md#optimize-functions-to-subcolumns). With `optimize_functions_to_subcolumns = 1` the function reads subcolumn [keys](../../sql-reference/data-types/map.md#subcolumn-keys). + **Syntax** ```sql @@ -261,6 +263,8 @@ Result: Returns all values from the `map` parameter. +Can be optimized by setting [optimize_functions_to_subcolumns](../../operations/settings/settings.md#optimize-functions-to-subcolumns).
With `optimize_functions_to_subcolumns = 1` the function reads subcolumn [values](../../sql-reference/data-types/map.md#subcolumn-values). + **Syntax** ```sql diff --git a/docs/en/sql-reference/operators/index.md b/docs/en/sql-reference/operators/index.md index 268e56a5034..f45c7c7b90f 100644 --- a/docs/en/sql-reference/operators/index.md +++ b/docs/en/sql-reference/operators/index.md @@ -283,6 +283,8 @@ ClickHouse supports the `IS NULL` and `IS NOT NULL` operators. - `0` otherwise. - For other values, the `IS NULL` operator always returns `0`. +Can be optimized by setting [optimize_functions_to_subcolumns](../../operations/settings/settings.md#optimize-functions-to-subcolumns). With `optimize_functions_to_subcolumns = 1` the function reads subcolumn [null](../../sql-reference/data-types/nullable.md#finding-null). + ``` sql @@ -313,3 +315,5 @@ SELECT * FROM t_null WHERE y IS NOT NULL │ 2 │ 3 │ └───┴───┘ ``` + +Can be optimized by setting [optimize_functions_to_subcolumns](../../operations/settings/settings.md#optimize-functions-to-subcolumns). With `optimize_functions_to_subcolumns = 1` the function reads subcolumn [null](../../sql-reference/data-types/nullable.md#finding-null). From 7cc4fa3696b55ac8bfcf4557105b8e834112aa42 Mon Sep 17 00:00:00 2001 From: George Date: Wed, 23 Jun 2021 03:25:28 +0300 Subject: [PATCH 302/931] fixed links --- docs/en/sql-reference/aggregate-functions/reference/count.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/en/sql-reference/aggregate-functions/reference/count.md b/docs/en/sql-reference/aggregate-functions/reference/count.md index a3d1fcdbf5c..2d6d931866d 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/count.md +++ b/docs/en/sql-reference/aggregate-functions/reference/count.md @@ -31,7 +31,7 @@ ClickHouse supports the `COUNT(DISTINCT ...)` syntax. The behavior of this const The `SELECT count() FROM table` query is not optimized, because the number of entries in the table is not stored separately. It chooses a small column from the table and counts the number of values in it. -Can be optimized by setting [optimize_functions_to_subcolumns](../../operations/settings/settings.md#optimize-functions-to-subcolumns). With `optimize_functions_to_subcolumns = 1` the function reads subcolumn [null](../../sql-reference/data-types/nullable.md#finding-null). +Can be optimized by setting [optimize_functions_to_subcolumns](../../../operations/settings/settings.md#optimize-functions-to-subcolumns). With `optimize_functions_to_subcolumns = 1` the function reads subcolumn [null](../../../sql-reference/data-types/nullable.md#finding-null). **Examples** From 4e22692512ea8f4617616d3bed7c662e71211103 Mon Sep 17 00:00:00 2001 From: George Date: Wed, 23 Jun 2021 03:26:08 +0300 Subject: [PATCH 303/931] fixed links --- docs/en/operations/settings/settings.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/en/operations/settings/settings.md b/docs/en/operations/settings/settings.md index 1fd951383c8..47ac756d6b8 100644 --- a/docs/en/operations/settings/settings.md +++ b/docs/en/operations/settings/settings.md @@ -1737,7 +1737,7 @@ These function can be tranformed: - [empty](../../sql-reference/functions/array-functions.md#function-empty) to read subcolumn [size0](../../sql-reference/data-types/array.md#array-size). - [notEmpty](../../sql-reference/functions/array-functions.md#function-notempty) to read subcolumn [size0](../../sql-reference/data-types/array.md#array-size). 
- [isNull](../../sql-reference/operators/index.md#operator-is-null) to read subcolumn [null](../../sql-reference/data-types/nullable.md#finding-null). -- [isNotNull](../../sql-reference/operators.md#is-not-null#is-not-null) to read subcolumn [null](../../sql-reference/data-types/nullable.md#finding-null). +- [isNotNull](../../sql-reference/operators/index.md#is-not-null#is-not-null) to read subcolumn [null](../../sql-reference/data-types/nullable.md#finding-null). - [count](../../sql-reference/aggregate-functions/reference/count.md) to read subcolumn [null](../../sql-reference/data-types/nullable.md#finding-null). - [mapKeys](../../sql-reference/functions/tuple-map-functions.md#mapkeys) to read subcolumn [keys](../../sql-reference/data-types/map.md#subcolumn-keys). - [mapValues](../../sql-reference/functions/tuple-map-functions.md#mapvalues) to read subcolumn [values](../../sql-reference/data-types/map.md#subcolumn-values). From e7fe155e987f8e3b964e05b15fb81692c66c816f Mon Sep 17 00:00:00 2001 From: George Date: Wed, 23 Jun 2021 03:58:24 +0300 Subject: [PATCH 304/931] Added the articles --- .../en/sql-reference/aggregate-functions/reference/count.md | 2 +- docs/en/sql-reference/functions/array-functions.md | 6 +++--- docs/en/sql-reference/functions/tuple-map-functions.md | 4 ++-- docs/en/sql-reference/operators/index.md | 4 ++-- 4 files changed, 8 insertions(+), 8 deletions(-) diff --git a/docs/en/sql-reference/aggregate-functions/reference/count.md b/docs/en/sql-reference/aggregate-functions/reference/count.md index 2d6d931866d..6f55d3b5cee 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/count.md +++ b/docs/en/sql-reference/aggregate-functions/reference/count.md @@ -31,7 +31,7 @@ ClickHouse supports the `COUNT(DISTINCT ...)` syntax. The behavior of this const The `SELECT count() FROM table` query is not optimized, because the number of entries in the table is not stored separately. It chooses a small column from the table and counts the number of values in it. -Can be optimized by setting [optimize_functions_to_subcolumns](../../../operations/settings/settings.md#optimize-functions-to-subcolumns). With `optimize_functions_to_subcolumns = 1` the function reads subcolumn [null](../../../sql-reference/data-types/nullable.md#finding-null). +Can be optimized by the setting [optimize_functions_to_subcolumns](../../../operations/settings/settings.md#optimize-functions-to-subcolumns). With `optimize_functions_to_subcolumns = 1` the function reads subcolumn [null](../../../sql-reference/data-types/nullable.md#finding-null). **Examples** diff --git a/docs/en/sql-reference/functions/array-functions.md b/docs/en/sql-reference/functions/array-functions.md index 822600dd52f..10b8500b571 100644 --- a/docs/en/sql-reference/functions/array-functions.md +++ b/docs/en/sql-reference/functions/array-functions.md @@ -11,7 +11,7 @@ Returns 1 for an empty array, or 0 for a non-empty array. The result type is UInt8. The function also works for strings. -Can be optimized by setting [optimize_functions_to_subcolumns](../../operations/settings/settings.md#optimize-functions-to-subcolumns). With `optimize_functions_to_subcolumns = 1` the function reads subcolumn [size0](../../sql-reference/data-types/array.md#array-size). +Can be optimized by the setting [optimize_functions_to_subcolumns](../../operations/settings/settings.md#optimize-functions-to-subcolumns). With `optimize_functions_to_subcolumns = 1` the function reads subcolumn [size0](../../sql-reference/data-types/array.md#array-size). 
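The note attached to `empty` above (and repeated for `notEmpty` and `length` below) describes one mechanism, so a single illustration may help. A sketch with a hypothetical table `t_arr` holding an Array column `arr`:

```sql
SET optimize_functions_to_subcolumns = 1;

-- With the setting enabled, empty() and length() read only the small
-- `size0` subcolumn instead of the whole Array column.
SELECT empty(arr), length(arr) FROM t_arr;
```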
## notEmpty {#function-notempty} @@ -19,7 +19,7 @@ Returns 0 for an empty array, or 1 for a non-empty array. The result type is UInt8. The function also works for strings. -Can be optimized by setting [optimize_functions_to_subcolumns](../../operations/settings/settings.md#optimize-functions-to-subcolumns). With `optimize_functions_to_subcolumns = 1` the function reads subcolumn [size0](../../sql-reference/data-types/array.md#array-size). +Can be optimized by the setting [optimize_functions_to_subcolumns](../../operations/settings/settings.md#optimize-functions-to-subcolumns). With `optimize_functions_to_subcolumns = 1` the function reads subcolumn [size0](../../sql-reference/data-types/array.md#array-size). ## length {#array_functions-length} @@ -27,7 +27,7 @@ Returns the number of items in the array. The result type is UInt64. The function also works for strings. -Can be optimized by setting [optimize_functions_to_subcolumns](../../operations/settings/settings.md#optimize-functions-to-subcolumns). With `optimize_functions_to_subcolumns = 1` the function reads subcolumn [size0](../../sql-reference/data-types/array.md#array-size). +Can be optimized by the setting [optimize_functions_to_subcolumns](../../operations/settings/settings.md#optimize-functions-to-subcolumns). With `optimize_functions_to_subcolumns = 1` the function reads subcolumn [size0](../../sql-reference/data-types/array.md#array-size). ## emptyArrayUInt8, emptyArrayUInt16, emptyArrayUInt32, emptyArrayUInt64 {#emptyarrayuint8-emptyarrayuint16-emptyarrayuint32-emptyarrayuint64} diff --git a/docs/en/sql-reference/functions/tuple-map-functions.md b/docs/en/sql-reference/functions/tuple-map-functions.md index efede833e7a..2deb9323cff 100644 --- a/docs/en/sql-reference/functions/tuple-map-functions.md +++ b/docs/en/sql-reference/functions/tuple-map-functions.md @@ -220,7 +220,7 @@ Result: Returns all keys from the `map` parameter. -Can be optimized by setting [optimize_functions_to_subcolumns](../../operations/settings/settings.md#optimize-functions-to-subcolumns). With `optimize_functions_to_subcolumns = 1` the function reads subcolumn [keys](../../sql-reference/data-types/map.md#subcolumn-keys). +Can be optimized by setting the [optimize_functions_to_subcolumns](../../operations/settings/settings.md#optimize-functions-to-subcolumns). With `optimize_functions_to_subcolumns = 1` the function reads subcolumn [keys](../../sql-reference/data-types/map.md#subcolumn-keys). **Syntax** @@ -263,7 +263,7 @@ Result: Returns all values from the `map` parameter. -Can be optimized by setting [optimize_functions_to_subcolumns](../../operations/settings/settings.md#optimize-functions-to-subcolumns). With `optimize_functions_to_subcolumns = 1` the function reads subcolumn [values](../../sql-reference/data-types/map.md#subcolumn-values). +Can be optimized by the setting [optimize_functions_to_subcolumns](../../operations/settings/settings.md#optimize-functions-to-subcolumns). With `optimize_functions_to_subcolumns = 1` the function reads subcolumn [values](../../sql-reference/data-types/map.md#subcolumn-values). **Syntax** diff --git a/docs/en/sql-reference/operators/index.md b/docs/en/sql-reference/operators/index.md index f45c7c7b90f..1cb7936969c 100644 --- a/docs/en/sql-reference/operators/index.md +++ b/docs/en/sql-reference/operators/index.md @@ -283,7 +283,7 @@ ClickHouse supports the `IS NULL` and `IS NOT NULL` operators. - `0` otherwise. - For other values, the `IS NULL` operator always returns `0`. 
-Can be optimized by setting [optimize_functions_to_subcolumns](../../operations/settings/settings.md#optimize-functions-to-subcolumns). With `optimize_functions_to_subcolumns = 1` the function reads subcolumn [null](../../sql-reference/data-types/nullable.md#finding-null). +Can be optimized by the setting [optimize_functions_to_subcolumns](../../operations/settings/settings.md#optimize-functions-to-subcolumns). With `optimize_functions_to_subcolumns = 1` the function reads subcolumn [null](../../sql-reference/data-types/nullable.md#finding-null). @@ -316,4 +316,4 @@ SELECT * FROM t_null WHERE y IS NOT NULL └───┴───┘ ``` -Can be optimized by setting [optimize_functions_to_subcolumns](../../operations/settings/settings.md#optimize-functions-to-subcolumns). With `optimize_functions_to_subcolumns = 1` the function reads subcolumn [null](../../sql-reference/data-types/nullable.md#finding-null). +Can be optimized by the setting [optimize_functions_to_subcolumns](../../operations/settings/settings.md#optimize-functions-to-subcolumns). With `optimize_functions_to_subcolumns = 1` the function reads subcolumn [null](../../sql-reference/data-types/nullable.md#finding-null). From fa3d08420fbeb37bf7b4142cde7d3e3e02a2cd3f Mon Sep 17 00:00:00 2001 From: George Date: Wed, 23 Jun 2021 04:04:20 +0300 Subject: [PATCH 305/931] Unrelated fix --- docs/en/sql-reference/data-types/map.md | 1 + 1 file changed, 1 insertion(+) diff --git a/docs/en/sql-reference/data-types/map.md b/docs/en/sql-reference/data-types/map.md index 10074f07cab..dc1a9846d22 100644 --- a/docs/en/sql-reference/data-types/map.md +++ b/docs/en/sql-reference/data-types/map.md @@ -8,6 +8,7 @@ toc_title: Map(key, value) `Map(key, value)` data type stores `key:value` pairs. **Parameters** + - `key` — The key part of the pair. [String](../../sql-reference/data-types/string.md) or [Integer](../../sql-reference/data-types/int-uint.md). - `value` — The value part of the pair. [String](../../sql-reference/data-types/string.md), [Integer](../../sql-reference/data-types/int-uint.md) or [Array](../../sql-reference/data-types/array.md). From 50d9d3efd5d0275556838719cc52d8e0147ed245 Mon Sep 17 00:00:00 2001 From: Azat Khuzhin Date: Tue, 22 Jun 2021 10:07:22 +0300 Subject: [PATCH 306/931] Remove only symlinks during force_restore_data of Atomic engine --- src/Databases/DatabaseAtomic.cpp | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) diff --git a/src/Databases/DatabaseAtomic.cpp b/src/Databases/DatabaseAtomic.cpp index 6b8c470861d..4b9d84de282 100644 --- a/src/Databases/DatabaseAtomic.cpp +++ b/src/Databases/DatabaseAtomic.cpp @@ -25,6 +25,7 @@ namespace ErrorCodes extern const int NOT_IMPLEMENTED; extern const int FILE_ALREADY_EXISTS; extern const int INCORRECT_QUERY; + extern const int ABORTED; } class AtomicDatabaseTablesSnapshotIterator final : public DatabaseTablesSnapshotIterator @@ -420,7 +421,18 @@ void DatabaseAtomic::loadStoredObjects(ContextMutablePtr local_context, bool has { /// Recreate symlinks to table data dirs in case of force restore, because some of them may be broken if (has_force_restore_data_flag) - fs::remove_all(path_to_table_symlinks); + { + for (const auto & table_path : fs::directory_iterator(path_to_table_symlinks)) + { + if (!fs::is_symlink(table_path)) + { + throw Exception(ErrorCodes::ABORTED, + "'{}' is not a symlink. 
Atomic database should contain only symlinks.", std::string(table_path.path())); + } + + fs::remove(table_path); + } + } DatabaseOrdinary::loadStoredObjects(local_context, has_force_restore_data_flag, force_attach); From ee5b0c2c344fa1e0d7bf0201c84360e2758d540f Mon Sep 17 00:00:00 2001 From: Alexander Tokmakov Date: Wed, 23 Jun 2021 12:28:18 +0300 Subject: [PATCH 307/931] fix test --- tests/queries/0_stateless/00505_secure.reference | 1 - tests/queries/0_stateless/00505_secure.sh | 15 --------------- tests/queries/shell_config.sh | 2 ++ 3 files changed, 2 insertions(+), 16 deletions(-) diff --git a/tests/queries/0_stateless/00505_secure.reference b/tests/queries/0_stateless/00505_secure.reference index 73bdbdbafbe..9a8656bf491 100644 --- a/tests/queries/0_stateless/00505_secure.reference +++ b/tests/queries/0_stateless/00505_secure.reference @@ -1,4 +1,3 @@ -1 2 3 4 diff --git a/tests/queries/0_stateless/00505_secure.sh b/tests/queries/0_stateless/00505_secure.sh index 3d9e28ba08d..c1113af761b 100755 --- a/tests/queries/0_stateless/00505_secure.sh +++ b/tests/queries/0_stateless/00505_secure.sh @@ -6,21 +6,6 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh -# Not default server config needed - - -if [ -n "$CLICKHOUSE_CONFIG_CLIENT" ]; then - USE_CONFIG="--config-file $CLICKHOUSE_CONFIG_CLIENT" -fi - -CLICKHOUSE_CLIENT_SECURE=${CLICKHOUSE_CLIENT_SECURE:="$CLICKHOUSE_CLIENT_BINARY $USE_CONFIG --secure --port=$CLICKHOUSE_PORT_TCP_SECURE"} -if [[ $CLICKHOUSE_CLIENT != *"--port"* ]]; then - # Auto port detect. Cant test with re-defined via command line ports - $CLICKHOUSE_CLIENT_BINARY $USE_CONFIG --secure -q "SELECT 1"; -else - echo 1 -fi - $CLICKHOUSE_CLIENT_SECURE -q "SELECT 2;" #disable test diff --git a/tests/queries/shell_config.sh b/tests/queries/shell_config.sh index b7a3701c326..a13b087c477 100644 --- a/tests/queries/shell_config.sh +++ b/tests/queries/shell_config.sh @@ -73,6 +73,8 @@ export CLICKHOUSE_PORT_MYSQL=${CLICKHOUSE_PORT_MYSQL:="9004"} export CLICKHOUSE_PORT_POSTGRESQL=${CLICKHOUSE_PORT_POSTGRESQL:=$(${CLICKHOUSE_EXTRACT_CONFIG} --try --key=postgresql_port 2>/dev/null)} 2>/dev/null export CLICKHOUSE_PORT_POSTGRESQL=${CLICKHOUSE_PORT_POSTGRESQL:="9005"} +CLICKHOUSE_CLIENT_SECURE=$(echo ${CLICKHOUSE_CLIENT} | sed 's/'"--port=${CLICKHOUSE_PORT_TCP}"'/'"--secure --port=${CLICKHOUSE_PORT_TCP_SECURE}"'/g') + # Add database and log comment to url params if [ -v CLICKHOUSE_URL_PARAMS ] then From a5d3600f202d70aaf62231fbf231325270c7f881 Mon Sep 17 00:00:00 2001 From: l1tsolaiki Date: Fri, 18 Jun 2021 10:07:53 +0300 Subject: [PATCH 308/931] Fix compile errors with WriteBuffer --- src/Functions/DummyJSONParser.h | 6 ++++-- src/Functions/FunctionSQLJSON.h | 4 ++-- src/Functions/JSONPath/Parsers/ParserJSONPathRange.cpp | 3 ++- src/Functions/JSONPath/Parsers/ParserJSONPathRoot.cpp | 3 +-- src/Functions/JSONPath/Parsers/ParserJSONPathStar.cpp | 4 +--- src/Functions/SimdJSONParser.h | 6 +++++- 6 files changed, 15 insertions(+), 11 deletions(-) diff --git a/src/Functions/DummyJSONParser.h b/src/Functions/DummyJSONParser.h index 01fdab1abb6..128ee88e0ca 100644 --- a/src/Functions/DummyJSONParser.h +++ b/src/Functions/DummyJSONParser.h @@ -2,6 +2,8 @@ #include #include +#include +#include namespace DB { @@ -40,7 +42,7 @@ struct DummyJSONParser Array getArray() const { return {}; } Object getObject() const { return {}; } - Element getElement() { return {}; } + ALWAYS_INLINE Element getUnderlyingElement() const {
return {}; } }; /// References an array in a JSON document. @@ -99,7 +101,7 @@ struct DummyJSONParser #endif }; -inline ALWAYS_INLINE std::ostream& operator<<(std::ostream& out, DummyJSONParser::Element) +inline ALWAYS_INLINE WriteBufferFromString& operator<<(WriteBufferFromString& out, const DB::DummyJSONParser::Element &) { return out; } diff --git a/src/Functions/FunctionSQLJSON.h b/src/Functions/FunctionSQLJSON.h index 9bfb4291ba8..3ff1b575bfc 100644 --- a/src/Functions/FunctionSQLJSON.h +++ b/src/Functions/FunctionSQLJSON.h @@ -283,7 +283,7 @@ public: String result; WriteBufferFromString out(result); - out << current_element.getElement(); + out << current_element; ColumnString & col_str = assert_cast(dest); col_str.insertData(result.data(), result.size()); return true; @@ -324,7 +324,7 @@ public: out << ", "; } success = true; - out << current_element.getElement(); + out << current_element; } else if (status == VisitorStatus::Error) { diff --git a/src/Functions/JSONPath/Parsers/ParserJSONPathRange.cpp b/src/Functions/JSONPath/Parsers/ParserJSONPathRange.cpp index f8496cd67d0..bc153b9d747 100644 --- a/src/Functions/JSONPath/Parsers/ParserJSONPathRange.cpp +++ b/src/Functions/JSONPath/Parsers/ParserJSONPathRange.cpp @@ -55,7 +55,8 @@ bool ParserJSONPathRange::parseImpl(Pos & pos, ASTPtr & node, Expected & expecte } else if (pos->type == TokenType::BareWord) { - if (!ParserKeyword("TO").ignore(pos, expected)) { + if (!ParserKeyword("TO").ignore(pos, expected)) + { return false; } if (!number_p.parse(pos, number_ptr, expected)) diff --git a/src/Functions/JSONPath/Parsers/ParserJSONPathRoot.cpp b/src/Functions/JSONPath/Parsers/ParserJSONPathRoot.cpp index a67d284e40c..86cf793fb52 100644 --- a/src/Functions/JSONPath/Parsers/ParserJSONPathRoot.cpp +++ b/src/Functions/JSONPath/Parsers/ParserJSONPathRoot.cpp @@ -19,8 +19,7 @@ bool ParserJSONPathRoot::parseImpl(Pos & pos, ASTPtr & node, Expected & expected expected.add(pos, "dollar sign (start of jsonpath)"); return false; } - auto path_root = std::make_shared(); - node = path_root; + node = std::make_shared(); ++pos; return true; } diff --git a/src/Functions/JSONPath/Parsers/ParserJSONPathStar.cpp b/src/Functions/JSONPath/Parsers/ParserJSONPathStar.cpp index c0d2b376794..97ab9ffec36 100644 --- a/src/Functions/JSONPath/Parsers/ParserJSONPathStar.cpp +++ b/src/Functions/JSONPath/Parsers/ParserJSONPathStar.cpp @@ -6,7 +6,6 @@ namespace DB { bool ParserJSONPathStar::parseImpl(Pos & pos, ASTPtr & node, Expected & expected) { - if (pos->type != TokenType::OpeningSquareBracket) { return false; @@ -22,8 +21,7 @@ bool ParserJSONPathStar::parseImpl(Pos & pos, ASTPtr & node, Expected & expected } ++pos; - auto star = std::make_shared(); - node = star; + node = std::make_shared(); return true; } diff --git a/src/Functions/SimdJSONParser.h b/src/Functions/SimdJSONParser.h index c5793088baf..a176f2c5961 100644 --- a/src/Functions/SimdJSONParser.h +++ b/src/Functions/SimdJSONParser.h @@ -50,7 +50,7 @@ struct SimdJSONParser ALWAYS_INLINE Array getArray() const; ALWAYS_INLINE Object getObject() const; - ALWAYS_INLINE simdjson::dom::element getElement() const { return element; } + ALWAYS_INLINE simdjson::dom::element getUnderlyingElement() const { return element; } private: simdjson::dom::element element; @@ -165,6 +165,10 @@ inline ALWAYS_INLINE SimdJSONParser::Object SimdJSONParser::Element::getObject() return element.get_object().value_unsafe(); } +inline ALWAYS_INLINE WriteBuffer& operator<<(WriteBuffer& out, const DB::SimdJSONParser::Element & element) { + 
     return out << element.getUnderlyingElement();
+}
+
 }

 #endif

From c6d877d10bdf6e3c4f0b76547f050ccaf49b34b1 Mon Sep 17 00:00:00 2001
From: Vitaly Baranov
Date: Wed, 23 Jun 2021 11:08:58 +0300
Subject: [PATCH 309/931] Add tests for checking access rights for SELECT.

---
 .../test_select_access_rights/test.py | 58 +++++++++++++++++++
 1 file changed, 58 insertions(+)

diff --git a/tests/integration/test_select_access_rights/test.py b/tests/integration/test_select_access_rights/test.py
index 213df529ef7..0272eac5fa1 100644
--- a/tests/integration/test_select_access_rights/test.py
+++ b/tests/integration/test_select_access_rights/test.py
@@ -177,3 +177,61 @@ def test_select_count():

     instance.query("GRANT SELECT ON default.table1 TO A")
     assert instance.query(select_query, user = 'A') == "0\n"
+
+
+def test_select_where():
+    # User should have grants for the columns used in WHERE.
+    instance.query("CREATE TABLE table1(a String, b UInt8) ENGINE = MergeTree ORDER BY b")
+    instance.query("INSERT INTO table1 VALUES ('xxx', 0), ('yyy', 1), ('zzz', 0)")
+    instance.query("GRANT SELECT(a) ON default.table1 TO A")
+
+    select_query = "SELECT a FROM table1 WHERE b = 0"
+    assert "it's necessary to have grant SELECT(a, b) ON default.table1" in instance.query_and_get_error(select_query, user = 'A')
+
+    instance.query("GRANT SELECT(b) ON default.table1 TO A")
+    assert instance.query(select_query, user = 'A') == "xxx\nzzz\n"
+
+    instance.query("REVOKE SELECT ON default.table1 FROM A")
+    assert "it's necessary to have grant SELECT(a, b) ON default.table1" in instance.query_and_get_error(select_query, user = 'A')
+
+    instance.query("GRANT SELECT ON default.table1 TO A")
+    assert instance.query(select_query, user = 'A') == "xxx\nzzz\n"
+
+
+def test_select_prewhere():
+    # User should have grants for the columns used in PREWHERE.
+    instance.query("CREATE TABLE table1(a String, b UInt8) ENGINE = MergeTree ORDER BY b")
+    instance.query("INSERT INTO table1 VALUES ('xxx', 0), ('yyy', 1), ('zzz', 0)")
+    instance.query("GRANT SELECT(a) ON default.table1 TO A")
+
+    select_query = "SELECT a FROM table1 PREWHERE b = 0"
+    assert "it's necessary to have grant SELECT(a, b) ON default.table1" in instance.query_and_get_error(select_query, user = 'A')
+
+    instance.query("GRANT SELECT(b) ON default.table1 TO A")
+    assert instance.query(select_query, user = 'A') == "xxx\nzzz\n"
+
+    instance.query("REVOKE SELECT ON default.table1 FROM A")
+    assert "it's necessary to have grant SELECT(a, b) ON default.table1" in instance.query_and_get_error(select_query, user = 'A')
+
+    instance.query("GRANT SELECT ON default.table1 TO A")
+    assert instance.query(select_query, user = 'A') == "xxx\nzzz\n"
+
+
+def test_select_with_row_policy():
+    # Normal users should not be aware of the existence of row policy filters.
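    # (Context, inferred from the assertions below: row policy `pol1` adds an implicit
    # `b = 0` filter for user A, so A sees only the 'xxx' and 'zzz' rows, while errors
    # mention only missing column grants and never the policy itself.)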
+ instance.query("CREATE TABLE table1(a String, b UInt8) ENGINE = MergeTree ORDER BY b") + instance.query("INSERT INTO table1 VALUES ('xxx', 0), ('yyy', 1), ('zzz', 0)") + instance.query("CREATE ROW POLICY pol1 ON table1 USING b = 0 TO A") + + select_query = "SELECT a FROM table1" + select_query2 = "SELECT count() FROM table1" + assert "it's necessary to have grant SELECT(a) ON default.table1" in instance.query_and_get_error(select_query, user = 'A') + assert "it's necessary to have grant SELECT for at least one column on default.table1" in instance.query_and_get_error(select_query2, user = 'A') + + instance.query("GRANT SELECT(a) ON default.table1 TO A") + assert instance.query(select_query, user = 'A') == "xxx\nzzz\n" + assert instance.query(select_query2, user = 'A') == "2\n" + + instance.query("REVOKE SELECT(a) ON default.table1 FROM A") + assert "it's necessary to have grant SELECT(a) ON default.table1" in instance.query_and_get_error(select_query, user = 'A') + assert "it's necessary to have grant SELECT for at least one column on default.table1" in instance.query_and_get_error(select_query2, user = 'A') From ec7ec63a40605b7ba4afdd1ffb436d352b07a0d8 Mon Sep 17 00:00:00 2001 From: l1tsolaiki Date: Wed, 23 Jun 2021 13:22:38 +0300 Subject: [PATCH 310/931] Fix style --- src/Functions/JSONPath/Parsers/ParserJSONPathStar.cpp | 6 ++++-- src/Functions/SimdJSONParser.h | 3 ++- 2 files changed, 6 insertions(+), 3 deletions(-) diff --git a/src/Functions/JSONPath/Parsers/ParserJSONPathStar.cpp b/src/Functions/JSONPath/Parsers/ParserJSONPathStar.cpp index 97ab9ffec36..1338a2064f1 100644 --- a/src/Functions/JSONPath/Parsers/ParserJSONPathStar.cpp +++ b/src/Functions/JSONPath/Parsers/ParserJSONPathStar.cpp @@ -11,11 +11,13 @@ bool ParserJSONPathStar::parseImpl(Pos & pos, ASTPtr & node, Expected & expected return false; } ++pos; - if (pos->type != TokenType::Asterisk) { + if (pos->type != TokenType::Asterisk) + { return false; } ++pos; - if (pos->type != TokenType::ClosingSquareBracket) { + if (pos->type != TokenType::ClosingSquareBracket) + { expected.add(pos, "Closing square bracket"); return false; } diff --git a/src/Functions/SimdJSONParser.h b/src/Functions/SimdJSONParser.h index a176f2c5961..c11fca3272c 100644 --- a/src/Functions/SimdJSONParser.h +++ b/src/Functions/SimdJSONParser.h @@ -165,7 +165,8 @@ inline ALWAYS_INLINE SimdJSONParser::Object SimdJSONParser::Element::getObject() return element.get_object().value_unsafe(); } -inline ALWAYS_INLINE WriteBuffer& operator<<(WriteBuffer& out, const DB::SimdJSONParser::Element & element) { +inline ALWAYS_INLINE WriteBuffer& operator<<(WriteBuffer& out, const DB::SimdJSONParser::Element & element) +{ return out << element.getUnderlyingElement(); } From f64591b2ba39a4335cd710e56f3d356c6c9fc9af Mon Sep 17 00:00:00 2001 From: Anton Popov Date: Wed, 23 Jun 2021 13:36:48 +0300 Subject: [PATCH 311/931] Update other-functions.md --- docs/ru/sql-reference/functions/other-functions.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/ru/sql-reference/functions/other-functions.md b/docs/ru/sql-reference/functions/other-functions.md index 7945abb7a15..a07bd19faa1 100644 --- a/docs/ru/sql-reference/functions/other-functions.md +++ b/docs/ru/sql-reference/functions/other-functions.md @@ -1384,7 +1384,7 @@ initializeAggregation (aggregate_function, arg1, arg2, ..., argN) **Аргументы** - `aggregate_function` — название агрегатной функции, состояние которой нужно создать. [String](../../sql-reference/data-types/string.md#string). 
-- `arg` — аргументы, которые передается агрегатную функцию.
+- `arg` — аргументы, которые передаются в агрегатную функцию.

 **Возвращаемое значение**

From 8f9166df4ea4032f22bdd72cf6e390776fe54ac5 Mon Sep 17 00:00:00 2001
From: vdimir
Date: Wed, 23 Jun 2021 12:11:13 +0300
Subject: [PATCH 312/931] Add read flag to KeyGetterForDict

---
 src/Interpreters/HashJoin.cpp | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/src/Interpreters/HashJoin.cpp b/src/Interpreters/HashJoin.cpp
index fcdf6305a68..469b3cddbee 100644
--- a/src/Interpreters/HashJoin.cpp
+++ b/src/Interpreters/HashJoin.cpp
@@ -204,6 +204,7 @@ HashJoin::HashJoin(std::shared_ptr<TableJoin> table_join_, const Block & right_s
     if (table_join->dictionary_reader)
     {
+        LOG_DEBUG(log, "Performing join over dict");
         data->type = Type::DICT;
         std::get<MapsOne>(data->maps).create(Type::DICT);
         chooseMethod(key_columns, key_sizes); /// init key_sizes
@@ -326,8 +327,9 @@ public:
     FindResult findKey(const TableJoin & table_join, size_t row, const Arena &)
     {
         const DictionaryReader & reader = *table_join.dictionary_reader;
-        if (!read_result)
+        if (!dictionary_read)
         {
+            dictionary_read = true;
             reader.readKeys(*key_columns[0], read_result, found, positions);
             result.block = &read_result;

@@ -345,6 +347,7 @@ private:
     const ColumnRawPtrs & key_columns;
     Block read_result;
     Mapped result;
+    bool dictionary_read = false;
     ColumnVector<UInt8>::Container found;
     std::vector<size_t> positions;
 };

From ea7f798de4d0ebdcbcd5b7db6d8e2ac25fa0a544 Mon Sep 17 00:00:00 2001
From: vdimir
Date: Wed, 23 Jun 2021 13:05:51 +0300
Subject: [PATCH 313/931] Add tests/performance/dict_join.xml

---
 tests/performance/dict_join.xml | 37 +++++++++++++++++++++++++++++++++
 1 file changed, 37 insertions(+)
 create mode 100644 tests/performance/dict_join.xml

diff --git a/tests/performance/dict_join.xml b/tests/performance/dict_join.xml
new file mode 100644
index 00000000000..e12ef4abd63
--- /dev/null
+++ b/tests/performance/dict_join.xml
@@ -0,0 +1,37 @@
+<test>
+    <create_query>
+        CREATE TABLE join_dictionary_source_table (key UInt64, value String)
+        ENGINE = MergeTree ORDER BY key;
+    </create_query>
+
+    <create_query>
+        CREATE DICTIONARY join_hashed_dictionary (key UInt64, value String)
+        PRIMARY KEY key
+        SOURCE(CLICKHOUSE(DB 'default' TABLE 'join_dictionary_source_table'))
+        LIFETIME(MIN 0 MAX 1000)
+        LAYOUT(HASHED());
+    </create_query>
+
+    <fill_query>
+        INSERT INTO join_dictionary_source_table
+        SELECT number, toString(number)
+        FROM numbers(10000000);
+    </fill_query>
+
+    <query>
+        SELECT COUNT()
+        FROM join_dictionary_source_table
+        JOIN join_hashed_dictionary
+        ON join_dictionary_source_table.key = join_hashed_dictionary.key;
+    </query>
+
+    <query>
+        SELECT COUNT()
+        FROM join_dictionary_source_table
+        JOIN join_hashed_dictionary
+        ON join_dictionary_source_table.key = toUInt64(join_hashed_dictionary.key);
+    </query>
+
+    <drop_query>DROP DICTIONARY IF EXISTS join_hashed_dictionary;</drop_query>
+    <drop_query>DROP TABLE IF EXISTS join_dictionary_source_table;</drop_query>
+</test>

From db2285b78111d730046863a563b2c335a787ab0f Mon Sep 17 00:00:00 2001
From: robot-clickhouse
Date: Wed, 23 Jun 2021 14:26:28 +0300
Subject: [PATCH 314/931] Update version_date.tsv after release 21.5.7.9

---
 utils/list-versions/version_date.tsv | 1 +
 1 file changed, 1 insertion(+)

diff --git a/utils/list-versions/version_date.tsv b/utils/list-versions/version_date.tsv
index f7111cc28e4..2db3541bafe 100644
--- a/utils/list-versions/version_date.tsv
+++ b/utils/list-versions/version_date.tsv
@@ -1,6 +1,7 @@
 v21.6.5.37-stable 2021-06-19
 v21.6.4.26-stable 2021-06-11
 v21.6.3.14-stable 2021-06-04
+v21.5.7.9-stable 2021-06-22
 v21.5.6.6-stable 2021-05-29
 v21.5.5.12-stable 2021-05-20
 v21.4.7.3-stable 2021-05-19

From
4ce829d7c1df4dc94cdd9cf1e918257e63990214 Mon Sep 17 00:00:00 2001 From: vdimir Date: Wed, 23 Jun 2021 13:13:11 +0300 Subject: [PATCH 315/931] Create KeyGetter outside of joinRightColumns --- src/Interpreters/HashJoin.cpp | 70 ++++++++++++++++++++--------------- 1 file changed, 40 insertions(+), 30 deletions(-) diff --git a/src/Interpreters/HashJoin.cpp b/src/Interpreters/HashJoin.cpp index 469b3cddbee..6e5f7df99bd 100644 --- a/src/Interpreters/HashJoin.cpp +++ b/src/Interpreters/HashJoin.cpp @@ -320,34 +320,25 @@ public: using Mapped = RowRef; using FindResult = ColumnsHashing::columns_hashing_impl::FindResultImpl; - KeyGetterForDict(const ColumnRawPtrs & key_columns_, const Sizes &, void *) - : key_columns(key_columns_) - {} - - FindResult findKey(const TableJoin & table_join, size_t row, const Arena &) + KeyGetterForDict(const TableJoin & table_join, const ColumnRawPtrs & key_columns) { - const DictionaryReader & reader = *table_join.dictionary_reader; - if (!dictionary_read) - { - dictionary_read = true; - reader.readKeys(*key_columns[0], read_result, found, positions); - result.block = &read_result; + table_join.dictionary_reader->readKeys(*key_columns[0], read_result, found, positions); - if (table_join.forceNullableRight()) - for (auto & column : read_result) - if (table_join.rightBecomeNullable(column.type)) - JoinCommon::convertColumnToNullable(column); - } + for (ColumnWithTypeAndName & column : read_result) + if (table_join.rightBecomeNullable(column.type)) + JoinCommon::convertColumnToNullable(column); + } + FindResult findKey(void *, size_t row, const Arena &) + { + result.block = &read_result; result.row_num = positions[row]; return FindResult(&result, found[row], 0); } private: - const ColumnRawPtrs & key_columns; Block read_result; Mapped result; - bool dictionary_read = false; ColumnVector::Container found; std::vector positions; }; @@ -854,6 +845,7 @@ void setUsed(IColumn::Filter & filter [[maybe_unused]], size_t pos [[maybe_unuse /// Makes filter (1 if row presented in right table) and returns offsets to replicate (for ALL JOINS). 
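/// Note on the refactoring below: the KeyGetter is now constructed by the caller and
/// passed in (see the new `key_getter` parameter), so KeyGetterForDict, whose
/// constructor prefetches all dictionary keys via readKeys(), is built once up front
/// instead of lazily inside the per-row findKey() path.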
template NO_INLINE IColumn::Filter joinRightColumns( + KeyGetter && key_getter, const Map & map, AddedColumns & added_columns, const ConstNullMapPtr & null_map [[maybe_unused]], @@ -883,8 +875,6 @@ NO_INLINE IColumn::Filter joinRightColumns( if constexpr (need_replication) added_columns.offsets_to_replicate = std::make_unique(rows); - auto key_getter = createKeyGetter(added_columns.key_columns, added_columns.key_sizes); - IColumn::Offset current_offset = 0; for (size_t i = 0; i < rows; ++i) @@ -983,35 +973,51 @@ NO_INLINE IColumn::Filter joinRightColumns( template IColumn::Filter joinRightColumnsSwitchNullability( - const Map & map, AddedColumns & added_columns, const ConstNullMapPtr & null_map, JoinStuff::JoinUsedFlags & used_flags) + KeyGetter && key_getter, + const Map & map, + AddedColumns & added_columns, + const ConstNullMapPtr & null_map, + JoinStuff::JoinUsedFlags & used_flags) { if (added_columns.need_filter) { if (null_map) - return joinRightColumns(map, added_columns, null_map, used_flags); + return joinRightColumns( + std::forward(key_getter), map, added_columns, null_map, used_flags); else - return joinRightColumns(map, added_columns, nullptr, used_flags); + return joinRightColumns( + std::forward(key_getter), map, added_columns, nullptr, used_flags); } else { if (null_map) - return joinRightColumns(map, added_columns, null_map, used_flags); + return joinRightColumns( + std::forward(key_getter), map, added_columns, null_map, used_flags); else - return joinRightColumns(map, added_columns, nullptr, used_flags); + return joinRightColumns( + std::forward(key_getter), map, added_columns, nullptr, used_flags); } } template IColumn::Filter switchJoinRightColumns( - const Maps & maps_, AddedColumns & added_columns, HashJoin::Type type, const ConstNullMapPtr & null_map, JoinStuff::JoinUsedFlags & used_flags) + const Maps & maps_, + AddedColumns & added_columns, + HashJoin::Type type, + const ConstNullMapPtr & null_map, + JoinStuff::JoinUsedFlags & used_flags) { + constexpr bool is_asof_join = STRICTNESS == ASTTableJoin::Strictness::Asof; switch (type) { #define M(TYPE) \ case HashJoin::Type::TYPE: \ - return joinRightColumnsSwitchNullability>::Type>(\ - *maps_.TYPE, added_columns, null_map, used_flags); + { \ + using KeyGetter = typename KeyGetterForType>::Type; \ + auto key_getter = createKeyGetter(added_columns.key_columns, added_columns.key_sizes); \ + return joinRightColumnsSwitchNullability( \ + std::move(key_getter), *maps_.TYPE, added_columns, null_map, used_flags); \ + } APPLY_FOR_JOIN_VARIANTS(M) #undef M @@ -1028,8 +1034,12 @@ IColumn::Filter dictionaryJoinRightColumns(const TableJoin & table_join, AddedCo STRICTNESS == ASTTableJoin::Strictness::Semi || STRICTNESS == ASTTableJoin::Strictness::Anti)) { + assert(added_columns.key_columns.size() == 1); + JoinStuff::JoinUsedFlags flags; - return joinRightColumnsSwitchNullability(table_join, added_columns, null_map, flags); + KeyGetterForDict key_getter(table_join, added_columns.key_columns); + return joinRightColumnsSwitchNullability( + std::move(key_getter), nullptr, added_columns, null_map, flags); } throw Exception("Logical error: wrong JOIN combination", ErrorCodes::LOGICAL_ERROR); From a924a9dbacc1bd1506d4429d7e3d677cd97a2b38 Mon Sep 17 00:00:00 2001 From: robot-clickhouse Date: Wed, 23 Jun 2021 14:34:44 +0300 Subject: [PATCH 316/931] Update version_date.tsv after release 21.3.13.9 --- utils/list-versions/version_date.tsv | 1 + 1 file changed, 1 insertion(+) diff --git a/utils/list-versions/version_date.tsv 
b/utils/list-versions/version_date.tsv index 2db3541bafe..3df6b8e6616 100644 --- a/utils/list-versions/version_date.tsv +++ b/utils/list-versions/version_date.tsv @@ -9,6 +9,7 @@ v21.4.6.55-stable 2021-04-30 v21.4.5.46-stable 2021-04-24 v21.4.4.30-stable 2021-04-16 v21.4.3.21-stable 2021-04-12 +v21.3.13.9-lts 2021-06-22 v21.3.12.2-lts 2021-05-25 v21.3.11.5-lts 2021-05-14 v21.3.10.1-lts 2021-05-09 From c5d041e9da0b4e189bb118d9879c7c41dbe8a618 Mon Sep 17 00:00:00 2001 From: Anton Popov Date: Wed, 23 Jun 2021 14:56:31 +0300 Subject: [PATCH 317/931] fix testflows --- .../snapshots/common.py.tests.snapshot | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/tests/testflows/extended_precision_data_types/snapshots/common.py.tests.snapshot b/tests/testflows/extended_precision_data_types/snapshots/common.py.tests.snapshot index d0b7b3423d8..c8b57ffdd1c 100644 --- a/tests/testflows/extended_precision_data_types/snapshots/common.py.tests.snapshot +++ b/tests/testflows/extended_precision_data_types/snapshots/common.py.tests.snapshot @@ -653,7 +653,7 @@ a """ Inline___Int128___arrayReduceInRanges__sum_____1__5____ = r""" -arrayReduceInRanges(\'sum\', array(tuple(1, 5)), array(toInt128(\'3\'), toInt128(\'2\'), toInt128(\'1\'))) +arrayReduceInRanges(\'sum\', array((1, 5)), array(toInt128(\'3\'), toInt128(\'2\'), toInt128(\'1\'))) [6] """ @@ -1181,7 +1181,7 @@ a """ Inline___Int256___arrayReduceInRanges__sum_____1__5____ = r""" -arrayReduceInRanges(\'sum\', array(tuple(1, 5)), array(toInt256(\'3\'), toInt256(\'2\'), toInt256(\'1\'))) +arrayReduceInRanges(\'sum\', array((1, 5)), array(toInt256(\'3\'), toInt256(\'2\'), toInt256(\'1\'))) [6] """ @@ -1709,7 +1709,7 @@ a """ Inline___UInt128___arrayReduceInRanges__sum_____1__5____ = r""" -arrayReduceInRanges(\'sum\', array(tuple(1, 5)), array(toUInt128(\'3\'), toUInt128(\'2\'), toUInt128(\'1\'))) +arrayReduceInRanges(\'sum\', array((1, 5)), array(toUInt128(\'3\'), toUInt128(\'2\'), toUInt128(\'1\'))) [6] """ @@ -2237,7 +2237,7 @@ a """ Inline___UInt256___arrayReduceInRanges__sum_____1__5____ = r""" -arrayReduceInRanges(\'sum\', array(tuple(1, 5)), array(toUInt256(\'3\'), toUInt256(\'2\'), toUInt256(\'1\'))) +arrayReduceInRanges(\'sum\', array((1, 5)), array(toUInt256(\'3\'), toUInt256(\'2\'), toUInt256(\'1\'))) [6] """ @@ -2765,7 +2765,7 @@ a """ Inline___Decimal256_0____arrayReduceInRanges__sum_____1__5____ = r""" -arrayReduceInRanges(\'sum\', array(tuple(1, 5)), array(toDecimal256(\'3\', 0), toDecimal256(\'2\', 0), toDecimal256(\'1\', 0))) +arrayReduceInRanges(\'sum\', array((1, 5)), array(toDecimal256(\'3\', 0), toDecimal256(\'2\', 0), toDecimal256(\'1\', 0))) [6] """ From abe7e4195ebce6a6a54d18f3e67d3b5712c2e602 Mon Sep 17 00:00:00 2001 From: l1tsolaiki Date: Wed, 23 Jun 2021 15:02:47 +0300 Subject: [PATCH 318/931] . 
--- src/Functions/FunctionSQLJSON.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/Functions/FunctionSQLJSON.h b/src/Functions/FunctionSQLJSON.h index 3ff1b575bfc..1e9b25ee508 100644 --- a/src/Functions/FunctionSQLJSON.h +++ b/src/Functions/FunctionSQLJSON.h @@ -283,7 +283,7 @@ public: String result; WriteBufferFromString out(result); - out << current_element; + out << current_element.getUnderlyingElement(); ColumnString & col_str = assert_cast(dest); col_str.insertData(result.data(), result.size()); return true; @@ -324,7 +324,7 @@ public: out << ", "; } success = true; - out << current_element; + out << current_element.getUnderlyingElement(); } else if (status == VisitorStatus::Error) { From c92c352d9a357cdabbff2aeb8abb53863e8a5aa7 Mon Sep 17 00:00:00 2001 From: Nikolai Kochetov Date: Wed, 23 Jun 2021 15:29:10 +0300 Subject: [PATCH 319/931] Fix crash in EXPLAIN AST without query. --- src/Parsers/ParserExplainQuery.cpp | 2 ++ .../0_stateless/01604_explain_ast_of_nonselect_query.sql | 1 + 2 files changed, 3 insertions(+) diff --git a/src/Parsers/ParserExplainQuery.cpp b/src/Parsers/ParserExplainQuery.cpp index c8d8dc10a7f..dc548164157 100644 --- a/src/Parsers/ParserExplainQuery.cpp +++ b/src/Parsers/ParserExplainQuery.cpp @@ -57,6 +57,8 @@ bool ParserExplainQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expected ParserQuery p(end); if (p.parse(pos, query, expected)) explain_query->setExplainedQuery(std::move(query)); + else + return false; } else if (select_p.parse(pos, query, expected) || create_p.parse(pos, query, expected)) diff --git a/tests/queries/0_stateless/01604_explain_ast_of_nonselect_query.sql b/tests/queries/0_stateless/01604_explain_ast_of_nonselect_query.sql index 413acb789e0..41939123c92 100644 --- a/tests/queries/0_stateless/01604_explain_ast_of_nonselect_query.sql +++ b/tests/queries/0_stateless/01604_explain_ast_of_nonselect_query.sql @@ -1 +1,2 @@ +explain ast; -- { clientError 62 } explain ast alter table t1 delete where date = today() From ce0c0c6d2d69ff5c609cdf570d6d537be459d064 Mon Sep 17 00:00:00 2001 From: vdimir Date: Wed, 23 Jun 2021 15:34:38 +0300 Subject: [PATCH 320/931] Update 00109_shard_totals_after_having.sql --- tests/queries/0_stateless/00109_shard_totals_after_having.sql | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/tests/queries/0_stateless/00109_shard_totals_after_having.sql b/tests/queries/0_stateless/00109_shard_totals_after_having.sql index 72e5e011e19..ae143f594c5 100644 --- a/tests/queries/0_stateless/00109_shard_totals_after_having.sql +++ b/tests/queries/0_stateless/00109_shard_totals_after_having.sql @@ -3,7 +3,9 @@ SET max_block_size = 100001; SET group_by_overflow_mode = 'any'; DROP TABLE IF EXISTS numbers500k; -CREATE VIEW numbers500k AS SELECT number FROM system.numbers LIMIT 500000; +CREATE TABLE numbers500k (number UInt32) ENGINE = TinyLog; + +INSERT INTO numbers500k SELECT number FROM system.numbers LIMIT 500000; SET totals_mode = 'after_having_auto'; SELECT intDiv(number, 2) AS k, count(), argMax(toString(number), number) FROM remote('127.0.0.{2,3}', currentDatabase(), numbers500k) GROUP BY k WITH TOTALS ORDER BY k LIMIT 10; From 541c09d7b3a9df3be8cb7cc08aa6c7db20ff4578 Mon Sep 17 00:00:00 2001 From: Ildus Kurbangaliev Date: Wed, 23 Jun 2021 15:03:58 +0200 Subject: [PATCH 321/931] Fix tests errors --- src/Functions/array/mapOp.cpp | 23 +++++++++++------------ 1 file changed, 11 insertions(+), 12 deletions(-) diff --git a/src/Functions/array/mapOp.cpp 
b/src/Functions/array/mapOp.cpp index da394c47f80..5c2637270d5 100644 --- a/src/Functions/array/mapOp.cpp +++ b/src/Functions/array/mapOp.cpp @@ -19,7 +19,6 @@ namespace DB { namespace ErrorCodes { - extern const int ILLEGAL_COLUMN; extern const int ILLEGAL_TYPE_OF_ARGUMENT; extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH; } @@ -91,7 +90,7 @@ private: const DataTypeTuple * tup = checkAndGetDataType(arg.get()); if (!tup) - throw Exception{getName() + " accepts at least two map tuples", ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH}; + throw Exception(getName() + " accepts at least two map tuples", ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH); auto elems = tup->getElements(); if (elems.size() != 2) @@ -107,8 +106,8 @@ private: auto result_type = v->getNestedType(); if (!result_type->canBePromoted()) - throw Exception{ - "Values to be summed are expected to be Numeric, Float or Decimal.", ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT}; + throw Exception( + "Values to be summed are expected to be Numeric, Float or Decimal.", ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT); auto promoted_val_type = result_type->promoteNumericType(); if (!key_type) @@ -133,13 +132,13 @@ private: { const auto * map = checkAndGetDataType(arg.get()); if (!map) - throw Exception{getName() + " accepts at least two maps", ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH}; + throw Exception(getName() + " accepts at least two maps", ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH); const auto & v = map->getValueType(); if (!v->canBePromoted()) - throw Exception{ - "Values to be summed are expected to be Numeric, Float or Decimal.", ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT}; + throw Exception( + "Values to be summed are expected to be Numeric, Float or Decimal.", ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT); auto promoted_val_type = v->promoteNumericType(); if (!key_type) @@ -158,14 +157,14 @@ private: DataTypePtr getReturnTypeImpl(const DataTypes & arguments) const override { if (arguments.size() < 2) - throw Exception{getName() + " accepts at least two maps", ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH}; + throw Exception(getName() + " accepts at least two maps", ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH); if (arguments[0]->getTypeId() == TypeIndex::Tuple) return getReturnTypeForTuples(arguments); else if (arguments[0]->getTypeId() == TypeIndex::Map) return getReturnTypeForMaps(arguments); else - throw Exception{getName() + " only accepts maps", ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT}; + throw Exception(getName() + " only accepts maps", ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT); } template @@ -301,9 +300,9 @@ private: case TypeIndex::Float64: return execute2(row_count, args, res_type); default: - throw Exception{ + throw Exception( "Illegal column type " + res_value_type->getName() + " for values in arguments of function " + getName(), - ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT}; + ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT); } } @@ -388,7 +387,7 @@ private: } else throw Exception{ - "Illegal column type " + key_type->getName() + " in arguments of function " + getName(), + "Illegal column type " + arguments[0].type->getName() + " in arguments of function " + getName(), ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT}; } From fdff177823325c59ad8f9887d470d1a6d0a37d1a Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Wed, 23 Jun 2021 16:29:04 +0300 Subject: [PATCH 322/931] Remove TestFlows (these are unstable third-party tests) --- tests/testflows/regression.py | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/tests/testflows/regression.py 
b/tests/testflows/regression.py index c2e143a4b1c..eef6dadb4bb 100755 --- a/tests/testflows/regression.py +++ b/tests/testflows/regression.py @@ -23,14 +23,14 @@ def regression(self, local, clickhouse_binary_path, stress=None, parallel=None): with Pool(8) as pool: try: run_scenario(pool, tasks, Feature(test=load("example.regression", "regression")), args) - run_scenario(pool, tasks, Feature(test=load("ldap.regression", "regression")), args) - run_scenario(pool, tasks, Feature(test=load("rbac.regression", "regression")), args) - run_scenario(pool, tasks, Feature(test=load("aes_encryption.regression", "regression")), args) - run_scenario(pool, tasks, Feature(test=load("map_type.regression", "regression")), args) - run_scenario(pool, tasks, Feature(test=load("window_functions.regression", "regression")), args) - run_scenario(pool, tasks, Feature(test=load("datetime64_extended_range.regression", "regression")), args) + #run_scenario(pool, tasks, Feature(test=load("ldap.regression", "regression")), args) + #run_scenario(pool, tasks, Feature(test=load("rbac.regression", "regression")), args) + #run_scenario(pool, tasks, Feature(test=load("aes_encryption.regression", "regression")), args) + #run_scenario(pool, tasks, Feature(test=load("map_type.regression", "regression")), args) + #run_scenario(pool, tasks, Feature(test=load("window_functions.regression", "regression")), args) + #run_scenario(pool, tasks, Feature(test=load("datetime64_extended_range.regression", "regression")), args) #run_scenario(pool, tasks, Feature(test=load("kerberos.regression", "regression")), args) - run_scenario(pool, tasks, Feature(test=load("extended_precision_data_types.regression", "regression")), args) + #run_scenario(pool, tasks, Feature(test=load("extended_precision_data_types.regression", "regression")), args) finally: join(tasks) From b00efaf3d1b0d9e2341e8b50f523573926a1b614 Mon Sep 17 00:00:00 2001 From: vdimir Date: Wed, 23 Jun 2021 17:03:39 +0300 Subject: [PATCH 323/931] Add materialized columns to joined columns --- src/Interpreters/JoinedTables.cpp | 2 +- src/Interpreters/TreeRewriter.cpp | 9 ++- src/Interpreters/getTableExpressions.cpp | 66 ++++++++----------- src/Interpreters/getTableExpressions.h | 12 +++- .../01925_join_materialized_columns.reference | 7 ++ .../01925_join_materialized_columns.sql | 14 ++++ 6 files changed, 67 insertions(+), 43 deletions(-) create mode 100644 tests/queries/0_stateless/01925_join_materialized_columns.reference create mode 100644 tests/queries/0_stateless/01925_join_materialized_columns.sql diff --git a/src/Interpreters/JoinedTables.cpp b/src/Interpreters/JoinedTables.cpp index 45466ae5ca1..5e53074d24f 100644 --- a/src/Interpreters/JoinedTables.cpp +++ b/src/Interpreters/JoinedTables.cpp @@ -187,7 +187,7 @@ StoragePtr JoinedTables::getLeftTableStorage() bool JoinedTables::resolveTables() { - tables_with_columns = getDatabaseAndTablesWithColumns(table_expressions, context); + tables_with_columns = getDatabaseAndTablesWithColumns(table_expressions, context, true); if (tables_with_columns.size() != table_expressions.size()) throw Exception("Unexpected tables count", ErrorCodes::LOGICAL_ERROR); diff --git a/src/Interpreters/TreeRewriter.cpp b/src/Interpreters/TreeRewriter.cpp index 76093a14d45..1f94cda6b0f 100644 --- a/src/Interpreters/TreeRewriter.cpp +++ b/src/Interpreters/TreeRewriter.cpp @@ -896,9 +896,14 @@ TreeRewriterResultPtr TreeRewriter::analyzeSelect( if (tables_with_columns.size() > 1) { - result.analyzed_join->columns_from_joined_table = 
tables_with_columns[1].columns; + const auto & right_table = tables_with_columns[1]; + auto & cols_from_joined = result.analyzed_join->columns_from_joined_table; + cols_from_joined = right_table.columns; + cols_from_joined.insert( + cols_from_joined.end(), right_table.materialized_columns.begin(), right_table.materialized_columns.end()); + result.analyzed_join->deduplicateAndQualifyColumnNames( - source_columns_set, tables_with_columns[1].table.getQualifiedNamePrefix()); + source_columns_set, right_table.table.getQualifiedNamePrefix()); } translateQualifiedNames(query, *select_query, source_columns_set, tables_with_columns); diff --git a/src/Interpreters/getTableExpressions.cpp b/src/Interpreters/getTableExpressions.cpp index 22eb307071c..f7d82a8f599 100644 --- a/src/Interpreters/getTableExpressions.cpp +++ b/src/Interpreters/getTableExpressions.cpp @@ -113,50 +113,42 @@ static NamesAndTypesList getColumnsFromTableExpression( return names_and_type_list; } -NamesAndTypesList getColumnsFromTableExpression(const ASTTableExpression & table_expression, ContextPtr context) -{ - NamesAndTypesList materialized; - NamesAndTypesList aliases; - NamesAndTypesList virtuals; - return getColumnsFromTableExpression(table_expression, context, materialized, aliases, virtuals); -} - -TablesWithColumns getDatabaseAndTablesWithColumns(const std::vector & table_expressions, ContextPtr context) +TablesWithColumns getDatabaseAndTablesWithColumns( + const ASTTableExprConstPtrs & table_expressions, + ContextPtr context, + bool add_materialized) { TablesWithColumns tables_with_columns; - if (!table_expressions.empty()) + String current_database = context->getCurrentDatabase(); + bool include_alias_cols = context->getSettingsRef().asterisk_include_alias_columns; + bool include_materialized_cols = add_materialized || context->getSettingsRef().asterisk_include_materialized_columns; + + for (const ASTTableExpression * table_expression : table_expressions) { - String current_database = context->getCurrentDatabase(); - bool include_alias_cols = context->getSettingsRef().asterisk_include_alias_columns; - bool include_materialized_cols = context->getSettingsRef().asterisk_include_materialized_columns; + NamesAndTypesList materialized; + NamesAndTypesList aliases; + NamesAndTypesList virtuals; + NamesAndTypesList names_and_types = getColumnsFromTableExpression(*table_expression, context, materialized, aliases, virtuals); - for (const ASTTableExpression * table_expression : table_expressions) + removeDuplicateColumns(names_and_types); + + tables_with_columns.emplace_back( + DatabaseAndTableWithAlias(*table_expression, current_database), names_and_types); + + auto & table = tables_with_columns.back(); + table.addHiddenColumns(materialized); + table.addHiddenColumns(aliases); + table.addHiddenColumns(virtuals); + + if (include_alias_cols) { - NamesAndTypesList materialized; - NamesAndTypesList aliases; - NamesAndTypesList virtuals; - NamesAndTypesList names_and_types = getColumnsFromTableExpression(*table_expression, context, materialized, aliases, virtuals); + table.addAliasColumns(aliases); + } - removeDuplicateColumns(names_and_types); - - tables_with_columns.emplace_back( - DatabaseAndTableWithAlias(*table_expression, current_database), names_and_types); - - auto & table = tables_with_columns.back(); - table.addHiddenColumns(materialized); - table.addHiddenColumns(aliases); - table.addHiddenColumns(virtuals); - - if (include_alias_cols) - { - table.addAliasColumns(aliases); - } - - if (include_materialized_cols) - { - 
table.addMaterializedColumns(materialized); - } + if (include_materialized_cols) + { + table.addMaterializedColumns(materialized); } } diff --git a/src/Interpreters/getTableExpressions.h b/src/Interpreters/getTableExpressions.h index 961176437b5..19c27057c2f 100644 --- a/src/Interpreters/getTableExpressions.h +++ b/src/Interpreters/getTableExpressions.h @@ -10,13 +10,19 @@ namespace DB struct ASTTableExpression; class ASTSelectQuery; +using ASTTableExprConstPtrs = std::vector; + NameSet removeDuplicateColumns(NamesAndTypesList & columns); -std::vector getTableExpressions(const ASTSelectQuery & select_query); +ASTTableExprConstPtrs getTableExpressions(const ASTSelectQuery & select_query); + const ASTTableExpression * getTableExpression(const ASTSelectQuery & select, size_t table_number); + ASTPtr extractTableExpression(const ASTSelectQuery & select, size_t table_number); -NamesAndTypesList getColumnsFromTableExpression(const ASTTableExpression & table_expression, ContextPtr context); -TablesWithColumns getDatabaseAndTablesWithColumns(const std::vector & table_expressions, ContextPtr context); +TablesWithColumns getDatabaseAndTablesWithColumns( + const ASTTableExprConstPtrs & table_expressions, + ContextPtr context, + bool add_materialized = false); } diff --git a/tests/queries/0_stateless/01925_join_materialized_columns.reference b/tests/queries/0_stateless/01925_join_materialized_columns.reference new file mode 100644 index 00000000000..90f754f6e7c --- /dev/null +++ b/tests/queries/0_stateless/01925_join_materialized_columns.reference @@ -0,0 +1,7 @@ +2020-02-02 13:00:00 fact2 t1_val2 2020-02-02 2020-02-05 13:00:00 fact2 t1_val2 2020-02-05 +- +2020-01-01 2020-01-01 +2020-02-02 2020-02-05 +- +2020-01-01 12:00:00 fact1 t1_val1 2020-01-01 2020-01-01 12:00:00 fact1 t2_val2 2020-01-01 +2020-01-01 13:00:00 fact3 t1_val3 2020-01-01 2020-01-01 12:00:00 fact1 t2_val2 2020-01-01 diff --git a/tests/queries/0_stateless/01925_join_materialized_columns.sql b/tests/queries/0_stateless/01925_join_materialized_columns.sql new file mode 100644 index 00000000000..9c4596f9915 --- /dev/null +++ b/tests/queries/0_stateless/01925_join_materialized_columns.sql @@ -0,0 +1,14 @@ +DROP TABLE IF EXISTS t1; +DROP TABLE IF EXISTS t2; + +CREATE TABLE t1 (time DateTime, foo String, dimension_1 String, dt Date MATERIALIZED toDate(time)) ENGINE = MergeTree() PARTITION BY toYYYYMM(dt) ORDER BY (dt, foo); +CREATE TABLE t2 (time DateTime, bar String, dimension_2 String, dt Date MATERIALIZED toDate(time)) ENGINE = MergeTree() PARTITION BY toYYYYMM(dt) ORDER BY (dt, bar); + +INSERT INTO t1 VALUES ('2020-01-01 12:00:00', 'fact1', 't1_val1'), ('2020-02-02 13:00:00', 'fact2', 't1_val2'), ('2020-01-01 13:00:00', 'fact3', 't1_val3'); +INSERT INTO t2 VALUES ('2020-01-01 12:00:00', 'fact1', 't2_val2'), ('2020-02-05 13:00:00', 'fact2', 't1_val2'), ('2019-01-01 12:00:00', 'fact4', 't2_val2'); + +SELECT * FROM t1 JOIN t2 ON t1.foo = t2.bar WHERE t2.dt >= '2020-02-01'; +SELECT '-'; +SELECT t1.dt, t2.dt FROM t1 JOIN t2 ON t1.foo = t2.bar ORDER BY t1.dt; +SELECT '-'; +SELECT * FROM t1 ALL JOIN t2 ON t1.dt = t2.dt ORDER BY t1.time, t2.time; From e530a86d0f530aba4c5008a27df1119e37e289d6 Mon Sep 17 00:00:00 2001 From: vdimir Date: Wed, 23 Jun 2021 17:08:54 +0300 Subject: [PATCH 324/931] Add query with USING to 01925_join_materialized_columns --- .../0_stateless/01925_join_materialized_columns.reference | 3 +++ tests/queries/0_stateless/01925_join_materialized_columns.sql | 2 ++ 2 files changed, 5 insertions(+) diff --git 
a/tests/queries/0_stateless/01925_join_materialized_columns.reference b/tests/queries/0_stateless/01925_join_materialized_columns.reference index 90f754f6e7c..e00de5f458d 100644 --- a/tests/queries/0_stateless/01925_join_materialized_columns.reference +++ b/tests/queries/0_stateless/01925_join_materialized_columns.reference @@ -5,3 +5,6 @@ - 2020-01-01 12:00:00 fact1 t1_val1 2020-01-01 2020-01-01 12:00:00 fact1 t2_val2 2020-01-01 2020-01-01 13:00:00 fact3 t1_val3 2020-01-01 2020-01-01 12:00:00 fact1 t2_val2 2020-01-01 +- +2020-01-01 12:00:00 fact1 t1_val1 2020-01-01 2020-01-01 12:00:00 fact1 t2_val2 +2020-01-01 13:00:00 fact3 t1_val3 2020-01-01 2020-01-01 12:00:00 fact1 t2_val2 diff --git a/tests/queries/0_stateless/01925_join_materialized_columns.sql b/tests/queries/0_stateless/01925_join_materialized_columns.sql index 9c4596f9915..7d5acc2cd25 100644 --- a/tests/queries/0_stateless/01925_join_materialized_columns.sql +++ b/tests/queries/0_stateless/01925_join_materialized_columns.sql @@ -12,3 +12,5 @@ SELECT '-'; SELECT t1.dt, t2.dt FROM t1 JOIN t2 ON t1.foo = t2.bar ORDER BY t1.dt; SELECT '-'; SELECT * FROM t1 ALL JOIN t2 ON t1.dt = t2.dt ORDER BY t1.time, t2.time; +SELECT '-'; +SELECT * FROM t1 ALL JOIN t2 USING (dt) ORDER BY t1.time, t2.time; From 1077e1f347717e68c241e4b7d8d8102bdd5dc42c Mon Sep 17 00:00:00 2001 From: tavplubix Date: Wed, 23 Jun 2021 18:27:39 +0300 Subject: [PATCH 325/931] Update shell_config.sh --- tests/queries/shell_config.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/queries/shell_config.sh b/tests/queries/shell_config.sh index a13b087c477..5c7aae162bf 100644 --- a/tests/queries/shell_config.sh +++ b/tests/queries/shell_config.sh @@ -73,7 +73,7 @@ export CLICKHOUSE_PORT_MYSQL=${CLICKHOUSE_PORT_MYSQL:="9004"} export CLICKHOUSE_PORT_POSTGRESQL=${CLICKHOUSE_PORT_POSTGRESQL:=$(${CLICKHOUSE_EXTRACT_CONFIG} --try --key=postgresql_port 2>/dev/null)} 2>/dev/null export CLICKHOUSE_PORT_POSTGRESQL=${CLICKHOUSE_PORT_POSTGRESQL:="9005"} -CLICKHOUSE_CLIENT_SECURE=$(echo ${CLICKHOUSE_CLIENT} | sed 's/'"--port=${CLICKHOUSE_PORT_TCP}"'/'"--secure --port=${CLICKHOUSE_PORT_TCP_SECURE}"'/g') +export CLICKHOUSE_CLIENT_SECURE=$(echo ${CLICKHOUSE_CLIENT} | sed 's/'"--port=${CLICKHOUSE_PORT_TCP}"'/'"--secure --port=${CLICKHOUSE_PORT_TCP_SECURE}"'/g') # Add database and log comment to url params if [ -v CLICKHOUSE_URL_PARAMS ] From ebceb6a5b40ecdd8140cd3e4bcb329903c5035e1 Mon Sep 17 00:00:00 2001 From: George Date: Wed, 23 Jun 2021 18:54:26 +0300 Subject: [PATCH 326/931] fixed link --- docs/en/operations/settings/settings.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/en/operations/settings/settings.md b/docs/en/operations/settings/settings.md index 47ac756d6b8..6190b9b030b 100644 --- a/docs/en/operations/settings/settings.md +++ b/docs/en/operations/settings/settings.md @@ -1737,7 +1737,7 @@ These function can be tranformed: - [empty](../../sql-reference/functions/array-functions.md#function-empty) to read subcolumn [size0](../../sql-reference/data-types/array.md#array-size). - [notEmpty](../../sql-reference/functions/array-functions.md#function-notempty) to read subcolumn [size0](../../sql-reference/data-types/array.md#array-size). - [isNull](../../sql-reference/operators/index.md#operator-is-null) to read subcolumn [null](../../sql-reference/data-types/nullable.md#finding-null). 
-- [isNotNull](../../sql-reference/operators/index.md#is-not-null#is-not-null) to read subcolumn [null](../../sql-reference/data-types/nullable.md#finding-null). +- [isNotNull](../../sql-reference/operators/index.md#is-not-null) to read subcolumn [null](../../sql-reference/data-types/nullable.md#finding-null). - [count](../../sql-reference/aggregate-functions/reference/count.md) to read subcolumn [null](../../sql-reference/data-types/nullable.md#finding-null). - [mapKeys](../../sql-reference/functions/tuple-map-functions.md#mapkeys) to read subcolumn [keys](../../sql-reference/data-types/map.md#subcolumn-keys). - [mapValues](../../sql-reference/functions/tuple-map-functions.md#mapvalues) to read subcolumn [values](../../sql-reference/data-types/map.md#subcolumn-values). From 48fbc78d31b66a691c0ef70490640a17b3d133bd Mon Sep 17 00:00:00 2001 From: tavplubix Date: Wed, 23 Jun 2021 19:37:57 +0300 Subject: [PATCH 327/931] Update shell_config.sh --- tests/queries/shell_config.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/queries/shell_config.sh b/tests/queries/shell_config.sh index 5c7aae162bf..e768a773255 100644 --- a/tests/queries/shell_config.sh +++ b/tests/queries/shell_config.sh @@ -73,7 +73,7 @@ export CLICKHOUSE_PORT_MYSQL=${CLICKHOUSE_PORT_MYSQL:="9004"} export CLICKHOUSE_PORT_POSTGRESQL=${CLICKHOUSE_PORT_POSTGRESQL:=$(${CLICKHOUSE_EXTRACT_CONFIG} --try --key=postgresql_port 2>/dev/null)} 2>/dev/null export CLICKHOUSE_PORT_POSTGRESQL=${CLICKHOUSE_PORT_POSTGRESQL:="9005"} -export CLICKHOUSE_CLIENT_SECURE=$(echo ${CLICKHOUSE_CLIENT} | sed 's/'"--port=${CLICKHOUSE_PORT_TCP}"'/'"--secure --port=${CLICKHOUSE_PORT_TCP_SECURE}"'/g') +export CLICKHOUSE_CLIENT_SECURE=${CLICKHOUSE_CLIENT_SECURE:=$(echo ${CLICKHOUSE_CLIENT} | sed 's/'"--port=${CLICKHOUSE_PORT_TCP}"'/'"--secure --port=${CLICKHOUSE_PORT_TCP_SECURE}"'/g')} # Add database and log comment to url params if [ -v CLICKHOUSE_URL_PARAMS ] From fa9242a1989b1ef79a9ebc6c36af2bcf210a0588 Mon Sep 17 00:00:00 2001 From: Nikolay Degterinsky Date: Tue, 22 Jun 2021 14:10:34 +0000 Subject: [PATCH 328/931] Fixed docs --- docs/en/development/continuous-integration.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/en/development/continuous-integration.md b/docs/en/development/continuous-integration.md index d0109233022..95a7e7bbc69 100644 --- a/docs/en/development/continuous-integration.md +++ b/docs/en/development/continuous-integration.md @@ -126,7 +126,7 @@ Builds ClickHouse in various configurations for use in further steps. You have t - **Compiler**: `gcc-9` or `clang-10` (or `clang-10-xx` for other architectures e.g. `clang-10-freebsd`). - **Build type**: `Debug` or `RelWithDebInfo` (cmake). - **Sanitizer**: `none` (without sanitizers), `address` (ASan), `memory` (MSan), `undefined` (UBSan), or `thread` (TSan). -- **Bundled**: `bundled` build uses system libraries, and `unbundled` build uses libraries from `contrib` folder. +- **Bundled**: `bundled` build uses libraries from `contrib` folder, and `unbundled` build uses system libraries. - **Splitted** `splitted` is a [split build](build.md#split-build) - **Status**: `success` or `fail` - **Build log**: link to the building and files copying log, useful when build failed. 
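A side note on the `CLICKHOUSE_CLIENT_SECURE` change in shell_config.sh above: the
`${VAR:=default}` expansion assigns the computed value only when the variable is not
already set, so callers may pre-set it. A minimal sketch of the idiom, with an
invented variable name:

``` bash
unset FOO
echo "${FOO:=fallback}"   # prints "fallback" and assigns it to FOO
FOO=custom
echo "${FOO:=fallback}"   # prints "custom"; the existing value is kept
```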
From f365b8eac98c42c9b6f1c3dfc561453bb0ce6b2d Mon Sep 17 00:00:00 2001
From: alexey-milovidov
Date: Wed, 23 Jun 2021 00:09:10 +0300
Subject: [PATCH 329/931] Update tips.md

---
 docs/en/operations/tips.md | 17 +++++++++--------
 1 file changed, 9 insertions(+), 8 deletions(-)

diff --git a/docs/en/operations/tips.md b/docs/en/operations/tips.md
index 0b74ae95b06..4291b91b41d 100644
--- a/docs/en/operations/tips.md
+++ b/docs/en/operations/tips.md
@@ -30,14 +30,6 @@ Do not disable overcommit. The value `cat /proc/sys/vm/overcommit_memory` should
 $ echo 0 | sudo tee /proc/sys/vm/overcommit_memory
 ```

-## Huge Pages {#huge-pages}
-
-Always disable transparent huge pages. It interferes with memory allocators, which leads to significant performance degradation.
-
-``` bash
-$ echo 'madvise' | sudo tee /sys/kernel/mm/transparent_hugepage/enabled
-```
-
 Use `perf top` to watch the time spent in the kernel for memory management.
 Permanent huge pages also do not need to be allocated.

@@ -91,6 +83,15 @@ The Linux kernel prior to 3.2 had a multitude of problems with IPv6 implementati

 Use at least a 10 GB network, if possible. 1 Gb will also work, but it will be much worse for patching replicas with tens of terabytes of data, or for processing distributed queries with a large amount of intermediate data.

+## Huge Pages {#huge-pages}
+
+If you are using an old Linux kernel, disable transparent huge pages. It interferes with memory allocators, which leads to significant performance degradation.
+On newer Linux kernels, transparent huge pages are alright.
+
+``` bash
+$ echo 'madvise' | sudo tee /sys/kernel/mm/transparent_hugepage/enabled
+```
+
 ## Hypervisor configuration

 If you are using OpenStack, set

From bda33cad790881e5e41d46e356f5af28f3d515f3 Mon Sep 17 00:00:00 2001
From: Alexey Milovidov
Date: Wed, 23 Jun 2021 00:21:32 +0300
Subject: [PATCH 330/931] Add a test for #22108

---
 .../0_stateless/01924_argmax_bitmap_state.reference | 1 +
 tests/queries/0_stateless/01924_argmax_bitmap_state.sql | 8 ++++++++
 tests/queries/0_stateless/arcadia_skip_list.txt | 1 +
 3 files changed, 10 insertions(+)
 create mode 100644 tests/queries/0_stateless/01924_argmax_bitmap_state.reference
 create mode 100644 tests/queries/0_stateless/01924_argmax_bitmap_state.sql

diff --git a/tests/queries/0_stateless/01924_argmax_bitmap_state.reference b/tests/queries/0_stateless/01924_argmax_bitmap_state.reference
new file mode 100644
index 00000000000..ec635144f60
--- /dev/null
+++ b/tests/queries/0_stateless/01924_argmax_bitmap_state.reference
@@ -0,0 +1 @@
+9
diff --git a/tests/queries/0_stateless/01924_argmax_bitmap_state.sql b/tests/queries/0_stateless/01924_argmax_bitmap_state.sql
new file mode 100644
index 00000000000..298bbceeb1d
--- /dev/null
+++ b/tests/queries/0_stateless/01924_argmax_bitmap_state.sql
@@ -0,0 +1,8 @@
+SELECT bitmapMax(argMax(x, y))
+FROM remote('127.0.0.{2,3}', view(
+    SELECT
+        groupBitmapState(toUInt32(number)) AS x,
+        number AS y
+    FROM numbers(10)
+    GROUP BY number
+));
diff --git a/tests/queries/0_stateless/arcadia_skip_list.txt b/tests/queries/0_stateless/arcadia_skip_list.txt
index f146913a2e8..0f3861c0bbe 100644
--- a/tests/queries/0_stateless/arcadia_skip_list.txt
+++ b/tests/queries/0_stateless/arcadia_skip_list.txt
@@ -247,3 +247,4 @@
 01910_view_dictionary
 01824_prefer_global_in_and_join
 01576_alias_column_rewrite
+01924_argmax_bitmap_state

From 1869bd37078f003784a249cd42ca1c8669b215ab Mon Sep 17 00:00:00 2001
From: alexey-milovidov
Date: Wed, 23 Jun 2021 01:34:44 +0300
Subject: [PATCH 331/931] Update
adopters.md --- docs/en/introduction/adopters.md | 1 + 1 file changed, 1 insertion(+) diff --git a/docs/en/introduction/adopters.md b/docs/en/introduction/adopters.md index c324a8995de..8d72e12f01b 100644 --- a/docs/en/introduction/adopters.md +++ b/docs/en/introduction/adopters.md @@ -153,5 +153,6 @@ toc_title: Adopters | Gigapipe | Managed ClickHouse | Main product | — | — | [Official website](https://gigapipe.com/) | | Hydrolix | Cloud data platform | Main product | — | — | [Documentation](https://docs.hydrolix.io/guide/query) | | Argedor | ClickHouse support | — | — | — | [Official website](https://www.argedor.com/en/clickhouse/) | +| SigNoz | Observability Platform | Main Product | — | — | [Source code](https://github.com/SigNoz/signoz) | [Original article](https://clickhouse.tech/docs/en/introduction/adopters/) From 3b13567b573a3e736015cf56ea23cba4fb2b32ed Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Wed, 23 Jun 2021 02:02:57 +0300 Subject: [PATCH 332/931] Minor change --- programs/server/Server.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/programs/server/Server.cpp b/programs/server/Server.cpp index ad2c83da194..2ffef474ad0 100644 --- a/programs/server/Server.cpp +++ b/programs/server/Server.cpp @@ -390,7 +390,7 @@ void Server::initialize(Poco::Util::Application & self) BaseDaemon::initialize(self); logger().information("starting up"); - LOG_INFO(&logger(), "OS Name = {}, OS Version = {}, OS Architecture = {}", + LOG_INFO(&logger(), "OS name: {}, version: {}, architecture: {}", Poco::Environment::osName(), Poco::Environment::osVersion(), Poco::Environment::osArchitecture()); From 81c74435a370d979a6d906729671bf45310c7e15 Mon Sep 17 00:00:00 2001 From: alesapin Date: Wed, 23 Jun 2021 22:24:43 +0300 Subject: [PATCH 333/931] Fix drop part bug --- src/Storages/MergeTree/MergeTreeData.cpp | 2 +- .../MergeTree/ReplicatedMergeTreeQueue.cpp | 9 +++- src/Storages/StorageReplicatedMergeTree.cpp | 49 ++++++++++++------- ...nt_ttl_and_normal_merges_zookeeper_long.sh | 9 ++-- 4 files changed, 47 insertions(+), 22 deletions(-) diff --git a/src/Storages/MergeTree/MergeTreeData.cpp b/src/Storages/MergeTree/MergeTreeData.cpp index abc37f52ff9..606431b9ff9 100644 --- a/src/Storages/MergeTree/MergeTreeData.cpp +++ b/src/Storages/MergeTree/MergeTreeData.cpp @@ -2331,7 +2331,7 @@ MergeTreeData::DataPartsVector MergeTreeData::removePartsInRangeFromWorkingSet(c if (part->info.partition_id != drop_range.partition_id) throw Exception("Unexpected partition_id of part " + part->name + ". 
This is a bug.", ErrorCodes::LOGICAL_ERROR); - if (part->info.min_block < drop_range.min_block) /// NOTE Always false, because drop_range.min_block == 0 + if (part->info.min_block < drop_range.min_block) { if (drop_range.min_block <= part->info.max_block) { diff --git a/src/Storages/MergeTree/ReplicatedMergeTreeQueue.cpp b/src/Storages/MergeTree/ReplicatedMergeTreeQueue.cpp index 1a30a33db5d..92633b48172 100644 --- a/src/Storages/MergeTree/ReplicatedMergeTreeQueue.cpp +++ b/src/Storages/MergeTree/ReplicatedMergeTreeQueue.cpp @@ -137,14 +137,21 @@ void ReplicatedMergeTreeQueue::insertUnlocked( for (const String & virtual_part_name : entry->getVirtualPartNames(format_version)) { virtual_parts.add(virtual_part_name, nullptr, log); - addPartToMutations(virtual_part_name); + /// Don't add drop range parts to mutations + /// they don't produce any useful parts + if (entry->type != LogEntry::DROP_RANGE) + addPartToMutations(virtual_part_name); } /// Put 'DROP PARTITION' entries at the beginning of the queue not to make superfluous fetches of parts that will be eventually deleted if (entry->type != LogEntry::DROP_RANGE) + { queue.push_back(entry); + } else + { queue.push_front(entry); + } if (entry->type == LogEntry::GET_PART || entry->type == LogEntry::ATTACH_PART) { diff --git a/src/Storages/StorageReplicatedMergeTree.cpp b/src/Storages/StorageReplicatedMergeTree.cpp index 47f6bbd0ccc..210f2b2e3ec 100644 --- a/src/Storages/StorageReplicatedMergeTree.cpp +++ b/src/Storages/StorageReplicatedMergeTree.cpp @@ -2194,23 +2194,37 @@ bool StorageReplicatedMergeTree::executeFetchShared( void StorageReplicatedMergeTree::executeDropRange(const LogEntry & entry) { auto drop_range_info = MergeTreePartInfo::fromPartName(entry.new_part_name, format_version); - queue.removePartProducingOpsInRange(getZooKeeper(), drop_range_info, entry); - - if (entry.detach) - LOG_DEBUG(log, "Detaching parts."); - else - LOG_DEBUG(log, "Removing parts."); - - /// Delete the parts contained in the range to be deleted. - /// It's important that no old parts remain (after the merge), because otherwise, - /// after adding a new replica, this new replica downloads them, but does not delete them. - /// And, if you do not, the parts will come to life after the server is restarted. - /// Therefore, we use all data parts. auto metadata_snapshot = getInMemoryMetadataPtr(); + + queue.removePartProducingOpsInRange(getZooKeeper(), drop_range_info, entry); + DataPartsVector parts_to_remove; { auto data_parts_lock = lockParts(); + /// It's a DROP PART + if (!drop_range_info.isFakeDropRangePart()) + { + auto containing_part = getActiveContainingPart(drop_range_info, MergeTreeDataPartState::Committed, data_parts_lock); + if (containing_part && containing_part->info != drop_range_info) + { + LOG_INFO(log, "Skipping drop range for part {} because covering part {} already exists", drop_range_info.getPartName(), containing_part->name); + return; + } + } + + if (entry.detach) + LOG_DEBUG(log, "Detaching parts."); + else + LOG_DEBUG(log, "Removing parts."); + + + /// Delete the parts contained in the range to be deleted. + /// It's important that no old parts remain (after the merge), because otherwise, + /// after adding a new replica, this new replica downloads them, but does not delete them. + /// And, if you do not, the parts will come to life after the server is restarted. + /// Therefore, we use all data parts. 
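        /// (Illustration with invented part names: if all_1_1_0 and all_2_2_0 were
        /// merged into all_1_2_1 and that range is now dropped, the inactive source
        /// parts must go away as well, otherwise they could come back to life after
        /// a server restart, exactly as the comment above warns.)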
+        ///
         parts_to_remove = removePartsInRangeFromWorkingSet(drop_range_info, true, data_parts_lock);
     }

+    if (entry.detach)
+        LOG_DEBUG(log, "Detaching parts.");
+    else
+        LOG_DEBUG(log, "Removing parts.");
+
     if (entry.detach)
     {
         /// If DETACH clone parts to detached/ directory
@@ -6992,15 +7006,16 @@ bool StorageReplicatedMergeTree::dropPartImpl(
     getClearBlocksInPartitionOps(ops, *zookeeper, part_info.partition_id, part_info.min_block, part_info.max_block);
     size_t clear_block_ops_size = ops.size();

-    /// Set fake level to treat this part as virtual in queue.
-    auto drop_part_info = part->info;
-    drop_part_info.level = MergeTreePartInfo::MAX_LEVEL;
-
     /// If `part_name` is result of a recent merge and source parts are still available then
     /// DROP_RANGE with detach will move this part together with source parts to `detached/` dir.
     entry.type = LogEntry::DROP_RANGE;
     entry.source_replica = replica_name;
-    entry.new_part_name = getPartNamePossiblyFake(format_version, drop_part_info);
+    /// We don't set the fake drop level (999999999) for the single-part DROP_RANGE.
+    /// First of all, we don't guarantee anything other than that the part will not be
+    /// active after DROP PART, but a covering part (without the data of the dropped part) can exist.
+    /// If we add a part with level 999999999, then we can break the invariant in virtual_parts of
+    /// the queue.
+    entry.new_part_name = getPartNamePossiblyFake(format_version, part->info);
     entry.detach = detach;
     entry.create_time = time(nullptr);

diff --git a/tests/queries/0_stateless/01921_concurrent_ttl_and_normal_merges_zookeeper_long.sh b/tests/queries/0_stateless/01921_concurrent_ttl_and_normal_merges_zookeeper_long.sh
index 80e7d6b4c00..13086879e0d 100755
--- a/tests/queries/0_stateless/01921_concurrent_ttl_and_normal_merges_zookeeper_long.sh
+++ b/tests/queries/0_stateless/01921_concurrent_ttl_and_normal_merges_zookeeper_long.sh
@@ -7,9 +7,10 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
 NUM_REPLICAS=5

 for i in $(seq 1 $NUM_REPLICAS); do
-    $CLICKHOUSE_CLIENT --query "DROP TABLE IF EXISTS ttl_table$i"
+    $CLICKHOUSE_CLIENT --query "DROP TABLE IF EXISTS ttl_table$i" &
 done

+wait
 for i in $(seq 1 $NUM_REPLICAS); do
     $CLICKHOUSE_CLIENT -n --query "CREATE TABLE ttl_table$i(
@@ -18,7 +19,7 @@ for i in $(seq 1 $NUM_REPLICAS); do
         ENGINE ReplicatedMergeTree('/test/01921_concurrent_ttl_and_normal_merges/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/ttl_table', '$i')
         ORDER BY tuple()
         TTL key + INTERVAL 1 SECOND
-        SETTINGS merge_with_ttl_timeout=1, max_replicated_merges_with_ttl_in_queue=100, max_number_of_merges_with_ttl_in_pool=100;"
+        SETTINGS merge_with_ttl_timeout=1, max_replicated_merges_with_ttl_in_queue=100, max_number_of_merges_with_ttl_in_pool=100, cleanup_delay_period=1, cleanup_delay_period_random_add=0;"
 done

 function optimize_thread
@@ -66,5 +67,7 @@ $CLICKHOUSE_CLIENT --query "SELECT * FROM system.replication_queue where table l
 $CLICKHOUSE_CLIENT --query "SELECT COUNT() > 0 FROM system.part_log where table like 'ttl_table%' and database = '${CLICKHOUSE_DATABASE}'"

 for i in $(seq 1 $NUM_REPLICAS); do
-    $CLICKHOUSE_CLIENT --query "DROP TABLE IF EXISTS ttl_table$i"
+    $CLICKHOUSE_CLIENT --query "DROP TABLE IF EXISTS ttl_table$i" &
 done
+
+wait

From 4be4bc21e285fabac45b56854c16fa735c05d195 Mon Sep 17 00:00:00 2001
From: alesapin
Date: Wed, 23 Jun 2021 23:57:49 +0300
Subject: [PATCH 334/931] Fix for fix

---
 src/Storages/MergeTree/MergeTreeData.cpp    |  9 +++--
 src/Storages/StorageReplicatedMergeTree.cpp | 40 ++++++++----------
 2 files changed, 21 insertions(+), 28 deletions(-)

diff --git a/src/Storages/MergeTree/MergeTreeData.cpp b/src/Storages/MergeTree/MergeTreeData.cpp
index 606431b9ff9..82701e6c117 100644
---
a/src/Storages/MergeTree/MergeTreeData.cpp +++ b/src/Storages/MergeTree/MergeTreeData.cpp @@ -2328,6 +2328,13 @@ MergeTreeData::DataPartsVector MergeTreeData::removePartsInRangeFromWorkingSet(c for (const DataPartPtr & part : partition_range) { + /// It's a DROP_PART and it's already executed by fetching some covering part + if (part->info.contains(drop_range)) + { + LOG_INFO(log, "Skipping drop range for part {} because covering part {} already exists", drop_range.getPartName(), part->name); + return {}; + } + if (part->info.partition_id != drop_range.partition_id) throw Exception("Unexpected partition_id of part " + part->name + ". This is a bug.", ErrorCodes::LOGICAL_ERROR); @@ -2658,7 +2665,6 @@ void MergeTreeData::delayInsertOrThrowIfNeeded(Poco::Event * until) const std::this_thread::sleep_for(std::chrono::milliseconds(static_cast(delay_milliseconds))); } - MergeTreeData::DataPartPtr MergeTreeData::getActiveContainingPart( const MergeTreePartInfo & part_info, MergeTreeData::DataPartState state, DataPartsLock & /*lock*/) const { @@ -2760,7 +2766,6 @@ MergeTreeData::DataPartsVector MergeTreeData::getDataPartsVectorInPartition(Merg data_parts_by_state_and_info.upper_bound(state_with_partition)); } - MergeTreeData::DataPartPtr MergeTreeData::getPartIfExists(const MergeTreePartInfo & part_info, const MergeTreeData::DataPartStates & valid_states) { auto lock = lockParts(); diff --git a/src/Storages/StorageReplicatedMergeTree.cpp b/src/Storages/StorageReplicatedMergeTree.cpp index 210f2b2e3ec..e91f3d9554e 100644 --- a/src/Storages/StorageReplicatedMergeTree.cpp +++ b/src/Storages/StorageReplicatedMergeTree.cpp @@ -2194,40 +2194,28 @@ bool StorageReplicatedMergeTree::executeFetchShared( void StorageReplicatedMergeTree::executeDropRange(const LogEntry & entry) { auto drop_range_info = MergeTreePartInfo::fromPartName(entry.new_part_name, format_version); - - auto metadata_snapshot = getInMemoryMetadataPtr(); - queue.removePartProducingOpsInRange(getZooKeeper(), drop_range_info, entry); + /// Delete the parts contained in the range to be deleted. + /// It's important that no old parts remain (after the merge), because otherwise, + /// after adding a new replica, this new replica downloads them, but does not delete them. + /// And, if you do not, the parts will come to life after the server is restarted. + /// Therefore, we use all data parts. + + auto metadata_snapshot = getInMemoryMetadataPtr(); DataPartsVector parts_to_remove; { auto data_parts_lock = lockParts(); - /// It's a DROP PART - if (!drop_range_info.isFakeDropRangePart()) - { - auto containing_part = getActiveContainingPart(drop_range_info, MergeTreeDataPartState::Committed, data_parts_lock); - if (containing_part && containing_part->info != drop_range_info) - { - LOG_INFO(log, "Skipping drop range for part {} because covering part {} already exists", drop_range_info.getPartName(), containing_part->name); - return; - } - } - - if (entry.detach) - LOG_DEBUG(log, "Detaching parts."); - else - LOG_DEBUG(log, "Removing parts."); - - - /// Delete the parts contained in the range to be deleted. - /// It's important that no old parts remain (after the merge), because otherwise, - /// after adding a new replica, this new replica downloads them, but does not delete them. - /// And, if you do not, the parts will come to life after the server is restarted. - /// Therefore, we use all data parts. 
- /// parts_to_remove = removePartsInRangeFromWorkingSet(drop_range_info, true, data_parts_lock); + if (parts_to_remove.empty()) + return; } + if (entry.detach) + LOG_DEBUG(log, "Detaching parts."); + else + LOG_DEBUG(log, "Removing parts."); + if (entry.detach) { /// If DETACH clone parts to detached/ directory From 24876291d13bca791a1679ecc8167b2b86b6664c Mon Sep 17 00:00:00 2001 From: alesapin Date: Thu, 24 Jun 2021 00:01:07 +0300 Subject: [PATCH 335/931] Followup fix --- src/Storages/MergeTree/MergeTreeData.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/Storages/MergeTree/MergeTreeData.cpp b/src/Storages/MergeTree/MergeTreeData.cpp index 82701e6c117..d98ea6170ec 100644 --- a/src/Storages/MergeTree/MergeTreeData.cpp +++ b/src/Storages/MergeTree/MergeTreeData.cpp @@ -2328,8 +2328,8 @@ MergeTreeData::DataPartsVector MergeTreeData::removePartsInRangeFromWorkingSet(c for (const DataPartPtr & part : partition_range) { - /// It's a DROP_PART and it's already executed by fetching some covering part - if (part->info.contains(drop_range)) + /// It's a DROP PART and it's already executed by fetching some covering part + if (part->info != drop_range && part->info.contains(drop_range)) { LOG_INFO(log, "Skipping drop range for part {} because covering part {} already exists", drop_range.getPartName(), part->name); return {}; From 7739fcc295a9abcdf276257bdda412f189945914 Mon Sep 17 00:00:00 2001 From: Azat Khuzhin Date: Tue, 22 Jun 2021 09:38:01 +0300 Subject: [PATCH 336/931] clickhouse-client: fix NULL dereference for --param w/o value --- programs/client/Client.cpp | 2 ++ 1 file changed, 2 insertions(+) diff --git a/programs/client/Client.cpp b/programs/client/Client.cpp index a3419003e2b..12d94943dea 100644 --- a/programs/client/Client.cpp +++ b/programs/client/Client.cpp @@ -2446,6 +2446,8 @@ public: { /// param_name value ++arg_num; + if (arg_num >= argc) + throw Exception("Parameter requires value", ErrorCodes::BAD_ARGUMENTS); arg = argv[arg_num]; query_parameters.emplace(String(param_continuation), String(arg)); } From f9b3a8770c4a756249a3abe05cfd498c51c69e68 Mon Sep 17 00:00:00 2001 From: Azat Khuzhin Date: Wed, 23 Jun 2021 02:50:25 +0300 Subject: [PATCH 337/931] Add a test for clickhouse-client --param w/o value --- tests/queries/0_stateless/01922_client_param.reference | 2 ++ tests/queries/0_stateless/01922_client_param.sh | 9 +++++++++ 2 files changed, 11 insertions(+) create mode 100644 tests/queries/0_stateless/01922_client_param.reference create mode 100755 tests/queries/0_stateless/01922_client_param.sh diff --git a/tests/queries/0_stateless/01922_client_param.reference b/tests/queries/0_stateless/01922_client_param.reference new file mode 100644 index 00000000000..0d55bed3a35 --- /dev/null +++ b/tests/queries/0_stateless/01922_client_param.reference @@ -0,0 +1,2 @@ +foo +foo diff --git a/tests/queries/0_stateless/01922_client_param.sh b/tests/queries/0_stateless/01922_client_param.sh new file mode 100755 index 00000000000..bb0abfb2191 --- /dev/null +++ b/tests/queries/0_stateless/01922_client_param.sh @@ -0,0 +1,9 @@ +#!/usr/bin/env bash + +CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. "$CUR_DIR"/../shell_config.sh + +$CLICKHOUSE_CLIENT --param_foo |& grep -q -x 'Code: 36. 
DB::Exception: Parameter requires value' +$CLICKHOUSE_CLIENT --param_foo foo -q 'select {foo:String}' +$CLICKHOUSE_CLIENT -q 'select {foo:String}' --param_foo foo From e6da53dc4600ab12474325dc442269bb13909823 Mon Sep 17 00:00:00 2001 From: alesapin Date: Thu, 24 Jun 2021 00:09:06 +0300 Subject: [PATCH 338/931] better --- src/Storages/MergeTree/MergeTreeData.cpp | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/Storages/MergeTree/MergeTreeData.cpp b/src/Storages/MergeTree/MergeTreeData.cpp index d98ea6170ec..ec9be3bf436 100644 --- a/src/Storages/MergeTree/MergeTreeData.cpp +++ b/src/Storages/MergeTree/MergeTreeData.cpp @@ -2328,6 +2328,9 @@ MergeTreeData::DataPartsVector MergeTreeData::removePartsInRangeFromWorkingSet(c for (const DataPartPtr & part : partition_range) { + if (part->info.partition_id != drop_range.partition_id) + throw Exception("Unexpected partition_id of part " + part->name + ". This is a bug.", ErrorCodes::LOGICAL_ERROR); + /// It's a DROP PART and it's already executed by fetching some covering part if (part->info != drop_range && part->info.contains(drop_range)) { @@ -2335,9 +2338,6 @@ MergeTreeData::DataPartsVector MergeTreeData::removePartsInRangeFromWorkingSet(c return {}; } - if (part->info.partition_id != drop_range.partition_id) - throw Exception("Unexpected partition_id of part " + part->name + ". This is a bug.", ErrorCodes::LOGICAL_ERROR); - if (part->info.min_block < drop_range.min_block) { if (drop_range.min_block <= part->info.max_block) From 3ef23c4e228036a5c2d466472b8901873c9d81ed Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Thu, 24 Jun 2021 00:33:22 +0300 Subject: [PATCH 339/931] Add pv to the test image --- docker/test/base/Dockerfile | 1 + 1 file changed, 1 insertion(+) diff --git a/docker/test/base/Dockerfile b/docker/test/base/Dockerfile index 44b9d42d6a1..a722132c3a5 100644 --- a/docker/test/base/Dockerfile +++ b/docker/test/base/Dockerfile @@ -46,6 +46,7 @@ RUN apt-get update \ pigz \ pkg-config \ tzdata \ + pv \ --yes --no-install-recommends # Sanitizer options for services (clickhouse-server) From 3c76986311593e3aa844ac601b66b7de476791b4 Mon Sep 17 00:00:00 2001 From: Azat Khuzhin Date: Thu, 24 Jun 2021 00:32:34 +0300 Subject: [PATCH 340/931] Fix alternative stack under osx (MINSIGSTKSZ is 32K) Fixes: #25632 --- src/Common/ThreadStatus.cpp | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/Common/ThreadStatus.cpp b/src/Common/ThreadStatus.cpp index 1622e12712e..0e12830e49d 100644 --- a/src/Common/ThreadStatus.cpp +++ b/src/Common/ThreadStatus.cpp @@ -60,7 +60,8 @@ struct ThreadStack void * getData() const { return data; } private: - static constexpr size_t size = 16 << 10; /// 16 KiB - not too big but enough to handle error. + /// 16 KiB - not too big but enough to handle error. 
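+    /// On OSX the minimum alternative signal stack size (MINSIGSTKSZ) is 32K,
+    /// so a fixed 16 KiB buffer is too small there; take the maximum of the two.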
+ static constexpr size_t size = std::max(16 << 10, MINSIGSTKSZ); void * data; }; From 79ffb59d49090c1876ee5dacde3b1f1b78977296 Mon Sep 17 00:00:00 2001 From: yuchuansun Date: Thu, 24 Jun 2021 10:22:31 +0800 Subject: [PATCH 341/931] doc: [chinese] change wrong language format into correct --- docs/zh/engines/table-engines/special/file.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/zh/engines/table-engines/special/file.md b/docs/zh/engines/table-engines/special/file.md index 08f0a58070f..503d6d7e7f5 100644 --- a/docs/zh/engines/table-engines/special/file.md +++ b/docs/zh/engines/table-engines/special/file.md @@ -54,7 +54,7 @@ SELECT * FROM file_engine_table ## 在 Clickhouse-local 中的使用 {#zai-clickhouse-local-zhong-de-shi-yong} -使用 [ツ环板-ョツ嘉ッツ偲](../../../engines/table-engines/special/file.md) 时,File 引擎除了 `Format` 之外,还可以接受文件路径参数。可以使用数字或人类可读的名称来指定标准输入/输出流,例如 `0` 或 `stdin`,`1` 或 `stdout`。 +使用 [clickhouse-local](../../../operations/utilities/clickhouse-local.md) 时,File 引擎除了 `Format` 之外,还可以接受文件路径参数。可以使用数字或人类可读的名称来指定标准输入/输出流,例如 `0` 或 `stdin`,`1` 或 `stdout`。 **例如:** ``` bash From 843902631254f99a648891a587be771602f9de43 Mon Sep 17 00:00:00 2001 From: yuchuansun Date: Thu, 24 Jun 2021 10:34:28 +0800 Subject: [PATCH 342/931] Update set.md --- docs/zh/engines/table-engines/special/set.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/zh/engines/table-engines/special/set.md b/docs/zh/engines/table-engines/special/set.md index 71271b0d7ca..a4fd0d85bd1 100644 --- a/docs/zh/engines/table-engines/special/set.md +++ b/docs/zh/engines/table-engines/special/set.md @@ -1,4 +1,4 @@ -# 设置 {#set} +# 集合 {#set} 始终存在于 RAM 中的数据集。它适用于IN运算符的右侧(请参见 «IN运算符» 部分)。 From a616ae88618eb050f2e8df3435fceddfd773a250 Mon Sep 17 00:00:00 2001 From: Azat Khuzhin Date: Thu, 24 Jun 2021 10:07:31 +0300 Subject: [PATCH 343/931] Improve startup time of Distributed engine. - create directory monitors in parallel (this also includes rmdir in case of directory is empty, since even if the directory is empty it may take some time to remove it, due to waiting for journal or if the directory is large, i.e. 
it had lots of files before, since, remember, ext4 does not truncate the directory size on each unlink [1])
- initialize increment in parallel too (since it does readdir())

[1]: https://lore.kernel.org/linux-ext4/930A5754-5CE6-4567-8CF0-62447C97825C@dilger.ca/
---
 .../DistributedBlockOutputStream.cpp          |  2 +-
 src/Storages/StorageDistributed.cpp           | 65 +++++++++++++++----
 src/Storages/StorageDistributed.h             |  2 +-
 3 files changed, 54 insertions(+), 15 deletions(-)

diff --git a/src/Storages/Distributed/DistributedBlockOutputStream.cpp b/src/Storages/Distributed/DistributedBlockOutputStream.cpp
index 9b13198812b..9a50cec5986 100644
--- a/src/Storages/Distributed/DistributedBlockOutputStream.cpp
+++ b/src/Storages/Distributed/DistributedBlockOutputStream.cpp
@@ -752,7 +752,7 @@ void DistributedBlockOutputStream::writeToShard(const Block & block, const std::
     auto sleep_ms = context->getSettingsRef().distributed_directory_monitor_sleep_time_ms;
     for (const auto & dir_name : dir_names)
     {
-        auto & directory_monitor = storage.requireDirectoryMonitor(disk, dir_name);
+        auto & directory_monitor = storage.requireDirectoryMonitor(disk, dir_name, /* startup= */ false);
         directory_monitor.addAndSchedule(file_size, sleep_ms.totalMilliseconds());
     }
 }
diff --git a/src/Storages/StorageDistributed.cpp b/src/Storages/StorageDistributed.cpp
index 8507198a7f6..d43fd1532a1 100644
--- a/src/Storages/StorageDistributed.cpp
+++ b/src/Storages/StorageDistributed.cpp
@@ -800,12 +800,31 @@ void StorageDistributed::startup()
     if (!storage_policy)
         return;
 
-    for (const DiskPtr & disk : data_volume->getDisks())
-        createDirectoryMonitors(disk);
+    const auto & disks = data_volume->getDisks();
+    ThreadPool pool(disks.size());
 
-    for (const String & path : getDataPaths())
+    for (const DiskPtr & disk : disks)
+    {
+        pool.scheduleOrThrowOnError([&]()
+        {
+            createDirectoryMonitors(disk);
+        });
+    }
+    pool.wait();
+
+    const auto & paths = getDataPaths();
+    std::vector<UInt64> last_increment(paths.size());
+    for (size_t i = 0; i < paths.size(); ++i)
+    {
+        pool.scheduleOrThrowOnError([&, i]()
+        {
+            last_increment[i] = getMaximumFileNumber(paths[i]);
+        });
+    }
+    pool.wait();
+
+    for (const auto inc : last_increment)
     {
-        UInt64 inc = getMaximumFileNumber(path);
         if (inc > file_names_increment.value)
             file_names_increment.value.store(inc);
     }
@@ -907,30 +926,50 @@ void StorageDistributed::createDirectoryMonitors(const DiskPtr & disk)
         }
         else
         {
-            requireDirectoryMonitor(disk, dir_path.filename().string());
+            requireDirectoryMonitor(disk, dir_path.filename().string(), /* startup= */ true);
         }
     }
 }
 
-StorageDistributedDirectoryMonitor& StorageDistributed::requireDirectoryMonitor(const DiskPtr & disk, const std::string & name)
+StorageDistributedDirectoryMonitor& StorageDistributed::requireDirectoryMonitor(const DiskPtr & disk, const std::string & name, bool startup)
 {
     const std::string & disk_path = disk->getPath();
     const std::string key(disk_path + name);
 
-    std::lock_guard lock(cluster_nodes_mutex);
-    auto & node_data = cluster_nodes_data[key];
-    if (!node_data.directory_monitor)
+    auto create_node_data = [&]()
     {
-        node_data.connection_pool = StorageDistributedDirectoryMonitor::createPool(name, *this);
-        node_data.directory_monitor = std::make_unique<StorageDistributedDirectoryMonitor>(
+        ClusterNodeData data;
+        data.connection_pool = StorageDistributedDirectoryMonitor::createPool(name, *this);
+        data.directory_monitor = std::make_unique<StorageDistributedDirectoryMonitor>(
             *this, disk, relative_data_path + name,
-            node_data.connection_pool,
+            data.connection_pool,
             monitors_blocker,
             getContext()->getDistributedSchedulePool());
+
return data; + }; + + /// In case of startup the lock can be acquired later. + if (startup) + { + auto tmp_node_data = create_node_data(); + std::lock_guard lock(cluster_nodes_mutex); + auto & node_data = cluster_nodes_data[key]; + assert(!node_data.directory_monitor); + node_data = std::move(tmp_node_data); + return *node_data.directory_monitor; + } + else + { + std::lock_guard lock(cluster_nodes_mutex); + auto & node_data = cluster_nodes_data[key]; + if (!node_data.directory_monitor) + { + node_data = create_node_data(); + } + return *node_data.directory_monitor; } - return *node_data.directory_monitor; } std::vector StorageDistributed::getDirectoryMonitorsStatuses() const diff --git a/src/Storages/StorageDistributed.h b/src/Storages/StorageDistributed.h index c734b0f777e..c63abbc6aa4 100644 --- a/src/Storages/StorageDistributed.h +++ b/src/Storages/StorageDistributed.h @@ -160,7 +160,7 @@ private: /// create directory monitors for each existing subdirectory void createDirectoryMonitors(const DiskPtr & disk); /// ensure directory monitor thread and connectoin pool creation by disk and subdirectory name - StorageDistributedDirectoryMonitor & requireDirectoryMonitor(const DiskPtr & disk, const std::string & name); + StorageDistributedDirectoryMonitor & requireDirectoryMonitor(const DiskPtr & disk, const std::string & name, bool startup); /// Return list of metrics for all created monitors /// (note that monitors are created lazily, i.e. until at least one INSERT executed) From b1263c18ee15f0aa40c5f94dee11c7e1e0e27e74 Mon Sep 17 00:00:00 2001 From: tavplubix Date: Thu, 24 Jun 2021 10:40:00 +0300 Subject: [PATCH 344/931] Fix pcg deserialization (#24538) * fix pcg deserialization * Update 01156_pcg_deserialization.sh * Update 01156_pcg_deserialization.sh * Update 01156_pcg_deserialization.sh * fix another bug Co-authored-by: mergify[bot] <37929162+mergify[bot]@users.noreply.github.com> Co-authored-by: alexey-milovidov --- .../AggregateFunctionGroupArray.cpp | 14 +++++++------- .../AggregateFunctionGroupArray.h | 12 ++++++------ src/IO/ReadHelpers.h | 4 +++- .../01156_pcg_deserialization.reference | 3 +++ .../0_stateless/01156_pcg_deserialization.sh | 19 +++++++++++++++++++ 5 files changed, 38 insertions(+), 14 deletions(-) create mode 100644 tests/queries/0_stateless/01156_pcg_deserialization.reference create mode 100755 tests/queries/0_stateless/01156_pcg_deserialization.sh diff --git a/src/AggregateFunctions/AggregateFunctionGroupArray.cpp b/src/AggregateFunctions/AggregateFunctionGroupArray.cpp index 73039dc4dec..5a9fd778277 100644 --- a/src/AggregateFunctions/AggregateFunctionGroupArray.cpp +++ b/src/AggregateFunctions/AggregateFunctionGroupArray.cpp @@ -30,16 +30,16 @@ static IAggregateFunction * createWithNumericOrTimeType(const IDataType & argume template -inline AggregateFunctionPtr createAggregateFunctionGroupArrayImpl(const DataTypePtr & argument_type, TArgs ... args) +inline AggregateFunctionPtr createAggregateFunctionGroupArrayImpl(const DataTypePtr & argument_type, const Array & parameters, TArgs ... 
args) { - if (auto res = createWithNumericOrTimeType(*argument_type, argument_type, std::forward(args)...)) + if (auto res = createWithNumericOrTimeType(*argument_type, argument_type, parameters, std::forward(args)...)) return AggregateFunctionPtr(res); WhichDataType which(argument_type); if (which.idx == TypeIndex::String) - return std::make_shared>(argument_type, std::forward(args)...); + return std::make_shared>(argument_type, parameters, std::forward(args)...); - return std::make_shared>(argument_type, std::forward(args)...); + return std::make_shared>(argument_type, parameters, std::forward(args)...); // Link list implementation doesn't show noticeable performance improvement // if (which.idx == TypeIndex::String) @@ -79,9 +79,9 @@ AggregateFunctionPtr createAggregateFunctionGroupArray( ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH); if (!limit_size) - return createAggregateFunctionGroupArrayImpl>(argument_types[0]); + return createAggregateFunctionGroupArrayImpl>(argument_types[0], parameters); else - return createAggregateFunctionGroupArrayImpl>(argument_types[0], max_elems); + return createAggregateFunctionGroupArrayImpl>(argument_types[0], parameters, max_elems); } AggregateFunctionPtr createAggregateFunctionGroupArraySample( @@ -114,7 +114,7 @@ AggregateFunctionPtr createAggregateFunctionGroupArraySample( else seed = thread_local_rng(); - return createAggregateFunctionGroupArrayImpl>(argument_types[0], max_elems, seed); + return createAggregateFunctionGroupArrayImpl>(argument_types[0], parameters, max_elems, seed); } } diff --git a/src/AggregateFunctions/AggregateFunctionGroupArray.h b/src/AggregateFunctions/AggregateFunctionGroupArray.h index 06292992a2f..a78ce89ce5a 100644 --- a/src/AggregateFunctions/AggregateFunctionGroupArray.h +++ b/src/AggregateFunctions/AggregateFunctionGroupArray.h @@ -119,9 +119,9 @@ class GroupArrayNumericImpl final public: explicit GroupArrayNumericImpl( - const DataTypePtr & data_type_, UInt64 max_elems_ = std::numeric_limits::max(), UInt64 seed_ = 123456) + const DataTypePtr & data_type_, const Array & parameters_, UInt64 max_elems_ = std::numeric_limits::max(), UInt64 seed_ = 123456) : IAggregateFunctionDataHelper, GroupArrayNumericImpl>( - {data_type_}, {}) + {data_type_}, parameters_) , max_elems(max_elems_) , seed(seed_) { @@ -421,9 +421,9 @@ class GroupArrayGeneralImpl final UInt64 seed; public: - GroupArrayGeneralImpl(const DataTypePtr & data_type_, UInt64 max_elems_ = std::numeric_limits::max(), UInt64 seed_ = 123456) + GroupArrayGeneralImpl(const DataTypePtr & data_type_, const Array & parameters_, UInt64 max_elems_ = std::numeric_limits::max(), UInt64 seed_ = 123456) : IAggregateFunctionDataHelper, GroupArrayGeneralImpl>( - {data_type_}, {}) + {data_type_}, parameters_) , data_type(this->argument_types[0]) , max_elems(max_elems_) , seed(seed_) @@ -696,8 +696,8 @@ class GroupArrayGeneralListImpl final UInt64 max_elems; public: - GroupArrayGeneralListImpl(const DataTypePtr & data_type_, UInt64 max_elems_ = std::numeric_limits::max()) - : IAggregateFunctionDataHelper, GroupArrayGeneralListImpl>({data_type_}, {}) + GroupArrayGeneralListImpl(const DataTypePtr & data_type_, const Array & parameters_, UInt64 max_elems_ = std::numeric_limits::max()) + : IAggregateFunctionDataHelper, GroupArrayGeneralListImpl>({data_type_}, parameters_) , data_type(this->argument_types[0]) , max_elems(max_elems_) { diff --git a/src/IO/ReadHelpers.h b/src/IO/ReadHelpers.h index a772d4ccd69..ffcfeea3827 100644 --- a/src/IO/ReadHelpers.h +++ b/src/IO/ReadHelpers.h @@ 
-1248,7 +1248,7 @@ bool loadAtPosition(ReadBuffer & in, Memory<> & memory, char * & current); struct PcgDeserializer { - static void deserializePcg32(const pcg32_fast & rng, ReadBuffer & buf) + static void deserializePcg32(pcg32_fast & rng, ReadBuffer & buf) { decltype(rng.state_) multiplier, increment, state; readText(multiplier, buf); @@ -1261,6 +1261,8 @@ struct PcgDeserializer throw Exception(ErrorCodes::INCORRECT_DATA, "Incorrect multiplier in pcg32: expected {}, got {}", rng.multiplier(), multiplier); if (increment != rng.increment()) throw Exception(ErrorCodes::INCORRECT_DATA, "Incorrect increment in pcg32: expected {}, got {}", rng.increment(), increment); + + rng.state_ = state; } }; diff --git a/tests/queries/0_stateless/01156_pcg_deserialization.reference b/tests/queries/0_stateless/01156_pcg_deserialization.reference new file mode 100644 index 00000000000..e43b7ca3ceb --- /dev/null +++ b/tests/queries/0_stateless/01156_pcg_deserialization.reference @@ -0,0 +1,3 @@ +5 5 +5 5 +5 5 diff --git a/tests/queries/0_stateless/01156_pcg_deserialization.sh b/tests/queries/0_stateless/01156_pcg_deserialization.sh new file mode 100755 index 00000000000..9c8ac29f32e --- /dev/null +++ b/tests/queries/0_stateless/01156_pcg_deserialization.sh @@ -0,0 +1,19 @@ +#!/usr/bin/env bash + +CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. "$CURDIR"/../shell_config.sh + +declare -a engines=("Memory" "MergeTree order by n" "Log") + +for engine in "${engines[@]}" +do + $CLICKHOUSE_CLIENT -q "drop table if exists t"; + $CLICKHOUSE_CLIENT -q "create table t (n UInt8, a1 AggregateFunction(groupArraySample(1), UInt8)) engine=$engine" + $CLICKHOUSE_CLIENT -q "insert into t select number % 5 as n, groupArraySampleState(1)(toUInt8(number)) from numbers(10) group by n" + + $CLICKHOUSE_CLIENT -q "select * from t format TSV" | $CLICKHOUSE_CLIENT -q "insert into t format TSV" + $CLICKHOUSE_CLIENT -q "select countDistinct(n), countDistinct(a1) from t" + + $CLICKHOUSE_CLIENT -q "drop table t"; +done From 8262e251c46e07addd5d53fd20081caec563e3e3 Mon Sep 17 00:00:00 2001 From: Azat Khuzhin Date: Thu, 24 Jun 2021 09:47:23 +0300 Subject: [PATCH 345/931] Fix getStackSize() stack address on osx Should fix the following exception under osx: Stack size too large. Stack address: 0x700001a58000, frame address: 0x700001a53f40, stack size: 540864, maximum stack size: 524288 (version 21.7.1.1) --- src/Common/checkStackSize.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Common/checkStackSize.cpp b/src/Common/checkStackSize.cpp index dfb1910a8eb..f8b13cd8ca2 100644 --- a/src/Common/checkStackSize.cpp +++ b/src/Common/checkStackSize.cpp @@ -44,7 +44,7 @@ size_t getStackSize(void ** out_address) size = pthread_main_np() ? 
(8 * 1024 * 1024) : pthread_get_stacksize_np(thread); // stack address points to the start of the stack, not the end how it's returned by pthread_get_stackaddr_np - address = reinterpret_cast(reinterpret_cast(pthread_get_stackaddr_np(thread)) - max_stack_size); + address = reinterpret_cast(reinterpret_cast(pthread_get_stackaddr_np(thread)) - size); #else pthread_attr_t attr; # if defined(__FreeBSD__) || defined(OS_SUNOS) From 5c5b888ded7456d2ac25510a23e51fe9f23f8a8c Mon Sep 17 00:00:00 2001 From: alesapin Date: Thu, 24 Jun 2021 11:11:46 +0300 Subject: [PATCH 346/931] Lightweight test --- .../01921_concurrent_ttl_and_normal_merges_zookeeper_long.sh | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/tests/queries/0_stateless/01921_concurrent_ttl_and_normal_merges_zookeeper_long.sh b/tests/queries/0_stateless/01921_concurrent_ttl_and_normal_merges_zookeeper_long.sh index 13086879e0d..7c7d58e1012 100755 --- a/tests/queries/0_stateless/01921_concurrent_ttl_and_normal_merges_zookeeper_long.sh +++ b/tests/queries/0_stateless/01921_concurrent_ttl_and_normal_merges_zookeeper_long.sh @@ -59,11 +59,8 @@ timeout $TIMEOUT bash -c optimize_thread 2> /dev/null & wait -for i in $(seq 1 $NUM_REPLICAS); do - $CLICKHOUSE_CLIENT --query "SYSTEM SYNC REPLICA ttl_table$i" -done -$CLICKHOUSE_CLIENT --query "SELECT * FROM system.replication_queue where table like 'ttl_table%' and database = '${CLICKHOUSE_DATABASE}' and type='MERGE_PARTS' and last_exception != '' FORMAT Vertical" +$CLICKHOUSE_CLIENT --query "SELECT * FROM system.replication_queue where table like 'ttl_table%' and database = '${CLICKHOUSE_DATABASE}' and type='MERGE_PARTS' and last_exception like '%but should be merged into%' FORMAT Vertical" $CLICKHOUSE_CLIENT --query "SELECT COUNT() > 0 FROM system.part_log where table like 'ttl_table%' and database = '${CLICKHOUSE_DATABASE}'" for i in $(seq 1 $NUM_REPLICAS); do From 7e6963bb39b47014b8a934750064de151ce27468 Mon Sep 17 00:00:00 2001 From: Alexander Tokmakov Date: Thu, 24 Jun 2021 11:16:54 +0300 Subject: [PATCH 347/931] fix REPLACE_RANGE entry removal --- src/Storages/MergeTree/ReplicatedMergeTreeQueue.cpp | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/Storages/MergeTree/ReplicatedMergeTreeQueue.cpp b/src/Storages/MergeTree/ReplicatedMergeTreeQueue.cpp index 1a30a33db5d..93f8d889997 100644 --- a/src/Storages/MergeTree/ReplicatedMergeTreeQueue.cpp +++ b/src/Storages/MergeTree/ReplicatedMergeTreeQueue.cpp @@ -891,6 +891,10 @@ bool ReplicatedMergeTreeQueue::checkReplaceRangeCanBeRemoved(const MergeTreePart if (entry_ptr->replace_range_entry == current.replace_range_entry) /// same partition, don't want to drop ourselves return false; + + if (!part_info.contains(MergeTreePartInfo::fromPartName(entry_ptr->replace_range_entry->drop_range_part_name, format_version))) + return false; + size_t number_of_covered_parts = 0; for (const String & new_part_name : entry_ptr->replace_range_entry->new_part_names) { From 6fae921d65d6a7de34e4e74e7fc600a0d4f21b72 Mon Sep 17 00:00:00 2001 From: vdimir Date: Thu, 24 Jun 2021 11:27:55 +0300 Subject: [PATCH 348/931] Smaller table in performance/dict_join.xml --- tests/performance/dict_join.xml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/performance/dict_join.xml b/tests/performance/dict_join.xml index e12ef4abd63..1fa5ce1726c 100644 --- a/tests/performance/dict_join.xml +++ b/tests/performance/dict_join.xml @@ -15,7 +15,7 @@ INSERT INTO join_dictionary_source_table SELECT number, toString(number) - FROM 
numbers(10000000); + FROM numbers(1000000); From 9dcd37b94d0cfae0236908dafcb675568fa08ec2 Mon Sep 17 00:00:00 2001 From: Yuriy Chernyshov Date: Thu, 24 Jun 2021 12:04:40 +0300 Subject: [PATCH 349/931] Enable MurmurHash in ArcadiaBuild --- src/Functions/FunctionsHashing.h | 5 +++-- src/Functions/ya.make | 2 ++ src/Functions/ya.make.in | 2 ++ 3 files changed, 7 insertions(+), 2 deletions(-) diff --git a/src/Functions/FunctionsHashing.h b/src/Functions/FunctionsHashing.h index f56f8689599..cfa6eebf44b 100644 --- a/src/Functions/FunctionsHashing.h +++ b/src/Functions/FunctionsHashing.h @@ -3,9 +3,10 @@ #include #include #include +#include +#include + #if !defined(ARCADIA_BUILD) -# include -# include # include "config_functions.h" # include "config_core.h" #endif diff --git a/src/Functions/ya.make b/src/Functions/ya.make index 315ce93dbde..cddca2431cd 100644 --- a/src/Functions/ya.make +++ b/src/Functions/ya.make @@ -15,6 +15,7 @@ ADDINCL( contrib/libs/libdivide contrib/libs/rapidjson/include contrib/libs/xxhash + contrib/restricted/murmurhash ) PEERDIR( @@ -30,6 +31,7 @@ PEERDIR( contrib/libs/metrohash contrib/libs/rapidjson contrib/libs/xxhash + contrib/restricted/murmurhash library/cpp/consistent_hashing ) diff --git a/src/Functions/ya.make.in b/src/Functions/ya.make.in index ef3e97060c7..10103554455 100644 --- a/src/Functions/ya.make.in +++ b/src/Functions/ya.make.in @@ -14,6 +14,7 @@ ADDINCL( contrib/libs/libdivide contrib/libs/rapidjson/include contrib/libs/xxhash + contrib/restricted/murmurhash ) PEERDIR( @@ -29,6 +30,7 @@ PEERDIR( contrib/libs/metrohash contrib/libs/rapidjson contrib/libs/xxhash + contrib/restricted/murmurhash library/cpp/consistent_hashing ) From d28b12975d68beb82868e4e10839551301ed2a49 Mon Sep 17 00:00:00 2001 From: Evgenia Sudarikova <56156889+otrazhenia@users.noreply.github.com> Date: Thu, 24 Jun 2021 12:52:32 +0300 Subject: [PATCH 350/931] Update docs/ru/getting-started/install.md Co-authored-by: olgarev <56617294+olgarev@users.noreply.github.com> --- docs/ru/getting-started/install.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/ru/getting-started/install.md b/docs/ru/getting-started/install.md index 2924958ddf4..84713adb60e 100644 --- a/docs/ru/getting-started/install.md +++ b/docs/ru/getting-started/install.md @@ -87,7 +87,7 @@ sudo clickhouse-client-$LATEST_VERSION/install/doinst.sh Для запуска ClickHouse в Docker нужно следовать инструкции на [Docker Hub](https://hub.docker.com/r/yandex/clickhouse-server/). Внутри образов используются официальные `deb` пакеты. -### Из единого бинарника {#from-single-binary} +### Из единого бинарного файла {#from-single-binary} Для установки ClickHouse на Linux можно использовать единый переносимый бинарник из последнего коммита ветки `master`: https://builds.clickhouse.tech/master/amd64/clickhouse. 
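A minimal sketch of the single-binary flow that the install.md hunks above describe. The URL, the `install` subcommand, and the `clickhouse start` / `clickhouse-client` commands are the ones the documentation itself mentions; the shell and working directory are assumptions:

``` bash
# Download the portable binary built from the latest master commit
# (URL as given in the documentation) and make it executable.
curl -O 'https://builds.clickhouse.tech/master/amd64/clickhouse'
chmod a+x clickhouse

# Optionally install it system-wide (config files, users, etc.),
# then start the server and connect with the client.
sudo ./clickhouse install
clickhouse start
clickhouse-client
```
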
From 07421ffff0a34b419e0aa48a2ef21686362ddfb2 Mon Sep 17 00:00:00 2001 From: Evgenia Sudarikova <56156889+otrazhenia@users.noreply.github.com> Date: Thu, 24 Jun 2021 12:52:45 +0300 Subject: [PATCH 351/931] Update docs/ru/getting-started/install.md Co-authored-by: olgarev <56617294+olgarev@users.noreply.github.com> --- docs/ru/getting-started/install.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/ru/getting-started/install.md b/docs/ru/getting-started/install.md index 84713adb60e..5e415c7a62d 100644 --- a/docs/ru/getting-started/install.md +++ b/docs/ru/getting-started/install.md @@ -89,7 +89,7 @@ sudo clickhouse-client-$LATEST_VERSION/install/doinst.sh ### Из единого бинарного файла {#from-single-binary} -Для установки ClickHouse на Linux можно использовать единый переносимый бинарник из последнего коммита ветки `master`: https://builds.clickhouse.tech/master/amd64/clickhouse. +Для установки ClickHouse под Linux можно использовать единый переносимый бинарный файл из последнего коммита ветки `master`: [https://builds.clickhouse.tech/master/amd64/clickhouse]. ``` bash curl -O 'https://builds.clickhouse.tech/master/amd64/clickhouse' && chmod a+x clickhouse From ff9fba33bbeb00607a0f3feb77bc68a32495c4a7 Mon Sep 17 00:00:00 2001 From: Evgenia Sudarikova <56156889+otrazhenia@users.noreply.github.com> Date: Thu, 24 Jun 2021 12:52:53 +0300 Subject: [PATCH 352/931] Update docs/en/getting-started/install.md Co-authored-by: olgarev <56617294+olgarev@users.noreply.github.com> --- docs/en/getting-started/install.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/en/getting-started/install.md b/docs/en/getting-started/install.md index 5cec83c3819..3de90156a41 100644 --- a/docs/en/getting-started/install.md +++ b/docs/en/getting-started/install.md @@ -96,7 +96,7 @@ To run ClickHouse inside Docker follow the guide on [Docker Hub](https://hub.doc ### Single Binary {#from-single-binary} -You can install ClickHouse on Linux using a single portable binary from the latest commit of the `master` branch: https://builds.clickhouse.tech/master/amd64/clickhouse. +You can install ClickHouse on Linux using a single portable binary from the latest commit of the `master` branch: [https://builds.clickhouse.tech/master/amd64/clickhouse]. ``` bash curl -O 'https://builds.clickhouse.tech/master/amd64/clickhouse' && chmod a+x clickhouse From fce8316d9831d0812c9dd3ce39fae19625ff8a20 Mon Sep 17 00:00:00 2001 From: Evgenia Sudarikova <56156889+otrazhenia@users.noreply.github.com> Date: Thu, 24 Jun 2021 12:52:59 +0300 Subject: [PATCH 353/931] Update docs/ru/getting-started/install.md Co-authored-by: olgarev <56617294+olgarev@users.noreply.github.com> --- docs/ru/getting-started/install.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/ru/getting-started/install.md b/docs/ru/getting-started/install.md index 5e415c7a62d..565aaac5cee 100644 --- a/docs/ru/getting-started/install.md +++ b/docs/ru/getting-started/install.md @@ -98,7 +98,7 @@ sudo ./clickhouse install ### Из исполняемых файлов для нестандартных окружений {#from-binaries-non-linux} -Для других операционных систем и архитектуры AArch64, сборки ClickHouse предоставляются в виде кросс-компилированного бинарника из последнего коммита ветки `master` (с задержкой в несколько часов). +Для других операционных систем и архитектуры AArch64 сборки ClickHouse предоставляются в виде кросс-компилированного бинарного файла из последнего коммита ветки `master` (с задержкой в несколько часов). 
- [macOS](https://builds.clickhouse.tech/master/macos/clickhouse) — `curl -O 'https://builds.clickhouse.tech/master/macos/clickhouse' && chmod a+x ./clickhouse` - [AArch64](https://builds.clickhouse.tech/master/aarch64/clickhouse) — `curl -O 'https://builds.clickhouse.tech/master/aarch64/clickhouse' && chmod a+x ./clickhouse` From 432ba0b885e6aaf8a6ebb353229a06011db0ec63 Mon Sep 17 00:00:00 2001 From: Evgenia Sudarikova <56156889+otrazhenia@users.noreply.github.com> Date: Thu, 24 Jun 2021 12:53:06 +0300 Subject: [PATCH 354/931] Update docs/ru/getting-started/install.md Co-authored-by: olgarev <56617294+olgarev@users.noreply.github.com> --- docs/ru/getting-started/install.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/ru/getting-started/install.md b/docs/ru/getting-started/install.md index 565aaac5cee..e585021119d 100644 --- a/docs/ru/getting-started/install.md +++ b/docs/ru/getting-started/install.md @@ -108,7 +108,7 @@ sudo ./clickhouse install Чтобы установить ClickHouse в рамках всей системы (с необходимыми конфигурационными файлами, настройками пользователей и т.д.), выполните `sudo ./clickhouse install`. Затем выполните команды `clickhouse start` (чтобы запустить сервер) и `clickhouse-client` (чтобы подключиться к нему). -Данные сборки не рекомендуются для использования в продакшене, так как они недостаточно тщательно протестированны. Также, в них присутствуют не все возможности ClickHouse. +Данные сборки не рекомендуются для использования в рабочей базе данных, так как они недостаточно тщательно протестированы. Также в них присутствуют не все возможности ClickHouse. ### Из исходного кода {#from-sources} From 653ba9c86d57c2f138f072c2571583f050a5dca8 Mon Sep 17 00:00:00 2001 From: Evgenia Sudarikova <56156889+otrazhenia@users.noreply.github.com> Date: Thu, 24 Jun 2021 12:53:12 +0300 Subject: [PATCH 355/931] Update docs/ru/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-lifetime.md Co-authored-by: olgarev <56617294+olgarev@users.noreply.github.com> --- .../external-dictionaries/external-dicts-dict-lifetime.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/ru/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-lifetime.md b/docs/ru/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-lifetime.md index 388d54c21a0..c0811c5d415 100644 --- a/docs/ru/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-lifetime.md +++ b/docs/ru/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-lifetime.md @@ -52,7 +52,7 @@ LIFETIME(MIN 300 MAX 360) ``` Если `0` и `0`, ClickHouse не перегружает словарь по истечению времени. -В этом случае, ClickHouse может перезагрузить данные словаря если изменился XML файл с конфигурацией словаря или если была выполнена команда `SYSTEM RELOAD DICTIONARY`. +В этом случае ClickHouse может перезагрузить данные словаря, если изменился XML файл с конфигурацией словаря или если была выполнена команда `SYSTEM RELOAD DICTIONARY`. При обновлении словарей сервер ClickHouse применяет различную логику в зависимости от типа [источника](external-dicts-dict-sources.md): @@ -116,4 +116,4 @@ SOURCE(ODBC(... invalidate_query 'SELECT update_time FROM dictionary_source wher ... SOURCE(CLICKHOUSE(... update_field 'added_time' update_lag 15)) ... 
-```
\ No newline at end of file
+```

From 220d8217856dcf5460dffb8f77a2585ad123fc4a Mon Sep 17 00:00:00 2001
From: Evgenia Sudarikova <56156889+otrazhenia@users.noreply.github.com>
Date: Thu, 24 Jun 2021 12:53:18 +0300
Subject: [PATCH 356/931] Update docs/ru/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-lifetime.md

Co-authored-by: olgarev <56617294+olgarev@users.noreply.github.com>
---
 .../external-dictionaries/external-dicts-dict-lifetime.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/docs/ru/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-lifetime.md b/docs/ru/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-lifetime.md
index c0811c5d415..573d4374fed 100644
--- a/docs/ru/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-lifetime.md
+++ b/docs/ru/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-lifetime.md
@@ -51,7 +51,7 @@ LIFETIME(300)
 LIFETIME(MIN 300 MAX 360)
 ```
 
-Если `0` и `0`, ClickHouse не перегружает словарь по истечению времени.
+Если `0` и `0`, ClickHouse не перегружает словарь по истечении времени.
 В этом случае ClickHouse может перезагрузить данные словаря, если изменился XML файл с конфигурацией словаря или если была выполнена команда `SYSTEM RELOAD DICTIONARY`.
 
 При обновлении словарей сервер ClickHouse применяет различную логику в зависимости от типа [источника](external-dicts-dict-sources.md):

From c7d5282798ee11c702a80d8cfee2239afb5c57bb Mon Sep 17 00:00:00 2001
From: Evgenia Sudarikova <56156889+otrazhenia@users.noreply.github.com>
Date: Thu, 24 Jun 2021 12:53:25 +0300
Subject: [PATCH 357/931] Update docs/en/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-lifetime.md

Co-authored-by: olgarev <56617294+olgarev@users.noreply.github.com>
---
 .../external-dictionaries/external-dicts-dict-lifetime.md | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/docs/en/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-lifetime.md b/docs/en/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-lifetime.md
index 1d79c9a28bf..36b42ed6281 100644
--- a/docs/en/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-lifetime.md
+++ b/docs/en/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-lifetime.md
@@ -88,7 +88,7 @@ SOURCE(ODBC(... invalidate_query 'SELECT update_time FROM dictionary_source wher
 
 For `Cache`, `ComplexKeyCache`, `SSDCache`, and `SSDComplexKeyCache` dictionaries both synchronious and asynchronious updates are supported.
 
-It is also possible for `Flat`, `Hashed`, `ComplexKeyHashed` dictionaries to only request data that was changed after the previous update. If `update_field` is specified as part of the dictionary source configuration value of the previous update time in seconds will be added to the data request. Depends on source type Executable, HTTP, MySQL, PostgreSQL, ClickHouse, ODBC different logic will be applied to `update_field` before request data from an external source.
+It is also possible for `Flat`, `Hashed`, `ComplexKeyHashed` dictionaries to only request data that was changed after the previous update. If `update_field` is specified as part of the dictionary source configuration, the value of the previous update time in seconds will be added to the data request. Depending on the source type (Executable, HTTP, MySQL, PostgreSQL, ClickHouse, or ODBC), different logic will be applied to `update_field` before requesting data from an external source.
- If the source is HTTP then `update_field` will be added as a query parameter with the last update time as the parameter value. - If the source is Executable then `update_field` will be added as an executable script argument with the last update time as the argument value. @@ -116,4 +116,4 @@ or ... SOURCE(CLICKHOUSE(... update_field 'added_time' update_lag 15)) ... -``` \ No newline at end of file +``` From e08587024c941cafde8d36b888dc25cf4f1615c8 Mon Sep 17 00:00:00 2001 From: Evgenia Sudarikova <56156889+otrazhenia@users.noreply.github.com> Date: Thu, 24 Jun 2021 12:53:33 +0300 Subject: [PATCH 358/931] Update docs/ru/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-lifetime.md Co-authored-by: olgarev <56617294+olgarev@users.noreply.github.com> --- .../external-dictionaries/external-dicts-dict-lifetime.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/ru/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-lifetime.md b/docs/ru/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-lifetime.md index 573d4374fed..81b61566d86 100644 --- a/docs/ru/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-lifetime.md +++ b/docs/ru/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-lifetime.md @@ -88,7 +88,7 @@ SOURCE(ODBC(... invalidate_query 'SELECT update_time FROM dictionary_source wher Для словарей `Cache`, `ComplexKeyCache`, `SSDCache` и `SSDComplexKeyCache` поддерживается как синхронное, так и асинхронное обновление. -Также словари `Flat`, `Hashed`, `ComplexKeyHashed` могут запрашивать только те данные, которые были изменены после предыдущего обновления. Если `update_field` указана как часть конфигурации источника словаря, к запросу данных будет добавлено время предыдущего обновления в секундах. В зависимости от типа источника (Executable, HTTP, MySQL, PostgreSQL, ClickHouse, ODBC) к `update_field` будет применена соответствующая логика перед запросом данных из внешнего источника. +Словари `Flat`, `Hashed` и `ComplexKeyHashed` могут запрашивать только те данные, которые были изменены после предыдущего обновления. Если `update_field` указано как часть конфигурации источника словаря, к запросу данных будет добавлено время предыдущего обновления в секундах. В зависимости от типа источника (Executable, HTTP, MySQL, PostgreSQL, ClickHouse, ODBC) к `update_field` будет применена соответствующая логика перед запросом данных из внешнего источника. - Если источник HTTP, то `update_field` будет добавлена в качестве параметра запроса, а время последнего обновления — в качестве значения параметра. - Если источник Executable, то `update_field` будет добавлена в качестве аргумента исполняемого скрипта, время последнего обновления — в качестве значения аргумента. 
From 2bb9bb6a86a79f86e61d1d1fba039902b8f0d838 Mon Sep 17 00:00:00 2001
From: Evgenia Sudarikova <56156889+otrazhenia@users.noreply.github.com>
Date: Thu, 24 Jun 2021 12:53:39 +0300
Subject: [PATCH 359/931] Update docs/en/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-lifetime.md

Co-authored-by: olgarev <56617294+olgarev@users.noreply.github.com>
---
 .../external-dictionaries/external-dicts-dict-lifetime.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/docs/en/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-lifetime.md b/docs/en/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-lifetime.md
index 36b42ed6281..afef6ae249d 100644
--- a/docs/en/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-lifetime.md
+++ b/docs/en/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-lifetime.md
@@ -92,7 +92,7 @@ It is also possible for `Flat`, `Hashed`, `ComplexKeyHashed` dictionaries to onl
 
 - If the source is HTTP then `update_field` will be added as a query parameter with the last update time as the parameter value.
 - If the source is Executable then `update_field` will be added as an executable script argument with the last update time as the argument value.
-- If the source is ClickHouse, MySQL, PostgreSQL, ODBC there will be an additional part of WHERE, where `update_field` is compared as greater or equal with the last update time.
+- If the source is ClickHouse, MySQL, PostgreSQL, or ODBC, there will be an additional part of `WHERE`, where `update_field` is compared as greater than or equal to the last update time.
 
 If `update_field` option is set, additional option `update_lag` can be set. Value of `update_lag` option is subtracted from previous update time before request updated data.

From 5e017654f9ed81cf77b56f6996764a0b3acf7e4d Mon Sep 17 00:00:00 2001
From: Evgenia Sudarikova <56156889+otrazhenia@users.noreply.github.com>
Date: Thu, 24 Jun 2021 12:53:45 +0300
Subject: [PATCH 360/931] Update docs/ru/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-lifetime.md

Co-authored-by: olgarev <56617294+olgarev@users.noreply.github.com>
---
 .../external-dictionaries/external-dicts-dict-lifetime.md | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/docs/ru/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-lifetime.md b/docs/ru/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-lifetime.md
index 81b61566d86..7f0686cfcef 100644
--- a/docs/ru/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-lifetime.md
+++ b/docs/ru/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-lifetime.md
@@ -90,9 +90,9 @@ SOURCE(ODBC(... invalidate_query 'SELECT update_time FROM dictionary_source wher
 
 Словари `Flat`, `Hashed` и `ComplexKeyHashed` могут запрашивать только те данные, которые были изменены после предыдущего обновления. Если `update_field` указано как часть конфигурации источника словаря, к запросу данных будет добавлено время предыдущего обновления в секундах. В зависимости от типа источника (Executable, HTTP, MySQL, PostgreSQL, ClickHouse, ODBC) к `update_field` будет применена соответствующая логика перед запросом данных из внешнего источника.
 
-- Если источник HTTP, то `update_field` будет добавлена в качестве параметра запроса, а время последнего обновления — в качестве значения параметра.
-- Если источник Executable, то `update_field` будет добавлена в качестве аргумента исполняемого скрипта, время последнего обновления — в качестве значения аргумента. -- Если источник ClickHouse, MySQL, PostgreSQL или ODBC, то будет дополнительная часть запроса `WHERE`, где `update_field` будет больше или равна времени последнего обновления. +- Если источник HTTP, то `update_field` будет добавлено в качестве параметра запроса, а время последнего обновления — в качестве значения параметра. +- Если источник Executable, то `update_field` будет добавлено в качестве аргумента исполняемого скрипта, время последнего обновления — в качестве значения аргумента. +- Если источник ClickHouse, MySQL, PostgreSQL или ODBC, то будет дополнительная часть запроса `WHERE`, где `update_field` будет больше или равно времени последнего обновления. Если установлена опция `update_field`, то может быть установлена дополнительная опция `update_lag`. Значение параметра `update_lag` вычитается из времени предыдущего обновления перед запросом обновленных данных. From f1dfeb553f7a7c282714496fbf8e19d0add16719 Mon Sep 17 00:00:00 2001 From: Evgenia Sudarikova <56156889+otrazhenia@users.noreply.github.com> Date: Thu, 24 Jun 2021 12:53:51 +0300 Subject: [PATCH 361/931] Update docs/ru/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-lifetime.md Co-authored-by: olgarev <56617294+olgarev@users.noreply.github.com> --- .../external-dictionaries/external-dicts-dict-lifetime.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/ru/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-lifetime.md b/docs/ru/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-lifetime.md index 7f0686cfcef..9d4205ab1d1 100644 --- a/docs/ru/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-lifetime.md +++ b/docs/ru/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-lifetime.md @@ -94,7 +94,7 @@ SOURCE(ODBC(... invalidate_query 'SELECT update_time FROM dictionary_source wher - Если источник Executable, то `update_field` будет добавлено в качестве аргумента исполняемого скрипта, время последнего обновления — в качестве значения аргумента. - Если источник ClickHouse, MySQL, PostgreSQL или ODBC, то будет дополнительная часть запроса `WHERE`, где `update_field` будет больше или равно времени последнего обновления. -Если установлена опция `update_field`, то может быть установлена дополнительная опция `update_lag`. Значение параметра `update_lag` вычитается из времени предыдущего обновления перед запросом обновленных данных. +Если установлена опция `update_field`, то может быть установлена дополнительная опция `update_lag`. Значение `update_lag` вычитается из времени предыдущего обновления перед запросом обновленных данных. 
Пример настройки: From 3a25b057657fd5b8542aa869593e12b3a255b6ef Mon Sep 17 00:00:00 2001 From: Alexander Tokmakov Date: Thu, 24 Jun 2021 13:00:33 +0300 Subject: [PATCH 362/931] fix rename Distributed table --- src/Storages/StorageDistributed.cpp | 1 + ...55_rename_move_materialized_view.reference | 12 +++++++++++ .../01155_rename_move_materialized_view.sql | 21 ++++++++++++------- 3 files changed, 27 insertions(+), 7 deletions(-) diff --git a/src/Storages/StorageDistributed.cpp b/src/Storages/StorageDistributed.cpp index 8507198a7f6..414f6cd20c4 100644 --- a/src/Storages/StorageDistributed.cpp +++ b/src/Storages/StorageDistributed.cpp @@ -1155,6 +1155,7 @@ void StorageDistributed::renameOnDisk(const String & new_path_to_table_data) { for (const DiskPtr & disk : data_volume->getDisks()) { + disk->createDirectories(new_path_to_table_data); disk->moveDirectory(relative_data_path, new_path_to_table_data); auto new_path = disk->getPath() + new_path_to_table_data; diff --git a/tests/queries/0_stateless/01155_rename_move_materialized_view.reference b/tests/queries/0_stateless/01155_rename_move_materialized_view.reference index 942cedf8696..635fd16620d 100644 --- a/tests/queries/0_stateless/01155_rename_move_materialized_view.reference +++ b/tests/queries/0_stateless/01155_rename_move_materialized_view.reference @@ -1,10 +1,12 @@ 1 .inner.mv1 before moving tablesmv1 +1 dist before moving tables 1 dst before moving tablesmv2 1 mv1 before moving tablesmv1 1 mv2 before moving tablesmv2 1 src before moving tables ordinary: .inner.mv1 +dist dst mv1 mv2 @@ -12,12 +14,16 @@ src ordinary after rename: atomic after rename: .inner_id. +dist dst mv1 mv2 src 3 .inner_id. after renaming databasemv1 3 .inner_id. before moving tablesmv1 +3 dist after moving tables +3 dist after renaming database +3 dist before moving tables 3 dst after renaming databasemv2 3 dst before moving tablesmv2 3 mv1 after renaming databasemv1 @@ -28,6 +34,7 @@ src 3 src after renaming database 3 src before moving tables .inner_id. 
+dist dst mv1 mv2 @@ -36,6 +43,10 @@ CREATE DATABASE test_01155_atomic\nENGINE = Atomic 4 .inner.mv1 after renaming databasemv1 4 .inner.mv1 after renaming tablesmv1 4 .inner.mv1 before moving tablesmv1 +4 dist after moving tables +4 dist after renaming database +4 dist after renaming tables +4 dist before moving tables 4 dst after renaming databasemv2 4 dst after renaming tablesmv2 4 dst before moving tablesmv2 @@ -51,6 +62,7 @@ CREATE DATABASE test_01155_atomic\nENGINE = Atomic 4 src before moving tables test_01155_ordinary: .inner.mv1 +dist dst mv1 mv2 diff --git a/tests/queries/0_stateless/01155_rename_move_materialized_view.sql b/tests/queries/0_stateless/01155_rename_move_materialized_view.sql index 2ede0fbcedf..882be2702d8 100644 --- a/tests/queries/0_stateless/01155_rename_move_materialized_view.sql +++ b/tests/queries/0_stateless/01155_rename_move_materialized_view.sql @@ -9,8 +9,11 @@ CREATE TABLE src (s String) ENGINE=MergeTree() PARTITION BY tuple() ORDER BY s; CREATE MATERIALIZED VIEW mv1 (s String) ENGINE=MergeTree() PARTITION BY tuple() ORDER BY s AS SELECT (*,).1 || 'mv1' as s FROM src; CREATE TABLE dst (s String) ENGINE=MergeTree() PARTITION BY tuple() ORDER BY s; CREATE MATERIALIZED VIEW mv2 TO dst (s String) AS SELECT (*,).1 || 'mv2' as s FROM src; -INSERT INTO src VALUES ('before moving tables'); -SELECT 1, substr(_table, 1, 10), s FROM merge('test_01155_ordinary', '') ORDER BY _table, s; +CREATE TABLE dist (s String) Engine=Distributed(test_shard_localhost, test_01155_ordinary, src); +INSERT INTO dist VALUES ('before moving tables'); +SYSTEM FLUSH DISTRIBUTED dist; +-- FIXME Cannot convert column `1` because it is non constant in source stream but must be constant in result +SELECT materialize(1), substr(_table, 1, 10), s FROM merge('test_01155_ordinary', '') ORDER BY _table, s; -- Move tables with materialized views from Ordinary to Atomic SELECT 'ordinary:'; @@ -19,6 +22,7 @@ RENAME TABLE test_01155_ordinary.mv1 TO test_01155_atomic.mv1; RENAME TABLE test_01155_ordinary.mv2 TO test_01155_atomic.mv2; RENAME TABLE test_01155_ordinary.dst TO test_01155_atomic.dst; RENAME TABLE test_01155_ordinary.src TO test_01155_atomic.src; +RENAME TABLE test_01155_ordinary.dist TO test_01155_atomic.dist; SELECT 'ordinary after rename:'; SELECT substr(name, 1, 10) FROM system.tables WHERE database='test_01155_ordinary'; SELECT 'atomic after rename:'; @@ -27,13 +31,14 @@ DROP DATABASE test_01155_ordinary; USE default; INSERT INTO test_01155_atomic.src VALUES ('after moving tables'); -SELECT 2, substr(_table, 1, 10), s FROM merge('test_01155_atomic', '') ORDER BY _table, s; -- { serverError 81 } +SELECT materialize(2), substr(_table, 1, 10), s FROM merge('test_01155_atomic', '') ORDER BY _table, s; -- { serverError 81 } RENAME DATABASE test_01155_atomic TO test_01155_ordinary; USE test_01155_ordinary; -INSERT INTO src VALUES ('after renaming database'); -SELECT 3, substr(_table, 1, 10), s FROM merge('test_01155_ordinary', '') ORDER BY _table, s; +INSERT INTO dist VALUES ('after renaming database'); +SYSTEM FLUSH DISTRIBUTED dist; +SELECT materialize(3), substr(_table, 1, 10), s FROM merge('test_01155_ordinary', '') ORDER BY _table, s; SELECT substr(name, 1, 10) FROM system.tables WHERE database='test_01155_ordinary'; @@ -47,9 +52,11 @@ RENAME TABLE test_01155_atomic.mv1 TO test_01155_ordinary.mv1; RENAME TABLE test_01155_atomic.mv2 TO test_01155_ordinary.mv2; RENAME TABLE test_01155_atomic.dst TO test_01155_ordinary.dst; RENAME TABLE test_01155_atomic.src TO test_01155_ordinary.src; 
+RENAME TABLE test_01155_atomic.dist TO test_01155_ordinary.dist; -INSERT INTO src VALUES ('after renaming tables'); -SELECT 4, substr(_table, 1, 10), s FROM merge('test_01155_ordinary', '') ORDER BY _table, s; +INSERT INTO dist VALUES ('after renaming tables'); +SYSTEM FLUSH DISTRIBUTED dist; +SELECT materialize(4), substr(_table, 1, 10), s FROM merge('test_01155_ordinary', '') ORDER BY _table, s; SELECT 'test_01155_ordinary:'; SHOW TABLES FROM test_01155_ordinary; SELECT 'test_01155_atomic:'; From e9e49a908598f6851abc9fa0c135e22a97adb4ad Mon Sep 17 00:00:00 2001 From: Ivan <5627721+abyss7@users.noreply.github.com> Date: Thu, 24 Jun 2021 14:45:29 +0300 Subject: [PATCH 363/931] Fix ANTLR parser and enable it back in CI (#25638) * Fix * Skip more tests --- .../MergeTree/MergeTreeIndexConditionBloomFilter.cpp | 3 +++ tests/ci/ci_config.json | 12 ++++++++++++ tests/queries/skip_list.json | 8 +++++++- 3 files changed, 22 insertions(+), 1 deletion(-) diff --git a/src/Storages/MergeTree/MergeTreeIndexConditionBloomFilter.cpp b/src/Storages/MergeTree/MergeTreeIndexConditionBloomFilter.cpp index 93f8c118ad5..6b5a2c6ee17 100644 --- a/src/Storages/MergeTree/MergeTreeIndexConditionBloomFilter.cpp +++ b/src/Storages/MergeTree/MergeTreeIndexConditionBloomFilter.cpp @@ -234,6 +234,9 @@ bool MergeTreeIndexConditionBloomFilter::traverseFunction(const ASTPtr & node, B if (const auto * function = node->as()) { + if (!function->arguments) + return false; + const ASTs & arguments = function->arguments->children; for (const auto & arg : arguments) { diff --git a/tests/ci/ci_config.json b/tests/ci/ci_config.json index 39722e17f25..03bc013138d 100644 --- a/tests/ci/ci_config.json +++ b/tests/ci/ci_config.json @@ -383,6 +383,18 @@ "with_coverage": false } }, + "Functional stateless tests (ANTLR debug)": { + "required_build_properties": { + "compiler": "clang-11", + "package_type": "deb", + "build_type": "debug", + "sanitizer": "none", + "bundled": "bundled", + "splitted": "unsplitted", + "clang-tidy": "disable", + "with_coverage": false + } + }, "Stress test (address)": { "required_build_properties": { "compiler": "clang-11", diff --git a/tests/queries/skip_list.json b/tests/queries/skip_list.json index e38089230f4..78b8e3065ff 100644 --- a/tests/queries/skip_list.json +++ b/tests/queries/skip_list.json @@ -297,6 +297,7 @@ "01192_rename_database_zookeeper", "01213_alter_rename_column", "01232_untuple", + "01240_join_get_or_null", "01244_optimize_distributed_group_by_sharding_key", "01254_dict_load_after_detach_attach", "01256_misspell_layout_name_podshumok", @@ -510,7 +511,12 @@ "01892_setting_limit_offset_distributed", "01902_dictionary_array_type", "01903_ssd_cache_dictionary_array_type", - "01905_to_json_string" + "01905_to_json_string", + "01913_fix_column_transformer_replace_format", + "01913_if_int_decimal", + "01913_join_push_down_bug", + "01921_with_fill_with_totals", + "01924_argmax_bitmap_state" ], "parallel": [ From bb35a113bf57e92c8194b22db2bc4731f1108c17 Mon Sep 17 00:00:00 2001 From: nickzhwang Date: Thu, 24 Jun 2021 20:18:14 +0800 Subject: [PATCH 364/931] fix broken links and incorrect translations --- .../system-tables/asynchronous_metric_log.md | 2 +- .../system-tables/asynchronous_metrics.md | 4 +- docs/en/operations/system-tables/clusters.md | 2 +- docs/en/operations/system-tables/columns.md | 2 +- .../operations/system-tables/contributors.md | 2 +- .../operations/system-tables/current-roles.md | 2 +- .../system-tables/data_type_families.md | 2 +- docs/en/operations/system-tables/databases.md | 
2 +- .../system-tables/detached_parts.md | 2 +- .../operations/system-tables/dictionaries.md | 2 +- docs/en/operations/system-tables/disks.md | 2 +- .../operations/system-tables/enabled-roles.md | 2 +- docs/en/operations/system-tables/events.md | 2 +- docs/en/operations/system-tables/functions.md | 2 +- docs/en/operations/system-tables/grants.md | 2 +- .../system-tables/graphite_retentions.md | 2 +- docs/en/operations/system-tables/licenses.md | 2 +- .../system-tables/merge_tree_settings.md | 2 +- docs/en/operations/system-tables/merges.md | 2 +- .../en/operations/system-tables/metric_log.md | 2 +- docs/en/operations/system-tables/metrics.md | 2 +- docs/en/operations/system-tables/mutations.md | 2 +- docs/en/operations/system-tables/numbers.md | 2 +- .../en/operations/system-tables/numbers_mt.md | 2 +- docs/en/operations/system-tables/one.md | 2 +- docs/en/operations/system-tables/part_log.md | 2 +- docs/en/operations/system-tables/parts.md | 2 +- docs/en/operations/system-tables/processes.md | 2 +- docs/en/operations/system-tables/query_log.md | 2 +- .../system-tables/query_thread_log.md | 2 +- .../operations/system-tables/quota_limits.md | 2 +- .../operations/system-tables/quota_usage.md | 2 +- docs/en/operations/system-tables/quotas.md | 2 +- .../operations/system-tables/quotas_usage.md | 2 +- docs/en/operations/system-tables/replicas.md | 2 +- .../operations/system-tables/role-grants.md | 2 +- docs/en/operations/system-tables/roles.md | 2 +- .../operations/system-tables/row_policies.md | 2 +- docs/en/operations/system-tables/settings.md | 2 +- .../settings_profile_elements.md | 2 +- .../system-tables/settings_profiles.md | 2 +- .../system-tables/storage_policies.md | 2 +- .../operations/system-tables/table_engines.md | 2 +- docs/en/operations/system-tables/tables.md | 2 +- docs/en/operations/system-tables/text_log.md | 2 +- .../en/operations/system-tables/time_zones.md | 2 +- docs/en/operations/system-tables/trace_log.md | 2 +- docs/en/operations/system-tables/users.md | 2 +- docs/en/operations/system-tables/zookeeper.md | 2 +- .../system-tables/asynchronous_metric_log.md | 2 +- .../system-tables/asynchronous_metrics.md | 14 +++---- docs/zh/operations/system-tables/clusters.md | 25 ++++++------ docs/zh/operations/system-tables/columns.md | 2 + docs/zh/operations/system-tables/tables.md | 26 +++++++------ docs/zh/operations/system-tables/zookeeper.md | 39 ++++++++++--------- 55 files changed, 109 insertions(+), 99 deletions(-) diff --git a/docs/en/operations/system-tables/asynchronous_metric_log.md b/docs/en/operations/system-tables/asynchronous_metric_log.md index 75607cc30b0..56803174cbd 100644 --- a/docs/en/operations/system-tables/asynchronous_metric_log.md +++ b/docs/en/operations/system-tables/asynchronous_metric_log.md @@ -36,4 +36,4 @@ SELECT * FROM system.asynchronous_metric_log LIMIT 10 - [system.asynchronous_metrics](../system-tables/asynchronous_metrics.md) — Contains metrics that are calculated periodically in the background. - [system.metric_log](../system-tables/metric_log.md) — Contains history of metrics values from tables `system.metrics` and `system.events`, periodically flushed to disk. 
-[Original article](https://clickhouse.tech/docs/en/operations/system_tables/asynchronous_metric_log) +[Original article](https://clickhouse.tech/docs/en/operations/system-tables/asynchronous_metric_log) diff --git a/docs/en/operations/system-tables/asynchronous_metrics.md b/docs/en/operations/system-tables/asynchronous_metrics.md index a401c7a723b..d9f427cf783 100644 --- a/docs/en/operations/system-tables/asynchronous_metrics.md +++ b/docs/en/operations/system-tables/asynchronous_metrics.md @@ -33,6 +33,6 @@ SELECT * FROM system.asynchronous_metrics LIMIT 10 - [Monitoring](../../operations/monitoring.md) — Base concepts of ClickHouse monitoring. - [system.metrics](../../operations/system-tables/metrics.md#system_tables-metrics) — Contains instantly calculated metrics. - [system.events](../../operations/system-tables/events.md#system_tables-events) — Contains a number of events that have occurred. -- [system.metric\_log](../../operations/system-tables/metric_log.md#system_tables-metric_log) — Contains a history of metrics values from tables `system.metrics` и `system.events`. +- [system.metric\_log](../../operations/system-tables/metric_log.md#system_tables-metric_log) — Contains a history of metrics values from tables `system.metrics` and `system.events`. - [Original article](https://clickhouse.tech/docs/en/operations/system_tables/asynchronous_metrics) \ No newline at end of file + [Original article](https://clickhouse.tech/docs/en/operations/system-tables/asynchronous_metrics) diff --git a/docs/en/operations/system-tables/clusters.md b/docs/en/operations/system-tables/clusters.md index e9721379d7b..bf56ac7d33f 100644 --- a/docs/en/operations/system-tables/clusters.md +++ b/docs/en/operations/system-tables/clusters.md @@ -23,4 +23,4 @@ Please note that `errors_count` is updated once per query to the cluster, but `e - [distributed\_replica\_error\_cap setting](../../operations/settings/settings.md#settings-distributed_replica_error_cap) - [distributed\_replica\_error\_half\_life setting](../../operations/settings/settings.md#settings-distributed_replica_error_half_life) -[Original article](https://clickhouse.tech/docs/en/operations/system_tables/clusters) +[Original article](https://clickhouse.tech/docs/en/operations/system-tables/clusters) diff --git a/docs/en/operations/system-tables/columns.md b/docs/en/operations/system-tables/columns.md index 4d8077ddeac..33b284fc816 100644 --- a/docs/en/operations/system-tables/columns.md +++ b/docs/en/operations/system-tables/columns.md @@ -21,4 +21,4 @@ The `system.columns` table contains the following columns (the column type is sh - `is_in_primary_key` (UInt8) — Flag that indicates whether the column is in the primary key expression. - `is_in_sampling_key` (UInt8) — Flag that indicates whether the column is in the sampling key expression. 
-[Original article](https://clickhouse.tech/docs/en/operations/system_tables/columns) +[Original article](https://clickhouse.tech/docs/en/operations/system-tables/columns) diff --git a/docs/en/operations/system-tables/contributors.md b/docs/en/operations/system-tables/contributors.md index 37d01ef6204..a718c403c11 100644 --- a/docs/en/operations/system-tables/contributors.md +++ b/docs/en/operations/system-tables/contributors.md @@ -38,4 +38,4 @@ SELECT * FROM system.contributors WHERE name = 'Olga Khvostikova' │ Olga Khvostikova │ └──────────────────┘ ``` -[Original article](https://clickhouse.tech/docs/en/operations/system_tables/contributors) +[Original article](https://clickhouse.tech/docs/en/operations/system-tables/contributors) diff --git a/docs/en/operations/system-tables/current-roles.md b/docs/en/operations/system-tables/current-roles.md index f10dbe69918..56dbb602637 100644 --- a/docs/en/operations/system-tables/current-roles.md +++ b/docs/en/operations/system-tables/current-roles.md @@ -8,4 +8,4 @@ Columns: - `with_admin_option` ([UInt8](../../sql-reference/data-types/int-uint.md#uint-ranges)) — Flag that shows whether `current_role` is a role with `ADMIN OPTION` privilege. - `is_default` ([UInt8](../../sql-reference/data-types/int-uint.md#uint-ranges)) — Flag that shows whether `current_role` is a default role. - [Original article](https://clickhouse.tech/docs/en/operations/system_tables/current-roles) + [Original article](https://clickhouse.tech/docs/en/operations/system-tables/current-roles) diff --git a/docs/en/operations/system-tables/data_type_families.md b/docs/en/operations/system-tables/data_type_families.md index ddda91ed151..e149b9de3e4 100644 --- a/docs/en/operations/system-tables/data_type_families.md +++ b/docs/en/operations/system-tables/data_type_families.md @@ -33,4 +33,4 @@ SELECT * FROM system.data_type_families WHERE alias_to = 'String' - [Syntax](../../sql-reference/syntax.md) — Information about supported syntax. -[Original article](https://clickhouse.tech/docs/en/operations/system_tables/data_type_families) +[Original article](https://clickhouse.tech/docs/en/operations/system-tables/data_type_families) diff --git a/docs/en/operations/system-tables/databases.md b/docs/en/operations/system-tables/databases.md index 84b696a3bf8..9318df30a28 100644 --- a/docs/en/operations/system-tables/databases.md +++ b/docs/en/operations/system-tables/databases.md @@ -6,4 +6,4 @@ Each database that the server knows about has a corresponding entry in the table This system table is used for implementing the `SHOW DATABASES` query. -[Original article](https://clickhouse.tech/docs/en/operations/system_tables/databases) \ No newline at end of file +[Original article](https://clickhouse.tech/docs/en/operations/system-tables/databases) diff --git a/docs/en/operations/system-tables/detached_parts.md b/docs/en/operations/system-tables/detached_parts.md index ade89bd40c4..a5748128426 100644 --- a/docs/en/operations/system-tables/detached_parts.md +++ b/docs/en/operations/system-tables/detached_parts.md @@ -8,4 +8,4 @@ For the description of other columns, see [system.parts](../../operations/system If part name is invalid, values of some columns may be `NULL`. Such parts can be deleted with [ALTER TABLE DROP DETACHED PART](../../sql-reference/statements/alter/partition.md#alter_drop-detached). 
-[Original article](https://clickhouse.tech/docs/en/operations/system_tables/detached_parts) +[Original article](https://clickhouse.tech/docs/en/operations/system-tables/detached_parts) diff --git a/docs/en/operations/system-tables/dictionaries.md b/docs/en/operations/system-tables/dictionaries.md index 3d3bbe2af4e..4a94ff5b41b 100644 --- a/docs/en/operations/system-tables/dictionaries.md +++ b/docs/en/operations/system-tables/dictionaries.md @@ -60,4 +60,4 @@ SELECT * FROM system.dictionaries └──────────┴──────┴────────┴─────────────┴──────┴────────┴──────────────────────────────────────┴─────────────────────┴─────────────────┴─────────────┴──────────┴───────────────┴───────────────────────┴────────────────────────────┴──────────────┴──────────────┴─────────────────────┴──────────────────────────────┘───────────────────────┴────────────────┘ ``` -[Original article](https://clickhouse.tech/docs/en/operations/system_tables/dictionaries) \ No newline at end of file +[Original article](https://clickhouse.tech/docs/en/operations/system-tables/dictionaries) diff --git a/docs/en/operations/system-tables/disks.md b/docs/en/operations/system-tables/disks.md index 9c01b6d9aa4..2bd871e73ee 100644 --- a/docs/en/operations/system-tables/disks.md +++ b/docs/en/operations/system-tables/disks.md @@ -10,4 +10,4 @@ Columns: - `total_space` ([UInt64](../../sql-reference/data-types/int-uint.md)) — Disk volume in bytes. - `keep_free_space` ([UInt64](../../sql-reference/data-types/int-uint.md)) — Amount of disk space that should stay free on disk in bytes. Defined in the `keep_free_space_bytes` parameter of disk configuration. -[Original article](https://clickhouse.tech/docs/en/operations/system_tables/disks) +[Original article](https://clickhouse.tech/docs/en/operations/system-tables/disks) diff --git a/docs/en/operations/system-tables/enabled-roles.md b/docs/en/operations/system-tables/enabled-roles.md index 27875fcf984..c03129b32dd 100644 --- a/docs/en/operations/system-tables/enabled-roles.md +++ b/docs/en/operations/system-tables/enabled-roles.md @@ -9,4 +9,4 @@ Columns: - `is_current` ([UInt8](../../sql-reference/data-types/int-uint.md#uint-ranges)) — Flag that shows whether `enabled_role` is a current role of a current user. - `is_default` ([UInt8](../../sql-reference/data-types/int-uint.md#uint-ranges)) — Flag that shows whether `enabled_role` is a default role. -[Original article](https://clickhouse.tech/docs/en/operations/system_tables/enabled-roles) \ No newline at end of file +[Original article](https://clickhouse.tech/docs/en/operations/system-tables/enabled-roles) diff --git a/docs/en/operations/system-tables/events.md b/docs/en/operations/system-tables/events.md index d23533189c7..e6d4e1cf905 100644 --- a/docs/en/operations/system-tables/events.md +++ b/docs/en/operations/system-tables/events.md @@ -31,4 +31,4 @@ SELECT * FROM system.events LIMIT 5 - [system.metric\_log](../../operations/system-tables/metric_log.md#system_tables-metric_log) — Contains a history of metrics values from tables `system.metrics` и `system.events`. - [Monitoring](../../operations/monitoring.md) — Base concepts of ClickHouse monitoring. 
-[Original article](https://clickhouse.tech/docs/en/operations/system_tables/events) +[Original article](https://clickhouse.tech/docs/en/operations/system-tables/events) diff --git a/docs/en/operations/system-tables/functions.md b/docs/en/operations/system-tables/functions.md index d9a5e3cc363..6441bbf59e4 100644 --- a/docs/en/operations/system-tables/functions.md +++ b/docs/en/operations/system-tables/functions.md @@ -7,4 +7,4 @@ Columns: - `name`(`String`) – The name of the function. - `is_aggregate`(`UInt8`) — Whether the function is aggregate. -[Original article](https://clickhouse.tech/docs/en/operations/system_tables/functions) +[Original article](https://clickhouse.tech/docs/en/operations/system-tables/functions) diff --git a/docs/en/operations/system-tables/grants.md b/docs/en/operations/system-tables/grants.md index fb2a91ab30a..927fa4f3227 100644 --- a/docs/en/operations/system-tables/grants.md +++ b/docs/en/operations/system-tables/grants.md @@ -21,4 +21,4 @@ Columns: - `grant_option` ([UInt8](../../sql-reference/data-types/int-uint.md#uint-ranges)) — Permission is granted `WITH GRANT OPTION`, see [GRANT](../../sql-reference/statements/grant.md#grant-privigele-syntax). -[Original article](https://clickhouse.tech/docs/en/operations/system_tables/grants) +[Original article](https://clickhouse.tech/docs/en/operations/system-tables/grants) diff --git a/docs/en/operations/system-tables/graphite_retentions.md b/docs/en/operations/system-tables/graphite_retentions.md index f5d65bbe3fe..4aeccee9cfd 100644 --- a/docs/en/operations/system-tables/graphite_retentions.md +++ b/docs/en/operations/system-tables/graphite_retentions.md @@ -14,4 +14,4 @@ Columns: - `Tables.database` (Array(String)) - Array of names of database tables that use the `config_name` parameter. - `Tables.table` (Array(String)) - Array of table names that use the `config_name` parameter. -[Original article](https://clickhouse.tech/docs/en/operations/system_tables/graphite_retentions) +[Original article](https://clickhouse.tech/docs/en/operations/system-tables/graphite_retentions) diff --git a/docs/en/operations/system-tables/licenses.md b/docs/en/operations/system-tables/licenses.md index c95e4e8b9b4..a9cada507c6 100644 --- a/docs/en/operations/system-tables/licenses.md +++ b/docs/en/operations/system-tables/licenses.md @@ -36,4 +36,4 @@ SELECT library_name, license_type, license_path FROM system.licenses LIMIT 15 ``` -[Original article](https://clickhouse.tech/docs/en/operations/system_tables/licenses) +[Original article](https://clickhouse.tech/docs/en/operations/system-tables/licenses) diff --git a/docs/en/operations/system-tables/merge_tree_settings.md b/docs/en/operations/system-tables/merge_tree_settings.md index 78aab24cb41..2d593392894 100644 --- a/docs/en/operations/system-tables/merge_tree_settings.md +++ b/docs/en/operations/system-tables/merge_tree_settings.md @@ -10,4 +10,4 @@ Columns: - `type` (String) — Setting type (implementation specific string value). - `changed` (UInt8) — Whether the setting was explicitly defined in the config or explicitly changed. 
-[Original article](https://clickhouse.tech/docs/en/operations/system_tables/merge_tree_settings) +[Original article](https://clickhouse.tech/docs/en/operations/system-tables/merge_tree_settings) diff --git a/docs/en/operations/system-tables/merges.md b/docs/en/operations/system-tables/merges.md index 3e712e2962c..c7bdaee42e1 100644 --- a/docs/en/operations/system-tables/merges.md +++ b/docs/en/operations/system-tables/merges.md @@ -22,4 +22,4 @@ Columns: - `merge_type` — The type of current merge. Empty if it's an mutation. - `merge_algorithm` — The algorithm used in current merge. Empty if it's an mutation. -[Original article](https://clickhouse.tech/docs/en/operations/system_tables/merges) +[Original article](https://clickhouse.tech/docs/en/operations/system-tables/merges) diff --git a/docs/en/operations/system-tables/metric_log.md b/docs/en/operations/system-tables/metric_log.md index 063fe81923b..1166ead7f9d 100644 --- a/docs/en/operations/system-tables/metric_log.md +++ b/docs/en/operations/system-tables/metric_log.md @@ -54,4 +54,4 @@ CurrentMetric_DistributedFilesToInsert: 0 - [system.metrics](../../operations/system-tables/metrics.md) — Contains instantly calculated metrics. - [Monitoring](../../operations/monitoring.md) — Base concepts of ClickHouse monitoring. -[Original article](https://clickhouse.tech/docs/en/operations/system_tables/metric_log) +[Original article](https://clickhouse.tech/docs/en/operations/system-tables/metric_log) diff --git a/docs/en/operations/system-tables/metrics.md b/docs/en/operations/system-tables/metrics.md index cf4c6efe8d4..fcd499cc2fc 100644 --- a/docs/en/operations/system-tables/metrics.md +++ b/docs/en/operations/system-tables/metrics.md @@ -38,4 +38,4 @@ SELECT * FROM system.metrics LIMIT 10 - [system.metric\_log](../../operations/system-tables/metric_log.md#system_tables-metric_log) — Contains a history of metrics values from tables `system.metrics` и `system.events`. - [Monitoring](../../operations/monitoring.md) — Base concepts of ClickHouse monitoring. -[Original article](https://clickhouse.tech/docs/en/operations/system_tables/metrics) +[Original article](https://clickhouse.tech/docs/en/operations/system-tables/metrics) diff --git a/docs/en/operations/system-tables/mutations.md b/docs/en/operations/system-tables/mutations.md index e5ea7eab457..24fa559197c 100644 --- a/docs/en/operations/system-tables/mutations.md +++ b/docs/en/operations/system-tables/mutations.md @@ -45,4 +45,4 @@ If there were problems with mutating some data parts, the following columns cont - [MergeTree](../../engines/table-engines/mergetree-family/mergetree.md) table engine - [ReplicatedMergeTree](../../engines/table-engines/mergetree-family/replication.md) family -[Original article](https://clickhouse.tech/docs/en/operations/system_tables/mutations) +[Original article](https://clickhouse.tech/docs/en/operations/system-tables/mutations) diff --git a/docs/en/operations/system-tables/numbers.md b/docs/en/operations/system-tables/numbers.md index 9b7e148242c..d75487c0297 100644 --- a/docs/en/operations/system-tables/numbers.md +++ b/docs/en/operations/system-tables/numbers.md @@ -6,4 +6,4 @@ You can use this table for tests, or if you need to do a brute force search. Reads from this table are not parallelized. 
-[Original article](https://clickhouse.tech/docs/en/operations/system_tables/numbers) +[Original article](https://clickhouse.tech/docs/en/operations/system-tables/numbers) diff --git a/docs/en/operations/system-tables/numbers_mt.md b/docs/en/operations/system-tables/numbers_mt.md index 870b256223e..d8b44ce4e7a 100644 --- a/docs/en/operations/system-tables/numbers_mt.md +++ b/docs/en/operations/system-tables/numbers_mt.md @@ -4,4 +4,4 @@ The same as [system.numbers](../../operations/system-tables/numbers.md) but read Used for tests. -[Original article](https://clickhouse.tech/docs/en/operations/system_tables/numbers_mt) +[Original article](https://clickhouse.tech/docs/en/operations/system-tables/numbers_mt) diff --git a/docs/en/operations/system-tables/one.md b/docs/en/operations/system-tables/one.md index 854fab32730..ee8d79f0f17 100644 --- a/docs/en/operations/system-tables/one.md +++ b/docs/en/operations/system-tables/one.md @@ -6,4 +6,4 @@ This table is used if a `SELECT` query doesn’t specify the `FROM` clause. This is similar to the `DUAL` table found in other DBMSs. -[Original article](https://clickhouse.tech/docs/en/operations/system_tables/one) +[Original article](https://clickhouse.tech/docs/en/operations/system-tables/one) diff --git a/docs/en/operations/system-tables/part_log.md b/docs/en/operations/system-tables/part_log.md index e7c157077e3..e340b0fa046 100644 --- a/docs/en/operations/system-tables/part_log.md +++ b/docs/en/operations/system-tables/part_log.md @@ -31,4 +31,4 @@ The `system.part_log` table contains the following columns: The `system.part_log` table is created after the first inserting data to the `MergeTree` table. -[Original article](https://clickhouse.tech/docs/en/operations/system_tables/part_log) +[Original article](https://clickhouse.tech/docs/en/operations/system-tables/part_log) diff --git a/docs/en/operations/system-tables/parts.md b/docs/en/operations/system-tables/parts.md index f02d1ebc114..872125d7fda 100644 --- a/docs/en/operations/system-tables/parts.md +++ b/docs/en/operations/system-tables/parts.md @@ -155,4 +155,4 @@ move_ttl_info.max: [] - [MergeTree family](../../engines/table-engines/mergetree-family/mergetree.md) - [TTL for Columns and Tables](../../engines/table-engines/mergetree-family/mergetree.md#table_engine-mergetree-ttl) -[Original article](https://clickhouse.tech/docs/en/operations/system_tables/parts) +[Original article](https://clickhouse.tech/docs/en/operations/system-tables/parts) diff --git a/docs/en/operations/system-tables/processes.md b/docs/en/operations/system-tables/processes.md index 0e44c61a4f6..43595ac11f7 100644 --- a/docs/en/operations/system-tables/processes.md +++ b/docs/en/operations/system-tables/processes.md @@ -14,4 +14,4 @@ Columns: - `query` (String) – The query text. For `INSERT`, it doesn’t include the data to insert. - `query_id` (String) – Query ID, if defined. 
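Currently executing queries can then be inspected with something like the following (a sketch; `user` and `elapsed` are further `system.processes` columns in addition to the two described above):

```sql
-- Queries that are running right now, with elapsed time in seconds.
SELECT query_id, user, elapsed, query
FROM system.processes;
```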
-[Original article](https://clickhouse.tech/docs/en/operations/system_tables/processes) +[Original article](https://clickhouse.tech/docs/en/operations/system-tables/processes) diff --git a/docs/en/operations/system-tables/query_log.md b/docs/en/operations/system-tables/query_log.md index 72927b5a7e9..a334e49a54b 100644 --- a/docs/en/operations/system-tables/query_log.md +++ b/docs/en/operations/system-tables/query_log.md @@ -138,5 +138,5 @@ Settings.Values: ['0','random','1','10000000000'] - [system.query\_thread\_log](../../operations/system-tables/query_thread_log.md#system_tables-query_thread_log) — This table contains information about each query execution thread. -[Original article](https://clickhouse.tech/docs/en/operations/system_tables/query_log) +[Original article](https://clickhouse.tech/docs/en/operations/system-tables/query_log) diff --git a/docs/en/operations/system-tables/query_thread_log.md b/docs/en/operations/system-tables/query_thread_log.md index 3dcd05c4cc3..35f9b4e1341 100644 --- a/docs/en/operations/system-tables/query_thread_log.md +++ b/docs/en/operations/system-tables/query_thread_log.md @@ -113,4 +113,4 @@ ProfileEvents.Values: [1,97,81,5,81] - [system.query\_log](../../operations/system-tables/query_log.md#system_tables-query_log) — Description of the `query_log` system table which contains common information about queries execution. -[Original article](https://clickhouse.tech/docs/en/operations/system_tables/query_thread_log) +[Original article](https://clickhouse.tech/docs/en/operations/system-tables/query_thread_log) diff --git a/docs/en/operations/system-tables/quota_limits.md b/docs/en/operations/system-tables/quota_limits.md index 065296f5df3..b8964b74af4 100644 --- a/docs/en/operations/system-tables/quota_limits.md +++ b/docs/en/operations/system-tables/quota_limits.md @@ -16,4 +16,4 @@ Columns: - `max_read_bytes` ([Nullable](../../sql-reference/data-types/nullable.md)([UInt64](../../sql-reference/data-types/int-uint.md))) — Maximum number of bytes read from all tables and table functions participated in queries. - `max_execution_time` ([Nullable](../../sql-reference/data-types/nullable.md)([Float64](../../sql-reference/data-types/float.md))) — Maximum of the query execution time, in seconds. 
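A sketch of how the configured limits can be inspected (assuming the `quota_name` and `duration` key columns that identify each interval of a quota):

```sql
-- Limits per quota and per interval length.
SELECT quota_name, duration, max_queries, max_execution_time
FROM system.quota_limits;
```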
-[Original article](https://clickhouse.tech/docs/en/operations/system_tables/quota_limits) +[Original article](https://clickhouse.tech/docs/en/operations/system-tables/quota_limits) diff --git a/docs/en/operations/system-tables/quota_usage.md b/docs/en/operations/system-tables/quota_usage.md index 0eb59fd6453..7f8495c0288 100644 --- a/docs/en/operations/system-tables/quota_usage.md +++ b/docs/en/operations/system-tables/quota_usage.md @@ -27,4 +27,4 @@ Columns: - [SHOW QUOTA](../../sql-reference/statements/show.md#show-quota-statement) -[Original article](https://clickhouse.tech/docs/en/operations/system_tables/quota_usage) +[Original article](https://clickhouse.tech/docs/en/operations/system-tables/quota_usage) diff --git a/docs/en/operations/system-tables/quotas.md b/docs/en/operations/system-tables/quotas.md index f4f52a4a131..1692f3911f2 100644 --- a/docs/en/operations/system-tables/quotas.md +++ b/docs/en/operations/system-tables/quotas.md @@ -24,5 +24,5 @@ Columns: - [SHOW QUOTAS](../../sql-reference/statements/show.md#show-quotas-statement) -[Original article](https://clickhouse.tech/docs/en/operations/system_tables/quotas) +[Original article](https://clickhouse.tech/docs/en/operations/system-tables/quotas) diff --git a/docs/en/operations/system-tables/quotas_usage.md b/docs/en/operations/system-tables/quotas_usage.md index ed6be820b26..ec3e0db7b3d 100644 --- a/docs/en/operations/system-tables/quotas_usage.md +++ b/docs/en/operations/system-tables/quotas_usage.md @@ -28,4 +28,4 @@ Columns: - [SHOW QUOTA](../../sql-reference/statements/show.md#show-quota-statement) -[Original article](https://clickhouse.tech/docs/en/operations/system_tables/quotas_usage) +[Original article](https://clickhouse.tech/docs/en/operations/system-tables/quotas_usage) diff --git a/docs/en/operations/system-tables/replicas.md b/docs/en/operations/system-tables/replicas.md index 17519690951..a96b517518e 100644 --- a/docs/en/operations/system-tables/replicas.md +++ b/docs/en/operations/system-tables/replicas.md @@ -120,5 +120,5 @@ WHERE If this query doesn’t return anything, it means that everything is fine. -[Original article](https://clickhouse.tech/docs/en/operations/system_tables/replicas) +[Original article](https://clickhouse.tech/docs/en/operations/system-tables/replicas) diff --git a/docs/en/operations/system-tables/role-grants.md b/docs/en/operations/system-tables/role-grants.md index 5eb18b0dca7..d90bc1f77be 100644 --- a/docs/en/operations/system-tables/role-grants.md +++ b/docs/en/operations/system-tables/role-grants.md @@ -18,4 +18,4 @@ Columns: - 1 — The role has `ADMIN OPTION` privilege. - 0 — The role without `ADMIN OPTION` privilege. 
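For illustration, the flag can be checked per grant with a query of this shape (a sketch; `user_name` and `granted_role_name` are the identifying columns of `system.role_grants`):

```sql
-- Which users hold which roles, and whether they may grant them further.
SELECT user_name, granted_role_name, with_admin_option
FROM system.role_grants;
```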
-[Original article](https://clickhouse.tech/docs/en/operations/system_tables/role-grants) \ No newline at end of file +[Original article](https://clickhouse.tech/docs/en/operations/system-tables/role-grants) diff --git a/docs/en/operations/system-tables/roles.md b/docs/en/operations/system-tables/roles.md index 4ab5102dfc8..e68d5ed290a 100644 --- a/docs/en/operations/system-tables/roles.md +++ b/docs/en/operations/system-tables/roles.md @@ -12,4 +12,4 @@ Columns: - [SHOW ROLES](../../sql-reference/statements/show.md#show-roles-statement) -[Original article](https://clickhouse.tech/docs/en/operations/system_tables/roles) +[Original article](https://clickhouse.tech/docs/en/operations/system-tables/roles) diff --git a/docs/en/operations/system-tables/row_policies.md b/docs/en/operations/system-tables/row_policies.md index 97474d1b3ee..767270d64ae 100644 --- a/docs/en/operations/system-tables/row_policies.md +++ b/docs/en/operations/system-tables/row_policies.md @@ -31,4 +31,4 @@ Columns: - [SHOW POLICIES](../../sql-reference/statements/show.md#show-policies-statement) -[Original article](https://clickhouse.tech/docs/en/operations/system_tables/row_policies) +[Original article](https://clickhouse.tech/docs/en/operations/system-tables/row_policies) diff --git a/docs/en/operations/system-tables/settings.md b/docs/en/operations/system-tables/settings.md index a1db0a3d558..685bd25bd9e 100644 --- a/docs/en/operations/system-tables/settings.md +++ b/docs/en/operations/system-tables/settings.md @@ -49,4 +49,4 @@ SELECT * FROM system.settings WHERE changed AND name='load_balancing' - [Permissions for Queries](../../operations/settings/permissions-for-queries.md#settings_readonly) - [Constraints on Settings](../../operations/settings/constraints-on-settings.md) -[Original article](https://clickhouse.tech/docs/en/operations/system_tables/settings) +[Original article](https://clickhouse.tech/docs/en/operations/system-tables/settings) diff --git a/docs/en/operations/system-tables/settings_profile_elements.md b/docs/en/operations/system-tables/settings_profile_elements.md index d0f2c3c4527..3c8c728e645 100644 --- a/docs/en/operations/system-tables/settings_profile_elements.md +++ b/docs/en/operations/system-tables/settings_profile_elements.md @@ -27,4 +27,4 @@ Columns: - `inherit_profile` ([Nullable](../../sql-reference/data-types/nullable.md)([String](../../sql-reference/data-types/string.md))) — A parent profile for this setting profile. `NULL` if not set. Setting profile will inherit all the settings' values and constraints (`min`, `max`, `readonly`) from its parent profiles. 
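The inheritance chain described above can be made visible with a query of this shape (a minimal sketch using only columns from the list):

```sql
-- Profile elements with their constraints and parent profile, if any.
SELECT profile_name, setting_name, value, min, max, inherit_profile
FROM system.settings_profile_elements;
```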
-[Original article](https://clickhouse.tech/docs/en/operations/system_tables/settings_profile_elements) +[Original article](https://clickhouse.tech/docs/en/operations/system-tables/settings_profile_elements) diff --git a/docs/en/operations/system-tables/settings_profiles.md b/docs/en/operations/system-tables/settings_profiles.md index a06b26b9cb6..80dc5172f4e 100644 --- a/docs/en/operations/system-tables/settings_profiles.md +++ b/docs/en/operations/system-tables/settings_profiles.md @@ -21,4 +21,4 @@ Columns: - [SHOW PROFILES](../../sql-reference/statements/show.md#show-profiles-statement) -[Original article](https://clickhouse.tech/docs/en/operations/system_tables/settings_profiles) +[Original article](https://clickhouse.tech/docs/en/operations/system-tables/settings_profiles) diff --git a/docs/en/operations/system-tables/storage_policies.md b/docs/en/operations/system-tables/storage_policies.md index c8171b50aed..1c243fc58d5 100644 --- a/docs/en/operations/system-tables/storage_policies.md +++ b/docs/en/operations/system-tables/storage_policies.md @@ -13,4 +13,4 @@ Columns: If the storage policy contains more then one volume, then information for each volume is stored in the individual row of the table. -[Original article](https://clickhouse.tech/docs/en/operations/system_tables/storage_policies) +[Original article](https://clickhouse.tech/docs/en/operations/system-tables/storage_policies) diff --git a/docs/en/operations/system-tables/table_engines.md b/docs/en/operations/system-tables/table_engines.md index 4ca1fc657ee..dbaad893efa 100644 --- a/docs/en/operations/system-tables/table_engines.md +++ b/docs/en/operations/system-tables/table_engines.md @@ -34,4 +34,4 @@ WHERE name in ('Kafka', 'MergeTree', 'ReplicatedCollapsingMergeTree') - Kafka [settings](../../engines/table-engines/integrations/kafka.md#table_engine-kafka-creating-a-table) - Join [settings](../../engines/table-engines/special/join.md#join-limitations-and-settings) -[Original article](https://clickhouse.tech/docs/en/operations/system_tables/table_engines) +[Original article](https://clickhouse.tech/docs/en/operations/system-tables/table_engines) diff --git a/docs/en/operations/system-tables/tables.md b/docs/en/operations/system-tables/tables.md index e69b8aa67a0..41a296705ba 100644 --- a/docs/en/operations/system-tables/tables.md +++ b/docs/en/operations/system-tables/tables.md @@ -52,4 +52,4 @@ This table contains the following columns (the column type is shown in brackets) The `system.tables` table is used in `SHOW TABLES` query implementation. -[Original article](https://clickhouse.tech/docs/en/operations/system_tables/tables) +[Original article](https://clickhouse.tech/docs/en/operations/system-tables/tables) diff --git a/docs/en/operations/system-tables/text_log.md b/docs/en/operations/system-tables/text_log.md index bd92519b96b..32f626b3db6 100644 --- a/docs/en/operations/system-tables/text_log.md +++ b/docs/en/operations/system-tables/text_log.md @@ -25,4 +25,4 @@ Columns: - `source_file` (LowCardinality(String)) — Source file from which the logging was done. - `source_line` (UInt64) — Source line from which the logging was done. 
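Recent server log records can be pulled from it with a query of this shape (a sketch; `event_time`, `level`, `logger_name` and `message` are the usual `system.text_log` columns alongside the two above):

```sql
-- The five most recent log records, newest first.
SELECT event_time, level, logger_name, message
FROM system.text_log
ORDER BY event_time DESC
LIMIT 5;
```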
- [Original article](https://clickhouse.tech/docs/en/operations/system_tables/text_log) \ No newline at end of file + [Original article](https://clickhouse.tech/docs/en/operations/system-tables/text_log) diff --git a/docs/en/operations/system-tables/time_zones.md b/docs/en/operations/system-tables/time_zones.md index 1b84ae7fe37..fa467124884 100644 --- a/docs/en/operations/system-tables/time_zones.md +++ b/docs/en/operations/system-tables/time_zones.md @@ -27,4 +27,4 @@ SELECT * FROM system.time_zones LIMIT 10 └────────────────────┘ ``` -[Original article](https://clickhouse.tech/docs/en/operations/system_tables/time_zones) +[Original article](https://clickhouse.tech/docs/en/operations/system-tables/time_zones) diff --git a/docs/en/operations/system-tables/trace_log.md b/docs/en/operations/system-tables/trace_log.md index b911fdd2263..3d83db3bb89 100644 --- a/docs/en/operations/system-tables/trace_log.md +++ b/docs/en/operations/system-tables/trace_log.md @@ -47,4 +47,4 @@ query_id: acc4d61f-5bd1-4a3e-bc91-2180be37c915 trace: [94222141367858,94222152240175,94222152325351,94222152329944,94222152330796,94222151449980,94222144088167,94222151682763,94222144088167,94222151682763,94222144088167,94222144058283,94222144059248,94222091840750,94222091842302,94222091831228,94222189631488,140509950166747,140509942945935] ``` - [Original article](https://clickhouse.tech/docs/en/operations/system_tables/trace_log) \ No newline at end of file + [Original article](https://clickhouse.tech/docs/en/operations/system-tables/trace_log) diff --git a/docs/en/operations/system-tables/users.md b/docs/en/operations/system-tables/users.md index 2227816aff3..11fdeb1e9ae 100644 --- a/docs/en/operations/system-tables/users.md +++ b/docs/en/operations/system-tables/users.md @@ -31,4 +31,4 @@ Columns: - [SHOW USERS](../../sql-reference/statements/show.md#show-users-statement) -[Original article](https://clickhouse.tech/docs/en/operations/system_tables/users) +[Original article](https://clickhouse.tech/docs/en/operations/system-tables/users) diff --git a/docs/en/operations/system-tables/zookeeper.md b/docs/en/operations/system-tables/zookeeper.md index ddb4d305964..c975970abd9 100644 --- a/docs/en/operations/system-tables/zookeeper.md +++ b/docs/en/operations/system-tables/zookeeper.md @@ -68,4 +68,4 @@ numChildren: 7 pzxid: 987021252247 path: /clickhouse/tables/01-08/visits/replicas ``` -[Original article](https://clickhouse.tech/docs/en/operations/system_tables/zookeeper) +[Original article](https://clickhouse.tech/docs/en/operations/system-tables/zookeeper) diff --git a/docs/zh/operations/system-tables/asynchronous_metric_log.md b/docs/zh/operations/system-tables/asynchronous_metric_log.md index 9f6c697a18e..c6eb7754c23 100644 --- a/docs/zh/operations/system-tables/asynchronous_metric_log.md +++ b/docs/zh/operations/system-tables/asynchronous_metric_log.md @@ -3,6 +3,6 @@ machine_translated: true machine_translated_rev: 5decc73b5dc60054f19087d3690c4eb99446a6c3 --- -## 系统。asynchronous\_metric\_log {#system-tables-async-log} +## system.asynchronous_metric_log {#system-tables-async-log} 包含以下内容的历史值 `system.asynchronous_log` (见 [系统。asynchronous\_metrics](../../operations/system-tables/asynchronous_metrics.md#system_tables-asynchronous_metrics)) diff --git a/docs/zh/operations/system-tables/asynchronous_metrics.md b/docs/zh/operations/system-tables/asynchronous_metrics.md index 2bd615085a8..9180e88f2d0 100644 --- a/docs/zh/operations/system-tables/asynchronous_metrics.md +++ 
b/docs/zh/operations/system-tables/asynchronous_metrics.md @@ -3,14 +3,14 @@ machine_translated: true machine_translated_rev: 5decc73b5dc60054f19087d3690c4eb99446a6c3 --- -# 系统。asynchronous\_metrics {#system_tables-asynchronous_metrics} +# system.asynchronous_metrics {#system_tables-asynchronous_metrics} 包含在后台定期计算的指标。 例如,在使用的RAM量。 列: -- `metric` ([字符串](../../sql-reference/data-types/string.md)) — Metric name. -- `value` ([Float64](../../sql-reference/data-types/float.md)) — Metric value. +- `metric` ([字符串](../../sql-reference/data-types/string.md)) — 指标名。 +- `value` ([Float64](../../sql-reference/data-types/float.md)) — 指标值。 **示例** @@ -35,7 +35,7 @@ SELECT * FROM system.asynchronous_metrics LIMIT 10 **另请参阅** -- [监测](../../operations/monitoring.md) — Base concepts of ClickHouse monitoring. -- [系统。指标](../../operations/system-tables/metrics.md#system_tables-metrics) — Contains instantly calculated metrics. -- [系统。活动](../../operations/system-tables/events.md#system_tables-events) — Contains a number of events that have occurred. -- [系统。metric\_log](../../operations/system-tables/metric_log.md#system_tables-metric_log) — Contains a history of metrics values from tables `system.metrics` и `system.events`. +- [监测](../../operations/monitoring.md) — ClickHouse监控的基本概念。 +- [系统。指标](../../operations/system-tables/metrics.md#system_tables-metrics) — 包含即时计算的指标。 +- [系统。活动](../../operations/system-tables/events.md#system_tables-events) — 包含出现的事件的次数。 +- [系统。metric\_log](../../operations/system-tables/metric_log.md#system_tables-metric_log) — 包含`system.metrics` 和 `system.events`表中的指标的历史值。 diff --git a/docs/zh/operations/system-tables/clusters.md b/docs/zh/operations/system-tables/clusters.md index 4bc8d4210ff..e901ab039b6 100644 --- a/docs/zh/operations/system-tables/clusters.md +++ b/docs/zh/operations/system-tables/clusters.md @@ -3,22 +3,23 @@ machine_translated: true machine_translated_rev: 5decc73b5dc60054f19087d3690c4eb99446a6c3 --- -# 系统。集群 {#system-clusters} +# system.clusters {#system-clusters} 包含有关配置文件中可用的集群及其中的服务器的信息。 列: -- `cluster` (String) — The cluster name. -- `shard_num` (UInt32) — The shard number in the cluster, starting from 1. -- `shard_weight` (UInt32) — The relative weight of the shard when writing data. -- `replica_num` (UInt32) — The replica number in the shard, starting from 1. -- `host_name` (String) — The host name, as specified in the config. -- `host_address` (String) — The host IP address obtained from DNS. -- `port` (UInt16) — The port to use for connecting to the server. -- `user` (String) — The name of the user for connecting to the server. 
-- `errors_count` (UInt32)-此主机无法到达副本的次数。 -- `estimated_recovery_time` (UInt32)-剩下的秒数,直到副本错误计数归零,它被认为是恢复正常。 +- `cluster` (String) — 集群名。 +- `shard_num` (UInt32) — 集群中的分片序号,从1开始。 +- `shard_weight` (UInt32) — 写数据时该分片的相对权重。 +- `replica_num` (UInt32) — 分片中的副本序号,从1开始。 +- `host_name` (String) — 配置中指定的主机名。 +- `host_address` (String) — 从DNS获取的主机IP地址。 +- `port` (UInt16) — 连接到服务器的端口。 +- `user` (String) — 连接到服务器的用户名。 +- `errors_count` (UInt32) - 此主机无法访问副本的次数。 +- `slowdowns_count` (UInt32) - 与对冲请求建立连接时导致更改副本的减速次数。 +- `estimated_recovery_time` (UInt32) - 剩下的秒数,直到副本错误计数归零并被视为恢复正常。 请注意 `errors_count` 每个查询集群更新一次,但 `estimated_recovery_time` 按需重新计算。 所以有可能是非零的情况 `errors_count` 和零 `estimated_recovery_time`,下一个查询将为零 `errors_count` 并尝试使用副本,就好像它没有错误。 @@ -27,3 +28,5 @@ machine_translated_rev: 5decc73b5dc60054f19087d3690c4eb99446a6c3 - [表引擎分布式](../../engines/table-engines/special/distributed.md) - [distributed\_replica\_error\_cap设置](../../operations/settings/settings.md#settings-distributed_replica_error_cap) - [distributed\_replica\_error\_half\_life设置](../../operations/settings/settings.md#settings-distributed_replica_error_half_life) + +[原文](https://clickhouse.tech/docs/zh/operations/system-tables/clusters) diff --git a/docs/zh/operations/system-tables/columns.md b/docs/zh/operations/system-tables/columns.md index 24296dc715c..b21be98c0dc 100644 --- a/docs/zh/operations/system-tables/columns.md +++ b/docs/zh/operations/system-tables/columns.md @@ -25,3 +25,5 @@ machine_translated_rev: 5decc73b5dc60054f19087d3690c4eb99446a6c3 - `is_in_sorting_key` (UInt8) — Flag that indicates whether the column is in the sorting key expression. - `is_in_primary_key` (UInt8) — Flag that indicates whether the column is in the primary key expression. - `is_in_sampling_key` (UInt8) — Flag that indicates whether the column is in the sampling key expression. + +[原文](https://clickhouse.tech/docs/zh/operations/system-tables/columns) diff --git a/docs/zh/operations/system-tables/tables.md b/docs/zh/operations/system-tables/tables.md index a690e938a3a..0c3e913b9bb 100644 --- a/docs/zh/operations/system-tables/tables.md +++ b/docs/zh/operations/system-tables/tables.md @@ -5,15 +5,15 @@ machine_translated_rev: 5decc73b5dc60054f19087d3690c4eb99446a6c3 # 系统。表 {#system-tables} -包含服务器知道的每个表的元数据。 分离的表不显示在 `system.tables`. +包含服务器知道的每个表的元数据。 分离的表不显示在 `system.tables`。 此表包含以下列(列类型显示在括号中): -- `database` (String) — The name of the database the table is in. +- `database` (String) — 表所在的数据库名。 -- `name` (String) — Table name. +- `name` (String) — 表名。 -- `engine` (String) — Table engine name (without parameters). +- `engine` (String) — 表引擎名 (不包含参数)。 - `is_temporary` (UInt8)-指示表是否是临时的标志。 @@ -23,11 +23,11 @@ machine_translated_rev: 5decc73b5dc60054f19087d3690c4eb99446a6c3 - `metadata_modification_time` (DateTime)-表元数据的最新修改时间。 -- `dependencies_database` (数组(字符串))-数据库依赖关系.
+- `dependencies_database` (数组(字符串))-数据库依赖关系。 - `dependencies_table` (数组(字符串))-表依赖关系 ([MaterializedView](../../engines/table-engines/special/materializedview.md) 基于当前表的表)。 -- `create_table_query` (String)-用于创建表的查询。 +- `create_table_query` (String)-用于创建表的SQL语句。 - `engine_full` (String)-表引擎的参数。 @@ -44,11 +44,15 @@ machine_translated_rev: 5decc73b5dc60054f19087d3690c4eb99446a6c3 - [MergeTree](../../engines/table-engines/mergetree-family/mergetree.md#table_engine-mergetree-multiple-volumes) - [分布](../../engines/table-engines/special/distributed.md#distributed) -- `total_rows` (Nullable(UInt64))-总行数,如果可以快速确定表中的确切行数,否则 `Null` (包括内衣 `Buffer` 表)。 +- `total_rows` (Nullable(UInt64))-总行数,如果可以快速确定表中的确切行数,否则行数为`Null`(包括底层 `Buffer` 表)。 -- `total_bytes` (Nullable(UInt64))-总字节数,如果可以快速确定存储表的确切字节数,否则 `Null` (**不** 包括任何底层存储)。 +- `total_bytes` (Nullable(UInt64))-总字节数,如果可以快速确定存储表的确切字节数,否则字节数为`Null` (即**不** 包括任何底层存储)。 - - If the table stores data on disk, returns used space on disk (i.e. compressed). - - 如果表在内存中存储数据,返回在内存中使用的近似字节数. + - 如果表将数据存在磁盘上,返回实际使用的磁盘空间(压缩后)。 + - 如果表在内存中存储数据,返回在内存中使用的近似字节数。 -该 `system.tables` 表中使用 `SHOW TABLES` 查询实现。 +- `lifetime_rows` (Nullable(UInt64))-服务启动后插入的总行数(只针对`Buffer`表)。 + +`system.tables` 表被用于 `SHOW TABLES` 的查询实现中。 + +[原文](https://clickhouse.tech/docs/zh/operations/system-tables/tables) diff --git a/docs/zh/operations/system-tables/zookeeper.md b/docs/zh/operations/system-tables/zookeeper.md index b66e5262df3..79c9c041ca8 100644 --- a/docs/zh/operations/system-tables/zookeeper.md +++ b/docs/zh/operations/system-tables/zookeeper.md @@ -3,31 +3,31 @@ machine_translated: true machine_translated_rev: 5decc73b5dc60054f19087d3690c4eb99446a6c3 --- -# 系统。动物园管理员 {#system-zookeeper} +# system.zookeeper {#system-zookeeper} -如果未配置ZooKeeper,则表不存在。 允许从配置中定义的ZooKeeper集群读取数据。 -查询必须具有 ‘path’ WHERE子句中的平等条件。 这是ZooKeeper中您想要获取数据的孩子的路径。 +如果未配置ZooKeeper,则该表不存在。 允许从配置中定义的ZooKeeper集群读取数据。 +查询的 WHERE 子句中必须包含对 ‘path’ 的相等条件。 这是ZooKeeper中您想要获取数据的子节点路径。 -查询 `SELECT * FROM system.zookeeper WHERE path = '/clickhouse'` 输出对所有孩子的数据 `/clickhouse` 节点。 -要输出所有根节点的数据,write path= ‘/’. +查询 `SELECT * FROM system.zookeeper WHERE path = '/clickhouse'` 输出 `/clickhouse` 节点的所有子节点的数据。 +要输出所有根节点的数据,使用 path = ‘/’。 如果在指定的路径 ‘path’ 不存在,将引发异常。 列: -- `name` (String) — The name of the node. -- `path` (String) — The path to the node. -- `value` (String) — Node value. -- `dataLength` (Int32) — Size of the value. -- `numChildren` (Int32) — Number of descendants. -- `czxid` (Int64) — ID of the transaction that created the node. -- `mzxid` (Int64) — ID of the transaction that last changed the node. -- `pzxid` (Int64) — ID of the transaction that last deleted or added descendants. -- `ctime` (DateTime) — Time of node creation. -- `mtime` (DateTime) — Time of the last modification of the node. -- `version` (Int32) — Node version: the number of times the node was changed. -- `cversion` (Int32) — Number of added or removed descendants. -- `aversion` (Int32) — Number of changes to the ACL. -- `ephemeralOwner` (Int64) — For ephemeral nodes, the ID of the session that owns this node. 
+- `name` (String) — 节点的名字。 +- `path` (String) — 节点的路径。 +- `value` (String) — 节点的值。 +- `dataLength` (Int32) — 节点的值长度。 +- `numChildren` (Int32) — 子节点的个数。 +- `czxid` (Int64) — 创建该节点的事务ID。 +- `mzxid` (Int64) — 最后修改该节点的事务ID。 +- `pzxid` (Int64) — 最后删除或者增加子节点的事务ID。 +- `ctime` (DateTime) — 节点的创建时间。 +- `mtime` (DateTime) — 节点的最后修改时间。 +- `version` (Int32) — 节点版本:节点被修改的次数。 +- `cversion` (Int32) — 增加或删除子节点的个数。 +- `aversion` (Int32) — ACL的修改次数。 +- `ephemeralOwner` (Int64) — 针对临时节点,拥有该节点的会话ID。 示例: @@ -73,3 +73,4 @@ numChildren: 7 pzxid: 987021252247 path: /clickhouse/tables/01-08/visits/replicas ``` +[原文](https://clickhouse.tech/docs/zh/operations/system-tables/zookeeper) From 9fc890abf579c783fa6131eed915c187598b395a Mon Sep 17 00:00:00 2001 From: nickzhwang Date: Thu, 24 Jun 2021 20:47:02 +0800 Subject: [PATCH 365/931] resolve conflicts --- .../system-tables/asynchronous_metric_log.md | 2 +- .../system-tables/asynchronous_metrics.md | 4 +- docs/en/operations/system-tables/clusters.md | 2 +- docs/en/operations/system-tables/columns.md | 2 +- .../operations/system-tables/contributors.md | 2 +- .../operations/system-tables/current-roles.md | 2 +- .../system-tables/data_type_families.md | 2 +- docs/en/operations/system-tables/databases.md | 2 +- .../system-tables/detached_parts.md | 2 +- .../operations/system-tables/dictionaries.md | 2 +- docs/en/operations/system-tables/disks.md | 6 +-- .../operations/system-tables/enabled-roles.md | 2 +- docs/en/operations/system-tables/events.md | 2 +- docs/en/operations/system-tables/functions.md | 6 +-- docs/en/operations/system-tables/grants.md | 2 +- .../system-tables/graphite_retentions.md | 2 +- docs/en/operations/system-tables/licenses.md | 2 +- .../system-tables/merge_tree_settings.md | 2 +- docs/en/operations/system-tables/merges.md | 2 +- .../en/operations/system-tables/metric_log.md | 2 +- docs/en/operations/system-tables/metrics.md | 2 +- docs/en/operations/system-tables/mutations.md | 2 +- docs/en/operations/system-tables/numbers.md | 2 +- .../en/operations/system-tables/numbers_mt.md | 2 +- docs/en/operations/system-tables/one.md | 2 +- docs/en/operations/system-tables/part_log.md | 2 +- docs/en/operations/system-tables/parts.md | 2 +- docs/en/operations/system-tables/processes.md | 3 +- docs/en/operations/system-tables/query_log.md | 2 +- .../system-tables/query_thread_log.md | 2 +- .../operations/system-tables/quota_limits.md | 2 + .../operations/system-tables/quota_usage.md | 2 + docs/en/operations/system-tables/quotas.md | 2 +- .../operations/system-tables/quotas_usage.md | 4 +- docs/en/operations/system-tables/replicas.md | 2 +- .../operations/system-tables/role-grants.md | 2 +- docs/en/operations/system-tables/roles.md | 2 +- .../operations/system-tables/row_policies.md | 2 +- docs/en/operations/system-tables/settings.md | 2 +- .../settings_profile_elements.md | 2 +- .../system-tables/settings_profiles.md | 2 +- .../system-tables/storage_policies.md | 2 +- .../operations/system-tables/table_engines.md | 2 +- docs/en/operations/system-tables/tables.md | 2 +- docs/en/operations/system-tables/text_log.md | 2 +- .../en/operations/system-tables/time_zones.md | 2 +- docs/en/operations/system-tables/trace_log.md | 1 - docs/en/operations/system-tables/users.md | 2 +- docs/en/operations/system-tables/zookeeper.md | 2 +- .../system-tables/asynchronous_metric_log.md | 2 +- .../system-tables/asynchronous_metrics.md | 15 ++++--- docs/zh/operations/system-tables/clusters.md | 25 ++++++------ docs/zh/operations/system-tables/columns.md | 2 +
docs/zh/operations/system-tables/tables.md | 26 +++++++------ docs/zh/operations/system-tables/zookeeper.md | 39 ++++++++++--------- 55 files changed, 114 insertions(+), 105 deletions(-) diff --git a/docs/en/operations/system-tables/asynchronous_metric_log.md b/docs/en/operations/system-tables/asynchronous_metric_log.md index 5dcfca5fbda..b0480dc256a 100644 --- a/docs/en/operations/system-tables/asynchronous_metric_log.md +++ b/docs/en/operations/system-tables/asynchronous_metric_log.md @@ -36,4 +36,4 @@ SELECT * FROM system.asynchronous_metric_log LIMIT 10 - [system.asynchronous_metrics](../system-tables/asynchronous_metrics.md) — Contains metrics, calculated periodically in the background. - [system.metric_log](../system-tables/metric_log.md) — Contains history of metrics values from tables `system.metrics` and `system.events`, periodically flushed to disk. -[Original article](https://clickhouse.tech/docs/en/operations/system_tables/asynchronous_metric_log) +[Original article](https://clickhouse.tech/docs/en/operations/system-tables/asynchronous_metric_log) diff --git a/docs/en/operations/system-tables/asynchronous_metrics.md b/docs/en/operations/system-tables/asynchronous_metrics.md index b27434793c7..fc801aa1c80 100644 --- a/docs/en/operations/system-tables/asynchronous_metrics.md +++ b/docs/en/operations/system-tables/asynchronous_metrics.md @@ -33,6 +33,6 @@ SELECT * FROM system.asynchronous_metrics LIMIT 10 - [Monitoring](../../operations/monitoring.md) — Base concepts of ClickHouse monitoring. - [system.metrics](../../operations/system-tables/metrics.md#system_tables-metrics) — Contains instantly calculated metrics. - [system.events](../../operations/system-tables/events.md#system_tables-events) — Contains a number of events that have occurred. -- [system.metric_log](../../operations/system-tables/metric_log.md#system_tables-metric_log) — Contains a history of metrics values from tables `system.metrics` и `system.events`. +- [system.metric_log](../../operations/system-tables/metric_log.md#system_tables-metric_log) — Contains a history of metrics values from tables `system.metrics` and `system.events`. 
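To follow one metric over time, a query of this shape can be used (a sketch; 'MemoryResident' is only an example name, and on some versions the metric-name column is called `name` rather than `metric`):

```sql
-- History of a single background-collected metric.
SELECT event_time, value
FROM system.asynchronous_metric_log
WHERE metric = 'MemoryResident'
ORDER BY event_time DESC
LIMIT 10;
```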
- [Original article](https://clickhouse.tech/docs/en/operations/system_tables/asynchronous_metrics) \ No newline at end of file + [Original article](https://clickhouse.tech/docs/en/operations/system-tables/asynchronous_metrics) diff --git a/docs/en/operations/system-tables/clusters.md b/docs/en/operations/system-tables/clusters.md index 096eca12e7d..16cf183de53 100644 --- a/docs/en/operations/system-tables/clusters.md +++ b/docs/en/operations/system-tables/clusters.md @@ -68,4 +68,4 @@ estimated_recovery_time: 0 - [distributed_replica_error_cap setting](../../operations/settings/settings.md#settings-distributed_replica_error_cap) - [distributed_replica_error_half_life setting](../../operations/settings/settings.md#settings-distributed_replica_error_half_life) -[Original article](https://clickhouse.tech/docs/en/operations/system_tables/clusters) +[Original article](https://clickhouse.tech/docs/en/operations/system-tables/clusters) diff --git a/docs/en/operations/system-tables/columns.md b/docs/en/operations/system-tables/columns.md index 9160dca9a1a..471a1af1fe0 100644 --- a/docs/en/operations/system-tables/columns.md +++ b/docs/en/operations/system-tables/columns.md @@ -69,4 +69,4 @@ is_in_sampling_key: 0 compression_codec: ``` -[Original article](https://clickhouse.tech/docs/en/operations/system_tables/columns) +[Original article](https://clickhouse.tech/docs/en/operations/system-tables/columns) diff --git a/docs/en/operations/system-tables/contributors.md b/docs/en/operations/system-tables/contributors.md index 37d01ef6204..a718c403c11 100644 --- a/docs/en/operations/system-tables/contributors.md +++ b/docs/en/operations/system-tables/contributors.md @@ -38,4 +38,4 @@ SELECT * FROM system.contributors WHERE name = 'Olga Khvostikova' │ Olga Khvostikova │ └──────────────────┘ ``` -[Original article](https://clickhouse.tech/docs/en/operations/system_tables/contributors) +[Original article](https://clickhouse.tech/docs/en/operations/system-tables/contributors) diff --git a/docs/en/operations/system-tables/current-roles.md b/docs/en/operations/system-tables/current-roles.md index f10dbe69918..56dbb602637 100644 --- a/docs/en/operations/system-tables/current-roles.md +++ b/docs/en/operations/system-tables/current-roles.md @@ -8,4 +8,4 @@ Columns: - `with_admin_option` ([UInt8](../../sql-reference/data-types/int-uint.md#uint-ranges)) — Flag that shows whether `current_role` is a role with `ADMIN OPTION` privilege. - `is_default` ([UInt8](../../sql-reference/data-types/int-uint.md#uint-ranges)) — Flag that shows whether `current_role` is a default role. - [Original article](https://clickhouse.tech/docs/en/operations/system_tables/current-roles) + [Original article](https://clickhouse.tech/docs/en/operations/system-tables/current-roles) diff --git a/docs/en/operations/system-tables/data_type_families.md b/docs/en/operations/system-tables/data_type_families.md index 4e439f13aa5..fdce9c33b37 100644 --- a/docs/en/operations/system-tables/data_type_families.md +++ b/docs/en/operations/system-tables/data_type_families.md @@ -33,4 +33,4 @@ SELECT * FROM system.data_type_families WHERE alias_to = 'String' - [Syntax](../../sql-reference/syntax.md) — Information about supported syntax. 
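The mapping can also be listed in the opposite direction, from alias to base type (a sketch relying on the same three columns):

```sql
-- Every type name that is merely an alias for another type.
SELECT name, alias_to
FROM system.data_type_families
WHERE alias_to != ''
ORDER BY alias_to, name;
```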
-[Original article](https://clickhouse.tech/docs/en/operations/system_tables/data_type_families) +[Original article](https://clickhouse.tech/docs/en/operations/system-tables/data_type_families) diff --git a/docs/en/operations/system-tables/databases.md b/docs/en/operations/system-tables/databases.md index 8ef5551d9b0..2c78fd25c2b 100644 --- a/docs/en/operations/system-tables/databases.md +++ b/docs/en/operations/system-tables/databases.md @@ -35,4 +35,4 @@ SELECT * FROM system.databases └────────────────────────────────┴────────┴────────────────────────────┴─────────────────────────────────────────────────────────────────────┴──────────────────────────────────────┘ ``` -[Original article](https://clickhouse.tech/docs/en/operations/system_tables/databases) +[Original article](https://clickhouse.tech/docs/en/operations/system-tables/databases) diff --git a/docs/en/operations/system-tables/detached_parts.md b/docs/en/operations/system-tables/detached_parts.md index ade89bd40c4..a5748128426 100644 --- a/docs/en/operations/system-tables/detached_parts.md +++ b/docs/en/operations/system-tables/detached_parts.md @@ -8,4 +8,4 @@ For the description of other columns, see [system.parts](../../operations/system If part name is invalid, values of some columns may be `NULL`. Such parts can be deleted with [ALTER TABLE DROP DETACHED PART](../../sql-reference/statements/alter/partition.md#alter_drop-detached). -[Original article](https://clickhouse.tech/docs/en/operations/system_tables/detached_parts) +[Original article](https://clickhouse.tech/docs/en/operations/system-tables/detached_parts) diff --git a/docs/en/operations/system-tables/dictionaries.md b/docs/en/operations/system-tables/dictionaries.md index 2bc1be51f19..a34e893599c 100644 --- a/docs/en/operations/system-tables/dictionaries.md +++ b/docs/en/operations/system-tables/dictionaries.md @@ -61,4 +61,4 @@ SELECT * FROM system.dictionaries └──────────┴──────┴────────┴─────────────┴──────┴────────┴──────────────────────────────────────┴─────────────────────┴─────────────────┴─────────────┴──────────┴───────────────┴───────────────────────┴────────────────────────────┴──────────────┴──────────────┴─────────────────────┴──────────────────────────────┘───────────────────────┴────────────────┘ ``` -[Original article](https://clickhouse.tech/docs/en/operations/system_tables/dictionaries) +[Original article](https://clickhouse.tech/docs/en/operations/system-tables/dictionaries) diff --git a/docs/en/operations/system-tables/disks.md b/docs/en/operations/system-tables/disks.md index e9d324580d8..833a0b3b16b 100644 --- a/docs/en/operations/system-tables/disks.md +++ b/docs/en/operations/system-tables/disks.md @@ -10,9 +10,6 @@ Columns: - `total_space` ([UInt64](../../sql-reference/data-types/int-uint.md)) — Disk volume in bytes. - `keep_free_space` ([UInt64](../../sql-reference/data-types/int-uint.md)) — Amount of disk space that should stay free on disk in bytes. Defined in the `keep_free_space_bytes` parameter of disk configuration. -[Original article](https://clickhouse.tech/docs/en/operations/system_tables/disks) - - **Example** ```sql @@ -27,5 +24,4 @@ Columns: 1 rows in set. Elapsed: 0.001 sec. 
``` - - +[Original article](https://clickhouse.tech/docs/en/operations/system-tables/disks) diff --git a/docs/en/operations/system-tables/enabled-roles.md b/docs/en/operations/system-tables/enabled-roles.md index 27875fcf984..c03129b32dd 100644 --- a/docs/en/operations/system-tables/enabled-roles.md +++ b/docs/en/operations/system-tables/enabled-roles.md @@ -9,4 +9,4 @@ Columns: - `is_current` ([UInt8](../../sql-reference/data-types/int-uint.md#uint-ranges)) — Flag that shows whether `enabled_role` is a current role of a current user. - `is_default` ([UInt8](../../sql-reference/data-types/int-uint.md#uint-ranges)) — Flag that shows whether `enabled_role` is a default role. -[Original article](https://clickhouse.tech/docs/en/operations/system_tables/enabled-roles) \ No newline at end of file +[Original article](https://clickhouse.tech/docs/en/operations/system-tables/enabled-roles) diff --git a/docs/en/operations/system-tables/events.md b/docs/en/operations/system-tables/events.md index b4ced6e6bf6..2fcb5d8edec 100644 --- a/docs/en/operations/system-tables/events.md +++ b/docs/en/operations/system-tables/events.md @@ -31,4 +31,4 @@ SELECT * FROM system.events LIMIT 5 - [system.metric_log](../../operations/system-tables/metric_log.md#system_tables-metric_log) — Contains a history of metrics values from tables `system.metrics` и `system.events`. - [Monitoring](../../operations/monitoring.md) — Base concepts of ClickHouse monitoring. -[Original article](https://clickhouse.tech/docs/en/operations/system_tables/events) +[Original article](https://clickhouse.tech/docs/en/operations/system-tables/events) diff --git a/docs/en/operations/system-tables/functions.md b/docs/en/operations/system-tables/functions.md index fbcd4b7b723..888e768fc93 100644 --- a/docs/en/operations/system-tables/functions.md +++ b/docs/en/operations/system-tables/functions.md @@ -7,8 +7,6 @@ Columns: - `name`(`String`) – The name of the function. - `is_aggregate`(`UInt8`) — Whether the function is aggregate. -[Original article](https://clickhouse.tech/docs/en/operations/system_tables/functions) - **Example** ```sql @@ -30,4 +28,6 @@ Columns: └──────────────────────────┴──────────────┴──────────────────┴──────────┘ 10 rows in set. Elapsed: 0.002 sec. -``` \ No newline at end of file +``` + +[Original article](https://clickhouse.tech/docs/en/operations/system-tables/functions) diff --git a/docs/en/operations/system-tables/grants.md b/docs/en/operations/system-tables/grants.md index fb2a91ab30a..927fa4f3227 100644 --- a/docs/en/operations/system-tables/grants.md +++ b/docs/en/operations/system-tables/grants.md @@ -21,4 +21,4 @@ Columns: - `grant_option` ([UInt8](../../sql-reference/data-types/int-uint.md#uint-ranges)) — Permission is granted `WITH GRANT OPTION`, see [GRANT](../../sql-reference/statements/grant.md#grant-privigele-syntax). -[Original article](https://clickhouse.tech/docs/en/operations/system_tables/grants) +[Original article](https://clickhouse.tech/docs/en/operations/system-tables/grants) diff --git a/docs/en/operations/system-tables/graphite_retentions.md b/docs/en/operations/system-tables/graphite_retentions.md index 7ae5e0e36a8..0d56242dc95 100644 --- a/docs/en/operations/system-tables/graphite_retentions.md +++ b/docs/en/operations/system-tables/graphite_retentions.md @@ -14,4 +14,4 @@ Columns: - `Tables.database` (Array(String)) - Array of names of database tables that use the `config_name` parameter. - `Tables.table` (Array(String)) - Array of table names that use the `config_name` parameter. 
-[Original article](https://clickhouse.tech/docs/en/operations/system_tables/graphite_retentions) +[Original article](https://clickhouse.tech/docs/en/operations/system-tables/graphite_retentions) diff --git a/docs/en/operations/system-tables/licenses.md b/docs/en/operations/system-tables/licenses.md index c95e4e8b9b4..a9cada507c6 100644 --- a/docs/en/operations/system-tables/licenses.md +++ b/docs/en/operations/system-tables/licenses.md @@ -36,4 +36,4 @@ SELECT library_name, license_type, license_path FROM system.licenses LIMIT 15 ``` -[Original article](https://clickhouse.tech/docs/en/operations/system_tables/licenses) +[Original article](https://clickhouse.tech/docs/en/operations/system-tables/licenses) diff --git a/docs/en/operations/system-tables/merge_tree_settings.md b/docs/en/operations/system-tables/merge_tree_settings.md index c2c5703f869..309c1cbc9d1 100644 --- a/docs/en/operations/system-tables/merge_tree_settings.md +++ b/docs/en/operations/system-tables/merge_tree_settings.md @@ -51,4 +51,4 @@ type: SettingUInt64 4 rows in set. Elapsed: 0.001 sec. ``` -[Original article](https://clickhouse.tech/docs/en/operations/system_tables/merge_tree_settings) +[Original article](https://clickhouse.tech/docs/en/operations/system-tables/merge_tree_settings) diff --git a/docs/en/operations/system-tables/merges.md b/docs/en/operations/system-tables/merges.md index 3e712e2962c..c7bdaee42e1 100644 --- a/docs/en/operations/system-tables/merges.md +++ b/docs/en/operations/system-tables/merges.md @@ -22,4 +22,4 @@ Columns: - `merge_type` — The type of current merge. Empty if it's an mutation. - `merge_algorithm` — The algorithm used in current merge. Empty if it's an mutation. -[Original article](https://clickhouse.tech/docs/en/operations/system_tables/merges) +[Original article](https://clickhouse.tech/docs/en/operations/system-tables/merges) diff --git a/docs/en/operations/system-tables/metric_log.md b/docs/en/operations/system-tables/metric_log.md index 1f72c9a7358..ab149703309 100644 --- a/docs/en/operations/system-tables/metric_log.md +++ b/docs/en/operations/system-tables/metric_log.md @@ -48,4 +48,4 @@ CurrentMetric_DistributedFilesToInsert: 0 - [system.metrics](../../operations/system-tables/metrics.md) — Contains instantly calculated metrics. - [Monitoring](../../operations/monitoring.md) — Base concepts of ClickHouse monitoring. -[Original article](https://clickhouse.tech/docs/en/operations/system_tables/metric_log) +[Original article](https://clickhouse.tech/docs/en/operations/system-tables/metric_log) diff --git a/docs/en/operations/system-tables/metrics.md b/docs/en/operations/system-tables/metrics.md index decae8ea7fb..4afab40764b 100644 --- a/docs/en/operations/system-tables/metrics.md +++ b/docs/en/operations/system-tables/metrics.md @@ -38,4 +38,4 @@ SELECT * FROM system.metrics LIMIT 10 - [system.metric_log](../../operations/system-tables/metric_log.md#system_tables-metric_log) — Contains a history of metrics values from tables `system.metrics` и `system.events`. - [Monitoring](../../operations/monitoring.md) — Base concepts of ClickHouse monitoring. 
-[Original article](https://clickhouse.tech/docs/en/operations/system_tables/metrics) +[Original article](https://clickhouse.tech/docs/en/operations/system-tables/metrics) diff --git a/docs/en/operations/system-tables/mutations.md b/docs/en/operations/system-tables/mutations.md index e5ea7eab457..24fa559197c 100644 --- a/docs/en/operations/system-tables/mutations.md +++ b/docs/en/operations/system-tables/mutations.md @@ -45,4 +45,4 @@ If there were problems with mutating some data parts, the following columns cont - [MergeTree](../../engines/table-engines/mergetree-family/mergetree.md) table engine - [ReplicatedMergeTree](../../engines/table-engines/mergetree-family/replication.md) family -[Original article](https://clickhouse.tech/docs/en/operations/system_tables/mutations) +[Original article](https://clickhouse.tech/docs/en/operations/system-tables/mutations) diff --git a/docs/en/operations/system-tables/numbers.md b/docs/en/operations/system-tables/numbers.md index d1737c9abbb..bf948d9dd5b 100644 --- a/docs/en/operations/system-tables/numbers.md +++ b/docs/en/operations/system-tables/numbers.md @@ -29,4 +29,4 @@ Reads from this table are not parallelized. 10 rows in set. Elapsed: 0.001 sec. ``` -[Original article](https://clickhouse.tech/docs/en/operations/system_tables/numbers) +[Original article](https://clickhouse.tech/docs/en/operations/system-tables/numbers) diff --git a/docs/en/operations/system-tables/numbers_mt.md b/docs/en/operations/system-tables/numbers_mt.md index b40dc9a2d6f..d7df1bc1e0e 100644 --- a/docs/en/operations/system-tables/numbers_mt.md +++ b/docs/en/operations/system-tables/numbers_mt.md @@ -27,4 +27,4 @@ Used for tests. 10 rows in set. Elapsed: 0.001 sec. ``` -[Original article](https://clickhouse.tech/docs/en/operations/system_tables/numbers_mt) +[Original article](https://clickhouse.tech/docs/en/operations/system-tables/numbers_mt) diff --git a/docs/en/operations/system-tables/one.md b/docs/en/operations/system-tables/one.md index 51316dfbc44..10b2a1757d0 100644 --- a/docs/en/operations/system-tables/one.md +++ b/docs/en/operations/system-tables/one.md @@ -20,4 +20,4 @@ This is similar to the `DUAL` table found in other DBMSs. 1 rows in set. Elapsed: 0.001 sec. 
``` -[Original article](https://clickhouse.tech/docs/en/operations/system_tables/one) +[Original article](https://clickhouse.tech/docs/en/operations/system-tables/one) diff --git a/docs/en/operations/system-tables/part_log.md b/docs/en/operations/system-tables/part_log.md index 3f9110349dd..b815d2366bb 100644 --- a/docs/en/operations/system-tables/part_log.md +++ b/docs/en/operations/system-tables/part_log.md @@ -66,4 +66,4 @@ error: 0 exception: ``` -[Original article](https://clickhouse.tech/docs/en/operations/system_tables/part_log) +[Original article](https://clickhouse.tech/docs/en/operations/system-tables/part_log) diff --git a/docs/en/operations/system-tables/parts.md b/docs/en/operations/system-tables/parts.md index 5a4715a4513..b9b5aa09b64 100644 --- a/docs/en/operations/system-tables/parts.md +++ b/docs/en/operations/system-tables/parts.md @@ -155,4 +155,4 @@ move_ttl_info.max: [] - [MergeTree family](../../engines/table-engines/mergetree-family/mergetree.md) - [TTL for Columns and Tables](../../engines/table-engines/mergetree-family/mergetree.md#table_engine-mergetree-ttl) -[Original article](https://clickhouse.tech/docs/en/operations/system_tables/parts) +[Original article](https://clickhouse.tech/docs/en/operations/system-tables/parts) diff --git a/docs/en/operations/system-tables/processes.md b/docs/en/operations/system-tables/processes.md index 9ef3c648006..9401be79e85 100644 --- a/docs/en/operations/system-tables/processes.md +++ b/docs/en/operations/system-tables/processes.md @@ -14,7 +14,6 @@ Columns: - `query` (String) – The query text. For `INSERT`, it does not include the data to insert. - `query_id` (String) – Query ID, if defined. - ```sql :) SELECT * FROM system.processes LIMIT 10 FORMAT Vertical; ``` @@ -61,4 +60,4 @@ Settings.Values: ['0','in_order','1','10000000000'] 1 rows in set. Elapsed: 0.002 sec. ``` -[Original article](https://clickhouse.tech/docs/en/operations/system_tables/processes) +[Original article](https://clickhouse.tech/docs/en/operations/system-tables/processes) diff --git a/docs/en/operations/system-tables/query_log.md b/docs/en/operations/system-tables/query_log.md index 85f0679fe37..a7d5d9b61f6 100644 --- a/docs/en/operations/system-tables/query_log.md +++ b/docs/en/operations/system-tables/query_log.md @@ -177,4 +177,4 @@ used_table_functions: ['numbers'] - [system.query_thread_log](../../operations/system-tables/query_thread_log.md#system_tables-query_thread_log) — This table contains information about each query execution thread. -[Original article](https://clickhouse.tech/docs/en/operations/system_tables/query_log) +[Original article](https://clickhouse.tech/docs/en/operations/system-tables/query_log) diff --git a/docs/en/operations/system-tables/query_thread_log.md b/docs/en/operations/system-tables/query_thread_log.md index 296a33259b3..7b098e21b80 100644 --- a/docs/en/operations/system-tables/query_thread_log.md +++ b/docs/en/operations/system-tables/query_thread_log.md @@ -115,4 +115,4 @@ ProfileEvents.Values: [1,1,11,11,591,148,3,71,29,6533808,1,11,72,18,47, - [system.query_log](../../operations/system-tables/query_log.md#system_tables-query_log) — Description of the `query_log` system table which contains common information about queries execution. 
-[Original article](https://clickhouse.tech/docs/en/operations/system_tables/query_thread_log) +[Original article](https://clickhouse.tech/docs/en/operations/system-tables/query_thread_log) diff --git a/docs/en/operations/system-tables/quota_limits.md b/docs/en/operations/system-tables/quota_limits.md index 11616990206..0088b086e8c 100644 --- a/docs/en/operations/system-tables/quota_limits.md +++ b/docs/en/operations/system-tables/quota_limits.md @@ -17,3 +17,5 @@ Columns: - `max_read_rows` ([Nullable](../../sql-reference/data-types/nullable.md)([UInt64](../../sql-reference/data-types/int-uint.md))) — Maximum number of rows read from all tables and table functions participated in queries. - `max_read_bytes` ([Nullable](../../sql-reference/data-types/nullable.md)([UInt64](../../sql-reference/data-types/int-uint.md))) — Maximum number of bytes read from all tables and table functions participated in queries. - `max_execution_time` ([Nullable](../../sql-reference/data-types/nullable.md)([Float64](../../sql-reference/data-types/float.md))) — Maximum of the query execution time, in seconds. + +[Original article](https://clickhouse.tech/docs/en/operations/system-tables/quota_limits) diff --git a/docs/en/operations/system-tables/quota_usage.md b/docs/en/operations/system-tables/quota_usage.md index 89fdfe70069..2f35b6b7dae 100644 --- a/docs/en/operations/system-tables/quota_usage.md +++ b/docs/en/operations/system-tables/quota_usage.md @@ -28,3 +28,5 @@ Columns: ## See Also {#see-also} - [SHOW QUOTA](../../sql-reference/statements/show.md#show-quota-statement) + +[Original article](https://clickhouse.tech/docs/en/operations/system-tables/quota_usage) diff --git a/docs/en/operations/system-tables/quotas.md b/docs/en/operations/system-tables/quotas.md index 3e797c9bdc6..6acc349a54f 100644 --- a/docs/en/operations/system-tables/quotas.md +++ b/docs/en/operations/system-tables/quotas.md @@ -24,5 +24,5 @@ Columns: - [SHOW QUOTAS](../../sql-reference/statements/show.md#show-quotas-statement) -[Original article](https://clickhouse.tech/docs/en/operations/system_tables/quotas) +[Original article](https://clickhouse.tech/docs/en/operations/system-tables/quotas) diff --git a/docs/en/operations/system-tables/quotas_usage.md b/docs/en/operations/system-tables/quotas_usage.md index 04cf91cb990..6ba88cb935a 100644 --- a/docs/en/operations/system-tables/quotas_usage.md +++ b/docs/en/operations/system-tables/quotas_usage.md @@ -30,4 +30,6 @@ Columns: ## See Also {#see-also} -- [SHOW QUOTA](../../sql-reference/statements/show.md#show-quota-statement) \ No newline at end of file +- [SHOW QUOTA](../../sql-reference/statements/show.md#show-quota-statement) + +[Original article](https://clickhouse.tech/docs/en/operations/system-tables/quotas_usage) diff --git a/docs/en/operations/system-tables/replicas.md b/docs/en/operations/system-tables/replicas.md index 63a2141e399..5a6ec54723b 100644 --- a/docs/en/operations/system-tables/replicas.md +++ b/docs/en/operations/system-tables/replicas.md @@ -120,5 +120,5 @@ WHERE If this query does not return anything, it means that everything is fine. 
-[Original article](https://clickhouse.tech/docs/en/operations/system_tables/replicas) +[Original article](https://clickhouse.tech/docs/en/operations/system-tables/replicas) diff --git a/docs/en/operations/system-tables/role-grants.md b/docs/en/operations/system-tables/role-grants.md index 5eb18b0dca7..d90bc1f77be 100644 --- a/docs/en/operations/system-tables/role-grants.md +++ b/docs/en/operations/system-tables/role-grants.md @@ -18,4 +18,4 @@ Columns: - 1 — The role has `ADMIN OPTION` privilege. - 0 — The role without `ADMIN OPTION` privilege. -[Original article](https://clickhouse.tech/docs/en/operations/system_tables/role-grants) \ No newline at end of file +[Original article](https://clickhouse.tech/docs/en/operations/system-tables/role-grants) diff --git a/docs/en/operations/system-tables/roles.md b/docs/en/operations/system-tables/roles.md index 4ab5102dfc8..e68d5ed290a 100644 --- a/docs/en/operations/system-tables/roles.md +++ b/docs/en/operations/system-tables/roles.md @@ -12,4 +12,4 @@ Columns: - [SHOW ROLES](../../sql-reference/statements/show.md#show-roles-statement) -[Original article](https://clickhouse.tech/docs/en/operations/system_tables/roles) +[Original article](https://clickhouse.tech/docs/en/operations/system-tables/roles) diff --git a/docs/en/operations/system-tables/row_policies.md b/docs/en/operations/system-tables/row_policies.md index 97474d1b3ee..767270d64ae 100644 --- a/docs/en/operations/system-tables/row_policies.md +++ b/docs/en/operations/system-tables/row_policies.md @@ -31,4 +31,4 @@ Columns: - [SHOW POLICIES](../../sql-reference/statements/show.md#show-policies-statement) -[Original article](https://clickhouse.tech/docs/en/operations/system_tables/row_policies) +[Original article](https://clickhouse.tech/docs/en/operations/system-tables/row_policies) diff --git a/docs/en/operations/system-tables/settings.md b/docs/en/operations/system-tables/settings.md index 7034fe1204f..cfd9f43655a 100644 --- a/docs/en/operations/system-tables/settings.md +++ b/docs/en/operations/system-tables/settings.md @@ -50,4 +50,4 @@ SELECT * FROM system.settings WHERE changed AND name='load_balancing' - [Constraints on Settings](../../operations/settings/constraints-on-settings.md) - [SHOW SETTINGS](../../sql-reference/statements/show.md#show-settings) statement -[Original article](https://clickhouse.tech/docs/en/operations/system_tables/settings) +[Original article](https://clickhouse.tech/docs/en/operations/system-tables/settings) diff --git a/docs/en/operations/system-tables/settings_profile_elements.md b/docs/en/operations/system-tables/settings_profile_elements.md index d0f2c3c4527..3c8c728e645 100644 --- a/docs/en/operations/system-tables/settings_profile_elements.md +++ b/docs/en/operations/system-tables/settings_profile_elements.md @@ -27,4 +27,4 @@ Columns: - `inherit_profile` ([Nullable](../../sql-reference/data-types/nullable.md)([String](../../sql-reference/data-types/string.md))) — A parent profile for this setting profile. `NULL` if not set. Setting profile will inherit all the settings' values and constraints (`min`, `max`, `readonly`) from its parent profiles. 
-[Original article](https://clickhouse.tech/docs/en/operations/system_tables/settings_profile_elements) +[Original article](https://clickhouse.tech/docs/en/operations/system-tables/settings_profile_elements) diff --git a/docs/en/operations/system-tables/settings_profiles.md b/docs/en/operations/system-tables/settings_profiles.md index a06b26b9cb6..80dc5172f4e 100644 --- a/docs/en/operations/system-tables/settings_profiles.md +++ b/docs/en/operations/system-tables/settings_profiles.md @@ -21,4 +21,4 @@ Columns: - [SHOW PROFILES](../../sql-reference/statements/show.md#show-profiles-statement) -[Original article](https://clickhouse.tech/docs/en/operations/system_tables/settings_profiles) +[Original article](https://clickhouse.tech/docs/en/operations/system-tables/settings_profiles) diff --git a/docs/en/operations/system-tables/storage_policies.md b/docs/en/operations/system-tables/storage_policies.md index 5adab1cb2aa..4b07b593926 100644 --- a/docs/en/operations/system-tables/storage_policies.md +++ b/docs/en/operations/system-tables/storage_policies.md @@ -14,4 +14,4 @@ Columns: If the storage policy contains more then one volume, then information for each volume is stored in the individual row of the table. -[Original article](https://clickhouse.tech/docs/en/operations/system_tables/storage_policies) +[Original article](https://clickhouse.tech/docs/en/operations/system-tables/storage_policies) diff --git a/docs/en/operations/system-tables/table_engines.md b/docs/en/operations/system-tables/table_engines.md index 30122cb133e..45ff6f1ac19 100644 --- a/docs/en/operations/system-tables/table_engines.md +++ b/docs/en/operations/system-tables/table_engines.md @@ -35,4 +35,4 @@ WHERE name in ('Kafka', 'MergeTree', 'ReplicatedCollapsingMergeTree') - Kafka [settings](../../engines/table-engines/integrations/kafka.md#table_engine-kafka-creating-a-table) - Join [settings](../../engines/table-engines/special/join.md#join-limitations-and-settings) -[Original article](https://clickhouse.tech/docs/en/operations/system_tables/table_engines) +[Original article](https://clickhouse.tech/docs/en/operations/system-tables/table_engines) diff --git a/docs/en/operations/system-tables/tables.md b/docs/en/operations/system-tables/tables.md index 480db3087f6..4d7b20be311 100644 --- a/docs/en/operations/system-tables/tables.md +++ b/docs/en/operations/system-tables/tables.md @@ -117,4 +117,4 @@ lifetime_bytes: ᴺᵁᴸᴸ comment: ``` -[Original article](https://clickhouse.tech/docs/en/operations/system_tables/tables) +[Original article](https://clickhouse.tech/docs/en/operations/system-tables/tables) diff --git a/docs/en/operations/system-tables/text_log.md b/docs/en/operations/system-tables/text_log.md index f5f53c95653..ad95e91f0d2 100644 --- a/docs/en/operations/system-tables/text_log.md +++ b/docs/en/operations/system-tables/text_log.md @@ -50,4 +50,4 @@ source_file: /ClickHouse/src/Interpreters/DNSCacheUpdater.cpp; void source_line: 45 ``` - [Original article](https://clickhouse.tech/docs/en/operations/system_tables/text_log) \ No newline at end of file + [Original article](https://clickhouse.tech/docs/en/operations/system-tables/text_log) diff --git a/docs/en/operations/system-tables/time_zones.md b/docs/en/operations/system-tables/time_zones.md index 1b84ae7fe37..fa467124884 100644 --- a/docs/en/operations/system-tables/time_zones.md +++ b/docs/en/operations/system-tables/time_zones.md @@ -27,4 +27,4 @@ SELECT * FROM system.time_zones LIMIT 10 └────────────────────┘ ``` -[Original 
article](https://clickhouse.tech/docs/en/operations/system_tables/time_zones) +[Original article](https://clickhouse.tech/docs/en/operations/system-tables/time_zones) diff --git a/docs/en/operations/system-tables/trace_log.md b/docs/en/operations/system-tables/trace_log.md index e4c01a65d9d..5de597a0a51 100644 --- a/docs/en/operations/system-tables/trace_log.md +++ b/docs/en/operations/system-tables/trace_log.md @@ -55,4 +55,3 @@ size: 5244400 ``` [Original article](https://clickhouse.tech/docs/en/operations/system-tables/trace_log) - diff --git a/docs/en/operations/system-tables/users.md b/docs/en/operations/system-tables/users.md index 2227816aff3..11fdeb1e9ae 100644 --- a/docs/en/operations/system-tables/users.md +++ b/docs/en/operations/system-tables/users.md @@ -31,4 +31,4 @@ Columns: - [SHOW USERS](../../sql-reference/statements/show.md#show-users-statement) -[Original article](https://clickhouse.tech/docs/en/operations/system_tables/users) +[Original article](https://clickhouse.tech/docs/en/operations/system-tables/users) diff --git a/docs/en/operations/system-tables/zookeeper.md b/docs/en/operations/system-tables/zookeeper.md index 3b8db14934e..52d1c686e52 100644 --- a/docs/en/operations/system-tables/zookeeper.md +++ b/docs/en/operations/system-tables/zookeeper.md @@ -72,4 +72,4 @@ numChildren: 7 pzxid: 987021252247 path: /clickhouse/tables/01-08/visits/replicas ``` -[Original article](https://clickhouse.tech/docs/en/operations/system_tables/zookeeper) +[Original article](https://clickhouse.tech/docs/en/operations/system-tables/zookeeper) diff --git a/docs/zh/operations/system-tables/asynchronous_metric_log.md b/docs/zh/operations/system-tables/asynchronous_metric_log.md index 9fbe15b8507..ff7593768d3 100644 --- a/docs/zh/operations/system-tables/asynchronous_metric_log.md +++ b/docs/zh/operations/system-tables/asynchronous_metric_log.md @@ -3,6 +3,6 @@ machine_translated: true machine_translated_rev: 5decc73b5dc60054f19087d3690c4eb99446a6c3 --- -## 系统。asynchronous_metric_log {#system-tables-async-log} +## system.asynchronous_metric_log {#system-tables-async-log} 包含以下内容的历史值 `system.asynchronous_log` (见 [系统。asynchronous_metrics](../../operations/system-tables/asynchronous_metrics.md#system_tables-asynchronous_metrics)) diff --git a/docs/zh/operations/system-tables/asynchronous_metrics.md b/docs/zh/operations/system-tables/asynchronous_metrics.md index 805477c9f47..5a302f6da7b 100644 --- a/docs/zh/operations/system-tables/asynchronous_metrics.md +++ b/docs/zh/operations/system-tables/asynchronous_metrics.md @@ -3,14 +3,14 @@ machine_translated: true machine_translated_rev: 5decc73b5dc60054f19087d3690c4eb99446a6c3 --- -# 系统。asynchronous_metrics {#system_tables-asynchronous_metrics} +# system.asynchronous_metrics {#system_tables-asynchronous_metrics} 包含在后台定期计算的指标。 例如,在使用的RAM量。 列: -- `metric` ([字符串](../../sql-reference/data-types/string.md)) — Metric name. -- `value` ([Float64](../../sql-reference/data-types/float.md)) — Metric value. +- `metric` ([字符串](../../sql-reference/data-types/string.md)) — 指标名。 +- `value` ([Float64](../../sql-reference/data-types/float.md)) — 指标值。 **示例** @@ -34,8 +34,7 @@ SELECT * FROM system.asynchronous_metrics LIMIT 10 ``` **另请参阅** - -- [监测](../../operations/monitoring.md) — Base concepts of ClickHouse monitoring. -- [系统。指标](../../operations/system-tables/metrics.md#system_tables-metrics) — Contains instantly calculated metrics. -- [系统。活动](../../operations/system-tables/events.md#system_tables-events) — Contains a number of events that have occurred. 
-- [系统。metric_log](../../operations/system-tables/metric_log.md#system_tables-metric_log) — Contains a history of metrics values from tables `system.metrics` и `system.events`.
+- [监测](../../operations/monitoring.md) — ClickHouse监控的基本概念。
+- [系统。指标](../../operations/system-tables/metrics.md#system_tables-metrics) — 包含即时计算的指标。
+- [系统。活动](../../operations/system-tables/events.md#system_tables-events) — 包含出现的事件的次数。
+- [系统。metric\_log](../../operations/system-tables/metric_log.md#system_tables-metric_log) — 包含`system.metrics` 和 `system.events`表中的指标的历史值。
diff --git a/docs/zh/operations/system-tables/clusters.md b/docs/zh/operations/system-tables/clusters.md
index 1e5935c276e..f76288f4bd8 100644
--- a/docs/zh/operations/system-tables/clusters.md
+++ b/docs/zh/operations/system-tables/clusters.md
@@ -3,22 +3,23 @@ machine_translated: true
 machine_translated_rev: 5decc73b5dc60054f19087d3690c4eb99446a6c3
 ---
 
-# 系统。集群 {#system-clusters}
+# system.clusters {#system-clusters}
 
 包含有关配置文件中可用的集群及其中的服务器的信息。
 
 列:
 
-- `cluster` (String) — The cluster name.
-- `shard_num` (UInt32) — The shard number in the cluster, starting from 1.
-- `shard_weight` (UInt32) — The relative weight of the shard when writing data.
-- `replica_num` (UInt32) — The replica number in the shard, starting from 1.
-- `host_name` (String) — The host name, as specified in the config.
-- `host_address` (String) — The host IP address obtained from DNS.
-- `port` (UInt16) — The port to use for connecting to the server.
-- `user` (String) — The name of the user for connecting to the server.
-- `errors_count` (UInt32)-此主机无法到达副本的次数。
-- `estimated_recovery_time` (UInt32)-剩下的秒数,直到副本错误计数归零,它被认为是恢复正常。
+- `cluster` (String) — 集群名。
+- `shard_num` (UInt32) — 集群中的分片序号,从1开始。
+- `shard_weight` (UInt32) — 写数据时该分片的相对权重。
+- `replica_num` (UInt32) — 分片中的副本序号,从1开始。
+- `host_name` (String) — 配置中指定的主机名。
+- `host_address` (String) — 从DNS获取的主机IP地址。
+- `port` (UInt16) — 连接到服务器的端口。
+- `user` (String) — 连接到服务器的用户名。
+- `errors_count` (UInt32) - 此主机无法访问副本的次数。
+- `slowdowns_count` (UInt32) - 与对冲请求建立连接时导致更改副本的减速次数。
+- `estimated_recovery_time` (UInt32) - 剩下的秒数,直到副本错误计数归零并被视为恢复正常。
 
 请注意 `errors_count` 每个查询集群更新一次,但 `estimated_recovery_time` 按需重新计算。 所以有可能是非零的情况 `errors_count` 和零 `estimated_recovery_time`,下一个查询将为零 `errors_count` 并尝试使用副本,就好像它没有错误。
 
@@ -27,3 +28,5 @@ machine_translated_rev: 5decc73b5dc60054f19087d3690c4eb99446a6c3
 - [表引擎分布式](../../engines/table-engines/special/distributed.md)
 - [distributed_replica_error_cap设置](../../operations/settings/settings.md#settings-distributed_replica_error_cap)
 - [distributed_replica_error_half_life设置](../../operations/settings/settings.md#settings-distributed_replica_error_half_life)
+
+[原文](https://clickhouse.tech/docs/zh/operations/system-tables/clusters)
diff --git a/docs/zh/operations/system-tables/columns.md b/docs/zh/operations/system-tables/columns.md
index 24296dc715c..b21be98c0dc 100644
--- a/docs/zh/operations/system-tables/columns.md
+++ b/docs/zh/operations/system-tables/columns.md
@@ -25,3 +25,5 @@ machine_translated_rev: 5decc73b5dc60054f19087d3690c4eb99446a6c3
 - `is_in_sorting_key` (UInt8) — Flag that indicates whether the column is in the sorting key expression.
 - `is_in_primary_key` (UInt8) — Flag that indicates whether the column is in the primary key expression.
 - `is_in_sampling_key` (UInt8) — Flag that indicates whether the column is in the sampling key expression.
+
+[原文](https://clickhouse.tech/docs/zh/operations/system-tables/columns)
diff --git a/docs/zh/operations/system-tables/tables.md b/docs/zh/operations/system-tables/tables.md
index a690e938a3a..0c3e913b9bb 100644
--- a/docs/zh/operations/system-tables/tables.md
+++ b/docs/zh/operations/system-tables/tables.md
@@ -5,15 +5,15 @@ machine_translated_rev: 5decc73b5dc60054f19087d3690c4eb99446a6c3
 # 系统。表 {#system-tables}
 
-包含服务器知道的每个表的元数据。 分离的表不显示在 `system.tables`.
+包含服务器知道的每个表的元数据。 分离的表不显示在 `system.tables`。
 
 此表包含以下列(列类型显示在括号中):
 
-- `database` (String) — The name of the database the table is in.
+- `database` (String) — 表所在的数据库名。
 
-- `name` (String) — Table name.
+- `name` (String) — 表名。
 
-- `engine` (String) — Table engine name (without parameters).
+- `engine` (String) — 表引擎名 (不包含参数)。
 
 - `is_temporary` (UInt8)-指示表是否是临时的标志。
 
@@ -23,11 +23,11 @@ machine_translated_rev: 5decc73b5dc60054f19087d3690c4eb99446a6c3
 - `metadata_modification_time` (DateTime)-表元数据的最新修改时间。
 
-- `dependencies_database` (数组(字符串))-数据库依赖关系.
+- `dependencies_database` (数组(字符串))-数据库依赖关系。
 
 - `dependencies_table` (数组(字符串))-表依赖关系 ([MaterializedView](../../engines/table-engines/special/materializedview.md) 基于当前表的表)。
 
-- `create_table_query` (String)-用于创建表的查询。
+- `create_table_query` (String)-用于创建表的SQL语句。
 
 - `engine_full` (String)-表引擎的参数。
 
@@ -44,11 +44,15 @@ machine_translated_rev: 5decc73b5dc60054f19087d3690c4eb99446a6c3
 - [MergeTree](../../engines/table-engines/mergetree-family/mergetree.md#table_engine-mergetree-multiple-volumes)
 - [分布](../../engines/table-engines/special/distributed.md#distributed)
 
-- `total_rows` (Nullable(UInt64))-总行数,如果可以快速确定表中的确切行数,否则 `Null` (包括内衣 `Buffer` 表)。
+- `total_rows` (Nullable(UInt64))-总行数,如果可以快速确定表中的确切行数,否则行数为`Null`(包括底层 `Buffer` 表)。
 
-- `total_bytes` (Nullable(UInt64))-总字节数,如果可以快速确定存储表的确切字节数,否则 `Null` (**不** 包括任何底层存储)。
+- `total_bytes` (Nullable(UInt64))-总字节数,如果可以快速确定存储表的确切字节数,否则字节数为`Null` (即**不** 包括任何底层存储)。
 
-    - If the table stores data on disk, returns used space on disk (i.e. compressed).
-    - 如果表在内存中存储数据,返回在内存中使用的近似字节数.
+    - 如果表将数据存在磁盘上,返回实际使用的磁盘空间(压缩后)。
+    - 如果表在内存中存储数据,返回在内存中使用的近似字节数。
 
-该 `system.tables` 表中使用 `SHOW TABLES` 查询实现。
+- `lifetime_rows` (Nullable(UInt64))-服务启动后插入的总行数(只针对`Buffer`表)。
+
+`system.tables` 表被用于 `SHOW TABLES` 的查询实现中。
+
+[原文](https://clickhouse.tech/docs/zh/operations/system-tables/tables)
diff --git a/docs/zh/operations/system-tables/zookeeper.md b/docs/zh/operations/system-tables/zookeeper.md
index f7e816ccee6..ca767fba7aa 100644
--- a/docs/zh/operations/system-tables/zookeeper.md
+++ b/docs/zh/operations/system-tables/zookeeper.md
@@ -3,13 +3,13 @@ machine_translated: true
 machine_translated_rev: 5decc73b5dc60054f19087d3690c4eb99446a6c3
 ---
 
-# 系统。动物园管理员 {#system-zookeeper}
+# system.zookeeper {#system-zookeeper}
 
-如果未配置ZooKeeper,则表不存在。 允许从配置中定义的ZooKeeper集群读取数据。
-查询必须具有 ‘path’ WHERE子句中的相等条件或者在某个集合中的条件。 这是ZooKeeper中您想要获取数据的孩子的路径。
+如果未配置ZooKeeper,则该表不存在。 允许从配置中定义的ZooKeeper集群读取数据。
+查询必须具有 ‘path’ WHERE子句中的相等条件。 这是ZooKeeper中您想要获取数据的子路径。
 
-查询 `SELECT * FROM system.zookeeper WHERE path = '/clickhouse'` 输出对所有孩子的数据 `/clickhouse` 节点。
-要输出所有根节点的数据,write path= ‘/’.
+查询 `SELECT * FROM system.zookeeper WHERE path = '/clickhouse'` 输出`/clickhouse`节点的所有子路径的数据。
+要输出所有根节点的数据,使用path= ‘/’.
 如果在指定的路径 ‘path’ 不存在,将引发异常。
 查询`SELECT * FROM system.zookeeper WHERE path IN ('/', '/clickhouse')` 输出`/` 和 `/clickhouse`节点上所有子节点的数据。
 
@@ -18,20 +18,20 @@ machine_translated_rev: 5decc73b5dc60054f19087d3690c4eb99446a6c3
 列:
 
-- `name` (String) — The name of the node.
-- `path` (String) — The path to the node.
-- `value` (String) — Node value.
-- `dataLength` (Int32) — Size of the value.
-- `numChildren` (Int32) — Number of descendants.
-- `czxid` (Int64) — ID of the transaction that created the node.
-- `mzxid` (Int64) — ID of the transaction that last changed the node.
-- `pzxid` (Int64) — ID of the transaction that last deleted or added descendants.
-- `ctime` (DateTime) — Time of node creation.
-- `mtime` (DateTime) — Time of the last modification of the node.
-- `version` (Int32) — Node version: the number of times the node was changed.
-- `cversion` (Int32) — Number of added or removed descendants.
-- `aversion` (Int32) — Number of changes to the ACL.
-- `ephemeralOwner` (Int64) — For ephemeral nodes, the ID of the session that owns this node.
+- `name` (String) — 节点的名字。
+- `path` (String) — 节点的路径。
+- `value` (String) — 节点的值。
+- `dataLength` (Int32) — 节点的值长度。
+- `numChildren` (Int32) — 子节点的个数。
+- `czxid` (Int64) — 创建该节点的事务ID。
+- `mzxid` (Int64) — 最后修改该节点的事务ID。
+- `pzxid` (Int64) — 最后删除或者增加子节点的事务ID。
+- `ctime` (DateTime) — 节点的创建时间。
+- `mtime` (DateTime) — 节点的最后修改时间。
+- `version` (Int32) — 节点版本:节点被修改的次数。
+- `cversion` (Int32) — 增加或删除子节点的个数。
+- `aversion` (Int32) — ACL的修改次数。
+- `ephemeralOwner` (Int64) — 针对临时节点,拥有该节点的会话ID。
 
 示例:
 
@@ -77,3 +77,4 @@ numChildren: 7
 pzxid: 987021252247
 path: /clickhouse/tables/01-08/visits/replicas
 ```
+[原文](https://clickhouse.tech/docs/zh/operations/system-tables/zookeeper)

From 521ec3aa0c87eaae673cecb14479977ef795ce38 Mon Sep 17 00:00:00 2001
From: Mike Kot
Date: Thu, 24 Jun 2021 16:52:08 +0300
Subject: [PATCH 366/931] Changed CH css theme for code highlighting

---
 docs/_includes/cmake_in_clickhouse_header.md | 10 +-
 docs/tools/README.md | 7 +
 docs/tools/build.py | 1 +
 website/css/highlight.css | 158 ++++++++-----------
 4 files changed, 77 insertions(+), 99 deletions(-)

diff --git a/docs/_includes/cmake_in_clickhouse_header.md b/docs/_includes/cmake_in_clickhouse_header.md
index 7dfda35e34a..db9138fbbb7 100644
--- a/docs/_includes/cmake_in_clickhouse_header.md
+++ b/docs/_includes/cmake_in_clickhouse_header.md
@@ -2,18 +2,16 @@
 ## TL; DR How to make ClickHouse compile and link faster?
 
-Developer only! This command will likely fulfill most of your needs. Run before calling `ninja`.
+Minimal ClickHouse build example:
 
-```cmake
+```bash
 cmake .. \
-    -DCMAKE_C_COMPILER=/bin/clang-10 \
-    -DCMAKE_CXX_COMPILER=/bin/clang++-10 \
+    -DCMAKE_C_COMPILER=$(which clang-11) \
+    -DCMAKE_CXX_COMPILER=$(which clang++-11) \
     -DCMAKE_BUILD_TYPE=Debug \
     -DENABLE_CLICKHOUSE_ALL=OFF \
     -DENABLE_CLICKHOUSE_SERVER=ON \
    -DENABLE_CLICKHOUSE_CLIENT=ON \
-    -DUSE_STATIC_LIBRARIES=OFF \
-    -DSPLIT_SHARED_LIBRARIES=ON \
     -DENABLE_LIBRARIES=OFF \
     -DUSE_UNWIND=ON \
     -DENABLE_UTILS=OFF \
diff --git a/docs/tools/README.md b/docs/tools/README.md
index 4340561fa57..61a2e9a04f2 100644
--- a/docs/tools/README.md
+++ b/docs/tools/README.md
@@ -47,6 +47,13 @@ When all prerequisites are installed, running `build.py` without args (there are
 The easiest way to see the result is to use `--livereload=8888` argument of build.py. Alternatively, you can manually launch a HTTP server to serve the docs, for example by running `cd ClickHouse/docs/build && python3 -m http.server 8888`. Then go to http://localhost:8888 in browser. Feel free to use any other port instead of 8888.
 
+## How to change code highlighting? {#how-to-change-code-hl}
+
+ClickHouse does not use the mkdocs `highlightjs` feature. It uses modified Pygments styles instead.
+If you want to change code highlighting, edit the `website/css/highlight.css` file.
+Currently, an [eighties](https://github.com/idleberg/base16-pygments/blob/master/css/base16-eighties.dark.css) theme +is used. + ## How to subscribe on documentation changes? {#how-to-subscribe-on-documentation-changes} At the moment there’s no easy way to do just that, but you can consider: diff --git a/docs/tools/build.py b/docs/tools/build.py index 61112d5a4f5..39e91f59cc4 100755 --- a/docs/tools/build.py +++ b/docs/tools/build.py @@ -87,6 +87,7 @@ def build_for_lang(lang, args): website_url = 'https://clickhouse.tech' site_name = site_names.get(lang, site_names['en']) % '' site_name = site_name.replace(' ', ' ') + raw_config = dict( site_name=site_name, site_url=f'{website_url}/docs/{lang}/', diff --git a/website/css/highlight.css b/website/css/highlight.css index 7cc8a4865dd..8cb44bf0d18 100644 --- a/website/css/highlight.css +++ b/website/css/highlight.css @@ -1,99 +1,71 @@ /* - - Name: Base16 Paraiso Light - Author: Jan T. Sott - + Name: Base16 Eighties Dark + Author: Chris Kempson (http://chriskempson.com) Pygments template by Jan T. Sott (https://github.com/idleberg) Created with Base16 Builder by Chris Kempson (https://github.com/chriskempson/base16-builder) - - Edited for ClickHouse to improve legibility. */ -.syntax .hll { background-color: #b9b6b0 } -.syntax { background: #f8f9fa; color: #2f1e2e } -.syntax .c { color: #8d8687 } /* Comment */ -.syntax .err {} /* Error */ -.syntax .k { color: #000000; font-weight: bold } /* Keyword */ -.syntax .l { color: #0088ff } /* Literal */ -.syntax .n { color: #2f1e2e } /* Name */ -.syntax .o { color: #880000 } /* Operator */ -.syntax .p { color: #2f1e2e } /* Punctuation */ -.syntax .cm { color: #8d8687 } /* Comment.Multiline */ -.syntax .cp { color: #8d8687 } /* Comment.Preproc */ -.syntax .c1 { color: #8d8687 } /* Comment.Single */ -.syntax .cs { color: #8d8687 } /* Comment.Special */ -.syntax .gd { color: #ef6155 } /* Generic.Deleted */ +.syntax .hll { background-color: #515151 } +.syntax { background: #2d2d2d; color: #f2f0ec } +.syntax .c { color: #747369 } /* Comment */ +.syntax .err { color: #f2777a } /* Error */ +.syntax .k { color: #cc99cc } /* Keyword */ +.syntax .l { color: #f99157 } /* Literal */ +.syntax .n { color: #f2f0ec } /* Name */ +.syntax .o { color: #66cccc } /* Operator */ +.syntax .p { color: #f2f0ec } /* Punctuation */ +.syntax .cm { color: #747369 } /* Comment.Multiline */ +.syntax .cp { color: #747369 } /* Comment.Preproc */ +.syntax .c1 { color: #747369 } /* Comment.Single */ +.syntax .cs { color: #747369 } /* Comment.Special */ +.syntax .gd { color: #f2777a } /* Generic.Deleted */ .syntax .ge { font-style: italic } /* Generic.Emph */ -.syntax .gh { color: #2f1e2e; font-weight: bold } /* Generic.Heading */ -.syntax .gi { color: #48b685 } /* Generic.Inserted */ -.syntax .gp { color: #8d8687; font-weight: bold } /* Generic.Prompt */ +.syntax .gh { color: #f2f0ec; font-weight: bold } /* Generic.Heading */ +.syntax .gi { color: #99cc99 } /* Generic.Inserted */ +.syntax .gp { color: #747369; font-weight: bold } /* Generic.Prompt */ .syntax .gs { font-weight: bold } /* Generic.Strong */ -.syntax .gu { color: #5bc4bf; font-weight: bold } /* Generic.Subheading */ -.syntax .kc { color: #815ba4 } /* Keyword.Constant */ -.syntax .kd { color: #815ba4 } /* Keyword.Declaration */ -.syntax .kn { color: #5bc4bf } /* Keyword.Namespace */ -.syntax .kp { color: #815ba4 } /* Keyword.Pseudo */ -.syntax .kr { color: #815ba4 } /* Keyword.Reserved */ -.syntax .kt { color: #fec418 } /* Keyword.Type */ -.syntax .ld { color: #48b685 } /* 
Literal.Date */ -.syntax .m { color: #0088ff } /* Literal.Number */ -.syntax .s { color: #48b685 } /* Literal.String */ -.syntax .na { color: #06b6ef } /* Name.Attribute */ -.syntax .nb { color: #2f1e2e } /* Name.Builtin */ -.syntax .nc { color: #fec418 } /* Name.Class */ -.syntax .no { color: #ef6155 } /* Name.Constant */ -.syntax .nd { color: #5bc4bf } /* Name.Decorator */ -.syntax .ni { color: #2f1e2e } /* Name.Entity */ -.syntax .ne { color: #ef6155 } /* Name.Exception */ -.syntax .nf { color: #06b6ef } /* Name.Function */ -.syntax .nl { color: #2f1e2e } /* Name.Label */ -.syntax .nn { color: #fec418 } /* Name.Namespace */ -.syntax .nx { color: #06b6ef } /* Name.Other */ -.syntax .py { color: #2f1e2e } /* Name.Property */ -.syntax .nt { color: #5bc4bf } /* Name.Tag */ -.syntax .nv { color: #ef6155 } /* Name.Variable */ -.syntax .ow { color: #5bc4bf } /* Operator.Word */ -.syntax .w { color: #2f1e2e } /* Text.Whitespace */ -.syntax .mf { color: #0088ff } /* Literal.Number.Float */ -.syntax .mh { color: #0088ff } /* Literal.Number.Hex */ -.syntax .mi { color: #0088ff } /* Literal.Number.Integer */ -.syntax .mo { color: #0088ff } /* Literal.Number.Oct */ -.syntax .sb { color: #48b685 } /* Literal.String.Backtick */ -.syntax .sc { color: #2f1e2e } /* Literal.String.Char */ -.syntax .sd { color: #8d8687 } /* Literal.String.Doc */ -.syntax .s2 { color: #48b685 } /* Literal.String.Double */ -.syntax .se { color: #0088ff } /* Literal.String.Escape */ -.syntax .sh { color: #48b685 } /* Literal.String.Heredoc */ -.syntax .si { color: #0088ff } /* Literal.String.Interpol */ -.syntax .sx { color: #48b685 } /* Literal.String.Other */ -.syntax .sr { color: #48b685 } /* Literal.String.Regex */ -.syntax .s1 { color: #008800 } /* Literal.String.Single */ -.syntax .ss { color: #48b685 } /* Literal.String.Symbol */ -.syntax .bp { color: #2f1e2e } /* Name.Builtin.Pseudo */ -.syntax .vc { color: #ef6155 } /* Name.Variable.Class */ -.syntax .vg { color: #ef6155 } /* Name.Variable.Global */ -.syntax .vi { color: #ef6155 } /* Name.Variable.Instance */ -.syntax .il { color: #0088ff } /* Literal.Number.Integer.Long */ - -@media (prefers-color-scheme: dark) { -.syntax .k { color: #c78cff } /* Keyword */ -.syntax .gi { color: #64ffbb } /* Generic.Inserted */ -.syntax .ld { color: #64ffbb } /* Literal.Date */ -.syntax .s { color: #64ffbb } /* Literal.String */ -.syntax .sb { color: #64ffbb } /* Literal.String.Backtick */ -.syntax .s2 { color: #64ffbb } /* Literal.String.Double */ -.syntax .sh { color: #64ffbb } /* Literal.String.Heredoc */ -.syntax .sx { color: #64ffbb } /* Literal.String.Other */ -.syntax .sr { color: #64ffbb } /* Literal.String.Regex */ -.syntax .s1 { color: #64ffbb } /* Literal.String.Single */ -.syntax .ss { color: #64ffbb } /* Literal.String.Symbol */ -.syntax .c { color: #64ffbb } /* Comment */ -.syntax .n { color: #f8f9fa } /* Name */ -.syntax .p { color: #f8f9fa } /* Punctuation */ -.syntax .gh { color: #f8f9fa; font-weight: bold } /* Generic.Heading */ -.syntax .nb { color: #f8f9fa } /* Name.Builtin */ -.syntax .ni { color: #f8f9fa } /* Name.Entity */ -.syntax .nl { color: #f8f9fa } /* Name.Label */ -.syntax .py { color: #f8f9fa } /* Name.Property */ -.syntax .w { color: #f8f9fa } /* Text.Whitespace */ -.syntax .sc { color: #f8f9fa } /* Literal.String.Char */ -} +.syntax .gu { color: #66cccc; font-weight: bold } /* Generic.Subheading */ +.syntax .kc { color: #cc99cc } /* Keyword.Constant */ +.syntax .kd { color: #cc99cc } /* Keyword.Declaration */ +.syntax .kn { color: #66cccc } /* 
Keyword.Namespace */ +.syntax .kp { color: #cc99cc } /* Keyword.Pseudo */ +.syntax .kr { color: #cc99cc } /* Keyword.Reserved */ +.syntax .kt { color: #ffcc66 } /* Keyword.Type */ +.syntax .ld { color: #99cc99 } /* Literal.Date */ +.syntax .m { color: #f99157 } /* Literal.Number */ +.syntax .s { color: #99cc99 } /* Literal.String */ +.syntax .na { color: #6699cc } /* Name.Attribute */ +.syntax .nb { color: #f2f0ec } /* Name.Builtin */ +.syntax .nc { color: #ffcc66 } /* Name.Class */ +.syntax .no { color: #f2777a } /* Name.Constant */ +.syntax .nd { color: #66cccc } /* Name.Decorator */ +.syntax .ni { color: #f2f0ec } /* Name.Entity */ +.syntax .ne { color: #f2777a } /* Name.Exception */ +.syntax .nf { color: #6699cc } /* Name.Function */ +.syntax .nl { color: #f2f0ec } /* Name.Label */ +.syntax .nn { color: #ffcc66 } /* Name.Namespace */ +.syntax .nx { color: #6699cc } /* Name.Other */ +.syntax .py { color: #f2f0ec } /* Name.Property */ +.syntax .nt { color: #66cccc } /* Name.Tag */ +.syntax .nv { color: #f2777a } /* Name.Variable */ +.syntax .ow { color: #66cccc } /* Operator.Word */ +.syntax .w { color: #f2f0ec } /* Text.Whitespace */ +.syntax .mf { color: #f99157 } /* Literal.Number.Float */ +.syntax .mh { color: #f99157 } /* Literal.Number.Hex */ +.syntax .mi { color: #f99157 } /* Literal.Number.Integer */ +.syntax .mo { color: #f99157 } /* Literal.Number.Oct */ +.syntax .sb { color: #99cc99 } /* Literal.String.Backtick */ +.syntax .sc { color: #f2f0ec } /* Literal.String.Char */ +.syntax .sd { color: #747369 } /* Literal.String.Doc */ +.syntax .s2 { color: #99cc99 } /* Literal.String.Double */ +.syntax .se { color: #f99157 } /* Literal.String.Escape */ +.syntax .sh { color: #99cc99 } /* Literal.String.Heredoc */ +.syntax .si { color: #f99157 } /* Literal.String.Interpol */ +.syntax .sx { color: #99cc99 } /* Literal.String.Other */ +.syntax .sr { color: #99cc99 } /* Literal.String.Regex */ +.syntax .s1 { color: #99cc99 } /* Literal.String.Single */ +.syntax .ss { color: #99cc99 } /* Literal.String.Symbol */ +.syntax .bp { color: #f2f0ec } /* Name.Builtin.Pseudo */ +.syntax .vc { color: #f2777a } /* Name.Variable.Class */ +.syntax .vg { color: #f2777a } /* Name.Variable.Global */ +.syntax .vi { color: #f2777a } /* Name.Variable.Instance */ +.syntax .il { color: #f99157 } /* Literal.Number.Integer.Long */ From 76156af5cc30b8706e2b9527811706cce99a452d Mon Sep 17 00:00:00 2001 From: Alexander Tokmakov Date: Thu, 24 Jun 2021 17:07:43 +0300 Subject: [PATCH 367/931] cancel merges on drop partition --- src/Storages/MergeTree/MergeList.cpp | 4 ++-- src/Storages/MergeTree/MergeList.h | 16 ++++++++++++++-- .../MergeTree/MergeTreeDataMergerMutator.cpp | 8 ++++++-- src/Storages/StorageReplicatedMergeTree.cpp | 8 ++++++++ 4 files changed, 30 insertions(+), 6 deletions(-) diff --git a/src/Storages/MergeTree/MergeList.cpp b/src/Storages/MergeTree/MergeList.cpp index c6f9459d0db..24beb0cc06f 100644 --- a/src/Storages/MergeTree/MergeList.cpp +++ b/src/Storages/MergeTree/MergeList.cpp @@ -13,7 +13,7 @@ MergeListElement::MergeListElement(const StorageID & table_id_, const FutureMerg , partition_id{future_part.part_info.partition_id} , result_part_name{future_part.name} , result_part_path{future_part.path} - , result_data_version{future_part.part_info.getDataVersion()} + , result_part_info{future_part.part_info} , num_parts{future_part.parts.size()} , thread_id{getThreadId()} , merge_type{future_part.merge_type} @@ -32,7 +32,7 @@ MergeListElement::MergeListElement(const StorageID & table_id_, const 
FutureMerg if (!future_part.parts.empty()) { source_data_version = future_part.parts[0]->info.getDataVersion(); - is_mutation = (result_data_version != source_data_version); + is_mutation = (result_part_info.getDataVersion() != source_data_version); } /// Each merge is executed into separate background processing pool thread diff --git a/src/Storages/MergeTree/MergeList.h b/src/Storages/MergeTree/MergeList.h index 9680ce6ac30..6f4aedcc6f8 100644 --- a/src/Storages/MergeTree/MergeList.h +++ b/src/Storages/MergeTree/MergeList.h @@ -7,6 +7,7 @@ #include #include #include +#include #include #include #include @@ -60,7 +61,7 @@ struct MergeListElement : boost::noncopyable const std::string result_part_name; const std::string result_part_path; - Int64 result_data_version{}; + MergeTreePartInfo result_part_info; bool is_mutation{}; UInt64 num_parts{}; @@ -130,7 +131,18 @@ public: if ((partition_id.empty() || merge_element.partition_id == partition_id) && merge_element.table_id == table_id && merge_element.source_data_version < mutation_version - && merge_element.result_data_version >= mutation_version) + && merge_element.result_part_info.getDataVersion() >= mutation_version) + merge_element.is_cancelled = true; + } + } + + void cancelInPartition(const StorageID & table_id, const String & partition_id, Int64 delimiting_block_number) + { + for (auto & merge_element : entries) + { + if (merge_element.table_id == table_id + && merge_element.partition_id == partition_id + && merge_element.result_part_info.min_block < delimiting_block_number) merge_element.is_cancelled = true; } } diff --git a/src/Storages/MergeTree/MergeTreeDataMergerMutator.cpp b/src/Storages/MergeTree/MergeTreeDataMergerMutator.cpp index 846ad7b026d..b4f3d433f66 100644 --- a/src/Storages/MergeTree/MergeTreeDataMergerMutator.cpp +++ b/src/Storages/MergeTree/MergeTreeDataMergerMutator.cpp @@ -951,8 +951,12 @@ MergeTreeData::MutableDataPartPtr MergeTreeDataMergerMutator::mergePartsToTempor size_t rows_written = 0; const size_t initial_reservation = space_reservation ? 
space_reservation->getSize() : 0; - auto is_cancelled = [&]() { return merges_blocker.isCancelled() - || (need_remove_expired_values && ttl_merges_blocker.isCancelled()); }; + auto is_cancelled = [&]() + { + return merges_blocker.isCancelled() + || (need_remove_expired_values && ttl_merges_blocker.isCancelled()) + || merge_entry->is_cancelled.load(std::memory_order_relaxed); + }; Block block; while (!is_cancelled() && (block = merged_stream->read())) diff --git a/src/Storages/StorageReplicatedMergeTree.cpp b/src/Storages/StorageReplicatedMergeTree.cpp index 47f6bbd0ccc..75ff8a93980 100644 --- a/src/Storages/StorageReplicatedMergeTree.cpp +++ b/src/Storages/StorageReplicatedMergeTree.cpp @@ -2194,6 +2194,7 @@ bool StorageReplicatedMergeTree::executeFetchShared( void StorageReplicatedMergeTree::executeDropRange(const LogEntry & entry) { auto drop_range_info = MergeTreePartInfo::fromPartName(entry.new_part_name, format_version); + getContext()->getMergeList().cancelInPartition(getStorageID(), drop_range_info.partition_id, drop_range_info.max_block); queue.removePartProducingOpsInRange(getZooKeeper(), drop_range_info, entry); if (entry.detach) @@ -2253,9 +2254,14 @@ bool StorageReplicatedMergeTree::executeReplaceRange(const LogEntry & entry) bool replace = !LogEntry::ReplaceRangeEntry::isMovePartitionOrAttachFrom(drop_range); if (replace) + { + getContext()->getMergeList().cancelInPartition(getStorageID(), drop_range.partition_id, drop_range.max_block); queue.removePartProducingOpsInRange(getZooKeeper(), drop_range, entry); + } else + { drop_range = {}; + } struct PartDescription { @@ -7094,6 +7100,8 @@ bool StorageReplicatedMergeTree::dropAllPartsInPartition( String log_znode_path = dynamic_cast(*responses.front()).path_created; entry.znode_name = log_znode_path.substr(log_znode_path.find_last_of('/') + 1); + getContext()->getMergeList().cancelInPartition(getStorageID(), partition_id, drop_range_info.max_block); + return true; } From d6fc2dcd992404866cb2826d434996a2f6a43a01 Mon Sep 17 00:00:00 2001 From: alesapin Date: Thu, 24 Jun 2021 17:13:16 +0300 Subject: [PATCH 368/931] Ban test --- .../01921_concurrent_ttl_and_normal_merges_zookeeper_long.sh | 5 ++++- tests/queries/skip_list.json | 3 ++- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/tests/queries/0_stateless/01921_concurrent_ttl_and_normal_merges_zookeeper_long.sh b/tests/queries/0_stateless/01921_concurrent_ttl_and_normal_merges_zookeeper_long.sh index 7c7d58e1012..13086879e0d 100755 --- a/tests/queries/0_stateless/01921_concurrent_ttl_and_normal_merges_zookeeper_long.sh +++ b/tests/queries/0_stateless/01921_concurrent_ttl_and_normal_merges_zookeeper_long.sh @@ -59,8 +59,11 @@ timeout $TIMEOUT bash -c optimize_thread 2> /dev/null & wait +for i in $(seq 1 $NUM_REPLICAS); do + $CLICKHOUSE_CLIENT --query "SYSTEM SYNC REPLICA ttl_table$i" +done -$CLICKHOUSE_CLIENT --query "SELECT * FROM system.replication_queue where table like 'ttl_table%' and database = '${CLICKHOUSE_DATABASE}' and type='MERGE_PARTS' and last_exception like '%but should be merged into%' FORMAT Vertical" +$CLICKHOUSE_CLIENT --query "SELECT * FROM system.replication_queue where table like 'ttl_table%' and database = '${CLICKHOUSE_DATABASE}' and type='MERGE_PARTS' and last_exception != '' FORMAT Vertical" $CLICKHOUSE_CLIENT --query "SELECT COUNT() > 0 FROM system.part_log where table like 'ttl_table%' and database = '${CLICKHOUSE_DATABASE}'" for i in $(seq 1 $NUM_REPLICAS); do diff --git a/tests/queries/skip_list.json b/tests/queries/skip_list.json index 
e38089230f4..980eea26305 100644 --- a/tests/queries/skip_list.json +++ b/tests/queries/skip_list.json @@ -836,6 +836,7 @@ "01870_modulo_partition_key", "01870_buffer_flush", // creates database "01889_postgresql_protocol_null_fields", - "01889_check_row_policy_defined_using_user_function" + "01889_check_row_policy_defined_using_user_function", + "01921_concurrent_ttl_and_normal_merges_zookeeper_long" // heavy test, better to run sequentially ] } From 84e02911cf4edcef68cac462ee8d74165ee77277 Mon Sep 17 00:00:00 2001 From: vdimir Date: Thu, 24 Jun 2021 17:14:36 +0300 Subject: [PATCH 369/931] Materialized columns for joined table, don't add to asterisk select --- src/Interpreters/DatabaseAndTableWithAlias.h | 2 -- src/Interpreters/ExpressionAnalyzer.cpp | 5 +++-- src/Interpreters/InterpreterSelectQuery.cpp | 5 ++--- src/Interpreters/JoinedTables.cpp | 4 ++-- src/Interpreters/JoinedTables.h | 3 +-- src/Interpreters/SelectQueryOptions.h | 15 ++++++++++++--- src/Interpreters/TreeRewriter.cpp | 5 ++--- src/Interpreters/getTableExpressions.cpp | 3 ++- .../01925_join_materialized_columns.reference | 10 ++++++---- .../01925_join_materialized_columns.sql | 2 ++ 10 files changed, 32 insertions(+), 22 deletions(-) diff --git a/src/Interpreters/DatabaseAndTableWithAlias.h b/src/Interpreters/DatabaseAndTableWithAlias.h index b53cadce460..e60674d93c6 100644 --- a/src/Interpreters/DatabaseAndTableWithAlias.h +++ b/src/Interpreters/DatabaseAndTableWithAlias.h @@ -86,8 +86,6 @@ private: names.insert(col.name); } - -private: NameSet names; }; diff --git a/src/Interpreters/ExpressionAnalyzer.cpp b/src/Interpreters/ExpressionAnalyzer.cpp index fe52b30da7b..2216f1b5818 100644 --- a/src/Interpreters/ExpressionAnalyzer.cpp +++ b/src/Interpreters/ExpressionAnalyzer.cpp @@ -11,7 +11,6 @@ #include #include -#include #include #include @@ -900,8 +899,10 @@ JoinPtr SelectQueryExpressionAnalyzer::makeTableJoin( * in the subquery_for_set object this subquery is exposed as source and the temporary table _data1 as the `table`. * - this function shows the expression JOIN _data1. 
*/ - auto interpreter = interpretSubquery(join_element.table_expression, getContext(), original_right_columns, query_options); + + auto interpreter = interpretSubquery( + join_element.table_expression, getContext(), original_right_columns, query_options.copy().setWithMaterialized()); { joined_plan = std::make_unique(); interpreter->buildQueryPlan(*joined_plan); diff --git a/src/Interpreters/InterpreterSelectQuery.cpp b/src/Interpreters/InterpreterSelectQuery.cpp index 7cca527cbc1..173d363796e 100644 --- a/src/Interpreters/InterpreterSelectQuery.cpp +++ b/src/Interpreters/InterpreterSelectQuery.cpp @@ -68,7 +68,6 @@ #include #include #include -#include #include #include @@ -330,7 +329,7 @@ InterpreterSelectQuery::InterpreterSelectQuery( metadata_snapshot = storage->getInMemoryMetadataPtr(); } - if (has_input || !joined_tables.resolveTables()) + if (has_input || !joined_tables.resolveTables(options.with_materialized)) joined_tables.makeFakeTable(storage, metadata_snapshot, source_header); /// Rewrite JOINs @@ -339,7 +338,7 @@ InterpreterSelectQuery::InterpreterSelectQuery( rewriteMultipleJoins(query_ptr, joined_tables.tablesWithColumns(), context->getCurrentDatabase(), context->getSettingsRef()); joined_tables.reset(getSelectQuery()); - joined_tables.resolveTables(); + joined_tables.resolveTables(options.with_materialized); if (storage && joined_tables.isLeftTableSubquery()) { diff --git a/src/Interpreters/JoinedTables.cpp b/src/Interpreters/JoinedTables.cpp index 5e53074d24f..c0c726b1e9b 100644 --- a/src/Interpreters/JoinedTables.cpp +++ b/src/Interpreters/JoinedTables.cpp @@ -185,9 +185,9 @@ StoragePtr JoinedTables::getLeftTableStorage() return DatabaseCatalog::instance().getTable(table_id, context); } -bool JoinedTables::resolveTables() +bool JoinedTables::resolveTables(bool with_materialized) { - tables_with_columns = getDatabaseAndTablesWithColumns(table_expressions, context, true); + tables_with_columns = getDatabaseAndTablesWithColumns(table_expressions, context, with_materialized); if (tables_with_columns.size() != table_expressions.size()) throw Exception("Unexpected tables count", ErrorCodes::LOGICAL_ERROR); diff --git a/src/Interpreters/JoinedTables.h b/src/Interpreters/JoinedTables.h index 52eb71e419d..6cbbb7c1400 100644 --- a/src/Interpreters/JoinedTables.h +++ b/src/Interpreters/JoinedTables.h @@ -30,14 +30,13 @@ public: } StoragePtr getLeftTableStorage(); - bool resolveTables(); + bool resolveTables(bool with_materialized); /// Make fake tables_with_columns[0] in case we have predefined input in InterpreterSelectQuery void makeFakeTable(StoragePtr storage, const StorageMetadataPtr & metadata_snapshot, const Block & source_header); std::shared_ptr makeTableJoin(const ASTSelectQuery & select_query); const TablesWithColumns & tablesWithColumns() const { return tables_with_columns; } - TablesWithColumns moveTablesWithColumns() { return std::move(tables_with_columns); } bool isLeftTableSubquery() const; bool isLeftTableFunction() const; diff --git a/src/Interpreters/SelectQueryOptions.h b/src/Interpreters/SelectQueryOptions.h index d723dbf4ff6..8050e184852 100644 --- a/src/Interpreters/SelectQueryOptions.h +++ b/src/Interpreters/SelectQueryOptions.h @@ -42,11 +42,14 @@ struct SelectQueryOptions bool ignore_alias = false; bool is_internal = false; bool is_subquery = false; // non-subquery can also have subquery_depth > 0, e.g. 
insert select + bool with_materialized = false; /// asterisk include materialized columns - SelectQueryOptions(QueryProcessingStage::Enum stage = QueryProcessingStage::Complete, size_t depth = 0, bool is_subquery_ = false) + SelectQueryOptions( + QueryProcessingStage::Enum stage = QueryProcessingStage::Complete, + size_t depth = 0, + bool is_subquery_ = false) : to_stage(stage), subquery_depth(depth), is_subquery(is_subquery_) - { - } + {} SelectQueryOptions copy() const { return *this; } @@ -114,6 +117,12 @@ struct SelectQueryOptions is_internal = value; return *this; } + + SelectQueryOptions & setWithMaterialized(bool value = true) + { + with_materialized = value; + return *this; + } }; } diff --git a/src/Interpreters/TreeRewriter.cpp b/src/Interpreters/TreeRewriter.cpp index 1f94cda6b0f..e2e7b68e757 100644 --- a/src/Interpreters/TreeRewriter.cpp +++ b/src/Interpreters/TreeRewriter.cpp @@ -1,5 +1,4 @@ #include -#include #include #include @@ -32,7 +31,6 @@ #include #include -#include #include #include @@ -899,8 +897,9 @@ TreeRewriterResultPtr TreeRewriter::analyzeSelect( const auto & right_table = tables_with_columns[1]; auto & cols_from_joined = result.analyzed_join->columns_from_joined_table; cols_from_joined = right_table.columns; + /// query can use materialized columns from right joined table, add it to columns_from_joined_table cols_from_joined.insert( - cols_from_joined.end(), right_table.materialized_columns.begin(), right_table.materialized_columns.end()); + cols_from_joined.end(), right_table.hidden_columns.begin(), right_table.hidden_columns.end()); result.analyzed_join->deduplicateAndQualifyColumnNames( source_columns_set, right_table.table.getQualifiedNamePrefix()); diff --git a/src/Interpreters/getTableExpressions.cpp b/src/Interpreters/getTableExpressions.cpp index f7d82a8f599..43f7030d06e 100644 --- a/src/Interpreters/getTableExpressions.cpp +++ b/src/Interpreters/getTableExpressions.cpp @@ -129,7 +129,8 @@ TablesWithColumns getDatabaseAndTablesWithColumns( NamesAndTypesList materialized; NamesAndTypesList aliases; NamesAndTypesList virtuals; - NamesAndTypesList names_and_types = getColumnsFromTableExpression(*table_expression, context, materialized, aliases, virtuals); + NamesAndTypesList names_and_types = getColumnsFromTableExpression( + *table_expression, context, materialized, aliases, virtuals); removeDuplicateColumns(names_and_types); diff --git a/tests/queries/0_stateless/01925_join_materialized_columns.reference b/tests/queries/0_stateless/01925_join_materialized_columns.reference index e00de5f458d..8d93af00109 100644 --- a/tests/queries/0_stateless/01925_join_materialized_columns.reference +++ b/tests/queries/0_stateless/01925_join_materialized_columns.reference @@ -1,10 +1,12 @@ +2020-02-02 13:00:00 fact2 t1_val2 2020-02-05 13:00:00 fact2 t1_val2 +- 2020-02-02 13:00:00 fact2 t1_val2 2020-02-02 2020-02-05 13:00:00 fact2 t1_val2 2020-02-05 - 2020-01-01 2020-01-01 2020-02-02 2020-02-05 - -2020-01-01 12:00:00 fact1 t1_val1 2020-01-01 2020-01-01 12:00:00 fact1 t2_val2 2020-01-01 -2020-01-01 13:00:00 fact3 t1_val3 2020-01-01 2020-01-01 12:00:00 fact1 t2_val2 2020-01-01 +2020-01-01 12:00:00 fact1 t1_val1 2020-01-01 12:00:00 fact1 t2_val2 +2020-01-01 13:00:00 fact3 t1_val3 2020-01-01 12:00:00 fact1 t2_val2 - -2020-01-01 12:00:00 fact1 t1_val1 2020-01-01 2020-01-01 12:00:00 fact1 t2_val2 -2020-01-01 13:00:00 fact3 t1_val3 2020-01-01 2020-01-01 12:00:00 fact1 t2_val2 +2020-01-01 12:00:00 fact1 t1_val1 2020-01-01 12:00:00 fact1 t2_val2 +2020-01-01 13:00:00 fact3 t1_val3 
2020-01-01 12:00:00 fact1 t2_val2 diff --git a/tests/queries/0_stateless/01925_join_materialized_columns.sql b/tests/queries/0_stateless/01925_join_materialized_columns.sql index 7d5acc2cd25..91106a25436 100644 --- a/tests/queries/0_stateless/01925_join_materialized_columns.sql +++ b/tests/queries/0_stateless/01925_join_materialized_columns.sql @@ -9,6 +9,8 @@ INSERT INTO t2 VALUES ('2020-01-01 12:00:00', 'fact1', 't2_val2'), ('2020-02-05 SELECT * FROM t1 JOIN t2 ON t1.foo = t2.bar WHERE t2.dt >= '2020-02-01'; SELECT '-'; +SELECT t1.*, t1.dt, t2.*, t2.dt FROM t1 JOIN t2 ON t1.foo = t2.bar WHERE t2.dt >= '2020-02-01'; +SELECT '-'; SELECT t1.dt, t2.dt FROM t1 JOIN t2 ON t1.foo = t2.bar ORDER BY t1.dt; SELECT '-'; SELECT * FROM t1 ALL JOIN t2 ON t1.dt = t2.dt ORDER BY t1.time, t2.time; From af7776554b03e0299c8268397939234312085a66 Mon Sep 17 00:00:00 2001 From: vdimir Date: Thu, 24 Jun 2021 17:16:57 +0300 Subject: [PATCH 370/931] Fix space in ExpressionAnalyzer.cpp --- src/Interpreters/ExpressionAnalyzer.cpp | 2 -- 1 file changed, 2 deletions(-) diff --git a/src/Interpreters/ExpressionAnalyzer.cpp b/src/Interpreters/ExpressionAnalyzer.cpp index 2216f1b5818..a393440b1ae 100644 --- a/src/Interpreters/ExpressionAnalyzer.cpp +++ b/src/Interpreters/ExpressionAnalyzer.cpp @@ -899,8 +899,6 @@ JoinPtr SelectQueryExpressionAnalyzer::makeTableJoin( * in the subquery_for_set object this subquery is exposed as source and the temporary table _data1 as the `table`. * - this function shows the expression JOIN _data1. */ - - auto interpreter = interpretSubquery( join_element.table_expression, getContext(), original_right_columns, query_options.copy().setWithMaterialized()); { From 00d268ff7145ce075fa89ec90136c5b25ca3d33f Mon Sep 17 00:00:00 2001 From: Mike Kot Date: Thu, 24 Jun 2021 17:37:52 +0300 Subject: [PATCH 371/931] Light theme support --- website/css/highlight.css | 76 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 76 insertions(+) diff --git a/website/css/highlight.css b/website/css/highlight.css index 8cb44bf0d18..dbe181d4ae7 100644 --- a/website/css/highlight.css +++ b/website/css/highlight.css @@ -4,6 +4,9 @@ Pygments template by Jan T. 
Sott (https://github.com/idleberg) Created with Base16 Builder by Chris Kempson (https://github.com/chriskempson/base16-builder) */ + +/* The default color scheme is dark */ + .syntax .hll { background-color: #515151 } .syntax { background: #2d2d2d; color: #f2f0ec } .syntax .c { color: #747369 } /* Comment */ @@ -69,3 +72,76 @@ .syntax .vg { color: #f2777a } /* Name.Variable.Global */ .syntax .vi { color: #f2777a } /* Name.Variable.Instance */ .syntax .il { color: #f99157 } /* Literal.Number.Integer.Long */ + + +@media (prefers-color-scheme: light) { + +.syntax .hll { background-color: #e0e0e0 } +.syntax { background: #ffffff; color: #1d1f21 } +.syntax .c { color: #b4b7b4 } /* Comment */ +.syntax .err { color: #cc6666 } /* Error */ +.syntax .k { color: #b294bb } /* Keyword */ +.syntax .l { color: #de935f } /* Literal */ +.syntax .n { color: #1d1f21 } /* Name */ +.syntax .o { color: #8abeb7 } /* Operator */ +.syntax .p { color: #1d1f21 } /* Punctuation */ +.syntax .cm { color: #b4b7b4 } /* Comment.Multiline */ +.syntax .cp { color: #b4b7b4 } /* Comment.Preproc */ +.syntax .c1 { color: #b4b7b4 } /* Comment.Single */ +.syntax .cs { color: #b4b7b4 } /* Comment.Special */ +.syntax .gd { color: #cc6666 } /* Generic.Deleted */ +.syntax .ge { font-style: italic } /* Generic.Emph */ +.syntax .gh { color: #1d1f21; font-weight: bold } /* Generic.Heading */ +.syntax .gi { color: #b5bd68 } /* Generic.Inserted */ +.syntax .gp { color: #b4b7b4; font-weight: bold } /* Generic.Prompt */ +.syntax .gs { font-weight: bold } /* Generic.Strong */ +.syntax .gu { color: #8abeb7; font-weight: bold } /* Generic.Subheading */ +.syntax .kc { color: #b294bb } /* Keyword.Constant */ +.syntax .kd { color: #b294bb } /* Keyword.Declaration */ +.syntax .kn { color: #8abeb7 } /* Keyword.Namespace */ +.syntax .kp { color: #b294bb } /* Keyword.Pseudo */ +.syntax .kr { color: #b294bb } /* Keyword.Reserved */ +.syntax .kt { color: #f0c674 } /* Keyword.Type */ +.syntax .ld { color: #b5bd68 } /* Literal.Date */ +.syntax .m { color: #de935f } /* Literal.Number */ +.syntax .s { color: #b5bd68 } /* Literal.String */ +.syntax .na { color: #81a2be } /* Name.Attribute */ +.syntax .nb { color: #1d1f21 } /* Name.Builtin */ +.syntax .nc { color: #f0c674 } /* Name.Class */ +.syntax .no { color: #cc6666 } /* Name.Constant */ +.syntax .nd { color: #8abeb7 } /* Name.Decorator */ +.syntax .ni { color: #1d1f21 } /* Name.Entity */ +.syntax .ne { color: #cc6666 } /* Name.Exception */ +.syntax .nf { color: #81a2be } /* Name.Function */ +.syntax .nl { color: #1d1f21 } /* Name.Label */ +.syntax .nn { color: #f0c674 } /* Name.Namespace */ +.syntax .nx { color: #81a2be } /* Name.Other */ +.syntax .py { color: #1d1f21 } /* Name.Property */ +.syntax .nt { color: #8abeb7 } /* Name.Tag */ +.syntax .nv { color: #cc6666 } /* Name.Variable */ +.syntax .ow { color: #8abeb7 } /* Operator.Word */ +.syntax .w { color: #1d1f21 } /* Text.Whitespace */ +.syntax .mf { color: #de935f } /* Literal.Number.Float */ +.syntax .mh { color: #de935f } /* Literal.Number.Hex */ +.syntax .mi { color: #de935f } /* Literal.Number.Integer */ +.syntax .mo { color: #de935f } /* Literal.Number.Oct */ +.syntax .sb { color: #b5bd68 } /* Literal.String.Backtick */ +.syntax .sc { color: #1d1f21 } /* Literal.String.Char */ +.syntax .sd { color: #b4b7b4 } /* Literal.String.Doc */ +.syntax .s2 { color: #b5bd68 } /* Literal.String.Double */ +.syntax .se { color: #de935f } /* Literal.String.Escape */ +.syntax .sh { color: #b5bd68 } /* Literal.String.Heredoc */ +.syntax .si { color: #de935f } 
/* Literal.String.Interpol */ +.syntax .sx { color: #b5bd68 } /* Literal.String.Other */ +.syntax .sr { color: #b5bd68 } /* Literal.String.Regex */ +.syntax .s1 { color: #b5bd68 } /* Literal.String.Single */ +.syntax .ss { color: #b5bd68 } /* Literal.String.Symbol */ +.syntax .bp { color: #1d1f21 } /* Name.Builtin.Pseudo */ +.syntax .vc { color: #cc6666 } /* Name.Variable.Class */ +.syntax .vg { color: #cc6666 } /* Name.Variable.Global */ +.syntax .vi { color: #cc6666 } /* Name.Variable.Instance */ +.syntax .il { color: #de935f } /* Literal.Number.Integer.Long */ + + + +} From 241b64d02ce7f2227e7d6b21e422b9a2c69e3394 Mon Sep 17 00:00:00 2001 From: vdimir Date: Thu, 24 Jun 2021 17:57:21 +0300 Subject: [PATCH 372/931] Support ALIASed columns for right joined table --- src/Interpreters/ExpressionAnalyzer.cpp | 2 +- src/Interpreters/InterpreterSelectQuery.cpp | 4 ++-- src/Interpreters/JoinedTables.cpp | 4 ++-- src/Interpreters/JoinedTables.h | 2 +- src/Interpreters/SelectQueryOptions.h | 6 ++--- src/Interpreters/TreeRewriter.cpp | 6 ++--- src/Interpreters/getTableExpressions.cpp | 6 ++--- src/Interpreters/getTableExpressions.h | 2 +- .../01925_join_materialized_columns.reference | 10 ++++++++ .../01925_join_materialized_columns.sql | 24 +++++++++++++++++-- 10 files changed, 48 insertions(+), 18 deletions(-) diff --git a/src/Interpreters/ExpressionAnalyzer.cpp b/src/Interpreters/ExpressionAnalyzer.cpp index a393440b1ae..00ffd540da0 100644 --- a/src/Interpreters/ExpressionAnalyzer.cpp +++ b/src/Interpreters/ExpressionAnalyzer.cpp @@ -900,7 +900,7 @@ JoinPtr SelectQueryExpressionAnalyzer::makeTableJoin( * - this function shows the expression JOIN _data1. */ auto interpreter = interpretSubquery( - join_element.table_expression, getContext(), original_right_columns, query_options.copy().setWithMaterialized()); + join_element.table_expression, getContext(), original_right_columns, query_options.copy().setWithAllColumns()); { joined_plan = std::make_unique(); interpreter->buildQueryPlan(*joined_plan); diff --git a/src/Interpreters/InterpreterSelectQuery.cpp b/src/Interpreters/InterpreterSelectQuery.cpp index 173d363796e..71181a84e1a 100644 --- a/src/Interpreters/InterpreterSelectQuery.cpp +++ b/src/Interpreters/InterpreterSelectQuery.cpp @@ -329,7 +329,7 @@ InterpreterSelectQuery::InterpreterSelectQuery( metadata_snapshot = storage->getInMemoryMetadataPtr(); } - if (has_input || !joined_tables.resolveTables(options.with_materialized)) + if (has_input || !joined_tables.resolveTables(options.with_all_cols)) joined_tables.makeFakeTable(storage, metadata_snapshot, source_header); /// Rewrite JOINs @@ -338,7 +338,7 @@ InterpreterSelectQuery::InterpreterSelectQuery( rewriteMultipleJoins(query_ptr, joined_tables.tablesWithColumns(), context->getCurrentDatabase(), context->getSettingsRef()); joined_tables.reset(getSelectQuery()); - joined_tables.resolveTables(options.with_materialized); + joined_tables.resolveTables(options.with_all_cols); if (storage && joined_tables.isLeftTableSubquery()) { diff --git a/src/Interpreters/JoinedTables.cpp b/src/Interpreters/JoinedTables.cpp index c0c726b1e9b..86ec067b00c 100644 --- a/src/Interpreters/JoinedTables.cpp +++ b/src/Interpreters/JoinedTables.cpp @@ -185,9 +185,9 @@ StoragePtr JoinedTables::getLeftTableStorage() return DatabaseCatalog::instance().getTable(table_id, context); } -bool JoinedTables::resolveTables(bool with_materialized) +bool JoinedTables::resolveTables(bool include_all_columns) { - tables_with_columns = 
getDatabaseAndTablesWithColumns(table_expressions, context, with_materialized); + tables_with_columns = getDatabaseAndTablesWithColumns(table_expressions, context, include_all_columns); if (tables_with_columns.size() != table_expressions.size()) throw Exception("Unexpected tables count", ErrorCodes::LOGICAL_ERROR); diff --git a/src/Interpreters/JoinedTables.h b/src/Interpreters/JoinedTables.h index 6cbbb7c1400..52581c19999 100644 --- a/src/Interpreters/JoinedTables.h +++ b/src/Interpreters/JoinedTables.h @@ -30,7 +30,7 @@ public: } StoragePtr getLeftTableStorage(); - bool resolveTables(bool with_materialized); + bool resolveTables(bool include_all_columns); /// Make fake tables_with_columns[0] in case we have predefined input in InterpreterSelectQuery void makeFakeTable(StoragePtr storage, const StorageMetadataPtr & metadata_snapshot, const Block & source_header); diff --git a/src/Interpreters/SelectQueryOptions.h b/src/Interpreters/SelectQueryOptions.h index 8050e184852..1a1f0267ab0 100644 --- a/src/Interpreters/SelectQueryOptions.h +++ b/src/Interpreters/SelectQueryOptions.h @@ -42,7 +42,7 @@ struct SelectQueryOptions bool ignore_alias = false; bool is_internal = false; bool is_subquery = false; // non-subquery can also have subquery_depth > 0, e.g. insert select - bool with_materialized = false; /// asterisk include materialized columns + bool with_all_cols = false; /// asterisk include materialized and aliased columns SelectQueryOptions( QueryProcessingStage::Enum stage = QueryProcessingStage::Complete, @@ -118,9 +118,9 @@ struct SelectQueryOptions return *this; } - SelectQueryOptions & setWithMaterialized(bool value = true) + SelectQueryOptions & setWithAllColumns(bool value = true) { - with_materialized = value; + with_all_cols = value; return *this; } }; diff --git a/src/Interpreters/TreeRewriter.cpp b/src/Interpreters/TreeRewriter.cpp index e2e7b68e757..b997e53f745 100644 --- a/src/Interpreters/TreeRewriter.cpp +++ b/src/Interpreters/TreeRewriter.cpp @@ -897,9 +897,9 @@ TreeRewriterResultPtr TreeRewriter::analyzeSelect( const auto & right_table = tables_with_columns[1]; auto & cols_from_joined = result.analyzed_join->columns_from_joined_table; cols_from_joined = right_table.columns; - /// query can use materialized columns from right joined table, add it to columns_from_joined_table - cols_from_joined.insert( - cols_from_joined.end(), right_table.hidden_columns.begin(), right_table.hidden_columns.end()); + /// query can use materialized or aliased columns from right joined table, + /// we want to request it for right table + cols_from_joined.insert(cols_from_joined.end(), right_table.hidden_columns.begin(), right_table.hidden_columns.end()); result.analyzed_join->deduplicateAndQualifyColumnNames( source_columns_set, right_table.table.getQualifiedNamePrefix()); diff --git a/src/Interpreters/getTableExpressions.cpp b/src/Interpreters/getTableExpressions.cpp index 43f7030d06e..2d9391f4673 100644 --- a/src/Interpreters/getTableExpressions.cpp +++ b/src/Interpreters/getTableExpressions.cpp @@ -116,13 +116,13 @@ static NamesAndTypesList getColumnsFromTableExpression( TablesWithColumns getDatabaseAndTablesWithColumns( const ASTTableExprConstPtrs & table_expressions, ContextPtr context, - bool add_materialized) + bool include_all) { TablesWithColumns tables_with_columns; String current_database = context->getCurrentDatabase(); - bool include_alias_cols = context->getSettingsRef().asterisk_include_alias_columns; - bool include_materialized_cols = add_materialized || 
context->getSettingsRef().asterisk_include_materialized_columns; + bool include_alias_cols = include_all || context->getSettingsRef().asterisk_include_alias_columns; + bool include_materialized_cols = include_all || context->getSettingsRef().asterisk_include_materialized_columns; for (const ASTTableExpression * table_expression : table_expressions) { diff --git a/src/Interpreters/getTableExpressions.h b/src/Interpreters/getTableExpressions.h index 19c27057c2f..6a999729a2f 100644 --- a/src/Interpreters/getTableExpressions.h +++ b/src/Interpreters/getTableExpressions.h @@ -23,6 +23,6 @@ ASTPtr extractTableExpression(const ASTSelectQuery & select, size_t table_number TablesWithColumns getDatabaseAndTablesWithColumns( const ASTTableExprConstPtrs & table_expressions, ContextPtr context, - bool add_materialized = false); + bool include_all = false); } diff --git a/tests/queries/0_stateless/01925_join_materialized_columns.reference b/tests/queries/0_stateless/01925_join_materialized_columns.reference index 8d93af00109..fe00b746e57 100644 --- a/tests/queries/0_stateless/01925_join_materialized_columns.reference +++ b/tests/queries/0_stateless/01925_join_materialized_columns.reference @@ -10,3 +10,13 @@ - 2020-01-01 12:00:00 fact1 t1_val1 2020-01-01 12:00:00 fact1 t2_val2 2020-01-01 13:00:00 fact3 t1_val3 2020-01-01 12:00:00 fact1 t2_val2 +- +2020-01-01 12:00:00 fact1 t1_val1 2019-01-01 12:00:00 fact4 t2_val2 +2020-01-01 12:00:00 fact1 t1_val1 2020-01-01 12:00:00 fact1 t2_val2 +2020-01-01 13:00:00 fact3 t1_val3 2019-01-01 12:00:00 fact4 t2_val2 +2020-01-01 13:00:00 fact3 t1_val3 2020-01-01 12:00:00 fact1 t2_val2 +- +2020-02-02 13:00:00 fact2 t1_val2 2020-02-05 13:00:00 fact2 t1_val2 +- +fact1t1_val1 fact1t2_val2 +fact2t1_val2 fact2t1_val2 diff --git a/tests/queries/0_stateless/01925_join_materialized_columns.sql b/tests/queries/0_stateless/01925_join_materialized_columns.sql index 91106a25436..16fe00beb63 100644 --- a/tests/queries/0_stateless/01925_join_materialized_columns.sql +++ b/tests/queries/0_stateless/01925_join_materialized_columns.sql @@ -1,8 +1,19 @@ DROP TABLE IF EXISTS t1; DROP TABLE IF EXISTS t2; -CREATE TABLE t1 (time DateTime, foo String, dimension_1 String, dt Date MATERIALIZED toDate(time)) ENGINE = MergeTree() PARTITION BY toYYYYMM(dt) ORDER BY (dt, foo); -CREATE TABLE t2 (time DateTime, bar String, dimension_2 String, dt Date MATERIALIZED toDate(time)) ENGINE = MergeTree() PARTITION BY toYYYYMM(dt) ORDER BY (dt, bar); +CREATE TABLE t1 ( + time DateTime, foo String, dimension_1 String, + dt Date MATERIALIZED toDate(time), + dt1 Date MATERIALIZED toDayOfYear(time), + aliascol1 ALIAS foo || dimension_1 +) ENGINE = MergeTree() PARTITION BY toYYYYMM(dt) ORDER BY (dt, foo); + +CREATE TABLE t2 ( + time DateTime, bar String, dimension_2 String, + dt Date MATERIALIZED toDate(time), + dt2 Date MATERIALIZED toDayOfYear(time), + aliascol2 ALIAS bar || dimension_2 +) ENGINE = MergeTree() PARTITION BY toYYYYMM(dt) ORDER BY (dt, bar); INSERT INTO t1 VALUES ('2020-01-01 12:00:00', 'fact1', 't1_val1'), ('2020-02-02 13:00:00', 'fact2', 't1_val2'), ('2020-01-01 13:00:00', 'fact3', 't1_val3'); INSERT INTO t2 VALUES ('2020-01-01 12:00:00', 'fact1', 't2_val2'), ('2020-02-05 13:00:00', 'fact2', 't1_val2'), ('2019-01-01 12:00:00', 'fact4', 't2_val2'); @@ -16,3 +27,12 @@ SELECT '-'; SELECT * FROM t1 ALL JOIN t2 ON t1.dt = t2.dt ORDER BY t1.time, t2.time; SELECT '-'; SELECT * FROM t1 ALL JOIN t2 USING (dt) ORDER BY t1.time, t2.time; +SELECT '-'; +SELECT * FROM t1 JOIN t2 ON t1.dt1 = t2.dt2 ORDER BY 
t1.time, t2.time; +SELECT '-'; +SELECT * FROM t1 JOIN t2 ON t1.foo = t2.bar WHERE t2.aliascol2 == 'fact2t1_val2'; +SELECT '-'; +SELECT t1.aliascol1, t2.aliascol2 FROM t1 JOIN t2 ON t1.foo = t2.bar ORDER BY t1.time, t2.time; +-- SELECT '-'; +-- SELECT * FROM t1 JOIN t2 ON t1.aliascol1 = t2.aliascol2 ORDER BY t1.time, t2.time; + From 3c21b042ee8534976479e02864dfdabc71ef84ce Mon Sep 17 00:00:00 2001 From: Mike Kot Date: Thu, 24 Jun 2021 18:16:39 +0300 Subject: [PATCH 373/931] Using light theme as default one --- website/css/highlight.css | 139 ++++++++++++++++++-------------------- 1 file changed, 67 insertions(+), 72 deletions(-) diff --git a/website/css/highlight.css b/website/css/highlight.css index dbe181d4ae7..be5fc1025b1 100644 --- a/website/css/highlight.css +++ b/website/css/highlight.css @@ -1,81 +1,10 @@ /* - Name: Base16 Eighties Dark + Name: Base16 Eighties Dark + Ocean light Author: Chris Kempson (http://chriskempson.com) Pygments template by Jan T. Sott (https://github.com/idleberg) Created with Base16 Builder by Chris Kempson (https://github.com/chriskempson/base16-builder) */ -/* The default color scheme is dark */ - -.syntax .hll { background-color: #515151 } -.syntax { background: #2d2d2d; color: #f2f0ec } -.syntax .c { color: #747369 } /* Comment */ -.syntax .err { color: #f2777a } /* Error */ -.syntax .k { color: #cc99cc } /* Keyword */ -.syntax .l { color: #f99157 } /* Literal */ -.syntax .n { color: #f2f0ec } /* Name */ -.syntax .o { color: #66cccc } /* Operator */ -.syntax .p { color: #f2f0ec } /* Punctuation */ -.syntax .cm { color: #747369 } /* Comment.Multiline */ -.syntax .cp { color: #747369 } /* Comment.Preproc */ -.syntax .c1 { color: #747369 } /* Comment.Single */ -.syntax .cs { color: #747369 } /* Comment.Special */ -.syntax .gd { color: #f2777a } /* Generic.Deleted */ -.syntax .ge { font-style: italic } /* Generic.Emph */ -.syntax .gh { color: #f2f0ec; font-weight: bold } /* Generic.Heading */ -.syntax .gi { color: #99cc99 } /* Generic.Inserted */ -.syntax .gp { color: #747369; font-weight: bold } /* Generic.Prompt */ -.syntax .gs { font-weight: bold } /* Generic.Strong */ -.syntax .gu { color: #66cccc; font-weight: bold } /* Generic.Subheading */ -.syntax .kc { color: #cc99cc } /* Keyword.Constant */ -.syntax .kd { color: #cc99cc } /* Keyword.Declaration */ -.syntax .kn { color: #66cccc } /* Keyword.Namespace */ -.syntax .kp { color: #cc99cc } /* Keyword.Pseudo */ -.syntax .kr { color: #cc99cc } /* Keyword.Reserved */ -.syntax .kt { color: #ffcc66 } /* Keyword.Type */ -.syntax .ld { color: #99cc99 } /* Literal.Date */ -.syntax .m { color: #f99157 } /* Literal.Number */ -.syntax .s { color: #99cc99 } /* Literal.String */ -.syntax .na { color: #6699cc } /* Name.Attribute */ -.syntax .nb { color: #f2f0ec } /* Name.Builtin */ -.syntax .nc { color: #ffcc66 } /* Name.Class */ -.syntax .no { color: #f2777a } /* Name.Constant */ -.syntax .nd { color: #66cccc } /* Name.Decorator */ -.syntax .ni { color: #f2f0ec } /* Name.Entity */ -.syntax .ne { color: #f2777a } /* Name.Exception */ -.syntax .nf { color: #6699cc } /* Name.Function */ -.syntax .nl { color: #f2f0ec } /* Name.Label */ -.syntax .nn { color: #ffcc66 } /* Name.Namespace */ -.syntax .nx { color: #6699cc } /* Name.Other */ -.syntax .py { color: #f2f0ec } /* Name.Property */ -.syntax .nt { color: #66cccc } /* Name.Tag */ -.syntax .nv { color: #f2777a } /* Name.Variable */ -.syntax .ow { color: #66cccc } /* Operator.Word */ -.syntax .w { color: #f2f0ec } /* Text.Whitespace */ -.syntax .mf { color: #f99157 } /* 
Literal.Number.Float */ -.syntax .mh { color: #f99157 } /* Literal.Number.Hex */ -.syntax .mi { color: #f99157 } /* Literal.Number.Integer */ -.syntax .mo { color: #f99157 } /* Literal.Number.Oct */ -.syntax .sb { color: #99cc99 } /* Literal.String.Backtick */ -.syntax .sc { color: #f2f0ec } /* Literal.String.Char */ -.syntax .sd { color: #747369 } /* Literal.String.Doc */ -.syntax .s2 { color: #99cc99 } /* Literal.String.Double */ -.syntax .se { color: #f99157 } /* Literal.String.Escape */ -.syntax .sh { color: #99cc99 } /* Literal.String.Heredoc */ -.syntax .si { color: #f99157 } /* Literal.String.Interpol */ -.syntax .sx { color: #99cc99 } /* Literal.String.Other */ -.syntax .sr { color: #99cc99 } /* Literal.String.Regex */ -.syntax .s1 { color: #99cc99 } /* Literal.String.Single */ -.syntax .ss { color: #99cc99 } /* Literal.String.Symbol */ -.syntax .bp { color: #f2f0ec } /* Name.Builtin.Pseudo */ -.syntax .vc { color: #f2777a } /* Name.Variable.Class */ -.syntax .vg { color: #f2777a } /* Name.Variable.Global */ -.syntax .vi { color: #f2777a } /* Name.Variable.Instance */ -.syntax .il { color: #f99157 } /* Literal.Number.Integer.Long */ - - -@media (prefers-color-scheme: light) { - .syntax .hll { background-color: #e0e0e0 } .syntax { background: #ffffff; color: #1d1f21 } .syntax .c { color: #b4b7b4 } /* Comment */ @@ -142,6 +71,72 @@ .syntax .vi { color: #cc6666 } /* Name.Variable.Instance */ .syntax .il { color: #de935f } /* Literal.Number.Integer.Long */ +@media (prefers-color-scheme: dark) { +.syntax .hll { background-color: #515151 } +.syntax { background: #2d2d2d; color: #f2f0ec } +.syntax .c { color: #747369 } /* Comment */ +.syntax .err { color: #f2777a } /* Error */ +.syntax .k { color: #cc99cc } /* Keyword */ +.syntax .l { color: #f99157 } /* Literal */ +.syntax .n { color: #f2f0ec } /* Name */ +.syntax .o { color: #66cccc } /* Operator */ +.syntax .p { color: #f2f0ec } /* Punctuation */ +.syntax .cm { color: #747369 } /* Comment.Multiline */ +.syntax .cp { color: #747369 } /* Comment.Preproc */ +.syntax .c1 { color: #747369 } /* Comment.Single */ +.syntax .cs { color: #747369 } /* Comment.Special */ +.syntax .gd { color: #f2777a } /* Generic.Deleted */ +.syntax .ge { font-style: italic } /* Generic.Emph */ +.syntax .gh { color: #f2f0ec; font-weight: bold } /* Generic.Heading */ +.syntax .gi { color: #99cc99 } /* Generic.Inserted */ +.syntax .gp { color: #747369; font-weight: bold } /* Generic.Prompt */ +.syntax .gs { font-weight: bold } /* Generic.Strong */ +.syntax .gu { color: #66cccc; font-weight: bold } /* Generic.Subheading */ +.syntax .kc { color: #cc99cc } /* Keyword.Constant */ +.syntax .kd { color: #cc99cc } /* Keyword.Declaration */ +.syntax .kn { color: #66cccc } /* Keyword.Namespace */ +.syntax .kp { color: #cc99cc } /* Keyword.Pseudo */ +.syntax .kr { color: #cc99cc } /* Keyword.Reserved */ +.syntax .kt { color: #ffcc66 } /* Keyword.Type */ +.syntax .ld { color: #99cc99 } /* Literal.Date */ +.syntax .m { color: #f99157 } /* Literal.Number */ +.syntax .s { color: #99cc99 } /* Literal.String */ +.syntax .na { color: #6699cc } /* Name.Attribute */ +.syntax .nb { color: #f2f0ec } /* Name.Builtin */ +.syntax .nc { color: #ffcc66 } /* Name.Class */ +.syntax .no { color: #f2777a } /* Name.Constant */ +.syntax .nd { color: #66cccc } /* Name.Decorator */ +.syntax .ni { color: #f2f0ec } /* Name.Entity */ +.syntax .ne { color: #f2777a } /* Name.Exception */ +.syntax .nf { color: #6699cc } /* Name.Function */ +.syntax .nl { color: #f2f0ec } /* Name.Label */ +.syntax .nn { 
color: #ffcc66 } /* Name.Namespace */
+.syntax .nx { color: #6699cc } /* Name.Other */
+.syntax .py { color: #f2f0ec } /* Name.Property */
+.syntax .nt { color: #66cccc } /* Name.Tag */
+.syntax .nv { color: #f2777a } /* Name.Variable */
+.syntax .ow { color: #66cccc } /* Operator.Word */
+.syntax .w { color: #f2f0ec } /* Text.Whitespace */
+.syntax .mf { color: #f99157 } /* Literal.Number.Float */
+.syntax .mh { color: #f99157 } /* Literal.Number.Hex */
+.syntax .mi { color: #f99157 } /* Literal.Number.Integer */
+.syntax .mo { color: #f99157 } /* Literal.Number.Oct */
+.syntax .sb { color: #99cc99 } /* Literal.String.Backtick */
+.syntax .sc { color: #f2f0ec } /* Literal.String.Char */
+.syntax .sd { color: #747369 } /* Literal.String.Doc */
+.syntax .s2 { color: #99cc99 } /* Literal.String.Double */
+.syntax .se { color: #f99157 } /* Literal.String.Escape */
+.syntax .sh { color: #99cc99 } /* Literal.String.Heredoc */
+.syntax .si { color: #f99157 } /* Literal.String.Interpol */
+.syntax .sx { color: #99cc99 } /* Literal.String.Other */
+.syntax .sr { color: #99cc99 } /* Literal.String.Regex */
+.syntax .s1 { color: #99cc99 } /* Literal.String.Single */
+.syntax .ss { color: #99cc99 } /* Literal.String.Symbol */
+.syntax .bp { color: #f2f0ec } /* Name.Builtin.Pseudo */
+.syntax .vc { color: #f2777a } /* Name.Variable.Class */
+.syntax .vg { color: #f2777a } /* Name.Variable.Global */
+.syntax .vi { color: #f2777a } /* Name.Variable.Instance */
+.syntax .il { color: #f99157 } /* Literal.Number.Integer.Long */
 }

From 48e19f0c396dd1000824caee040330fa287c2ab3 Mon Sep 17 00:00:00 2001
From: Dmitriy
Date: Thu, 24 Jun 2021 22:29:57 +0300
Subject: [PATCH 374/931] Create ExternalDistributed.md
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Documented the new ExternalDistributed table engine.

---
 .../integrations/ExternalDistributed.md | 71 +++++++++++++++++++
 .../table-engines/integrations/mysql.md | 6 ++
 .../table-engines/integrations/postgresql.md | 6 ++
 .../en/sql-reference/table-functions/mysql.md | 12 ++++
 .../table-functions/postgresql.md | 14 +++-
 5 files changed, 108 insertions(+), 1 deletion(-)
 create mode 100644 docs/en/engines/table-engines/integrations/ExternalDistributed.md

diff --git a/docs/en/engines/table-engines/integrations/ExternalDistributed.md b/docs/en/engines/table-engines/integrations/ExternalDistributed.md
new file mode 100644
index 00000000000..550d53460d4
--- /dev/null
+++ b/docs/en/engines/table-engines/integrations/ExternalDistributed.md
@@ -0,0 +1,71 @@
+---
+toc_priority: 12
+toc_title: ExternalDistributed
+---
+
+# ExternalDistributed {#externaldistributed}
+
+The `ExternalDistributed` engine allows performing `SELECT` and `INSERT` queries on data stored on remote MySQL or PostgreSQL servers. It accepts the [MySQL](../../../engines/table-engines/integrations/mysql.md) or [PostgreSQL](../../../engines/table-engines/integrations/postgresql.md) engine as an argument, so sharding is possible.
+
+## Creating a Table {#creating-a-table}
+
+``` sql
+CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]
+(
+    name1 [type1] [DEFAULT|MATERIALIZED|ALIAS expr1] [TTL expr1],
+    name2 [type2] [DEFAULT|MATERIALIZED|ALIAS expr2] [TTL expr2],
+    ...
+) ENGINE = ExternalDistributed('engine', 'host:port', 'database', 'table', 'user', 'password'[, `schema`]);
+```
+
+See a detailed description of the [CREATE TABLE](../../../sql-reference/statements/create/table.md#create-table-query) query.
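+
+For instance, a minimal illustrative sketch of a table spread over two single-replica MySQL shards (the host names, database, table, and credentials here are hypothetical):
+
+```sql
+CREATE TABLE shard_example (id UInt32, name String)
+ENGINE = ExternalDistributed('MySQL', 'mysql-a:3306,mysql-b:3306', 'clickhouse', 'shard_example', 'root', 'clickhouse');
+```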
+
+The table structure can differ from the original table structure:
+
+- Column names should be the same as in the original table, but you can use just some of these columns and in any order.
+- Column types may differ from those in the original table. ClickHouse tries to [cast](../../../sql-reference/functions/type-conversion-functions.md#type_conversion_function-cast) values to the ClickHouse data types.
+- Setting `external_table_functions_use_nulls` defines how to handle Nullable columns. Default is 1, if 0 - table function will not make nullable columns and will insert default values instead of nulls. This is also applicable for null values inside array data types.
+
+**Engine Parameters**
+
+- `engine` — The table engine: `MySQL` or `PostgreSQL`.
+- `host:port` — MySQL or PostgreSQL server address.
+- `database` — Remote database name.
+- `table` — Remote table name.
+- `user` — User name.
+- `password` — User password.
+- `schema` — Non-default table schema. Optional.
+
+## Implementation Details {#implementation-details}
+
+Supports multiple replicas that must be separated by the `|` character. For example:
+
+```sql
+CREATE TABLE test_shards (id UInt32, name String, age UInt32, money UInt32) ENGINE = ExternalDistributed('MySQL', `mysql{1|2}:3306,mysql{3|4}:3306`, 'clickhouse', 'test_replicas', 'root', 'clickhouse');
+```
+
+When specifying replicas, one of the available replicas will be selected for each of the shards when reading. You can configure the algorithm for load balancing (the preference for which replica to access) – see the [load_balancing](../../../operations/settings/settings.md#settings-load_balancing) setting. If the connection with the server is not established, there will be an attempt to connect with a short timeout. If the connection failed, the next replica will be selected, and so on for all the replicas. If the connection attempt failed for all the replicas, the attempt will be repeated the same way several times. This works in favor of resiliency, but does not provide complete fault tolerance: a remote server might accept the connection, but might not work, or work poorly.
+
+You can specify just one of the shards (in this case, query processing should be called remote, rather than distributed) or up to any number of shards. In each shard, you can specify from one to any number of replicas. You can specify a different number of replicas for each shard.
+
+Each shard can have a weight defined in the config file. By default, the weight is equal to one. Data is distributed across shards in the amount proportional to the shard weight. For example, if there are two shards and the first has a weight of 9 while the second has a weight of 10, the first will be sent 9 / 19 parts of the rows, and the second will be sent 10 / 19.
+
+To select the shard that a row of data is sent to, the sharding expression is analyzed, and its remainder is taken from dividing it by the total weight of the shards. The row is sent to the shard that corresponds to the half-interval of the remainders from `prev_weight` to `prev_weights + weight`, where `prev_weights` is the total weight of the shards with the smallest number, and `weight` is the weight of this shard. For example, if there are two shards, and the first has a weight of 9 while the second has a weight of 10, the row will be sent to the first shard for the remainders from the range \[0, 9), and to the second for the remainders from the range \[9, 19).
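+
+As a concrete illustration of this arithmetic (a sketch: the weights 9 and 10 come from the example above, and `intHash64(UserID)` is only one possible sharding expression):
+
+```sql
+WITH 123456 AS UserID
+SELECT
+    intHash64(UserID) % (9 + 10) AS remainder,  -- total weight of the shards is 19
+    if(remainder < 9, 1, 2) AS target_shard;    -- [0, 9) -> shard 1, [9, 19) -> shard 2
+```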
+
+The sharding expression can be any expression from constants and table columns that returns an integer. For example, you can use the expression `rand()` for random distribution of data, or `UserID` for distribution by the remainder from dividing the user’s ID (then the data of a single user will reside on a single shard, which simplifies running IN and JOIN by users). If one of the columns is not distributed evenly enough, you can wrap it in a hash function: [intHash64](../../../sql-reference/functions/hash-functions.md#inthash64)(UserID).
+
+A simple remainder from the division is a limited solution for sharding and is not always appropriate. It works for medium and large volumes of data (dozens of servers), but not for very large volumes of data (hundreds of servers or more). In the latter case, use the sharding scheme required by the subject area, rather than using entries in Distributed tables.
+
+`SELECT` queries are sent to all the shards and work regardless of how data is distributed across the shards (they can be distributed completely randomly). When you add a new shard, you do not have to transfer the old data to it. You can write new data with a heavier weight – the data will be distributed slightly unevenly, but queries will work correctly and efficiently.
+
+You should be concerned about the sharding scheme in the following cases:
+
+- Queries are used that require joining data (`IN` or `JOIN`) by a specific key. If data is sharded by this key, you can use local `IN` or `JOIN` instead of `GLOBAL IN` or `GLOBAL JOIN`, which is much more efficient.
+- A large number of servers is used (hundreds or more) with a large number of small queries (queries of individual clients - websites, advertisers, or partners). In order for the small queries to not affect the entire cluster, it makes sense to locate data for a single client on a single shard. Alternatively, as we have done in Yandex.Metrica, you can set up bi-level sharding: divide the entire cluster into "layers", where a layer may consist of multiple shards. Data for a single client is located on a single layer, but shards can be added to a layer as necessary, and data is randomly distributed within them. Distributed tables are created for each layer, and a single shared distributed table is created for global queries.
+
+**See Also**
+
+- [MySQL table engine](../../../engines/table-engines/integrations/mysql.md)
+- [PostgreSQL table engine](../../../engines/table-engines/integrations/postgresql.md)
+- [URL Table Engine](../../../engines/table-engines/integrations/url.md)
+- [Distributed Table Engine](../../../engines/table-engines/special/distributed.md)
diff --git a/docs/en/engines/table-engines/integrations/mysql.md b/docs/en/engines/table-engines/integrations/mysql.md
index 013add6c249..42b5c2a9918 100644
--- a/docs/en/engines/table-engines/integrations/mysql.md
+++ b/docs/en/engines/table-engines/integrations/mysql.md
@@ -55,6 +55,12 @@ Simple `WHERE` clauses such as `=, !=, >, >=, <, <=` are executed on the MySQL s
 The rest of the conditions and the `LIMIT` sampling constraint are executed in ClickHouse only after the query to MySQL finishes.
 
+Supports multiple replicas that must be separated by the `|` character.
For example:
+
+```sql
+CREATE TABLE test_replicas (id UInt32, name String, age UInt32, money UInt32) ENGINE = MySQL(`mysql{2|3|4}:3306`, 'clickhouse', 'test_replicas', 'root', 'clickhouse');
+```
+
 ## Usage Example {#usage-example}
 
 Table in MySQL:
diff --git a/docs/en/engines/table-engines/integrations/postgresql.md b/docs/en/engines/table-engines/integrations/postgresql.md
index 4474b764d2e..d0465ccbbea 100644
--- a/docs/en/engines/table-engines/integrations/postgresql.md
+++ b/docs/en/engines/table-engines/integrations/postgresql.md
@@ -49,6 +49,12 @@ PostgreSQL `Array` types are converted into ClickHouse arrays.
 !!! info "Note"
     Be careful - in PostgreSQL an array data, created like a `type_name[]`, may contain multi-dimensional arrays of different dimensions in different table rows in same column. But in ClickHouse it is only allowed to have multidimensional arrays of the same count of dimensions in all table rows in same column.
 
+Supports multiple replicas that must be separated by the `|` character. For example:
+
+```sql
+CREATE TABLE test_replicas (id UInt32, name String) ENGINE = PostgreSQL(`postgres{2|3|4}:5432`, 'clickhouse', 'test_replicas', 'postgres', 'mysecretpassword');
+```
 
 Replicas priority for PostgreSQL dictionary source is supported. The bigger the number in map, the less the priority. The highest priority is `0`.
 
diff --git a/docs/en/sql-reference/table-functions/mysql.md b/docs/en/sql-reference/table-functions/mysql.md
index 7b4e2a301b3..016e78de68b 100644
--- a/docs/en/sql-reference/table-functions/mysql.md
+++ b/docs/en/sql-reference/table-functions/mysql.md
@@ -39,6 +39,18 @@ Simple `WHERE` clauses such as `=, !=, >, >=, <, <=` are currently executed on t
 The rest of the conditions and the `LIMIT` sampling constraint are executed in ClickHouse only after the query to MySQL finishes.
 
+Supports multiple replicas that must be separated by the `|` character. For example:
+
+```sql
+SELECT DISTINCT(name) FROM mysql(`mysql{1|2|3}:3306`, 'clickhouse', 'test_replicas', 'root', 'clickhouse');
+```
+
+or
+
+```sql
+SELECT DISTINCT(name) FROM mysql(`mysql1:3306|mysql2:3306|mysql3:3306`, 'clickhouse', 'test_replicas', 'root', 'clickhouse');
+```
+
 **Returned Value**
 
 A table object with the same columns as the original MySQL table.
diff --git a/docs/en/sql-reference/table-functions/postgresql.md b/docs/en/sql-reference/table-functions/postgresql.md
index 3eab572ac12..313093dccf5 100644
--- a/docs/en/sql-reference/table-functions/postgresql.md
+++ b/docs/en/sql-reference/table-functions/postgresql.md
@@ -43,8 +43,20 @@ PostgreSQL Array types converts into ClickHouse arrays.
 !!! info "Note"
     Be careful, in PostgreSQL an array data type column like Integer[] may contain arrays of different dimensions in different rows, but in ClickHouse it is only allowed to have multidimensional arrays of the same dimension in all rows.
 
+Supports multiple replicas that must be separated by the `|` character. For example:
 
-Supports replicas priority for PostgreSQL dictionary source. The bigger the number in map, the less the priority. The highest priority is `0`.
+```sql
+SELECT DISTINCT(name) FROM postgresql(`postgres{1|2|3}:5432`, 'clickhouse', 'test_replicas', 'postgres', 'mysecretpassword');
+```
+
+or
+
+```sql
+SELECT DISTINCT(name) FROM postgresql(`postgres2:5431|postgres3:5432`, 'clickhouse', 'test_replicas', 'postgres', 'mysecretpassword');
+```
+
+Supports replicas priority for PostgreSQL dictionary source. The bigger the number in the map, the lower the priority. The highest priority is `0`.
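+
+As a side note, the `external_table_functions_use_nulls` setting described on the `ExternalDistributed` engine page above also governs how remote NULLs are mapped here; an illustrative snippet:
+
+```sql
+SET external_table_functions_use_nulls = 0; -- remote NULLs become default values instead of Nullable columns
+```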
**Examples**

From 5154f7824906e153caeb39053f7971f94ebd6541 Mon Sep 17 00:00:00 2001
From: Dmitriy
Date: Thu, 24 Jun 2021 22:49:04 +0300
Subject: [PATCH 375/931] Update ExternalDistributed.md
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Made some minor corrections.

---
 .../engines/table-engines/integrations/ExternalDistributed.md | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/docs/en/engines/table-engines/integrations/ExternalDistributed.md b/docs/en/engines/table-engines/integrations/ExternalDistributed.md
index 550d53460d4..fcc7d2b761d 100644
--- a/docs/en/engines/table-engines/integrations/ExternalDistributed.md
+++ b/docs/en/engines/table-engines/integrations/ExternalDistributed.md
@@ -24,7 +24,7 @@ The table structure can differ from the original table structure:
 
 - Column names should be the same as in the original table, but you can use just some of these columns and in any order.
 - Column types may differ from those in the original table. ClickHouse tries to [cast](../../../sql-reference/functions/type-conversion-functions.md#type_conversion_function-cast) values to the ClickHouse data types.
-- Setting `external_table_functions_use_nulls` defines how to handle Nullable columns. Default is 1, if 0 - table function will not make nullable columns and will insert default values instead of nulls. This is also applicable for null values inside array data types.
+- The `external_table_functions_use_nulls` setting defines how to handle Nullable columns. The default is 1; if set to 0, the table function does not make Nullable columns and inserts default values instead of nulls. This is also applicable for null values inside array data types.
 
 **Engine Parameters**
 
@@ -52,7 +52,7 @@ Each shard can have a weight defined in the config file. By default, the weight
 
 To select the shard that a row of data is sent to, the sharding expression is analyzed, and its remainder is taken from dividing it by the total weight of the shards. The row is sent to the shard that corresponds to the half-interval of the remainders from `prev_weight` to `prev_weights + weight`, where `prev_weights` is the total weight of the shards with the smallest number, and `weight` is the weight of this shard. For example, if there are two shards, and the first has a weight of 9 while the second has a weight of 10, the row will be sent to the first shard for the remainders from the range \[0, 9), and to the second for the remainders from the range \[9, 19).
 
-The sharding expression can be any expression from constants and table columns that returns an integer. For example, you can use the expression `rand()` for random distribution of data, or `UserID` for distribution by the remainder from dividing the user’s ID (then the data of a single user will reside on a single shard, which simplifies running IN and JOIN by users). If one of the columns is not distributed evenly enough, you can wrap it in a hash function: [intHash64](../../../sql-reference/functions/hash-functions.md#inthash64)(UserID).
+The sharding expression can be any expression from constants and table columns that returns an integer. For example, you can use the expression `rand()` for random distribution of data, or `UserID` for distribution by the remainder from dividing the user’s ID (then the data of a single user will reside on a single shard, which simplifies running `IN` and `JOIN` by users).
If one of the columns is not distributed evenly enough, you can wrap it in a hash function [intHash64](../../../sql-reference/functions/hash-functions.md#inthash64)(UserID).
 
 A simple remainder from the division is a limited solution for sharding and is not always appropriate. It works for medium and large volumes of data (dozens of servers), but not for very large volumes of data (hundreds of servers or more). In the latter case, use the sharding scheme required by the subject area, rather than using entries in Distributed tables.

From b9b45d61e9d41e894bef2b25eda18c1b0529f278 Mon Sep 17 00:00:00 2001
From: Dmitriy
Date: Thu, 24 Jun 2021 23:40:43 +0300
Subject: [PATCH 376/931] Fix See Also
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Fixed the links.

---
 .../engines/table-engines/integrations/ExternalDistributed.md | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/docs/en/engines/table-engines/integrations/ExternalDistributed.md b/docs/en/engines/table-engines/integrations/ExternalDistributed.md
index fcc7d2b761d..12f12c2a7b0 100644
--- a/docs/en/engines/table-engines/integrations/ExternalDistributed.md
+++ b/docs/en/engines/table-engines/integrations/ExternalDistributed.md
@@ -67,5 +67,4 @@
 
 - [MySQL table engine](../../../engines/table-engines/integrations/mysql.md)
 - [PostgreSQL table engine](../../../engines/table-engines/integrations/postgresql.md)
-- [URL Table Engine](../../../engines/table-engines/integrations/url.md)
-- [Distributed Table Engine](../../../engines/table-engines/special/distributed.md)
+- [Distributed table engine](../../../engines/table-engines/special/distributed.md)

From b9437fa6f6ed9960e676b80fce1e40f71144f1dd Mon Sep 17 00:00:00 2001
From: tavplubix
Date: Fri, 25 Jun 2021 00:04:31 +0300
Subject: [PATCH 377/931] Update arcadia_skip_list.txt

---
 tests/queries/0_stateless/arcadia_skip_list.txt | 1 +
 1 file changed, 1 insertion(+)

diff --git a/tests/queries/0_stateless/arcadia_skip_list.txt b/tests/queries/0_stateless/arcadia_skip_list.txt
index 0f3861c0bbe..82d054a223b 100644
--- a/tests/queries/0_stateless/arcadia_skip_list.txt
+++ b/tests/queries/0_stateless/arcadia_skip_list.txt
@@ -92,6 +92,7 @@
 01129_dict_get_join_lose_constness
 01138_join_on_distributed_and_tmp
 01153_attach_mv_uuid
+01155_rename_move_materialized_view
 01191_rename_dictionary
 01200_mutations_memory_consumption
 01211_optimize_skip_unused_shards_type_mismatch

From 960d0de73d7e987b9f21e08c1d2b1b0f319fd750 Mon Sep 17 00:00:00 2001
From: Dmitry Novik
Date: Fri, 25 Jun 2021 02:05:45 +0300
Subject: [PATCH 378/931] Introduce system.data_skipping_indices table

---
 .../StorageSystemDataSkippingIndices.cpp      | 190 ++++++++++++++++++
 .../System/StorageSystemDataSkippingIndices.h |  26 +++
 src/Storages/System/attachSystemTables.cpp    |   2 +
 ...917_system_data_skipping_indices.reference |  10 +
 .../01917_system_data_skipping_indices.sql    |  35 ++++
 5 files changed, 263 insertions(+)
 create mode 100644 src/Storages/System/StorageSystemDataSkippingIndices.cpp
 create mode 100644 src/Storages/System/StorageSystemDataSkippingIndices.h
 create mode 100644 tests/queries/0_stateless/01917_system_data_skipping_indices.reference
 create mode 100644 tests/queries/0_stateless/01917_system_data_skipping_indices.sql

diff --git a/src/Storages/System/StorageSystemDataSkippingIndices.cpp b/src/Storages/System/StorageSystemDataSkippingIndices.cpp
new file mode 100644
index 00000000000..4629fb36b27
---
/dev/null +++ b/src/Storages/System/StorageSystemDataSkippingIndices.cpp @@ -0,0 +1,190 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace DB +{ +StorageSystemDataSkippingIndices::StorageSystemDataSkippingIndices(const StorageID & table_id_) + : IStorage(table_id_) +{ + StorageInMemoryMetadata storage_metadata; + storage_metadata.setColumns(ColumnsDescription( + { + { "database", std::make_shared() }, + { "table", std::make_shared() }, + { "name", std::make_shared() }, + { "type", std::make_shared() }, + { "expr", std::make_shared() }, + { "granularity", std::make_shared() }, + })); + setInMemoryMetadata(storage_metadata); +} + +class DataSkippingIndicesSource : public SourceWithProgress +{ +public: + DataSkippingIndicesSource( + std::vector columns_mask_, + Block header, + UInt64 max_block_size_, + ColumnPtr databases_, + ContextPtr context_) + : SourceWithProgress(header) + , column_mask(std::move(columns_mask_)) + , max_block_size(max_block_size_) + , databases(std::move(databases_)) + , context(Context::createCopy(context_)) + , database_idx(0) + {} + + String getName() const override { return "DataSkippingIndices"; } + +protected: + Chunk generate() override + { + if (database_idx >= databases->size()) + return {}; + + MutableColumns res_columns = getPort().getHeader().cloneEmptyColumns(); + + const auto access = context->getAccess(); + const bool check_access_for_databases = !access->isGranted(AccessType::SHOW_TABLES); + + size_t rows_count = 0; + while (rows_count < max_block_size) + { + if (tables_it && !tables_it->isValid()) + ++database_idx; + + while (database_idx < databases->size() && (!tables_it || !tables_it->isValid())) + { + database_name = databases->getDataAt(database_idx).toString(); + database = DatabaseCatalog::instance().tryGetDatabase(database_name); + + if (database) + break; + ++database_idx; + } + + if (database_idx >= databases->size()) + break; + + if (!tables_it || !tables_it->isValid()) + tables_it = database->getTablesIterator(context); + + const bool check_access_for_tables = check_access_for_databases && !access->isGranted(AccessType::SHOW_TABLES, database_name); + + for (; rows_count < max_block_size && tables_it->isValid(); tables_it->next()) + { + auto table_name = tables_it->name(); + if (check_access_for_tables && !access->isGranted(AccessType::SHOW_TABLES, database_name, table_name)) + continue; + + auto const table = tables_it->table(); + if (!table) + continue; + StorageMetadataPtr metadata_snapshot = table->getInMemoryMetadataPtr(); + if (!metadata_snapshot) + continue; + auto const indices = metadata_snapshot->getSecondaryIndices(); + + for (auto const& index : indices) + { + ++rows_count; + + size_t src_index = 0; + size_t res_index = 0; + + // 'database' column + if (column_mask[src_index++]) + res_columns[res_index++]->insert(database_name); + // 'table' column + if (column_mask[src_index++]) + res_columns[res_index++]->insert(table_name); + // 'name' column + if (column_mask[src_index++]) + res_columns[res_index++]->insert(index.name); + // 'type' column + if (column_mask[src_index++]) + res_columns[res_index++]->insert(index.type); + // 'expr' column + if (column_mask[src_index++]) + { + if (auto expression = index.expression_list_ast) + res_columns[res_index++]->insert(queryToString(expression)); + else + res_columns[res_index++]->insertDefault(); + } + // 'granularity' column + if (column_mask[src_index++]) + res_columns[res_index++]->insert(index.granularity); + } + } + } + 
return Chunk(std::move(res_columns), rows_count); + } + +private: + std::vector column_mask; + UInt64 max_block_size; + ColumnPtr databases; + ContextPtr context; + size_t database_idx; + DatabasePtr database; + std::string database_name; + DatabaseTablesIteratorPtr tables_it; +}; + +Pipe StorageSystemDataSkippingIndices::read( + const Names & column_names, + const StorageMetadataPtr & metadata_snapshot, + SelectQueryInfo & query_info, + ContextPtr context, + QueryProcessingStage::Enum /* processed_stage */, + size_t max_block_size, + unsigned int /* num_streams */) +{ + metadata_snapshot->check(column_names, getVirtuals(), getStorageID()); + + NameSet names_set(column_names.begin(), column_names.end()); + + Block sample_block = metadata_snapshot->getSampleBlock(); + Block header; + + std::vector columns_mask(sample_block.columns()); + for (size_t i = 0, size = columns_mask.size(); i < size; ++i) + { + if (names_set.count(sample_block.getByPosition(i).name)) + { + columns_mask[i] = 1; + header.insert(sample_block.getByPosition(i)); + } + } + + MutableColumnPtr column = ColumnString::create(); + + const auto databases = DatabaseCatalog::instance().getDatabases(); + for (const auto & [database_name, database] : databases) + { + if (database_name == DatabaseCatalog::TEMPORARY_DATABASE) + continue; + if (database->getEngineName() != "Lazy") + column->insert(database_name); + } + + Block block { ColumnWithTypeAndName(std::move(column), std::make_shared(), "database") }; + VirtualColumnUtils::filterBlockWithQuery(query_info.query, block, context); + + ColumnPtr& filtered_databases = block.getByPosition(0).column; + return Pipe(std::make_shared( + std::move(columns_mask), std::move(header), max_block_size, std::move(filtered_databases), context)); +} + +} diff --git a/src/Storages/System/StorageSystemDataSkippingIndices.h b/src/Storages/System/StorageSystemDataSkippingIndices.h new file mode 100644 index 00000000000..de8d7de706f --- /dev/null +++ b/src/Storages/System/StorageSystemDataSkippingIndices.h @@ -0,0 +1,26 @@ +#pragma once + +#include +#include + +namespace DB +{ +class StorageSystemDataSkippingIndices : public shared_ptr_helper, public IStorage +{ + friend struct shared_ptr_helper; +public: + std::string getName() const override { return "SystemDataSkippingIndices"; } + + Pipe read( + const Names & column_names, + const StorageMetadataPtr & /*metadata_snapshot*/, + SelectQueryInfo & query_info, + ContextPtr context, + QueryProcessingStage::Enum processed_stage, + size_t max_block_size, + unsigned num_streams) override; + +protected: + StorageSystemDataSkippingIndices(const StorageID& table_id_); +}; +} diff --git a/src/Storages/System/attachSystemTables.cpp b/src/Storages/System/attachSystemTables.cpp index 830a6c4fcd3..7da65b09d6d 100644 --- a/src/Storages/System/attachSystemTables.cpp +++ b/src/Storages/System/attachSystemTables.cpp @@ -9,6 +9,7 @@ #include #include #include +#include #include #include #include @@ -115,6 +116,7 @@ void attachSystemTablesLocal(IDatabase & system_database) attach(system_database, "user_directories"); attach(system_database, "privileges"); attach(system_database, "errors"); + attach(system_database, "data_skipping_indices"); #if !defined(ARCADIA_BUILD) attach(system_database, "licenses"); attach(system_database, "time_zones"); diff --git a/tests/queries/0_stateless/01917_system_data_skipping_indices.reference b/tests/queries/0_stateless/01917_system_data_skipping_indices.reference new file mode 100644 index 00000000000..b5a4b596a97 --- /dev/null +++ 
b/tests/queries/0_stateless/01917_system_data_skipping_indices.reference @@ -0,0 +1,10 @@ +default data_01917 d1_idx minmax d1 1 +default data_01917 d1_null_idx minmax assumeNotNull(d1_null) 1 +default data_01917_2 memory set frequency * length(name) 5 +default data_01917_2 sample_index1 minmax length(name), name 4 +default data_01917_2 sample_index2 ngrambf_v1 lower(name), name 4 +2 +3 +d1_idx +d1_null_idx +sample_index1 diff --git a/tests/queries/0_stateless/01917_system_data_skipping_indices.sql b/tests/queries/0_stateless/01917_system_data_skipping_indices.sql new file mode 100644 index 00000000000..768863a630f --- /dev/null +++ b/tests/queries/0_stateless/01917_system_data_skipping_indices.sql @@ -0,0 +1,35 @@ +DROP TABLE IF EXISTS data_01917; +DROP TABLE IF EXISTS data_01917_2; + +CREATE TABLE data_01917 +( + key Int, + d1 Int, + d1_null Nullable(Int), + INDEX d1_idx d1 TYPE minmax GRANULARITY 1, + INDEX d1_null_idx assumeNotNull(d1_null) TYPE minmax GRANULARITY 1 +) +Engine=MergeTree() +ORDER BY key; + +CREATE TABLE data_01917_2 +( + name String, + frequency UInt64, + INDEX memory (frequency * length(name)) TYPE set(1000) GRANULARITY 5, + INDEX sample_index1 (length(name), name) TYPE minmax GRANULARITY 4, + INDEX sample_index2 (lower(name), name) TYPE ngrambf_v1(3, 256, 2, 0) GRANULARITY 4 +) +Engine=MergeTree() +ORDER BY name; + +SELECT * FROM system.data_skipping_indices; + +SELECT count(*) FROM system.data_skipping_indices WHERE table = 'data_01917'; +SELECT count(*) FROM system.data_skipping_indices WHERE table = 'data_01917_2'; + +SELECT name FROM system.data_skipping_indices WHERE type = 'minmax'; + +DROP TABLE data_01917; +DROP TABLE data_01917_2; + From c2acf122457c3a73a618950063204dccce3980a9 Mon Sep 17 00:00:00 2001 From: Dmitry Novik Date: Fri, 25 Jun 2021 02:49:47 +0300 Subject: [PATCH 379/931] Add english documentation for system.data_skipping_indices --- .../system-tables/data_skipping_indices.md | 39 +++++++++++++++++++ 1 file changed, 39 insertions(+) create mode 100644 docs/en/operations/system-tables/data_skipping_indices.md diff --git a/docs/en/operations/system-tables/data_skipping_indices.md b/docs/en/operations/system-tables/data_skipping_indices.md new file mode 100644 index 00000000000..515f704797a --- /dev/null +++ b/docs/en/operations/system-tables/data_skipping_indices.md @@ -0,0 +1,39 @@ +# system.data_skipping_indices {#system-data-skipping-indices} + +Contains information about existing data skipping indices in all the tables. + +Columns: + +- `database` ([String](../../sql-reference/data-types/string.md)) — Database name. +- `table` ([String](../../sql-reference/data-types/string.md)) — Table name. +- `name` ([String](../../sql-reference/data-types/string.md)) — Index name. +- `type` ([String](../../sql-reference/data-types/string.md)) — Index type. +- `expr` ([String](../../sql-reference/data-types/string.md)) — Expression used to calculate the index. +- `granularity` ([UInt64](../../sql-reference/data-types/int-uint.md)) — Number of granules in the block. 
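+
+For instance, to list the `minmax` indices defined in the current database (an illustrative query along the lines of the regression test above):
+
+```sql
+SELECT table, name, expr
+FROM system.data_skipping_indices
+WHERE database = currentDatabase() AND type = 'minmax';
+```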
+ +**Example** + + +```sql +SELECT * FROM system.data_skipping_indices LIMIT 2 FORMAT Vertical; +``` + +```text +Row 1: +────── +database: default +table: user_actions +name: clicks_idx +type: minmax +expr: clicks +granularity: 1 + +Row 2: +────── +database: default +table: users +name: contacts_null_idx +type: minmax +expr: assumeNotNull(contacts_null) +granularity: 1 +``` From d423d07a87f98629b37b3f07b974cbc02ad70089 Mon Sep 17 00:00:00 2001 From: sundy-li <543950155@qq.com> Date: Fri, 25 Jun 2021 10:20:22 +0800 Subject: [PATCH 380/931] [mysql] fix mysql select user() return empty --- src/Server/MySQLHandler.cpp | 2 ++ tests/integration/test_mysql_protocol/test.py | 9 +++++++++ 2 files changed, 11 insertions(+) diff --git a/src/Server/MySQLHandler.cpp b/src/Server/MySQLHandler.cpp index 497c0574fae..f06ae2cb8f1 100644 --- a/src/Server/MySQLHandler.cpp +++ b/src/Server/MySQLHandler.cpp @@ -130,6 +130,8 @@ void MySQLHandler::run() authenticate(handshake_response.username, handshake_response.auth_plugin_name, handshake_response.auth_response); + connection_context->getClientInfo().initial_user = handshake_response.username; + try { if (!handshake_response.database.empty()) diff --git a/tests/integration/test_mysql_protocol/test.py b/tests/integration/test_mysql_protocol/test.py index f2d3f46b9bc..6b61a8b6fc8 100644 --- a/tests/integration/test_mysql_protocol/test.py +++ b/tests/integration/test_mysql_protocol/test.py @@ -200,6 +200,14 @@ def test_mysql_replacement_query(started_cluster): assert stdout.decode() == 'DATABASE()\ndefault\n' +def test_mysql_select_user(started_cluster): + code, (stdout, stderr) = started_cluster.mysql_client_container.exec_run(''' + mysql --protocol tcp -h {host} -P {port} default -u default --password=123 + -e "select user();" + '''.format(host=started_cluster.get_instance_ip('node'), port=server_port), demux=True) + assert code == 0 + assert stdout.decode() == 'currentUser()\ndefault\n' + def test_mysql_explain(started_cluster): # EXPLAIN SELECT 1 code, (stdout, stderr) = started_cluster.mysql_client_container.exec_run(''' @@ -312,6 +320,7 @@ def test_mysql_set_variables(started_cluster): assert code == 0 + def test_python_client(started_cluster): client = pymysql.connections.Connection(host=started_cluster.get_instance_ip('node'), user='user_with_double_sha1', password='abacaba', database='default', port=server_port) From ca1f336c4679fa2ac64ce6f025ad37f19c8b8a74 Mon Sep 17 00:00:00 2001 From: kssenii Date: Thu, 24 Jun 2021 23:25:06 +0000 Subject: [PATCH 381/931] Aliases for storageMerge fix --- src/Storages/StorageMerge.cpp | 101 +++++++++++++++--- src/Storages/StorageMerge.h | 12 ++- ...01925_test_storage_merge_aliases.reference | 10 ++ .../01925_test_storage_merge_aliases.sql | 57 ++++++++++ tests/queries/skip_list.json | 3 +- 5 files changed, 169 insertions(+), 14 deletions(-) create mode 100644 tests/queries/0_stateless/01925_test_storage_merge_aliases.reference create mode 100644 tests/queries/0_stateless/01925_test_storage_merge_aliases.sql diff --git a/src/Storages/StorageMerge.cpp b/src/Storages/StorageMerge.cpp index 172805c08ed..7960ce19262 100644 --- a/src/Storages/StorageMerge.cpp +++ b/src/Storages/StorageMerge.cpp @@ -11,6 +11,7 @@ #include #include #include +#include #include #include #include @@ -205,7 +206,7 @@ Pipe StorageMerge::read( if (selected_tables.empty()) /// FIXME: do we support sampling in this case? 
return createSources(
- {}, query_info, processed_stage, max_block_size, header, {}, real_column_names, modified_context, 0, has_table_virtual_column);
+ {}, query_info, processed_stage, max_block_size, header, {}, {}, real_column_names, modified_context, 0, has_table_virtual_column);

 size_t tables_count = selected_tables.size();
 Float64 num_streams_multiplier
@@ -233,6 +234,9 @@ Pipe StorageMerge::read(
 query_info.input_order_info = input_sorting_info;
 }

+ auto sample_block = getInMemoryMetadataPtr()->getSampleBlock();
+ Names required_columns;
+
 for (const auto & table : selected_tables)
 {
 size_t current_need_streams = tables_count >= num_streams ? 1 : (num_streams / tables_count);
@@ -246,12 +250,60 @@
 if (query_info.query->as<ASTSelectQuery>()->sampleSize() && !storage->supportsSampling())
 throw Exception("Illegal SAMPLE: table doesn't support sampling", ErrorCodes::SAMPLING_NOT_SUPPORTED);

+ Aliases aliases;
 auto storage_metadata_snapshot = storage->getInMemoryMetadataPtr();
+ auto storage_columns = storage_metadata_snapshot->getColumns();
+
+ if (processed_stage == QueryProcessingStage::FetchColumns && !storage_columns.getAliases().empty())
+ {
+ NameSet required_columns_set;
+ std::function<void(ASTPtr)> extract_columns_from_alias_expression = [&](ASTPtr expr)
+ {
+ if (!expr)
+ return;
+
+ if (typeid_cast<const ASTLiteral *>(expr.get()))
+ return;
+
+ if (const auto * ast_function = typeid_cast<const ASTFunction *>(expr.get()))
+ {
+ for (const auto & arg : ast_function->arguments->children)
+ extract_columns_from_alias_expression(arg);
+ }
+ else if (const auto * ast_identifier = typeid_cast<const ASTIdentifier *>(expr.get()))
+ {
+ auto column = ast_identifier->name();
+ const auto column_default = storage_columns.getDefault(column);
+ bool is_alias = column_default && column_default->kind == ColumnDefaultKind::Alias;
+
+ if (is_alias)
+ {
+ auto alias_expression = column_default->expression;
+ auto type = sample_block.getByName(column).type;
+ aliases.push_back({ .name = column, .type = type, .expression = alias_expression });
+ extract_columns_from_alias_expression(alias_expression);
+ }
+ else
+ {
+ required_columns_set.insert(column);
+ }
+ }
+ else
+ {
+ throw Exception(ErrorCodes::LOGICAL_ERROR, "Unexpected expression: {}", expr->getID());
+ }
+ };
+
+ for (const auto & column : real_column_names)
+ extract_columns_from_alias_expression(std::make_shared<ASTIdentifier>(column));
+
+ required_columns = std::vector<String>(required_columns_set.begin(), required_columns_set.end());
+ }

 auto source_pipe = createSources(
 storage_metadata_snapshot, query_info, processed_stage,
- max_block_size, header, table, real_column_names, modified_context,
- current_streams, has_table_virtual_column);
+ max_block_size, header, aliases, table, required_columns.empty() ? real_column_names : required_columns,
+ modified_context, current_streams, has_table_virtual_column);

 pipes.emplace_back(std::move(source_pipe));
 }
@@ -272,6 +324,7 @@ Pipe StorageMerge::createSources(
 const QueryProcessingStage::Enum & processed_stage,
 const UInt64 max_block_size,
 const Block & header,
+ const Aliases & aliases,
 const StorageWithLockAndName & storage_with_lock,
 Names & real_column_names,
 ContextMutablePtr modified_context,
@@ -369,7 +422,7 @@ Pipe StorageMerge::createSources(
 /// Subordinary tables could have different but convertible types, like numeric types of different width.
 /// We must return streams with structure equals to structure of Merge table.
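For orientation, a minimal SQL sketch of the behavior this alias handling enables; the schema is hypothetical and merely mirrors the 01925 test added later in this patch:

```sql
-- An ALIAS column exists only on the source table; the Merge table declares it
-- as a plain column and relies on the rewrite above to compute it per source.
CREATE TABLE src_1 (d Date, v Int32, v_plus UInt32 ALIAS v + 1)
ENGINE = MergeTree ORDER BY d;

CREATE TABLE all_src (d Date, v Int32, v_plus UInt32)
ENGINE = Merge(currentDatabase(), '^src_');

INSERT INTO src_1 (d, v) VALUES ('2021-01-01', 1);

SELECT v_plus FROM all_src; -- expected: 2, resolved through the source table
```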
- convertingSourceStream(header, metadata_snapshot, modified_context, modified_query_info.query, pipe, processed_stage);
+ convertingSourceStream(header, metadata_snapshot, aliases, modified_context, modified_query_info.query, pipe, processed_stage);

 pipe.addTableLock(struct_lock);
 pipe.addStorageHolder(storage);
@@ -492,6 +545,7 @@ void StorageMerge::alter(
 void StorageMerge::convertingSourceStream(
 const Block & header,
 const StorageMetadataPtr & metadata_snapshot,
+ const Aliases & aliases,
 ContextPtr local_context,
 ASTPtr & query,
 Pipe & pipe,
@@ -499,16 +553,39 @@
 {
 Block before_block_header = pipe.getHeader();

- auto convert_actions_dag = ActionsDAG::makeConvertingActions(
- pipe.getHeader().getColumnsWithTypeAndName(),
- header.getColumnsWithTypeAndName(),
- ActionsDAG::MatchColumnsMode::Name);
- auto convert_actions = std::make_shared<ExpressionActions>(convert_actions_dag, ExpressionActionsSettings::fromContext(local_context, CompileExpressions::yes));
+ auto storage_sample_block = metadata_snapshot->getSampleBlock();
+ auto pipe_columns = pipe.getHeader().getNamesAndTypesList();

- pipe.addSimpleTransform([&](const Block & stream_header)
+ for (const auto & alias : aliases)
 {
- return std::make_shared<ExpressionTransform>(stream_header, convert_actions);
- });
+ pipe_columns.emplace_back(NameAndTypePair(alias.name, alias.type));
+ ASTPtr expr = std::move(alias.expression);
+ expr->setAlias(alias.name);
+
+ auto syntax_result = TreeRewriter(local_context).analyze(expr, pipe_columns);
+ auto expression_analyzer = ExpressionAnalyzer{alias.expression, syntax_result, local_context};
+
+ auto dag = std::make_shared<ActionsDAG>(pipe_columns);
+ auto actions_dag = expression_analyzer.getActionsDAG(true, false);
+ auto actions = std::make_shared<ExpressionActions>(actions_dag, ExpressionActionsSettings::fromContext(local_context, CompileExpressions::yes));
+
+ pipe.addSimpleTransform([&](const Block & stream_header)
+ {
+ return std::make_shared<ExpressionTransform>(stream_header, actions);
+ });
+ }
+
+ {
+ auto convert_actions_dag = ActionsDAG::makeConvertingActions(pipe.getHeader().getColumnsWithTypeAndName(),
+ header.getColumnsWithTypeAndName(),
+ ActionsDAG::MatchColumnsMode::Name);
+ auto actions = std::make_shared<ExpressionActions>(convert_actions_dag, ExpressionActionsSettings::fromContext(local_context, CompileExpressions::yes));
+ pipe.addSimpleTransform([&](const Block & stream_header)
+ {
+ return std::make_shared<ExpressionTransform>(stream_header, actions);
+ });
+ }
+
 auto where_expression = query->as<ASTSelectQuery>()->where();

diff --git a/src/Storages/StorageMerge.h b/src/Storages/StorageMerge.h
index 2339716519c..a9f5b4b8a86 100644
--- a/src/Storages/StorageMerge.h
+++ b/src/Storages/StorageMerge.h
@@ -84,12 +84,22 @@ protected:
 const String & source_table_regexp_,
 ContextPtr context_);

+ struct AliasData
+ {
+ String name;
+ DataTypePtr type;
+ ASTPtr expression;
+ };
+
+ using Aliases = std::vector<AliasData>;
+
 Pipe createSources(
 const StorageMetadataPtr & metadata_snapshot,
 SelectQueryInfo & query_info,
 const QueryProcessingStage::Enum & processed_stage,
 UInt64 max_block_size,
 const Block & header,
+ const Aliases & aliases,
 const StorageWithLockAndName & storage_with_lock,
 Names & real_column_names,
 ContextMutablePtr modified_context,
@@ -98,7 +108,7 @@ protected:
 bool concat_streams = false);

 void convertingSourceStream(
- const Block & header, const StorageMetadataPtr & metadata_snapshot,
+ const Block & header, const StorageMetadataPtr & metadata_snapshot, const Aliases & aliases,
 ContextPtr context, ASTPtr & query, Pipe & pipe,
 QueryProcessingStage::Enum processed_stage);
};

diff --git a/tests/queries/0_stateless/01925_test_storage_merge_aliases.reference b/tests/queries/0_stateless/01925_test_storage_merge_aliases.reference
new file mode 100644
index 00000000000..b0fea25ed4b
--- /dev/null
+++ b/tests/queries/0_stateless/01925_test_storage_merge_aliases.reference
@@ -0,0 +1,10 @@
+alias1
+1 4 16 23
+23 16 4 1
+2020-02-02 1 4 2 16 3 23
+alias2
+1 3 4 4
+4 4 3 1
+23 16 4 1
+2020-02-01 1 3 2 4 3 4
+2020-02-02 1 4 2 16 3 23
diff --git a/tests/queries/0_stateless/01925_test_storage_merge_aliases.sql b/tests/queries/0_stateless/01925_test_storage_merge_aliases.sql
new file mode 100644
index 00000000000..b441358fd40
--- /dev/null
+++ b/tests/queries/0_stateless/01925_test_storage_merge_aliases.sql
@@ -0,0 +1,57 @@
+drop table if exists merge;
+create table merge
+(
+    dt Date,
+    colAlias0 Int32,
+    colAlias1 Int32,
+    col2 Int32,
+    colAlias2 UInt32,
+    col3 Int32,
+    colAlias3 UInt32
+)
+engine = Merge(currentDatabase(), '^alias_');
+
+drop table if exists alias_1;
+drop table if exists alias_2;
+
+create table alias_1
+(
+    dt Date,
+    col Int32,
+    colAlias0 UInt32 alias col,
+    colAlias1 UInt32 alias col3 + colAlias0,
+    col2 Int32,
+    colAlias2 Int32 alias colAlias1 + col2 + 10,
+    col3 Int32,
+    colAlias3 Int32 alias colAlias2 + colAlias1 + col3
+)
+engine = MergeTree()
+order by (dt);
+
+insert into alias_1 (dt, col, col2, col3) values ('2020-02-02', 1, 2, 3);
+
+select 'alias1';
+select colAlias0, colAlias1, colAlias2, colAlias3 from alias_1;
+select colAlias3, colAlias2, colAlias1, colAlias0 from merge;
+select * from merge;
+
+create table alias_2
+(
+    dt Date,
+    col Int32,
+    col2 Int32,
+    colAlias0 UInt32 alias col,
+    colAlias3 Int32 alias col3 + colAlias0,
+    colAlias1 UInt32 alias colAlias0 + col2,
+    colAlias2 Int32 alias colAlias0 + colAlias1,
+    col3 Int32
+)
+engine = MergeTree()
+order by (dt);
+
+insert into alias_2 (dt, col, col2, col3) values ('2020-02-01', 1, 2, 3);
+
+select 'alias2';
+select colAlias0, colAlias1, colAlias2, colAlias3 from alias_2;
+select colAlias3, colAlias2, colAlias1, colAlias0 from merge;
+select * from merge;
diff --git a/tests/queries/skip_list.json b/tests/queries/skip_list.json
index 78b8e3065ff..6b1b566aab7 100644
--- a/tests/queries/skip_list.json
+++ b/tests/queries/skip_list.json
@@ -842,6 +842,7 @@
 "01870_modulo_partition_key",
 "01870_buffer_flush", // creates database
 "01889_postgresql_protocol_null_fields",
- "01889_check_row_policy_defined_using_user_function"
+ "01889_check_row_policy_defined_using_user_function",
+ "01925_test_storage_merge_aliases"
 ]
}

From ded9007ca0b402481cf5ccc3f00f0aea078685a0 Mon Sep 17 00:00:00 2001
From: kssenii
Date: Fri, 25 Jun 2021 07:00:15 +0000
Subject: [PATCH 382/931] Fix

---
 src/Storages/StorageMerge.cpp | 4 +++-
 .../queries/0_stateless/01925_test_storage_merge_aliases.sql | 4 ++--
 2 files changed, 5 insertions(+), 3 deletions(-)

diff --git a/src/Storages/StorageMerge.cpp b/src/Storages/StorageMerge.cpp
index 7960ce19262..e6c310544f1 100644
--- a/src/Storages/StorageMerge.cpp
+++ b/src/Storages/StorageMerge.cpp
@@ -11,7 +11,6 @@
 #include
 #include
 #include
-#include
 #include
 #include
 #include
@@ -267,6 +266,9 @@ Pipe StorageMerge::read(
 if (const auto * ast_function = typeid_cast<const ASTFunction *>(expr.get()))
 {
+ if (!ast_function->arguments)
+ return;
+
 for (const auto & arg : ast_function->arguments->children)
 extract_columns_from_alias_expression(arg);
 }
diff --git a/tests/queries/0_stateless/01925_test_storage_merge_aliases.sql b/tests/queries/0_stateless/01925_test_storage_merge_aliases.sql
index
b441358fd40..f3a5b2db62e 100644 --- a/tests/queries/0_stateless/01925_test_storage_merge_aliases.sql +++ b/tests/queries/0_stateless/01925_test_storage_merge_aliases.sql @@ -53,5 +53,5 @@ insert into alias_2 (dt, col, col2, col3) values ('2020-02-01', 1, 2, 3); select 'alias2'; select colAlias0, colAlias1, colAlias2, colAlias3 from alias_2; -select colAlias3, colAlias2, colAlias1, colAlias0 from merge; -select * from merge; +select colAlias3, colAlias2, colAlias1, colAlias0 from merge order by dt; +select * from merge order by dt; From b54287c8da2db0ecbe62f2dd4853cdfb2b166c92 Mon Sep 17 00:00:00 2001 From: feng lv Date: Fri, 25 Jun 2021 08:00:30 +0000 Subject: [PATCH 383/931] fix --- src/Storages/StorageMerge.cpp | 44 ++++++++++++++----- src/Storages/StorageMerge.h | 10 +++-- ...902_table_function_merge_db_repr.reference | 22 ++++++++++ .../01902_table_function_merge_db_repr.sql | 6 +++ 4 files changed, 69 insertions(+), 13 deletions(-) diff --git a/src/Storages/StorageMerge.cpp b/src/Storages/StorageMerge.cpp index 6c439ba0dc6..da8be2b827b 100644 --- a/src/Storages/StorageMerge.cpp +++ b/src/Storages/StorageMerge.cpp @@ -104,7 +104,7 @@ StorageMerge::StorageMerge( const ColumnsDescription & columns_, const String & comment, const String & source_database_regexp_, - const std::unordered_map> & source_databases_and_tables_, + const DbToTableSetMap & source_databases_and_tables_, ContextPtr context_) : IStorage(table_id_) , WithContext(context_->getGlobalContext()) @@ -146,7 +146,7 @@ StoragePtr StorageMerge::getFirstTable(F && predicate) const { const auto & table = iterator->table(); if (table.get() != this && predicate(table)) - return table; + return table; iterator->next(); } @@ -267,7 +267,8 @@ Pipe StorageMerge::read( /** First we make list of selected tables to find out its size. * This is necessary to correctly pass the recommended number of threads to each table. */ - StorageListWithLocks selected_tables = getSelectedTables(local_context, query_info.query, has_table_virtual_column); + StorageListWithLocks selected_tables + = getSelectedTables(local_context, query_info.query, has_database_virtual_column, has_table_virtual_column); if (selected_tables.empty()) /// FIXME: do we support sampling in this case? 
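For orientation, this is the kind of query the new `_database` handling serves; a minimal sketch with hypothetical names, modeled on the 01902 test changes further down in this patch:

```sql
-- The predicate on the _database virtual column prunes whole source databases
-- before any of their tables are read.
SELECT _database, _table, count() AS rows
FROM merge('^shard_db.*', '^events.*')
WHERE _database = 'shard_db1'
GROUP BY _database, _table;
```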
@@ -483,22 +484,33 @@ Pipe StorageMerge::createSources( } StorageMerge::StorageListWithLocks StorageMerge::getSelectedTables( - ContextPtr query_context, - const ASTPtr & query /* = nullptr */, - bool filter_by_virtual_column /* = false */) const + ContextPtr query_context, + const ASTPtr & query /* = nullptr */, + bool filter_by_database_virtual_column /* = false */, + bool filter_by_table_virtual_column /* = false */) const { - assert(!filter_by_virtual_column || query); + assert(!filter_by_database_virtual_column || !filter_by_table_virtual_column || query); const Settings & settings = query_context->getSettingsRef(); StorageListWithLocks selected_tables; DatabaseTablesIterators database_table_iterators = getDatabaseIterators(getContext()); + MutableColumnPtr database_name_virtual_column; MutableColumnPtr table_name_virtual_column; - if (filter_by_virtual_column) + if (filter_by_database_virtual_column) + { + database_name_virtual_column = ColumnString::create(); + } + + if (filter_by_table_virtual_column) + { table_name_virtual_column = ColumnString::create(); + } for (const auto & iterator : database_table_iterators) { + if (filter_by_database_virtual_column) + database_name_virtual_column->insert(iterator->databaseName()); while (iterator->isValid()) { StoragePtr storage = iterator->table(); @@ -512,7 +524,7 @@ StorageMerge::StorageListWithLocks StorageMerge::getSelectedTables( { auto table_lock = storage->lockForShare(query_context->getCurrentQueryId(), settings.lock_acquire_timeout); selected_tables.emplace_back(iterator->databaseName(), storage, std::move(table_lock), iterator->name()); - if (filter_by_virtual_column) + if (filter_by_table_virtual_column) table_name_virtual_column->insert(iterator->name()); } @@ -520,7 +532,19 @@ StorageMerge::StorageListWithLocks StorageMerge::getSelectedTables( } } - if (filter_by_virtual_column) + if (filter_by_database_virtual_column) + { + /// Filter names of selected tables if there is a condition on "_database" virtual column in WHERE clause + Block virtual_columns_block + = Block{ColumnWithTypeAndName(std::move(database_name_virtual_column), std::make_shared(), "_database")}; + VirtualColumnUtils::filterBlockWithQuery(query, virtual_columns_block, query_context); + auto values = VirtualColumnUtils::extractSingleValueFromBlock(virtual_columns_block, "_database"); + + /// Remove unused databases from the list + selected_tables.remove_if([&](const auto & elem) { return values.find(std::get<0>(elem)) == values.end(); }); + } + + if (filter_by_table_virtual_column) { /// Filter names of selected tables if there is a condition on "_table" virtual column in WHERE clause Block virtual_columns_block = Block{ColumnWithTypeAndName(std::move(table_name_virtual_column), std::make_shared(), "_table")}; diff --git a/src/Storages/StorageMerge.h b/src/Storages/StorageMerge.h index 74ccb7b397b..a8dedc17788 100644 --- a/src/Storages/StorageMerge.h +++ b/src/Storages/StorageMerge.h @@ -49,9 +49,10 @@ public: const ASTPtr & left_in_operand, ContextPtr query_context, const StorageMetadataPtr & metadata_snapshot) const override; private: + using DbToTableSetMap = std::unordered_map>; std::optional source_database_regexp; std::optional source_table_regexp; - std::optional>> source_databases_and_tables; + std::optional source_databases_and_tables; /// (Database, Table, Lock, TableName) using StorageWithLockAndName = std::tuple; @@ -59,7 +60,10 @@ private: using DatabaseTablesIterators = std::vector; StorageMerge::StorageListWithLocks getSelectedTables( - 
ContextPtr query_context, const ASTPtr & query = nullptr, bool filter_by_virtual_column = false) const; + ContextPtr query_context, + const ASTPtr & query = nullptr, + bool filter_by_database_virtual_column = false, + bool filter_by_table_virtual_column = false) const; template StoragePtr getFirstTable(F && predicate) const; @@ -75,7 +79,7 @@ protected: const ColumnsDescription & columns_, const String & comment, const String & source_database_regexp_, - const std::unordered_map> & source_databases_and_tables_, + const DbToTableSetMap & source_databases_and_tables_, ContextPtr context_); StorageMerge( diff --git a/tests/queries/0_stateless/01902_table_function_merge_db_repr.reference b/tests/queries/0_stateless/01902_table_function_merge_db_repr.reference index 20436d8a267..f87f140a985 100644 --- a/tests/queries/0_stateless/01902_table_function_merge_db_repr.reference +++ b/tests/queries/0_stateless/01902_table_function_merge_db_repr.reference @@ -121,6 +121,28 @@ SELECT _database, _table, n FROM merge(^db, ^t) ORDER BY _database, _table, n 01902_db3 t3 7 01902_db3 t3 8 01902_db3 t3 9 +SELECT _database, _table, n FROM 01902_db.t_merge WHERE _database = 01902_db1 ORDER BY _database, _table, n +01902_db1 t1 0 +01902_db1 t1 1 +01902_db1 t1 2 +01902_db1 t1 3 +01902_db1 t1 4 +01902_db1 t1 5 +01902_db1 t1 6 +01902_db1 t1 7 +01902_db1 t1 8 +01902_db1 t1 9 +SELECT _database, _table, n FROM 01902_db.t_merge WHERE _table = t1 ORDER BY _database, _table, n +01902_db1 t1 0 +01902_db1 t1 1 +01902_db1 t1 2 +01902_db1 t1 3 +01902_db1 t1 4 +01902_db1 t1 5 +01902_db1 t1 6 +01902_db1 t1 7 +01902_db1 t1 8 +01902_db1 t1 9 CREATE TABLE t_merge_1 as 01902_db.t ENGINE=Merge(currentDatabase(), ^t.*) SELECT _database, _table, n FROM 01902_db.t_merge_1 ORDER BY _database, _table, n 01902_db1 t1 0 diff --git a/tests/queries/0_stateless/01902_table_function_merge_db_repr.sql b/tests/queries/0_stateless/01902_table_function_merge_db_repr.sql index bb8e744246f..230ea2b8b1a 100644 --- a/tests/queries/0_stateless/01902_table_function_merge_db_repr.sql +++ b/tests/queries/0_stateless/01902_table_function_merge_db_repr.sql @@ -27,6 +27,12 @@ SELECT _database, _table, n FROM 01902_db.t_merge ORDER BY _database, _table, n; SELECT 'SELECT _database, _table, n FROM merge(^db, ^t) ORDER BY _database, _table, n'; SELECT _database, _table, n FROM merge('^01902_db.*', '^t.*') ORDER BY _database, _table, n; +SELECT 'SELECT _database, _table, n FROM 01902_db.t_merge WHERE _database = 01902_db1 ORDER BY _database, _table, n'; +SELECT _database, _table, n FROM 01902_db.t_merge WHERE _database = '01902_db1' ORDER BY _database, _table, n; + +SELECT 'SELECT _database, _table, n FROM 01902_db.t_merge WHERE _table = t1 ORDER BY _database, _table, n'; +SELECT _database, _table, n FROM 01902_db.t_merge WHERE _table = 't1' ORDER BY _database, _table, n; + USE 01902_db1; SELECT 'CREATE TABLE t_merge_1 as 01902_db.t ENGINE=Merge(currentDatabase(), ^t.*)'; From 4a3145f5860368055ff0f3904e20757eceb4bcac Mon Sep 17 00:00:00 2001 From: Storozhuk Kostiantyn <56565543+sand6255@users.noreply.github.com> Date: Thu, 24 Jun 2021 15:05:47 +0300 Subject: [PATCH 384/931] Materialize my sql support enum data type * Implemented Enum for MaterializeMySQL --- programs/odbc-bridge/ODBCBlockInputStream.cpp | 2 + src/Core/ExternalResultDescription.cpp | 4 +- src/Core/ExternalResultDescription.h | 2 + src/Core/MySQL/MySQLReplication.cpp | 17 +++- src/DataStreams/MongoDBBlockInputStream.cpp | 2 + .../PostgreSQLBlockInputStream.cpp | 2 + src/DataTypes/DataTypeEnum.cpp 
| 3 + .../CassandraBlockInputStream.cpp | 2 + src/Dictionaries/RedisBlockInputStream.cpp | 2 + src/Formats/MySQLBlockInputStream.cpp | 9 +++ .../MySQL/InterpretersMySQLDDLQuery.cpp | 32 ++++++-- .../MySQL/tests/gtest_create_rewritten.cpp | 22 +++++ tests/integration/parallel.json | 2 + tests/integration/parallel_skip.json | 2 + .../materialize_with_ddl.py | 80 +++++++++++++++++++ .../test_materialize_mysql_database/test.py | 9 +++ 16 files changed, 182 insertions(+), 10 deletions(-) diff --git a/programs/odbc-bridge/ODBCBlockInputStream.cpp b/programs/odbc-bridge/ODBCBlockInputStream.cpp index b23d09e0481..ba100cac3c9 100644 --- a/programs/odbc-bridge/ODBCBlockInputStream.cpp +++ b/programs/odbc-bridge/ODBCBlockInputStream.cpp @@ -115,6 +115,8 @@ void ODBCBlockInputStream::insertValue( assert_cast(column).insertValue(row.get(idx)); break; case ValueType::vtFixedString:[[fallthrough]]; + case ValueType::vtEnum8: + case ValueType::vtEnum16: case ValueType::vtString: assert_cast(column).insert(row.get(idx)); break; diff --git a/src/Core/ExternalResultDescription.cpp b/src/Core/ExternalResultDescription.cpp index 3f6191523a9..809d8c5900d 100644 --- a/src/Core/ExternalResultDescription.cpp +++ b/src/Core/ExternalResultDescription.cpp @@ -67,9 +67,9 @@ void ExternalResultDescription::init(const Block & sample_block_) else if (which.isUUID()) types.emplace_back(ValueType::vtUUID, is_nullable); else if (which.isEnum8()) - types.emplace_back(ValueType::vtString, is_nullable); + types.emplace_back(ValueType::vtEnum8, is_nullable); else if (which.isEnum16()) - types.emplace_back(ValueType::vtString, is_nullable); + types.emplace_back(ValueType::vtEnum16, is_nullable); else if (which.isDateTime64()) types.emplace_back(ValueType::vtDateTime64, is_nullable); else if (which.isDecimal32()) diff --git a/src/Core/ExternalResultDescription.h b/src/Core/ExternalResultDescription.h index 4cc0879c6f0..6311f816964 100644 --- a/src/Core/ExternalResultDescription.h +++ b/src/Core/ExternalResultDescription.h @@ -22,6 +22,8 @@ struct ExternalResultDescription vtInt64, vtFloat32, vtFloat64, + vtEnum8, + vtEnum16, vtString, vtDate, vtDateTime, diff --git a/src/Core/MySQL/MySQLReplication.cpp b/src/Core/MySQL/MySQLReplication.cpp index cb8cdf05c68..4eba236e7c9 100644 --- a/src/Core/MySQL/MySQLReplication.cpp +++ b/src/Core/MySQL/MySQLReplication.cpp @@ -298,7 +298,6 @@ namespace MySQLReplication } /// Types that do not used in the binlog event: - /// MYSQL_TYPE_ENUM /// MYSQL_TYPE_SET /// MYSQL_TYPE_TINY_BLOB /// MYSQL_TYPE_MEDIUM_BLOB @@ -562,6 +561,22 @@ namespace MySQLReplication row.push_back(dispatch((meta >> 8) & 0xFF, meta & 0xFF, read_decimal)); break; } + case MYSQL_TYPE_ENUM: + { + if((meta & 0xFF) == 1) + { + UInt8 val = 0; + payload.readStrict(reinterpret_cast(&val), 1); + row.push_back(Field{UInt8{val}}); + } + else + { + UInt16 val = 0; + payload.readStrict(reinterpret_cast(&val), 2); + row.push_back(Field{UInt16{val}}); + } + break; + } case MYSQL_TYPE_VARCHAR: case MYSQL_TYPE_VAR_STRING: { diff --git a/src/DataStreams/MongoDBBlockInputStream.cpp b/src/DataStreams/MongoDBBlockInputStream.cpp index 31fa17675bc..d583cb0d5b4 100644 --- a/src/DataStreams/MongoDBBlockInputStream.cpp +++ b/src/DataStreams/MongoDBBlockInputStream.cpp @@ -243,6 +243,8 @@ namespace insertNumber(column, value, name); break; + case ValueType::vtEnum8: + case ValueType::vtEnum16: case ValueType::vtString: { if (value.type() == Poco::MongoDB::ElementTraits::TypeId) diff --git a/src/DataStreams/PostgreSQLBlockInputStream.cpp 
b/src/DataStreams/PostgreSQLBlockInputStream.cpp index a41280847a5..bff9d31ef81 100644 --- a/src/DataStreams/PostgreSQLBlockInputStream.cpp +++ b/src/DataStreams/PostgreSQLBlockInputStream.cpp @@ -157,6 +157,8 @@ void PostgreSQLBlockInputStream::insertValue(IColumn & column, std::string_view assert_cast(column).insertValue(pqxx::from_string(value)); break; case ValueType::vtFixedString:[[fallthrough]]; + case ValueType::vtEnum8: + case ValueType::vtEnum16: case ValueType::vtString: assert_cast(column).insertData(value.data(), value.size()); break; diff --git a/src/DataTypes/DataTypeEnum.cpp b/src/DataTypes/DataTypeEnum.cpp index b8b0b906cc4..89fc2b0c0fd 100644 --- a/src/DataTypes/DataTypeEnum.cpp +++ b/src/DataTypes/DataTypeEnum.cpp @@ -262,6 +262,9 @@ void registerDataTypeEnum(DataTypeFactory & factory) factory.registerDataType("Enum8", createExact>); factory.registerDataType("Enum16", createExact>); factory.registerDataType("Enum", create); + + /// MySQL + factory.registerAlias("ENUM", "Enum", DataTypeFactory::CaseInsensitive); } } diff --git a/src/Dictionaries/CassandraBlockInputStream.cpp b/src/Dictionaries/CassandraBlockInputStream.cpp index 9afc0069d48..57a4555ea87 100644 --- a/src/Dictionaries/CassandraBlockInputStream.cpp +++ b/src/Dictionaries/CassandraBlockInputStream.cpp @@ -110,6 +110,8 @@ void CassandraBlockInputStream::insertValue(IColumn & column, ValueType type, co assert_cast(column).insertValue(value); break; } + case ValueType::vtEnum8: + case ValueType::vtEnum16: case ValueType::vtString: { const char * value = nullptr; diff --git a/src/Dictionaries/RedisBlockInputStream.cpp b/src/Dictionaries/RedisBlockInputStream.cpp index bf02de9b9b5..71d055e1d68 100644 --- a/src/Dictionaries/RedisBlockInputStream.cpp +++ b/src/Dictionaries/RedisBlockInputStream.cpp @@ -91,6 +91,8 @@ namespace DB case ValueType::vtFloat64: insert(column, string_value); break; + case ValueType::vtEnum8: + case ValueType::vtEnum16: case ValueType::vtString: assert_cast(column).insert(parse(string_value)); break; diff --git a/src/Formats/MySQLBlockInputStream.cpp b/src/Formats/MySQLBlockInputStream.cpp index 3ea86c82fa3..3f51cb8d311 100644 --- a/src/Formats/MySQLBlockInputStream.cpp +++ b/src/Formats/MySQLBlockInputStream.cpp @@ -10,6 +10,7 @@ #include #include #include +#include #include #include #include @@ -157,6 +158,14 @@ namespace assert_cast(column).insertValue(value.getDouble()); read_bytes_size += 8; break; + case ValueType::vtEnum8: + assert_cast(column).insertValue(static_cast &>(data_type).castToValue(value.data()).get()); + read_bytes_size += assert_cast(column).byteSize(); + break; + case ValueType::vtEnum16: + assert_cast(column).insertValue(static_cast &>(data_type).castToValue(value.data()).get()); + read_bytes_size += assert_cast(column).byteSize(); + break; case ValueType::vtString: assert_cast(column).insertData(value.data(), value.size()); read_bytes_size += assert_cast(column).byteSize(); diff --git a/src/Interpreters/MySQL/InterpretersMySQLDDLQuery.cpp b/src/Interpreters/MySQL/InterpretersMySQLDDLQuery.cpp index fbd537781de..d37b05e88eb 100644 --- a/src/Interpreters/MySQL/InterpretersMySQLDDLQuery.cpp +++ b/src/Interpreters/MySQL/InterpretersMySQLDDLQuery.cpp @@ -92,22 +92,40 @@ static NamesAndTypesList getColumnsList(const ASTExpressionList * columns_defini } ASTPtr data_type = declare_column->data_type; + auto * data_type_function = data_type->as(); - if (is_unsigned) + if (data_type_function) { - auto * data_type_function = data_type->as(); + String type_name_upper = 
Poco::toUpper(data_type_function->name); - if (data_type_function) + if (is_unsigned) { - String type_name_upper = Poco::toUpper(data_type_function->name); - /// For example(in MySQL): CREATE TABLE test(column_name INT NOT NULL ... UNSIGNED) - if (type_name_upper.find("INT") != std::string::npos && !endsWith(type_name_upper, "SIGNED") + if (type_name_upper.find("INT") != String::npos && !endsWith(type_name_upper, "SIGNED") && !endsWith(type_name_upper, "UNSIGNED")) data_type_function->name = type_name_upper + " UNSIGNED"; } - } + /// Transforms MySQL ENUM's list of strings to ClickHouse string-integer pairs + /// For example ENUM('a', 'b', 'c') -> ENUM('a'=1, 'b'=2, 'c'=3) + /// Elements on a position further than 32767 are assigned negative values, starting with -32768. + /// Note: Enum would be transfomed to Enum8 if number of ellements is less then 128, otherwise it would be transformed to Enum16. + if (type_name_upper.find("ENUM") != String::npos) + { + UInt16 i = 0; + for (ASTPtr & child : data_type_function->arguments->children) + { + auto newChild = std::make_shared(); + newChild->name = "equals"; + auto * literal = child->as(); + + newChild->arguments = std::make_shared(); + newChild->arguments->children.push_back(std::make_shared(literal->value.get())); + newChild->arguments->children.push_back(std::make_shared(Int16(++i))); + child = newChild; + } + } + } if (is_nullable) data_type = makeASTFunction("Nullable", data_type); diff --git a/src/Interpreters/MySQL/tests/gtest_create_rewritten.cpp b/src/Interpreters/MySQL/tests/gtest_create_rewritten.cpp index 4bd65ae45a1..036b933a461 100644 --- a/src/Interpreters/MySQL/tests/gtest_create_rewritten.cpp +++ b/src/Interpreters/MySQL/tests/gtest_create_rewritten.cpp @@ -235,3 +235,25 @@ TEST(MySQLCreateRewritten, QueryWithColumnComments) std::string(MATERIALIZEMYSQL_TABLE_COLUMNS) + ") ENGINE = ReplacingMergeTree(_version) PARTITION BY intDiv(key, 4294967) ORDER BY tuple(key)"); } + +TEST(MySQLCreateRewritten, QueryWithEnum) +{ + tryRegisterFunctions(); + const auto & context_holder = getContext(); + + EXPECT_EQ(queryToString(tryRewrittenCreateQuery( + "CREATE TABLE `test_database`.`test_table_1`(`key` INT NOT NULL PRIMARY KEY, `test` ENUM('a','b','c'))", context_holder.context)), + "CREATE TABLE test_database.test_table_1 (`key` Int32, `test` Nullable(Enum8('a' = 1, 'b' = 2, 'c' = 3))" + + std::string(MATERIALIZEMYSQL_TABLE_COLUMNS) + + ") ENGINE = ReplacingMergeTree(_version) PARTITION BY intDiv(key, 4294967) ORDER BY tuple(key)"); + EXPECT_EQ(queryToString(tryRewrittenCreateQuery( + "CREATE TABLE `test_database`.`test_table_1`(`key` INT NOT NULL PRIMARY KEY, `test` ENUM('a','b','c') NOT NULL)", context_holder.context)), + "CREATE TABLE test_database.test_table_1 (`key` Int32, `test` Enum8('a' = 1, 'b' = 2, 'c' = 3)" + + std::string(MATERIALIZEMYSQL_TABLE_COLUMNS) + + ") ENGINE = ReplacingMergeTree(_version) PARTITION BY intDiv(key, 4294967) ORDER BY tuple(key)"); + EXPECT_EQ(queryToString(tryRewrittenCreateQuery( + "CREATE TABLE `test_database`.`test_table_1`(`key` INT NOT NULL PRIMARY KEY, `test` ENUM('a','b','c') COMMENT 'test_comment')", context_holder.context)), + "CREATE TABLE test_database.test_table_1 (`key` Int32, `test` Nullable(Enum8('a' = 1, 'b' = 2, 'c' = 3)) COMMENT 'test_comment'" + + std::string(MATERIALIZEMYSQL_TABLE_COLUMNS) + + ") ENGINE = ReplacingMergeTree(_version) PARTITION BY intDiv(key, 4294967) ORDER BY tuple(key)"); +} diff --git a/tests/integration/parallel.json b/tests/integration/parallel.json index 
f82e33138fc..2879f258406 100644 --- a/tests/integration/parallel.json +++ b/tests/integration/parallel.json @@ -177,6 +177,8 @@ "test_materialize_mysql_database/test.py::test_system_tables_table[clickhouse_node1]", "test_materialize_mysql_database/test.py::test_materialize_with_column_comments[clickhouse_node0]", "test_materialize_mysql_database/test.py::test_materialize_with_column_comments[clickhouse_node1]", + "test_materialize_mysql_database/test.py::test_materialize_with_enum[clickhouse_node0]", + "test_materialize_mysql_database/test.py::test_materialize_with_enum[clickhouse_node1]", "test_materialize_mysql_database/test.py::test_utf8mb4[clickhouse_node0]", "test_materialize_mysql_database/test.py::test_utf8mb4[clickhouse_node1]", "test_parts_delete_zookeeper/test.py::test_merge_doesnt_work_without_zookeeper", diff --git a/tests/integration/parallel_skip.json b/tests/integration/parallel_skip.json index 7d124f4eac7..2c993691d78 100644 --- a/tests/integration/parallel_skip.json +++ b/tests/integration/parallel_skip.json @@ -181,6 +181,8 @@ "test_materialize_mysql_database/test.py::test_system_tables_table[clickhouse_node1]", "test_materialize_mysql_database/test.py::test_materialize_with_column_comments[clickhouse_node0]", "test_materialize_mysql_database/test.py::test_materialize_with_column_comments[clickhouse_node1]", + "test_materialize_mysql_database/test.py::test_materialize_with_enum[clickhouse_node0]", + "test_materialize_mysql_database/test.py::test_materialize_with_enum[clickhouse_node1]", "test_materialize_mysql_database/test.py::test_utf8mb4[clickhouse_node0]", "test_materialize_mysql_database/test.py::test_utf8mb4[clickhouse_node1]", "test_parts_delete_zookeeper/test.py::test_merge_doesnt_work_without_zookeeper", diff --git a/tests/integration/test_materialize_mysql_database/materialize_with_ddl.py b/tests/integration/test_materialize_mysql_database/materialize_with_ddl.py index c5db90821e2..5b267a28321 100644 --- a/tests/integration/test_materialize_mysql_database/materialize_with_ddl.py +++ b/tests/integration/test_materialize_mysql_database/materialize_with_ddl.py @@ -853,6 +853,86 @@ def materialize_with_column_comments_test(clickhouse_node, mysql_node, service_n clickhouse_node.query("DROP DATABASE materialize_with_column_comments_test") mysql_node.query("DROP DATABASE materialize_with_column_comments_test") +def materialize_with_enum8_test(clickhouse_node, mysql_node, service_name): + mysql_node.query("DROP DATABASE IF EXISTS materialize_with_enum8_test") + clickhouse_node.query("DROP DATABASE IF EXISTS materialize_with_enum8_test") + mysql_node.query("CREATE DATABASE materialize_with_enum8_test") + enum8_values_count = 127 + enum8_values = "" + enum8_values_with_backslash = "" + for i in range(1, enum8_values_count): + enum8_values += '\'' + str(i) + "\', " + enum8_values_with_backslash += "\\\'" + str(i) +"\\\' = " + str(i) + ", " + enum8_values += '\'' + str(enum8_values_count) + '\'' + enum8_values_with_backslash += "\\\'" + str(enum8_values_count) +"\\\' = " + str(enum8_values_count) + mysql_node.query("CREATE TABLE materialize_with_enum8_test.test (id int NOT NULL PRIMARY KEY, value ENUM(" + enum8_values + ")) ENGINE=InnoDB") + mysql_node.query("INSERT INTO materialize_with_enum8_test.test (id, value) VALUES (1, '1'),(2, '2')") + clickhouse_node.query("CREATE DATABASE materialize_with_enum8_test ENGINE = MaterializeMySQL('{}:3306', 'materialize_with_enum8_test', 'root', 'clickhouse')".format(service_name)) + check_query(clickhouse_node, "SELECT value FROM 
materialize_with_enum8_test.test ORDER BY id", "1\n2\n") + mysql_node.query("INSERT INTO materialize_with_enum8_test.test (id, value) VALUES (3, '127')") + check_query(clickhouse_node, "SELECT value FROM materialize_with_enum8_test.test ORDER BY id", "1\n2\n127\n") + check_query(clickhouse_node, "DESCRIBE TABLE materialize_with_enum8_test.test", "id\tInt32\t\t\t\t\t\nvalue\tNullable(Enum8(" + enum8_values_with_backslash + "))\t\t\t\t\t\n_sign\tInt8\tMATERIALIZED\t1\t\t\t\n_version\tUInt64\tMATERIALIZED\t1\t\t\t\n") + clickhouse_node.query("DROP DATABASE materialize_with_enum8_test") + mysql_node.query("DROP DATABASE materialize_with_enum8_test") + +def materialize_with_enum16_test(clickhouse_node, mysql_node, service_name): + mysql_node.query("DROP DATABASE IF EXISTS materialize_with_enum16_test") + clickhouse_node.query("DROP DATABASE IF EXISTS materialize_with_enum16_test") + mysql_node.query("CREATE DATABASE materialize_with_enum16_test") + enum16_values_count = 600 + enum16_values = "" + enum16_values_with_backslash = "" + for i in range(1, enum16_values_count): + enum16_values += '\'' + str(i) + "\', " + enum16_values_with_backslash += "\\\'" + str(i) +"\\\' = " + str(i) + ", " + enum16_values += '\'' + str(enum16_values_count) + '\'' + enum16_values_with_backslash += "\\\'" + str(enum16_values_count) +"\\\' = " + str(enum16_values_count) + mysql_node.query("CREATE TABLE materialize_with_enum16_test.test (id int NOT NULL PRIMARY KEY, value ENUM(" + enum16_values + ")) ENGINE=InnoDB") + mysql_node.query("INSERT INTO materialize_with_enum16_test.test (id, value) VALUES (1, '1'),(2, '2')") + clickhouse_node.query("CREATE DATABASE materialize_with_enum16_test ENGINE = MaterializeMySQL('{}:3306', 'materialize_with_enum16_test', 'root', 'clickhouse')".format(service_name)) + check_query(clickhouse_node, "SELECT value FROM materialize_with_enum16_test.test ORDER BY id", "1\n2\n") + mysql_node.query("INSERT INTO materialize_with_enum16_test.test (id, value) VALUES (3, '500')") + check_query(clickhouse_node, "SELECT value FROM materialize_with_enum16_test.test ORDER BY id", "1\n2\n500\n") + check_query(clickhouse_node, "DESCRIBE TABLE materialize_with_enum16_test.test", "id\tInt32\t\t\t\t\t\nvalue\tNullable(Enum16(" + enum16_values_with_backslash + "))\t\t\t\t\t\n_sign\tInt8\tMATERIALIZED\t1\t\t\t\n_version\tUInt64\tMATERIALIZED\t1\t\t\t\n") + clickhouse_node.query("DROP DATABASE materialize_with_enum16_test") + mysql_node.query("DROP DATABASE materialize_with_enum16_test") + +def alter_enum8_to_enum16_test(clickhouse_node, mysql_node, service_name): + mysql_node.query("DROP DATABASE IF EXISTS alter_enum8_to_enum16_test") + clickhouse_node.query("DROP DATABASE IF EXISTS alter_enum8_to_enum16_test") + mysql_node.query("CREATE DATABASE alter_enum8_to_enum16_test") + + enum8_values_count = 100 + enum8_values = "" + enum8_values_with_backslash = "" + for i in range(1, enum8_values_count): + enum8_values += '\'' + str(i) + "\', " + enum8_values_with_backslash += "\\\'" + str(i) +"\\\' = " + str(i) + ", " + enum8_values += '\'' + str(enum8_values_count) + '\'' + enum8_values_with_backslash += "\\\'" + str(enum8_values_count) +"\\\' = " + str(enum8_values_count) + mysql_node.query("CREATE TABLE alter_enum8_to_enum16_test.test (id int NOT NULL PRIMARY KEY, value ENUM(" + enum8_values + ")) ENGINE=InnoDB") + mysql_node.query("INSERT INTO alter_enum8_to_enum16_test.test (id, value) VALUES (1, '1'),(2, '2')") + clickhouse_node.query("CREATE DATABASE alter_enum8_to_enum16_test ENGINE = 
MaterializeMySQL('{}:3306', 'alter_enum8_to_enum16_test', 'root', 'clickhouse')".format(service_name)) + mysql_node.query("INSERT INTO alter_enum8_to_enum16_test.test (id, value) VALUES (3, '75')") + check_query(clickhouse_node, "SELECT value FROM alter_enum8_to_enum16_test.test ORDER BY id", "1\n2\n75\n") + check_query(clickhouse_node, "DESCRIBE TABLE alter_enum8_to_enum16_test.test", "id\tInt32\t\t\t\t\t\nvalue\tNullable(Enum8(" + enum8_values_with_backslash + "))\t\t\t\t\t\n_sign\tInt8\tMATERIALIZED\t1\t\t\t\n_version\tUInt64\tMATERIALIZED\t1\t\t\t\n") + + enum16_values_count = 600 + enum16_values = "" + enum16_values_with_backslash = "" + for i in range(1, enum16_values_count): + enum16_values += '\'' + str(i) + "\', " + enum16_values_with_backslash += "\\\'" + str(i) +"\\\' = " + str(i) + ", " + enum16_values += '\'' + str(enum16_values_count) + '\'' + enum16_values_with_backslash += "\\\'" + str(enum16_values_count) +"\\\' = " + str(enum16_values_count) + mysql_node.query("ALTER TABLE alter_enum8_to_enum16_test.test MODIFY COLUMN value ENUM(" + enum16_values + ")") + check_query(clickhouse_node, "DESCRIBE TABLE alter_enum8_to_enum16_test.test", "id\tInt32\t\t\t\t\t\nvalue\tNullable(Enum16(" + enum16_values_with_backslash + "))\t\t\t\t\t\n_sign\tInt8\tMATERIALIZED\t1\t\t\t\n_version\tUInt64\tMATERIALIZED\t1\t\t\t\n") + mysql_node.query("INSERT INTO alter_enum8_to_enum16_test.test (id, value) VALUES (4, '500')") + check_query(clickhouse_node, "SELECT value FROM alter_enum8_to_enum16_test.test ORDER BY id", "1\n2\n75\n500\n") + + clickhouse_node.query("DROP DATABASE alter_enum8_to_enum16_test") + mysql_node.query("DROP DATABASE alter_enum8_to_enum16_test") + def move_to_prewhere_and_column_filtering(clickhouse_node, mysql_node, service_name): clickhouse_node.query("DROP DATABASE IF EXISTS cond_on_key_col") mysql_node.query("DROP DATABASE IF EXISTS cond_on_key_col") diff --git a/tests/integration/test_materialize_mysql_database/test.py b/tests/integration/test_materialize_mysql_database/test.py index e26500f07b3..252cf551d2d 100644 --- a/tests/integration/test_materialize_mysql_database/test.py +++ b/tests/integration/test_materialize_mysql_database/test.py @@ -223,6 +223,15 @@ def test_materialize_with_column_comments(started_cluster, started_mysql_8_0, st materialize_with_ddl.materialize_with_column_comments_test(clickhouse_node, started_mysql_5_7, "mysql57") materialize_with_ddl.materialize_with_column_comments_test(clickhouse_node, started_mysql_8_0, "mysql80") +@pytest.mark.parametrize(('clickhouse_node'), [node_db_ordinary, node_db_ordinary]) +def test_materialize_with_enum(started_cluster, started_mysql_8_0, started_mysql_5_7, clickhouse_node): + materialize_with_ddl.materialize_with_enum8_test(clickhouse_node, started_mysql_5_7, "mysql57") + materialize_with_ddl.materialize_with_enum16_test(clickhouse_node, started_mysql_5_7, "mysql57") + materialize_with_ddl.alter_enum8_to_enum16_test(clickhouse_node, started_mysql_5_7, "mysql57") + materialize_with_ddl.materialize_with_enum8_test(clickhouse_node, started_mysql_8_0, "mysql80") + materialize_with_ddl.materialize_with_enum16_test(clickhouse_node, started_mysql_8_0, "mysql80") + materialize_with_ddl.alter_enum8_to_enum16_test(clickhouse_node, started_mysql_8_0, "mysql80") + @pytest.mark.parametrize(('clickhouse_node'), [node_disable_bytes_settings, node_disable_rows_settings]) def test_mysql_settings(started_cluster, started_mysql_8_0, started_mysql_5_7, clickhouse_node): From 7516d390918a762e68b81d32e2257a2340814040 Mon Sep 17 
00:00:00 2001
From: Kostiantyn Storozhuk
Date: Fri, 25 Jun 2021 16:44:28 +0800
Subject: [PATCH 385/931] Style fixes

---
 src/Core/MySQL/MySQLReplication.cpp | 2 +-
 src/Interpreters/MySQL/InterpretersMySQLDDLQuery.cpp | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/src/Core/MySQL/MySQLReplication.cpp b/src/Core/MySQL/MySQLReplication.cpp
index 4eba236e7c9..81015507d8d 100644
--- a/src/Core/MySQL/MySQLReplication.cpp
+++ b/src/Core/MySQL/MySQLReplication.cpp
@@ -563,7 +563,7 @@ namespace MySQLReplication
 }
 case MYSQL_TYPE_ENUM:
 {
- if((meta & 0xFF) == 1)
+ if ((meta & 0xFF) == 1)
 {
 UInt8 val = 0;
 payload.readStrict(reinterpret_cast(&val), 1);
diff --git a/src/Interpreters/MySQL/InterpretersMySQLDDLQuery.cpp b/src/Interpreters/MySQL/InterpretersMySQLDDLQuery.cpp
index d37b05e88eb..c26f183c3e5 100644
--- a/src/Interpreters/MySQL/InterpretersMySQLDDLQuery.cpp
+++ b/src/Interpreters/MySQL/InterpretersMySQLDDLQuery.cpp
@@ -109,7 +109,7 @@
 /// Transforms MySQL ENUM's list of strings to ClickHouse string-integer pairs
 /// For example ENUM('a', 'b', 'c') -> ENUM('a'=1, 'b'=2, 'c'=3)
 /// Elements on a position further than 32767 are assigned negative values, starting with -32768.
- /// Note: Enum would be transfomed to Enum8 if number of ellements is less then 128, otherwise it would be transformed to Enum16.
+ /// Note: Enum would be transformed to Enum8 if number of elements is less than 128, otherwise it would be transformed to Enum16.
 if (type_name_upper.find("ENUM") != String::npos)
 {
 UInt16 i = 0;
From 4fadca34714896d54a56eeff9c5b26aa95c9d4b5 Mon Sep 17 00:00:00 2001
From: Kostiantyn Storozhuk
Date: Fri, 25 Jun 2021 16:52:33 +0800
Subject: [PATCH 386/931] Trigger Build

From 3c71e067fae24680d19906b20fd7e48575962031 Mon Sep 17 00:00:00 2001
From: Kostiantyn Storozhuk
Date: Fri, 25 Jun 2021 19:27:03 +0800
Subject: [PATCH 387/931] Update docs and style

---
 .../en/engines/database-engines/materialize-mysql.md | 1 +
 src/Interpreters/MySQL/InterpretersMySQLDDLQuery.cpp | 12 ++++++------
 2 files changed, 7 insertions(+), 6 deletions(-)

diff --git a/docs/en/engines/database-engines/materialize-mysql.md b/docs/en/engines/database-engines/materialize-mysql.md
index 69d3122c268..93e4aedfd5a 100644
--- a/docs/en/engines/database-engines/materialize-mysql.md
+++ b/docs/en/engines/database-engines/materialize-mysql.md
@@ -49,6 +49,7 @@ When working with the `MaterializeMySQL` database engine, [ReplacingMergeTree](.
| DATE, NEWDATE | [Date](../../sql-reference/data-types/date.md) | | DATETIME, TIMESTAMP | [DateTime](../../sql-reference/data-types/datetime.md) | | DATETIME2, TIMESTAMP2 | [DateTime64](../../sql-reference/data-types/datetime64.md) | +| ENUM | [Enum](../../sql-reference/data-types/enum.md) | | STRING | [String](../../sql-reference/data-types/string.md) | | VARCHAR, VAR_STRING | [String](../../sql-reference/data-types/string.md) | | BLOB | [String](../../sql-reference/data-types/string.md) | diff --git a/src/Interpreters/MySQL/InterpretersMySQLDDLQuery.cpp b/src/Interpreters/MySQL/InterpretersMySQLDDLQuery.cpp index c26f183c3e5..7988a9ab48b 100644 --- a/src/Interpreters/MySQL/InterpretersMySQLDDLQuery.cpp +++ b/src/Interpreters/MySQL/InterpretersMySQLDDLQuery.cpp @@ -115,14 +115,14 @@ static NamesAndTypesList getColumnsList(const ASTExpressionList * columns_defini UInt16 i = 0; for (ASTPtr & child : data_type_function->arguments->children) { - auto newChild = std::make_shared(); - newChild->name = "equals"; + auto new_child = std::make_shared(); + new_child->name = "equals"; auto * literal = child->as(); - newChild->arguments = std::make_shared(); - newChild->arguments->children.push_back(std::make_shared(literal->value.get())); - newChild->arguments->children.push_back(std::make_shared(Int16(++i))); - child = newChild; + new_child->arguments = std::make_shared(); + new_child->arguments->children.push_back(std::make_shared(literal->value.get())); + new_child->arguments->children.push_back(std::make_shared(Int16(++i))); + child = new_child; } } } From 2949cd1e6f7cfe5a26619681a454bf4776dc5d41 Mon Sep 17 00:00:00 2001 From: vdimir Date: Fri, 25 Jun 2021 13:46:19 +0300 Subject: [PATCH 388/931] Support ALIASed columns in JOIN ON expression --- src/Interpreters/DatabaseAndTableWithAlias.h | 2 +- src/Interpreters/ExpressionAnalyzer.cpp | 3 ++- src/Interpreters/InterpreterSelectQuery.cpp | 1 - src/Interpreters/TreeRewriter.cpp | 19 ++++++++++++------- .../01925_join_materialized_columns.reference | 2 ++ .../01925_join_materialized_columns.sql | 5 ++--- 6 files changed, 19 insertions(+), 13 deletions(-) diff --git a/src/Interpreters/DatabaseAndTableWithAlias.h b/src/Interpreters/DatabaseAndTableWithAlias.h index e60674d93c6..e43481025a0 100644 --- a/src/Interpreters/DatabaseAndTableWithAlias.h +++ b/src/Interpreters/DatabaseAndTableWithAlias.h @@ -61,7 +61,7 @@ struct TableWithColumnNamesAndTypes names.insert(col.name); } - bool hasColumn(const String & name) const { return names.count(name); } + bool hasColumn(const String & name) const { return names.contains(name); } void addHiddenColumns(const NamesAndTypesList & addition) { diff --git a/src/Interpreters/ExpressionAnalyzer.cpp b/src/Interpreters/ExpressionAnalyzer.cpp index 00ffd540da0..326b4ac6705 100644 --- a/src/Interpreters/ExpressionAnalyzer.cpp +++ b/src/Interpreters/ExpressionAnalyzer.cpp @@ -806,7 +806,8 @@ JoinPtr SelectQueryExpressionAnalyzer::appendJoin(ExpressionActionsChain & chain } ExpressionActionsChain::Step & step = chain.lastStep(columns_after_array_join); - chain.steps.push_back(std::make_unique(syntax->analyzed_join, table_join, step.getResultColumns())); + chain.steps.push_back(std::make_unique( + syntax->analyzed_join, table_join, step.getResultColumns())); chain.addStep(); return table_join; } diff --git a/src/Interpreters/InterpreterSelectQuery.cpp b/src/Interpreters/InterpreterSelectQuery.cpp index 71181a84e1a..f95750ed5e2 100644 --- a/src/Interpreters/InterpreterSelectQuery.cpp +++ 
b/src/Interpreters/InterpreterSelectQuery.cpp
@@ -30,7 +30,6 @@
 #include
 #include
 #include
-#include
 #include
 #include
 #include
diff --git a/src/Interpreters/TreeRewriter.cpp b/src/Interpreters/TreeRewriter.cpp
index b997e53f745..679ab4ea354 100644
--- a/src/Interpreters/TreeRewriter.cpp
+++ b/src/Interpreters/TreeRewriter.cpp
@@ -508,14 +508,10 @@ void setJoinStrictness(ASTSelectQuery & select_query, JoinStrictness join_defaul
 }

 /// Find the columns that are obtained by JOIN.
-void collectJoinedColumns(TableJoin & analyzed_join, const ASTSelectQuery & select_query,
+void collectJoinedColumns(TableJoin & analyzed_join, const ASTTableJoin & table_join,
 const TablesWithColumns & tables, const Aliases & aliases)
 {
- const ASTTablesInSelectQueryElement * node = select_query.join();
- if (!node || tables.size() < 2)
- return;
-
- const auto & table_join = node->table_join->as<ASTTableJoin &>();
+ assert(tables.size() >= 2);

 if (table_join.using_expression_list)
 {
@@ -936,7 +932,16 @@ TreeRewriterResultPtr TreeRewriter::analyzeSelect(
 setJoinStrictness(
 *select_query, settings.join_default_strictness, settings.any_join_distinct_right_table_keys, result.analyzed_join->table_join);

- collectJoinedColumns(*result.analyzed_join, *select_query, tables_with_columns, result.aliases);
+ if (const auto * join_ast = select_query->join(); join_ast && tables_with_columns.size() >= 2)
+ {
+ auto & table_join_ast = join_ast->table_join->as<ASTTableJoin &>();
+ if (table_join_ast.using_expression_list && result.metadata_snapshot)
+ replaceAliasColumnsInQuery(table_join_ast.using_expression_list, result.metadata_snapshot->getColumns(), result.array_join_result_to_source, getContext());
+ if (table_join_ast.on_expression && result.metadata_snapshot)
+ replaceAliasColumnsInQuery(table_join_ast.on_expression, result.metadata_snapshot->getColumns(), result.array_join_result_to_source, getContext());
+
+ collectJoinedColumns(*result.analyzed_join, table_join_ast, tables_with_columns, result.aliases);
+ }

 result.aggregates = getAggregates(query, *select_query);
 result.window_function_asts = getWindowFunctions(query, *select_query);
diff --git a/tests/queries/0_stateless/01925_join_materialized_columns.reference b/tests/queries/0_stateless/01925_join_materialized_columns.reference
index fe00b746e57..1dfda3c769b 100644
--- a/tests/queries/0_stateless/01925_join_materialized_columns.reference
+++ b/tests/queries/0_stateless/01925_join_materialized_columns.reference
@@ -20,3 +20,5 @@
 -
 fact1t1_val1 fact1t2_val2
 fact2t1_val2 fact2t1_val2
+-
+2020-02-02 13:00:00 2020-02-05 13:00:00
diff --git a/tests/queries/0_stateless/01925_join_materialized_columns.sql b/tests/queries/0_stateless/01925_join_materialized_columns.sql
index 16fe00beb63..6a34fef96ab 100644
--- a/tests/queries/0_stateless/01925_join_materialized_columns.sql
+++ b/tests/queries/0_stateless/01925_join_materialized_columns.sql
@@ -33,6 +33,5 @@
 SELECT '-';
 SELECT * FROM t1 JOIN t2 ON t1.foo = t2.bar WHERE t2.aliascol2 == 'fact2t1_val2';
 SELECT '-';
 SELECT t1.aliascol1, t2.aliascol2 FROM t1 JOIN t2 ON t1.foo = t2.bar ORDER BY t1.time, t2.time;
--- SELECT '-';
--- SELECT * FROM t1 JOIN t2 ON t1.aliascol1 = t2.aliascol2 ORDER BY t1.time, t2.time;
-
+SELECT '-';
+SELECT t1.time, t2.time FROM t1 JOIN t2 ON t1.aliascol1 = t2.aliascol2 ORDER BY t1.time, t2.time;
From 07a47b4bd36cea58af2a714cf784439ef583ca5c Mon Sep 17 00:00:00 2001
From: feng lv
Date: Fri, 25 Jun 2021 13:51:17 +0000
Subject: [PATCH 389/931] fix

---
 src/Common/OptimizedRegularExpression.cpp | 18 ----
src/Common/OptimizedRegularExpression.h | 2 - src/Common/StringUtils/StringUtils.h | 1 + src/Interpreters/DatabaseCatalog.cpp | 30 +++--- src/Interpreters/DatabaseCatalog.h | 1 + .../evaluateConstantExpression.cpp | 18 ++++ src/Interpreters/evaluateConstantExpression.h | 2 + src/Storages/StorageMerge.cpp | 54 +++++++--- src/Storages/StorageMerge.h | 12 ++- src/TableFunctions/TableFunctionMerge.cpp | 101 +++++++++++------- src/TableFunctions/TableFunctionMerge.h | 4 +- 11 files changed, 152 insertions(+), 91 deletions(-) diff --git a/src/Common/OptimizedRegularExpression.cpp b/src/Common/OptimizedRegularExpression.cpp index ba1b82ee2fe..1464923e6ab 100644 --- a/src/Common/OptimizedRegularExpression.cpp +++ b/src/Common/OptimizedRegularExpression.cpp @@ -2,8 +2,6 @@ #include #include -#include - #define MIN_LENGTH_FOR_STRSTR 3 #define MAX_SUBPATTERNS 1024 @@ -344,22 +342,6 @@ OptimizedRegularExpressionImpl::OptimizedRegularExpressionImpl(cons } } -template -bool OptimizedRegularExpressionImpl::fullMatch(const std::string & subject) const -{ - if (is_trivial) - { - if (required_substring.empty()) - return subject.empty(); - - if (is_case_insensitive) - return Poco::toLower(subject) == Poco::toLower(required_substring); - else - return subject == required_substring; - } - - return RegexType::FullMatch(StringPieceType(subject.data(), subject.size()), *re2); -} template bool OptimizedRegularExpressionImpl::match(const char * subject, size_t subject_size) const diff --git a/src/Common/OptimizedRegularExpression.h b/src/Common/OptimizedRegularExpression.h index 1dc136a0148..fddefe596c4 100644 --- a/src/Common/OptimizedRegularExpression.h +++ b/src/Common/OptimizedRegularExpression.h @@ -64,8 +64,6 @@ public: OptimizedRegularExpressionImpl(const std::string & regexp_, int options = 0); - bool fullMatch(const std::string & subject) const; - bool match(const std::string & subject) const { return match(subject.data(), subject.size()); diff --git a/src/Common/StringUtils/StringUtils.h b/src/Common/StringUtils/StringUtils.h index 20c0a5ca380..b37e447082b 100644 --- a/src/Common/StringUtils/StringUtils.h +++ b/src/Common/StringUtils/StringUtils.h @@ -283,3 +283,4 @@ inline void trim(std::string_view & str, char c = ' ') trimLeft(str, c); trimRight(str, c); } +} diff --git a/src/Interpreters/DatabaseCatalog.cpp b/src/Interpreters/DatabaseCatalog.cpp index 4ed4f258b29..82f800f2c9b 100644 --- a/src/Interpreters/DatabaseCatalog.cpp +++ b/src/Interpreters/DatabaseCatalog.cpp @@ -1,23 +1,25 @@ -#include -#include -#include -#include -#include +#include +#include +#include #include #include -#include -#include -#include -#include -#include +#include #include +#include +#include +#include +#include +#include +#include +#include #include -#include -#include -#include #include -#include +#include +#include #include +#include +#include +#include #if !defined(ARCADIA_BUILD) # include "config_core.h" diff --git a/src/Interpreters/DatabaseCatalog.h b/src/Interpreters/DatabaseCatalog.h index 74bfb814ce4..2a8aa742f0c 100644 --- a/src/Interpreters/DatabaseCatalog.h +++ b/src/Interpreters/DatabaseCatalog.h @@ -1,5 +1,6 @@ #pragma once +#include #include #include #include diff --git a/src/Interpreters/evaluateConstantExpression.cpp b/src/Interpreters/evaluateConstantExpression.cpp index 2525f9672ed..13d9ef47894 100644 --- a/src/Interpreters/evaluateConstantExpression.cpp +++ b/src/Interpreters/evaluateConstantExpression.cpp @@ -100,6 +100,24 @@ ASTPtr evaluateConstantExpressionForDatabaseName(const ASTPtr & node, 
ContextPtr
 return res;
 }

+std::tuple<bool, String> evaluateDatabaseNameForMergeEngine(const ASTPtr & node, ContextPtr context)
+{
+ if (const auto * func = node->as<ASTFunction>(); func->name == "REGEXP")
+ {
+ if (func->children.size() != 1)
+ throw Exception("Arguments for REGEXP in Merge ENGINE should be 1", ErrorCodes::BAD_ARGUMENTS);
+
+ auto * literal = func->children[0]->as<ASTLiteral>();
+ if (!literal || literal->value.safeGet<String>().empty())
+ throw Exception("Argument for REGEXP in Merge ENGINE should be a non empty String Literal", ErrorCodes::BAD_ARGUMENTS);
+
+ return std::tuple{true, literal->value.safeGet<String>()};
+ }
+
+ auto ast = evaluateConstantExpressionForDatabaseName(node, context);
+ return std::tuple{false, ast->as<ASTLiteral>()->value.safeGet<String>()};
+}
+
 namespace
 {
 using Conjunction = ColumnsWithTypeAndName;
diff --git a/src/Interpreters/evaluateConstantExpression.h b/src/Interpreters/evaluateConstantExpression.h
index b95982f5b99..70f7bb9bd86 100644
--- a/src/Interpreters/evaluateConstantExpression.h
+++ b/src/Interpreters/evaluateConstantExpression.h
@@ -53,4 +53,6 @@ ASTPtr evaluateConstantExpressionForDatabaseName(const ASTPtr & node, ContextPtr
 */
 std::optional<Blocks> evaluateExpressionOverConstantCondition(const ASTPtr & node, const ExpressionActionsPtr & target_expr, size_t & limit);

+// Evaluate database name or regexp for StorageMerge and TableFunction merge
+std::tuple<bool, String> evaluateDatabaseNameForMergeEngine(const ASTPtr & node, ContextPtr context);
 }
diff --git a/src/Storages/StorageMerge.cpp b/src/Storages/StorageMerge.cpp
index add03695c6c..93cf224feb8 100644
--- a/src/Storages/StorageMerge.cpp
+++ b/src/Storages/StorageMerge.cpp
@@ -45,13 +45,16 @@ StorageMerge::StorageMerge(
 const StorageID & table_id_,
 const ColumnsDescription & columns_,
 const String & comment,
- const String & source_database_regexp_,
+ const String & source_database_name_or_regexp_,
+ bool database_is_regexp_,
 const DbToTableSetMap & source_databases_and_tables_,
 ContextPtr context_)
 : IStorage(table_id_)
 , WithContext(context_->getGlobalContext())
- , source_database_regexp(source_database_regexp_)
+ , source_database_regexp(source_database_name_or_regexp_)
 , source_databases_and_tables(source_databases_and_tables_)
+ , source_database_name_or_regexp(source_database_name_or_regexp_)
+ , database_is_regexp(database_is_regexp_)
 {
 StorageInMemoryMetadata storage_metadata;
 storage_metadata.setColumns(columns_);
@@ -63,13 +66,16 @@ StorageMerge::StorageMerge(
 const StorageID & table_id_,
 const ColumnsDescription & columns_,
 const String & comment,
- const String & source_database_regexp_,
+ const String & source_database_name_or_regexp_,
+ bool database_is_regexp_,
 const String & source_table_regexp_,
 ContextPtr context_)
 : IStorage(table_id_)
 , WithContext(context_->getGlobalContext())
- , source_database_regexp(source_database_regexp_)
+ , source_database_regexp(source_database_name_or_regexp_)
 , source_table_regexp(source_table_regexp_)
+ , source_database_name_or_regexp(source_database_name_or_regexp_)
+ , database_is_regexp(database_is_regexp_)
 {
 StorageInMemoryMetadata storage_metadata;
 storage_metadata.setColumns(columns_);
@@ -502,6 +508,23 @@ StorageMerge::StorageListWithLocks StorageMerge::getSelectedTables(
 return selected_tables;
 }

+DatabaseTablesIteratorPtr StorageMerge::getDatabaseIterator(const String & database_name, ContextPtr local_context) const
+{
+ auto database = DatabaseCatalog::instance().getDatabase(database_name);
+
+ auto table_name_match = [this, &database_name](const String & table_name_) -> bool {
+ if
(source_databases_and_tables) + { + const auto & source_tables = (*source_databases_and_tables).at(database_name); + return source_tables.count(table_name_); + } + else + return source_table_regexp->match(table_name_); + }; + + return database->getTablesIterator(local_context, table_name_match); +} + StorageMerge::DatabaseTablesIterators StorageMerge::getDatabaseIterators(ContextPtr local_context) const { try @@ -516,22 +539,19 @@ StorageMerge::DatabaseTablesIterators StorageMerge::getDatabaseIterators(Context DatabaseTablesIterators database_table_iterators; - auto databases = DatabaseCatalog::instance().getDatabases(); + /// database_name argument is not a regexp + if (!database_is_regexp) + database_table_iterators.emplace_back(getDatabaseIterator(source_database_name_or_regexp, local_context)); - for (const auto & db : databases) + /// database_name argument is a regexp + else { - if (source_database_regexp->fullMatch(db.first)) + auto databases = DatabaseCatalog::instance().getDatabases(); + + for (const auto & db : databases) { - auto table_name_match = [this, &db](const String & table_name_) -> bool { - if (source_databases_and_tables) - { - const auto & source_tables = (*source_databases_and_tables).at(db.first); - return source_tables.count(table_name_); - } - else - return source_table_regexp->fullMatch(table_name_); - }; - database_table_iterators.emplace_back(db.second->getTablesIterator(local_context, table_name_match)); + if (source_database_regexp->match(db.first)) + database_table_iterators.emplace_back(getDatabaseIterator(db.first, local_context)); } } diff --git a/src/Storages/StorageMerge.h b/src/Storages/StorageMerge.h index d72e16618d3..fc31e5ab2e5 100644 --- a/src/Storages/StorageMerge.h +++ b/src/Storages/StorageMerge.h @@ -50,10 +50,14 @@ public: private: using DbToTableSetMap = std::unordered_map>; + std::optional source_database_regexp; std::optional source_table_regexp; std::optional source_databases_and_tables; + String source_database_name_or_regexp; + bool database_is_regexp = false; + /// (Database, Table, Lock, TableName) using StorageWithLockAndName = std::tuple; using StorageListWithLocks = std::list; @@ -68,6 +72,8 @@ private: template StoragePtr getFirstTable(F && predicate) const; + DatabaseTablesIteratorPtr getDatabaseIterator(const String & database_name, ContextPtr context) const; + DatabaseTablesIterators getDatabaseIterators(ContextPtr context) const; NamesAndTypesList getVirtuals() const override; @@ -78,7 +84,8 @@ protected: const StorageID & table_id_, const ColumnsDescription & columns_, const String & comment, - const String & source_database_regexp_, + const String & source_database_name_or_regexp_, + bool database_is_regexp_, const DbToTableSetMap & source_databases_and_tables_, ContextPtr context_); @@ -86,7 +93,8 @@ protected: const StorageID & table_id_, const ColumnsDescription & columns_, const String & comment, - const String & source_database_regexp_, + const String & source_database_name_or_regexp_, + bool database_is_regexp_, const String & source_table_regexp_, ContextPtr context_); diff --git a/src/TableFunctions/TableFunctionMerge.cpp b/src/TableFunctions/TableFunctionMerge.cpp index 330f4f2c25f..85e338a3a8c 100644 --- a/src/TableFunctions/TableFunctionMerge.cpp +++ b/src/TableFunctions/TableFunctionMerge.cpp @@ -49,10 +49,12 @@ void TableFunctionMerge::parseArguments(const ASTPtr & ast_function, ContextPtr " - name of source database and regexp for table names.", ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH); - args[0] = 
evaluateConstantExpressionForDatabaseName(args[0], context); - args[1] = evaluateConstantExpressionAsLiteral(args[1], context); + auto [is_regexp, source_database_name_or_regexp_] = evaluateDatabaseNameForMergeEngine(args[0], context); - source_database_regexp = args[0]->as().value.safeGet(); + database_is_regexp = is_regexp; + source_database_name_or_regexp = source_database_name_or_regexp_; + + args[1] = evaluateConstantExpressionAsLiteral(args[1], context); source_table_regexp = args[1]->as().value.safeGet(); } @@ -62,46 +64,41 @@ const std::unordered_map> & TableFunctionMerg if (source_databases_and_tables) return *source_databases_and_tables; - OptimizedRegularExpression database_re(source_database_regexp); - OptimizedRegularExpression table_re(source_table_regexp); + source_databases_and_tables.emplace(); - auto table_name_match = [&](const String & table_name_) { return table_re.fullMatch(table_name_); }; - - auto access = context->getAccess(); - - auto databases = DatabaseCatalog::instance().getDatabases(); - - for (const auto & db : databases) + /// database_name is not a regexp + if (!database_is_regexp) { - if (database_re.fullMatch(db.first)) - { - bool granted_show_on_all_tables = access->isGranted(AccessType::SHOW_TABLES, db.first); - bool granted_select_on_all_tables = access->isGranted(AccessType::SELECT, db.first); - std::unordered_set source_tables; - for (auto it = db.second->getTablesIterator(context, table_name_match); it->isValid(); it->next()) - { - if (!it->table()) - continue; - bool granted_show = granted_show_on_all_tables || access->isGranted(AccessType::SHOW_TABLES, db.first, it->name()); - if (!granted_show) - continue; - if (!granted_select_on_all_tables) - access->checkAccess(AccessType::SELECT, db.first, it->name()); - source_tables.insert(it->name()); - } - - if (!source_tables.empty()) - (*source_databases_and_tables)[db.first] = source_tables; - } + auto source_tables = getMatchedTablesWithAccess(source_database_name_or_regexp, source_table_regexp, context); + if (source_tables.empty()) + throwNoTablesMatchRegexp(source_database_name_or_regexp, source_table_regexp); + (*source_databases_and_tables)[source_database_name_or_regexp] = source_tables; } - if ((*source_databases_and_tables).empty()) - throwNoTablesMatchRegexp(source_database_regexp, source_table_regexp); + /// database_name is a regexp + else + { + OptimizedRegularExpression database_re(source_database_name_or_regexp); + auto databases = DatabaseCatalog::instance().getDatabases(); + + for (const auto & db : databases) + { + if (database_re.match(db.first)) + { + auto source_tables = getMatchedTablesWithAccess(db.first, source_table_regexp, context); + + if (!source_tables.empty()) + (*source_databases_and_tables)[db.first] = source_tables; + } + } + + if ((*source_databases_and_tables).empty()) + throwNoTablesMatchRegexp(source_database_name_or_regexp, source_table_regexp); + } return *source_databases_and_tables; } - ColumnsDescription TableFunctionMerge::getActualTableStructure(ContextPtr context) const { for (const auto & db_with_tables : getSourceDatabasesAndTables(context)) @@ -111,7 +108,7 @@ ColumnsDescription TableFunctionMerge::getActualTableStructure(ContextPtr contex return ColumnsDescription{storage->getInMemoryMetadataPtr()->getColumns().getAllPhysical()}; } - throwNoTablesMatchRegexp(source_database_regexp, source_table_regexp); + throwNoTablesMatchRegexp(source_database_name_or_regexp, source_table_regexp); } @@ -121,7 +118,8 @@ StoragePtr TableFunctionMerge::executeImpl(const 
ASTPtr & /*ast_function*/, Cont StorageID(getDatabaseName(), table_name), getActualTableStructure(context), String{}, - source_database_regexp, + source_database_name_or_regexp, + database_is_regexp, getSourceDatabasesAndTables(context), context); @@ -129,6 +127,35 @@ StoragePtr TableFunctionMerge::executeImpl(const ASTPtr & /*ast_function*/, Cont return res; } +NameSet +TableFunctionMerge::getMatchedTablesWithAccess(const String & database_name, const String & table_regexp, const ContextPtr & context) +{ + OptimizedRegularExpression table_re(table_regexp); + + auto table_name_match = [&](const String & table_name) { return table_re.match(table_name); }; + + auto access = context->getAccess(); + + auto database = DatabaseCatalog::instance().getDatabase(database_name); + + bool granted_show_on_all_tables = access->isGranted(AccessType::SHOW_TABLES, database_name); + bool granted_select_on_all_tables = access->isGranted(AccessType::SELECT, database_name); + + NameSet tables; + + for (auto it = database->getTablesIterator(context, table_name_match); it->isValid(); it->next()) + { + if (!it->table()) + continue; + bool granted_show = granted_show_on_all_tables || access->isGranted(AccessType::SHOW_TABLES, database_name, it->name()); + if (!granted_show) + continue; + if (!granted_select_on_all_tables) + access->checkAccess(AccessType::SELECT, database_name, it->name()); + tables.insert(it->name()); + } + return tables; +} void registerTableFunctionMerge(TableFunctionFactory & factory) { diff --git a/src/TableFunctions/TableFunctionMerge.h b/src/TableFunctions/TableFunctionMerge.h index b971a00d4b6..0802dc12474 100644 --- a/src/TableFunctions/TableFunctionMerge.h +++ b/src/TableFunctions/TableFunctionMerge.h @@ -22,9 +22,11 @@ private: const std::unordered_map> & getSourceDatabasesAndTables(ContextPtr context) const; ColumnsDescription getActualTableStructure(ContextPtr context) const override; void parseArguments(const ASTPtr & ast_function, ContextPtr context) override; + static NameSet getMatchedTablesWithAccess(const String & database_name, const String & table_regexp, const ContextPtr & context); - String source_database_regexp; + String source_database_name_or_regexp; String source_table_regexp; + bool database_is_regexp = false; mutable std::optional>> source_databases_and_tables; }; From 06bad997201af908bf016b78161a9e1b4fe0592c Mon Sep 17 00:00:00 2001 From: Nikita Mikhaylov Date: Fri, 25 Jun 2021 17:07:46 +0300 Subject: [PATCH 390/931] Update src/Functions/geoToH3.cpp Co-authored-by: Bharat Nallan --- src/Functions/geoToH3.cpp | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/Functions/geoToH3.cpp b/src/Functions/geoToH3.cpp index 90e29248d32..57973ab94fe 100644 --- a/src/Functions/geoToH3.cpp +++ b/src/Functions/geoToH3.cpp @@ -86,7 +86,8 @@ public: H3Index hindex; H3Error err = latLngToCell(&coord, res, &hindex); - if (err) { + if (err) + { throw Exception( "Incorrect coordinates lat:" + std::to_string(coord.lat) + " lng:" + std::to_string(coord.lng) + " err:" + std::to_string(err), ErrorCodes::INCORRECT_DATA); From ca04b077eff5d305af624959a0a463115aa8668c Mon Sep 17 00:00:00 2001 From: Kostiantyn Storozhuk Date: Fri, 25 Jun 2021 22:30:11 +0800 Subject: [PATCH 391/931] Improved column comments support --- src/Interpreters/MySQL/InterpretersMySQLDDLQuery.cpp | 6 ++++-- .../test_materialize_mysql_database/materialize_with_ddl.py | 4 ++++ 2 files changed, 8 insertions(+), 2 deletions(-) diff --git a/src/Interpreters/MySQL/InterpretersMySQLDDLQuery.cpp 
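Note on usage: with the patch above, the first argument of the Merge engine and the merge() table function is treated as a regular expression only when wrapped in REGEXP(); a bare string is now taken as a literal database name. A minimal SQL sketch of the two forms (database and table names here are placeholders, not taken from the patch):

CREATE TABLE t_merge AS db.t ENGINE = Merge(REGEXP('^db'), '^t');  -- match databases by regexp
CREATE TABLE t_merge1 AS db.t ENGINE = Merge('db', '^t$');         -- single database by name
SELECT _database, _table, n FROM merge(REGEXP('^db'), '^t') ORDER BY _database, _table, n;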
From 06bad997201af908bf016b78161a9e1b4fe0592c Mon Sep 17 00:00:00 2001
From: Nikita Mikhaylov
Date: Fri, 25 Jun 2021 17:07:46 +0300
Subject: [PATCH 390/931] Update src/Functions/geoToH3.cpp

Co-authored-by: Bharat Nallan
---
 src/Functions/geoToH3.cpp | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/src/Functions/geoToH3.cpp b/src/Functions/geoToH3.cpp
index 90e29248d32..57973ab94fe 100644
--- a/src/Functions/geoToH3.cpp
+++ b/src/Functions/geoToH3.cpp
@@ -86,7 +86,8 @@ public:
             H3Index hindex;
             H3Error err = latLngToCell(&coord, res, &hindex);
 
-            if (err) {
+            if (err)
+            {
                 throw Exception(
                     "Incorrect coordinates lat:" + std::to_string(coord.lat) + " lng:" + std::to_string(coord.lng) + " err:" + std::to_string(err),
                     ErrorCodes::INCORRECT_DATA);
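For reference, geoToH3 maps a coordinate pair and a resolution to an H3 cell index, and the error path touched above reports an H3 library error as an INCORRECT_DATA exception. A hedged SQL sketch (assuming the usual geoToH3(lon, lat, resolution) argument order; the literals are illustrative only):

SELECT geoToH3(37.79506683, 55.71290588, 15) AS h3Index;  -- valid input returns a cell index
-- out-of-range coordinates are expected to fail with 'Incorrect coordinates lat:... lng:... err:...'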
From ca04b077eff5d305af624959a0a463115aa8668c Mon Sep 17 00:00:00 2001
From: Kostiantyn Storozhuk
Date: Fri, 25 Jun 2021 22:30:11 +0800
Subject: [PATCH 391/931] Improved column comments support

---
 src/Interpreters/MySQL/InterpretersMySQLDDLQuery.cpp        | 6 ++++--
 .../test_materialize_mysql_database/materialize_with_ddl.py | 4 ++++
 2 files changed, 8 insertions(+), 2 deletions(-)

diff --git a/src/Interpreters/MySQL/InterpretersMySQLDDLQuery.cpp b/src/Interpreters/MySQL/InterpretersMySQLDDLQuery.cpp
index fbd537781de..0e08a5d892f 100644
--- a/src/Interpreters/MySQL/InterpretersMySQLDDLQuery.cpp
+++ b/src/Interpreters/MySQL/InterpretersMySQLDDLQuery.cpp
@@ -564,7 +564,8 @@ ASTs InterpreterAlterImpl::getRewrittenQueries(
         if (alter_command->type == MySQLParser::ASTAlterCommand::ADD_COLUMN)
         {
             const auto & additional_columns_name_and_type = getColumnsList(alter_command->additional_columns);
-            const auto & additional_columns = InterpreterCreateQuery::formatColumns(additional_columns_name_and_type);
+            const auto & additional_columns_description = createColumnsDescription(additional_columns_name_and_type, alter_command->additional_columns);
+            const auto & additional_columns = InterpreterCreateQuery::formatColumns(additional_columns_description);
 
             for (size_t index = 0; index < additional_columns_name_and_type.size(); ++index)
             {
@@ -658,7 +659,8 @@ ASTs InterpreterAlterImpl::getRewrittenQueries(
             if (!alter_command->old_name.empty())
                 modify_columns.front().name = alter_command->old_name;
 
-            rewritten_command->col_decl = InterpreterCreateQuery::formatColumns(modify_columns)->children[0];
+            const auto & modify_columns_description = createColumnsDescription(modify_columns, alter_command->additional_columns);
+            rewritten_command->col_decl = InterpreterCreateQuery::formatColumns(modify_columns_description)->children[0];
 
             if (!alter_command->column_name.empty())
             {
diff --git a/tests/integration/test_materialize_mysql_database/materialize_with_ddl.py b/tests/integration/test_materialize_mysql_database/materialize_with_ddl.py
index c5db90821e2..f7228fd6071 100644
--- a/tests/integration/test_materialize_mysql_database/materialize_with_ddl.py
+++ b/tests/integration/test_materialize_mysql_database/materialize_with_ddl.py
@@ -850,6 +850,10 @@ def materialize_with_column_comments_test(clickhouse_node, mysql_node, service_n
     mysql_node.query("CREATE TABLE materialize_with_column_comments_test.test (id int NOT NULL PRIMARY KEY, value VARCHAR(255) COMMENT 'test comment') ENGINE=InnoDB")
     clickhouse_node.query("CREATE DATABASE materialize_with_column_comments_test ENGINE = MaterializeMySQL('{}:3306', 'materialize_with_column_comments_test', 'root', 'clickhouse')".format(service_name))
     check_query(clickhouse_node, "DESCRIBE TABLE materialize_with_column_comments_test.test", "id\tInt32\t\t\t\t\t\nvalue\tNullable(String)\t\t\ttest comment\t\t\n_sign\tInt8\tMATERIALIZED\t1\t\t\t\n_version\tUInt64\tMATERIALIZED\t1\t\t\t\n")
+    mysql_node.query("ALTER TABLE materialize_with_column_comments_test.test MODIFY value VARCHAR(255) COMMENT 'comment test'")
+    check_query(clickhouse_node, "DESCRIBE TABLE materialize_with_column_comments_test.test", "id\tInt32\t\t\t\t\t\nvalue\tNullable(String)\t\t\tcomment test\t\t\n_sign\tInt8\tMATERIALIZED\t1\t\t\t\n_version\tUInt64\tMATERIALIZED\t1\t\t\t\n")
+    mysql_node.query("ALTER TABLE materialize_with_column_comments_test.test ADD value2 int COMMENT 'test comment 2'")
+    check_query(clickhouse_node, "DESCRIBE TABLE materialize_with_column_comments_test.test", "id\tInt32\t\t\t\t\t\nvalue\tNullable(String)\t\t\tcomment test\t\t\nvalue2\tNullable(Int32)\t\t\ttest comment 2\t\t\n_sign\tInt8\tMATERIALIZED\t1\t\t\t\n_version\tUInt64\tMATERIALIZED\t1\t\t\t\n")
     clickhouse_node.query("DROP DATABASE materialize_with_column_comments_test")
     mysql_node.query("DROP DATABASE materialize_with_column_comments_test")
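In plain terms, the test above pins down the new behavior: column comments set or changed on the MySQL side via ALTER ... MODIFY/ADD now survive the DDL rewrite into the MaterializeMySQL replica. A condensed SQL sketch of the round trip (database, table and column names are placeholders):

-- MySQL side
ALTER TABLE db.test MODIFY value VARCHAR(255) COMMENT 'comment test';
-- ClickHouse side, once replication has caught up, the comment column of the output shows the new text
DESCRIBE TABLE db.test;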
From c2ac9b6027e0bb9a0fc966b0287cf1b5b575b660 Mon Sep 17 00:00:00 2001
From: kssenii
Date: Fri, 25 Jun 2021 14:30:58 +0000
Subject: [PATCH 392/931] Better

---
 src/Storages/StorageMerge.cpp | 64 ++++++++++++++---------------------
 1 file changed, 26 insertions(+), 38 deletions(-)

diff --git a/src/Storages/StorageMerge.cpp b/src/Storages/StorageMerge.cpp
index e6c310544f1..a076393e7e6 100644
--- a/src/Storages/StorageMerge.cpp
+++ b/src/Storages/StorageMerge.cpp
@@ -11,6 +11,8 @@
 #include
 #include
 #include
+#include
+#include
 #include
 #include
 #include
@@ -255,51 +257,39 @@ Pipe StorageMerge::read(
 
     if (processed_stage == QueryProcessingStage::FetchColumns && !storage_columns.getAliases().empty())
     {
-        NameSet required_columns_set;
-        std::function extract_columns_from_alias_expression = [&](ASTPtr expr)
+        auto syntax_result = TreeRewriter(local_context).analyzeSelect(query_info.query, TreeRewriterResult({}, storage, storage_metadata_snapshot));
+        ASTPtr required_columns_expr_list = std::make_shared();
+
+        ASTPtr column_expr;
+        for (const auto & column : real_column_names)
         {
-            if (!expr)
-                return;
+            const auto column_default = storage_columns.getDefault(column);
+            bool is_alias = column_default && column_default->kind == ColumnDefaultKind::Alias;
 
-            if (typeid_cast(expr.get()))
-                return;
-
-            if (const auto * ast_function = typeid_cast(expr.get()))
+            if (is_alias)
             {
-                if (!ast_function->arguments)
-                    return;
+                column_expr = column_default->expression->clone();
+                replaceAliasColumnsInQuery(column_expr, storage_metadata_snapshot->getColumns(),
+                                           syntax_result->array_join_result_to_source, local_context);
 
-                for (const auto & arg : ast_function->arguments->children)
-                    extract_columns_from_alias_expression(arg);
-            }
-            else if (const auto * ast_identifier = typeid_cast(expr.get()))
-            {
-                auto column = ast_identifier->name();
-                const auto column_default = storage_columns.getDefault(column);
-                bool is_alias = column_default && column_default->kind == ColumnDefaultKind::Alias;
+                auto column_description = storage_columns.get(column);
+                column_expr = addTypeConversionToAST(std::move(column_expr), column_description.type->getName(),
+                                                     storage_metadata_snapshot->getColumns().getAll(), local_context);
+                column_expr = setAlias(column_expr, column);
 
-                if (is_alias)
-                {
-                    auto alias_expression = column_default->expression;
-                    auto type = sample_block.getByName(column).type;
-                    aliases.push_back({ .name = column, .type = type, .expression = alias_expression });
-                    extract_columns_from_alias_expression(alias_expression);
-                }
-                else
-                {
-                    required_columns_set.insert(column);
-                }
+                auto type = sample_block.getByName(column).type;
+                aliases.push_back({ .name = column, .type = type, .expression = column_expr->clone() });
             }
             else
-            {
-                throw Exception(ErrorCodes::LOGICAL_ERROR, "Unexpected expression: {}", expr->getID());
-            }
-        };
+                column_expr = std::make_shared(column);
 
-        for (const auto & column : real_column_names)
-            extract_columns_from_alias_expression(std::make_shared(column));
+            required_columns_expr_list->children.emplace_back(std::move(column_expr));
+        }
 
-        required_columns = std::vector(required_columns_set.begin(), required_columns_set.end());
+        syntax_result = TreeRewriter(local_context).analyze(required_columns_expr_list, storage_columns.getAllPhysical(),
+                                                            storage, storage_metadata_snapshot);
+        auto alias_actions = ExpressionAnalyzer(required_columns_expr_list, syntax_result, local_context).getActionsDAG(true);
+
+        required_columns = alias_actions->getRequiredColumns().getNames();
     }
 
     auto source_pipe = createSources(
@@ -562,8 +552,6 @@ void StorageMerge::convertingSourceStream(
         {
             pipe_columns.emplace_back(NameAndTypePair(alias.name, alias.type));
             ASTPtr expr = std::move(alias.expression);
-            expr->setAlias(alias.name);
-
             auto syntax_result = TreeRewriter(local_context).analyze(expr, pipe_columns);
             auto expression_analyzer = ExpressionAnalyzer{alias.expression, syntax_result, local_context};
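The rewrite above drops the hand-rolled AST walk and lets TreeRewriter/ExpressionAnalyzer resolve ALIAS columns read through a Merge table, including nested aliases and the type conversion back to the declared column type. A small SQL sketch of the case it serves (table and column names are placeholders):

CREATE TABLE src (n UInt64, n2 ALIAS n * 2) ENGINE = MergeTree ORDER BY n;
CREATE TABLE m AS src ENGINE = Merge(currentDatabase(), '^src$');
SELECT n2 FROM m;  -- the alias expression n * 2 is evaluated against each source table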
From 6bc0a628cd4a803c290ca9116c8f6f9b3d6bea03 Mon Sep 17 00:00:00 2001
From: Nikolai Kochetov
Date: Fri, 25 Jun 2021 17:49:28 +0300
Subject: [PATCH 393/931] Remove PrewhereDAGInfo.

---
 src/Interpreters/ExpressionAnalyzer.cpp       |  3 +-
 src/Interpreters/ExpressionAnalyzer.h         |  2 +-
 src/Interpreters/InterpreterSelectQuery.cpp   | 73 +++++--------------
 .../getHeaderForProcessingStage.cpp           |  4 +-
 .../QueryPlan/ReadFromMergeTree.cpp           |  5 +-
 src/Processors/QueryPlan/ReadFromMergeTree.h  |  1 +
 src/Storages/IStorage.cpp                     |  7 +-
 .../MergeTreeBaseSelectProcessor.cpp          | 36 ++++++---
 .../MergeTree/MergeTreeBaseSelectProcessor.h  |  6 +-
 .../MergeTree/MergeTreeBlockReadUtils.cpp     |  6 +-
 src/Storages/MergeTree/MergeTreeData.cpp      | 29 +++----
 .../MergeTree/MergeTreeRangeReader.cpp        |  2 +-
 src/Storages/MergeTree/MergeTreeRangeReader.h | 22 +++++-
 .../MergeTreeReverseSelectProcessor.cpp       |  3 +-
 .../MergeTreeReverseSelectProcessor.h         |  1 +
 .../MergeTree/MergeTreeSelectProcessor.cpp    |  3 +-
 .../MergeTree/MergeTreeSelectProcessor.h      |  1 +
 ...rgeTreeThreadSelectBlockInputProcessor.cpp |  3 +-
 ...MergeTreeThreadSelectBlockInputProcessor.h |  2 +
 src/Storages/SelectQueryInfo.h                | 26 +------
 src/Storages/StorageBuffer.cpp                |  7 +-
 21 files changed, 108 insertions(+), 134 deletions(-)

diff --git a/src/Interpreters/ExpressionAnalyzer.cpp b/src/Interpreters/ExpressionAnalyzer.cpp
index fe52b30da7b..96f898e3fe6 100644
--- a/src/Interpreters/ExpressionAnalyzer.cpp
+++ b/src/Interpreters/ExpressionAnalyzer.cpp
@@ -1514,7 +1514,7 @@ ExpressionAnalysisResult::ExpressionAnalysisResult(
 
         if (auto actions = query_analyzer.appendPrewhere(chain, !first_stage, additional_required_columns_after_prewhere))
         {
-            prewhere_info = std::make_shared(actions, query.prewhere()->getColumnName());
+            prewhere_info = std::make_shared(actions, query.prewhere()->getColumnName());
 
             if (allowEarlyConstantFolding(*prewhere_info->prewhere_actions, settings))
             {
@@ -1734,7 +1734,6 @@ void ExpressionAnalysisResult::checkActions() const
 
             check_actions(prewhere_info->prewhere_actions);
             check_actions(prewhere_info->alias_actions);
-            check_actions(prewhere_info->remove_columns_actions);
         }
     }
 }
diff --git a/src/Interpreters/ExpressionAnalyzer.h b/src/Interpreters/ExpressionAnalyzer.h
index 70ff5643b7c..272a5166102 100644
--- a/src/Interpreters/ExpressionAnalyzer.h
+++ b/src/Interpreters/ExpressionAnalyzer.h
@@ -239,7 +239,7 @@ struct ExpressionAnalysisResult
     /// Columns will be removed after prewhere actions execution.
     NameSet columns_to_remove_after_prewhere;
 
-    PrewhereDAGInfoPtr prewhere_info;
+    PrewhereInfoPtr prewhere_info;
     FilterDAGInfoPtr filter_info;
     ConstantFilterDescription prewhere_constant_filter_description;
     ConstantFilterDescription where_constant_filter_description;
diff --git a/src/Interpreters/InterpreterSelectQuery.cpp b/src/Interpreters/InterpreterSelectQuery.cpp
index 7cca527cbc1..4d741bfc484 100644
--- a/src/Interpreters/InterpreterSelectQuery.cpp
+++ b/src/Interpreters/InterpreterSelectQuery.cpp
@@ -958,11 +958,11 @@ void InterpreterSelectQuery::executeImpl(QueryPlan & query_plan, const BlockInpu
 
         if (expressions.prewhere_info)
         {
-            if (expressions.prewhere_info->row_level_filter_actions)
+            if (expressions.prewhere_info->row_level_filter)
             {
                 auto row_level_filter_step = std::make_unique(
                     query_plan.getCurrentDataStream(),
-                    expressions.prewhere_info->row_level_filter_actions,
+                    expressions.prewhere_info->row_level_filter,
                     expressions.prewhere_info->row_level_column_name,
                     false);
 
@@ -978,18 +978,6 @@ void InterpreterSelectQuery::executeImpl(QueryPlan & query_plan, const BlockInpu
 
             prewhere_step->setStepDescription("PREWHERE");
             query_plan.addStep(std::move(prewhere_step));
-
-            // To remove additional columns in dry run
-            // For example, sample column which can be removed in this stage
-            // TODO There seems to be no place initializing remove_columns_actions
-            if (expressions.prewhere_info->remove_columns_actions)
-            {
-                auto remove_columns = std::make_unique(
-                    query_plan.getCurrentDataStream(), expressions.prewhere_info->remove_columns_actions);
-
-                remove_columns->setStepDescription("Remove unnecessary columns after PREWHERE");
-                query_plan.addStep(std::move(remove_columns));
-            }
         }
     }
     else
@@ -1479,33 +1467,29 @@ void InterpreterSelectQuery::addEmptySourceToQueryPlan(
 
         if (prewhere_info.alias_actions)
        {
-            pipe.addSimpleTransform(
-                [&](const Block & header) { return std::make_shared(header, prewhere_info.alias_actions); });
+            pipe.addSimpleTransform([&](const Block & header)
+            {
+                return std::make_shared(header,
+                    std::make_shared(prewhere_info.alias_actions));
+            });
         }
 
         if (prewhere_info.row_level_filter)
        {
             pipe.addSimpleTransform([&](const Block & header)
             {
-                return std::make_shared(header, prewhere_info.row_level_filter, prewhere_info.row_level_column_name, true);
+                return std::make_shared(header,
+                    std::make_shared(prewhere_info.row_level_filter),
+                    prewhere_info.row_level_column_name, true);
             });
         }
 
         pipe.addSimpleTransform([&](const Block & header)
         {
             return std::make_shared(
-                header, prewhere_info.prewhere_actions, prewhere_info.prewhere_column_name, prewhere_info.remove_prewhere_column);
+                header, std::make_shared(prewhere_info.prewhere_actions),
+                prewhere_info.prewhere_column_name, prewhere_info.remove_prewhere_column);
         });
-
-        // To remove additional columns
-        // In some cases, we did not read any marks so that the pipeline.streams is empty
-        // Thus, some columns in prewhere are not removed as expected
-        // This leads to mismatched header in distributed table
-        if (prewhere_info.remove_columns_actions)
-        {
-            pipe.addSimpleTransform(
-                [&](const Block & header) { return std::make_shared(header, prewhere_info.remove_columns_actions); });
-        }
     }
 
     auto read_from_pipe = std::make_unique(std::move(pipe));
@@ -1560,7 +1544,7 @@ void InterpreterSelectQuery::addPrewhereAliasActions()
         if (does_storage_support_prewhere && settings.optimize_move_to_prewhere)
         {
             /// Execute row level filter in prewhere as a part of "move to prewhere" optimization.
-            expressions.prewhere_info = std::make_shared(
+            expressions.prewhere_info = std::make_shared(
                 std::move(expressions.filter_info->actions),
                 std::move(expressions.filter_info->column_name));
             expressions.prewhere_info->prewhere_actions->projectInput(false);
@@ -1572,9 +1556,9 @@
         else
         {
             /// Add row level security actions to prewhere.
-            expressions.prewhere_info->row_level_filter_actions = std::move(expressions.filter_info->actions);
+            expressions.prewhere_info->row_level_filter = std::move(expressions.filter_info->actions);
             expressions.prewhere_info->row_level_column_name = std::move(expressions.filter_info->column_name);
-            expressions.prewhere_info->row_level_filter_actions->projectInput(false);
+            expressions.prewhere_info->row_level_filter->projectInput(false);
             expressions.filter_info = nullptr;
         }
     }
@@ -1613,9 +1597,9 @@
         auto prewhere_required_columns = prewhere_info->prewhere_actions->getRequiredColumns().getNames();
         required_columns_from_prewhere.insert(prewhere_required_columns.begin(), prewhere_required_columns.end());
 
-        if (prewhere_info->row_level_filter_actions)
+        if (prewhere_info->row_level_filter)
        {
-            auto row_level_required_columns = prewhere_info->row_level_filter_actions->getRequiredColumns().getNames();
+            auto row_level_required_columns = prewhere_info->row_level_filter->getRequiredColumns().getNames();
             required_columns_from_prewhere.insert(row_level_required_columns.begin(), row_level_required_columns.end());
         }
     }
@@ -1898,28 +1882,7 @@ void InterpreterSelectQuery::executeFetchColumns(QueryProcessingStage::Enum proc
         auto & prewhere_info = analysis_result.prewhere_info;
 
         if (prewhere_info)
-        {
-            auto actions_settings = ExpressionActionsSettings::fromContext(context, CompileExpressions::yes);
-
-            query_info.prewhere_info = std::make_shared();
-            query_info.prewhere_info->prewhere_actions
-                = std::make_shared(prewhere_info->prewhere_actions, actions_settings);
-
-            if (prewhere_info->row_level_filter_actions)
-                query_info.prewhere_info->row_level_filter
-                    = std::make_shared(prewhere_info->row_level_filter_actions, actions_settings);
-            if (prewhere_info->alias_actions)
-                query_info.prewhere_info->alias_actions
-                    = std::make_shared(prewhere_info->alias_actions, actions_settings);
-            if (prewhere_info->remove_columns_actions)
-                query_info.prewhere_info->remove_columns_actions
-                    = std::make_shared(prewhere_info->remove_columns_actions, actions_settings);
-
-            query_info.prewhere_info->prewhere_column_name = prewhere_info->prewhere_column_name;
-            query_info.prewhere_info->remove_prewhere_column = prewhere_info->remove_prewhere_column;
-            query_info.prewhere_info->row_level_column_name = prewhere_info->row_level_column_name;
-            query_info.prewhere_info->need_filter = prewhere_info->need_filter;
-        }
+            query_info.prewhere_info = prewhere_info;
 
         /// Create optimizer with prepared actions.
         /// Maybe we will need to calc input_order_info later, e.g. while reading from StorageMerge.
diff --git a/src/Interpreters/getHeaderForProcessingStage.cpp b/src/Interpreters/getHeaderForProcessingStage.cpp
index 335575a6362..19837cc05d9 100644
--- a/src/Interpreters/getHeaderForProcessingStage.cpp
+++ b/src/Interpreters/getHeaderForProcessingStage.cpp
@@ -98,12 +98,12 @@ Block getHeaderForProcessingStage(
 
                 if (prewhere_info.row_level_filter)
                 {
-                    prewhere_info.row_level_filter->execute(header);
+                    header = prewhere_info.row_level_filter->updateHeader(std::move(header));
                     header.erase(prewhere_info.row_level_column_name);
                 }
 
                 if (prewhere_info.prewhere_actions)
-                    prewhere_info.prewhere_actions->execute(header);
+                    header = prewhere_info.prewhere_actions->updateHeader(std::move(header));
 
                 if (prewhere_info.remove_prewhere_column)
                     header.erase(prewhere_info.prewhere_column_name);
diff --git a/src/Processors/QueryPlan/ReadFromMergeTree.cpp b/src/Processors/QueryPlan/ReadFromMergeTree.cpp
index fd5de98b4c0..2dc8246cde7 100644
--- a/src/Processors/QueryPlan/ReadFromMergeTree.cpp
+++ b/src/Processors/QueryPlan/ReadFromMergeTree.cpp
@@ -94,6 +94,7 @@ ReadFromMergeTree::ReadFromMergeTree(
     , data(data_)
     , query_info(query_info_)
     , prewhere_info(getPrewhereInfo(query_info))
+    , actions_settings(ExpressionActionsSettings::fromContext(context_))
     , metadata_snapshot(std::move(metadata_snapshot_))
     , metadata_snapshot_base(std::move(metadata_snapshot_base_))
     , context(std::move(context_))
@@ -157,7 +158,7 @@ Pipe ReadFromMergeTree::readFromPool(
             i, pool, min_marks_for_concurrent_read, max_block_size,
             settings.preferred_block_size_bytes, settings.preferred_max_column_in_block_size_bytes,
             data, metadata_snapshot, use_uncompressed_cache,
-            prewhere_info, reader_settings, virt_column_names);
+            prewhere_info, actions_settings, reader_settings, virt_column_names);
 
         if (i == 0)
         {
@@ -180,7 +181,7 @@ ProcessorPtr ReadFromMergeTree::createSource(
     return std::make_shared(
         data, metadata_snapshot, part.data_part, max_block_size, preferred_block_size_bytes,
         preferred_max_column_in_block_size_bytes, required_columns, part.ranges, use_uncompressed_cache,
-        prewhere_info, true, reader_settings, virt_column_names, part.part_index_in_query);
+        prewhere_info, actions_settings, true, reader_settings, virt_column_names, part.part_index_in_query);
 }
 
 Pipe ReadFromMergeTree::readInOrder(
diff --git a/src/Processors/QueryPlan/ReadFromMergeTree.h b/src/Processors/QueryPlan/ReadFromMergeTree.h
index 6e1efffdb02..a5184d28593 100644
--- a/src/Processors/QueryPlan/ReadFromMergeTree.h
+++ b/src/Processors/QueryPlan/ReadFromMergeTree.h
@@ -90,6 +90,7 @@ private:
     const MergeTreeData & data;
     SelectQueryInfo query_info;
     PrewhereInfoPtr prewhere_info;
+    ExpressionActionsSettings actions_settings;
 
     StorageMetadataPtr metadata_snapshot;
     StorageMetadataPtr metadata_snapshot_base;
diff --git a/src/Storages/IStorage.cpp b/src/Storages/IStorage.cpp
index 83c91dffd7f..c73eb62d039 100644
--- a/src/Storages/IStorage.cpp
+++ b/src/Storages/IStorage.cpp
@@ -198,7 +198,7 @@ NameDependencies IStorage::getDependentViewsByColumn(ContextPtr context) const
     return name_deps;
 }
 
-std::string PrewhereDAGInfo::dump() const
+std::string PrewhereInfo::dump() const
 {
     WriteBufferFromOwnString ss;
     ss << "PrewhereDagInfo\n";
@@ -213,11 +213,6 @@ std::string PrewhereDAGInfo::dump() const
         ss << "prewhere_actions " << prewhere_actions->dumpDAG() << "\n";
     }
 
-    if (remove_columns_actions)
-    {
-        ss << "remove_columns_actions " << remove_columns_actions->dumpDAG() << "\n";
-    }
-
     ss << "remove_prewhere_column " << remove_prewhere_column << ", need_filter " << need_filter << "\n";
diff --git a/src/Storages/MergeTree/MergeTreeBaseSelectProcessor.cpp b/src/Storages/MergeTree/MergeTreeBaseSelectProcessor.cpp
index d9cb949042c..68f754b08fb 100644
--- a/src/Storages/MergeTree/MergeTreeBaseSelectProcessor.cpp
+++ b/src/Storages/MergeTree/MergeTreeBaseSelectProcessor.cpp
@@ -26,6 +26,7 @@ MergeTreeBaseSelectProcessor::MergeTreeBaseSelectProcessor(
     const MergeTreeData & storage_,
     const StorageMetadataPtr & metadata_snapshot_,
     const PrewhereInfoPtr & prewhere_info_,
+    ExpressionActionsSettings actions_settings,
     UInt64 max_block_size_rows_,
     UInt64 preferred_block_size_bytes_,
     UInt64 preferred_max_column_in_block_size_bytes_,
@@ -49,6 +50,23 @@ MergeTreeBaseSelectProcessor::MergeTreeBaseSelectProcessor(
     for (auto it = virt_column_names.rbegin(); it != virt_column_names.rend(); ++it)
         if (header_without_virtual_columns.has(*it))
             header_without_virtual_columns.erase(*it);
+
+    if (prewhere_info)
+    {
+        prewhere_actions = std::make_unique();
+        if (prewhere_info->alias_actions)
+            prewhere_actions->alias_actions = std::make_shared(prewhere_info->alias_actions, actions_settings);
+
+        if (prewhere_info->row_level_filter)
+            prewhere_actions->row_level_filter = std::make_shared(prewhere_info->row_level_filter, actions_settings);
+
+        prewhere_actions->prewhere_actions = std::make_shared(prewhere_info->prewhere_actions, actions_settings);
+
+        prewhere_actions->row_level_column_name = prewhere_info->row_level_column_name;
+        prewhere_actions->prewhere_column_name = prewhere_info->prewhere_column_name;
+        prewhere_actions->remove_prewhere_column = prewhere_info->remove_prewhere_column;
+        prewhere_actions->need_filter = prewhere_info->need_filter;
+    }
 }
 
 
@@ -78,14 +96,14 @@ void MergeTreeBaseSelectProcessor::initializeRangeReaders(MergeTreeReadTask & cu
     {
         if (reader->getColumns().empty())
         {
-            current_task.range_reader = MergeTreeRangeReader(pre_reader.get(), nullptr, prewhere_info, true);
+            current_task.range_reader = MergeTreeRangeReader(pre_reader.get(), nullptr, prewhere_actions.get(), true);
        }
         else
         {
             MergeTreeRangeReader * pre_reader_ptr = nullptr;
             if (pre_reader != nullptr)
             {
-                current_task.pre_range_reader = MergeTreeRangeReader(pre_reader.get(), nullptr, prewhere_info, false);
+                current_task.pre_range_reader = MergeTreeRangeReader(pre_reader.get(), nullptr, prewhere_actions.get(), false);
                 pre_reader_ptr = &current_task.pre_range_reader;
             }
 
@@ -396,16 +414,17 @@ void MergeTreeBaseSelectProcessor::injectVirtualColumns(
     chunk.setColumns(columns, num_rows);
 }
 
-void MergeTreeBaseSelectProcessor::executePrewhereActions(Block & block, const PrewhereInfoPtr & prewhere_info)
+Block MergeTreeBaseSelectProcessor::transformHeader(
+    Block block, const PrewhereInfoPtr & prewhere_info, const DataTypePtr & partition_value_type, const Names & virtual_columns)
 {
     if (prewhere_info)
     {
         if (prewhere_info->alias_actions)
-            prewhere_info->alias_actions->execute(block);
+            block = prewhere_info->alias_actions->updateHeader(std::move(block));
 
         if (prewhere_info->row_level_filter)
         {
-            prewhere_info->row_level_filter->execute(block);
+            block = prewhere_info->row_level_filter->updateHeader(std::move(block));
             auto & row_level_column = block.getByName(prewhere_info->row_level_column_name);
             if (!row_level_column.type->canBeUsedInBooleanContext())
             {
@@ -417,7 +436,7 @@
         }
 
         if (prewhere_info->prewhere_actions)
-            prewhere_info->prewhere_actions->execute(block);
+            block = prewhere_info->prewhere_actions->updateHeader(std::move(block));
 
         auto & prewhere_column = block.getByName(prewhere_info->prewhere_column_name);
         if (!prewhere_column.type->canBeUsedInBooleanContext())
@@ -434,12 +453,7 @@
             ctn.column = ctn.type->createColumnConst(block.rows(), 1u)->convertToFullColumnIfConst();
         }
     }
-}
 
-Block MergeTreeBaseSelectProcessor::transformHeader(
-    Block block, const PrewhereInfoPtr & prewhere_info, const DataTypePtr & partition_value_type, const Names & virtual_columns)
-{
-    executePrewhereActions(block, prewhere_info);
     injectVirtualColumns(block, nullptr, partition_value_type, virtual_columns);
     return block;
 }
diff --git a/src/Storages/MergeTree/MergeTreeBaseSelectProcessor.h b/src/Storages/MergeTree/MergeTreeBaseSelectProcessor.h
index 4615dec089f..2ae39dbb058 100644
--- a/src/Storages/MergeTree/MergeTreeBaseSelectProcessor.h
+++ b/src/Storages/MergeTree/MergeTreeBaseSelectProcessor.h
@@ -13,7 +13,7 @@ namespace DB
 class IMergeTreeReader;
 class UncompressedCache;
 class MarkCache;
-
+struct PrewhereActions;
 
 /// Base class for MergeTreeThreadSelectProcessor and MergeTreeSelectProcessor
 class MergeTreeBaseSelectProcessor : public SourceWithProgress
@@ -24,6 +24,7 @@ public:
         const MergeTreeData & storage_,
         const StorageMetadataPtr & metadata_snapshot_,
         const PrewhereInfoPtr & prewhere_info_,
+        ExpressionActionsSettings actions_settings,
         UInt64 max_block_size_rows_,
         UInt64 preferred_block_size_bytes_,
         UInt64 preferred_max_column_in_block_size_bytes_,
@@ -36,8 +37,6 @@ public:
     static Block transformHeader(
         Block block, const PrewhereInfoPtr & prewhere_info, const DataTypePtr & partition_value_type, const Names & virtual_columns);
 
-    static void executePrewhereActions(Block & block, const PrewhereInfoPtr & prewhere_info);
-
 protected:
     Chunk generate() final;
 
@@ -61,6 +60,7 @@ protected:
     StorageMetadataPtr metadata_snapshot;
 
     PrewhereInfoPtr prewhere_info;
+    std::unique_ptr prewhere_actions;
 
     UInt64 max_block_size_rows;
     UInt64 preferred_block_size_bytes;
diff --git a/src/Storages/MergeTree/MergeTreeBlockReadUtils.cpp b/src/Storages/MergeTree/MergeTreeBlockReadUtils.cpp
index 15b4fbd31c0..b8698ae3e01 100644
--- a/src/Storages/MergeTree/MergeTreeBlockReadUtils.cpp
+++ b/src/Storages/MergeTree/MergeTreeBlockReadUtils.cpp
@@ -272,16 +272,16 @@ MergeTreeReadTaskColumns getReadTaskColumns(
     if (prewhere_info)
     {
         if (prewhere_info->alias_actions)
-            pre_column_names = prewhere_info->alias_actions->getRequiredColumns();
+            pre_column_names = prewhere_info->alias_actions->getRequiredColumnsNames();
         else
         {
-            pre_column_names = prewhere_info->prewhere_actions->getRequiredColumns();
+            pre_column_names = prewhere_info->prewhere_actions->getRequiredColumnsNames();
 
             if (prewhere_info->row_level_filter)
             {
                 NameSet names(pre_column_names.begin(), pre_column_names.end());
 
-                for (auto & name : prewhere_info->row_level_filter->getRequiredColumns())
+                for (auto & name : prewhere_info->row_level_filter->getRequiredColumnsNames())
                 {
                     if (names.count(name) == 0)
                         pre_column_names.push_back(name);
diff --git a/src/Storages/MergeTree/MergeTreeData.cpp b/src/Storages/MergeTree/MergeTreeData.cpp
index abc37f52ff9..f6d542d5f2c 100644
--- a/src/Storages/MergeTree/MergeTreeData.cpp
+++ b/src/Storages/MergeTree/MergeTreeData.cpp
@@ -3940,15 +3940,9 @@ bool MergeTreeData::getQueryProcessingStageWithAggregateProjection(
 
         if (analysis_result.prewhere_info)
         {
-            const auto & prewhere_info = analysis_result.prewhere_info;
-            candidate.prewhere_info = std::make_shared();
-            candidate.prewhere_info->prewhere_column_name = prewhere_info->prewhere_column_name;
-            candidate.prewhere_info->remove_prewhere_column = prewhere_info->remove_prewhere_column;
-            // std::cerr << fmt::format("remove prewhere column : {}", candidate.prewhere_info->remove_prewhere_column) << std::endl;
-            candidate.prewhere_info->row_level_column_name = prewhere_info->row_level_column_name;
-            candidate.prewhere_info->need_filter = prewhere_info->need_filter;
+            candidate.prewhere_info = analysis_result.prewhere_info;
 
-            auto prewhere_actions = prewhere_info->prewhere_actions->clone();
+            auto prewhere_actions = candidate.prewhere_info->prewhere_actions->clone();
             auto prewhere_required_columns = required_columns;
             // required_columns should not contain columns generated by prewhere
             for (const auto & column : prewhere_actions->getResultColumns())
@@ -3956,28 +3950,27 @@ bool MergeTreeData::getQueryProcessingStageWithAggregateProjection(
             // std::cerr << fmt::format("prewhere_actions = \n{}", prewhere_actions->dumpDAG()) << std::endl;
             // Prewhere_action should not add missing keys.
             prewhere_required_columns = prewhere_actions->foldActionsByProjection(
-                prewhere_required_columns, projection.sample_block_for_keys, prewhere_info->prewhere_column_name, false);
+                prewhere_required_columns, projection.sample_block_for_keys, candidate.prewhere_info->prewhere_column_name, false);
             // std::cerr << fmt::format("prewhere_actions = \n{}", prewhere_actions->dumpDAG()) << std::endl;
             // std::cerr << fmt::format("prewhere_required_columns = \n{}", fmt::join(prewhere_required_columns, ", ")) << std::endl;
             if (prewhere_required_columns.empty())
                 return false;
-            candidate.prewhere_info->prewhere_actions = std::make_shared(prewhere_actions, actions_settings);
+            candidate.prewhere_info->prewhere_actions = prewhere_actions;
 
-            if (prewhere_info->row_level_filter_actions)
+            if (candidate.prewhere_info->row_level_filter)
             {
-                auto row_level_filter_actions = prewhere_info->row_level_filter_actions->clone();
+                auto row_level_filter_actions = candidate.prewhere_info->row_level_filter->clone();
                 prewhere_required_columns = row_level_filter_actions->foldActionsByProjection(
-                    prewhere_required_columns, projection.sample_block_for_keys, prewhere_info->row_level_column_name, false);
+                    prewhere_required_columns, projection.sample_block_for_keys, candidate.prewhere_info->row_level_column_name, false);
                 // std::cerr << fmt::format("row_level_filter_required_columns = \n{}", fmt::join(prewhere_required_columns, ", ")) << std::endl;
                 if (prewhere_required_columns.empty())
                     return false;
-                candidate.prewhere_info->row_level_filter
-                    = std::make_shared(row_level_filter_actions, actions_settings);
+                candidate.prewhere_info->row_level_filter = row_level_filter_actions;
             }
-            if (prewhere_info->alias_actions)
+            if (candidate.prewhere_info->alias_actions)
             {
-                auto alias_actions = prewhere_info->alias_actions->clone();
+                auto alias_actions = candidate.prewhere_info->alias_actions->clone();
                 // std::cerr << fmt::format("alias_actions = \n{}", alias_actions->dumpDAG()) << std::endl;
                 prewhere_required_columns
                     = alias_actions->foldActionsByProjection(prewhere_required_columns, projection.sample_block_for_keys, {}, false);
                 // std::cerr << fmt::format("alias_required_columns = \n{}", fmt::join(prewhere_required_columns, ", ")) << std::endl;
                 if (prewhere_required_columns.empty())
                     return false;
-                candidate.prewhere_info->alias_actions = std::make_shared(alias_actions, actions_settings);
+                candidate.prewhere_info->alias_actions = alias_actions;
             }
             required_columns.insert(prewhere_required_columns.begin(), prewhere_required_columns.end());
         }
diff --git a/src/Storages/MergeTree/MergeTreeRangeReader.cpp b/src/Storages/MergeTree/MergeTreeRangeReader.cpp
index 27682b81c94..8072aa6a3dc 100644
--- a/src/Storages/MergeTree/MergeTreeRangeReader.cpp
+++ b/src/Storages/MergeTree/MergeTreeRangeReader.cpp
@@ -520,7 +520,7 @@ size_t MergeTreeRangeReader::ReadResult::countBytesInResultFilter(const IColumn:
 MergeTreeRangeReader::MergeTreeRangeReader(
     IMergeTreeReader * merge_tree_reader_,
     MergeTreeRangeReader * prev_reader_,
-    const PrewhereInfoPtr & prewhere_info_,
+    const PrewhereActions * prewhere_info_,
     bool last_reader_in_chain_)
     : merge_tree_reader(merge_tree_reader_)
     , index_granularity(&(merge_tree_reader->data_part->index_granularity))
diff --git a/src/Storages/MergeTree/MergeTreeRangeReader.h b/src/Storages/MergeTree/MergeTreeRangeReader.h
index 18075e52bdd..7c36ca49c99 100644
--- a/src/Storages/MergeTree/MergeTreeRangeReader.h
+++ b/src/Storages/MergeTree/MergeTreeRangeReader.h
@@ -15,6 +15,24 @@ class MergeTreeIndexGranularity;
 struct PrewhereInfo;
 using PrewhereInfoPtr = std::shared_ptr;
 
+class ExpressionActions;
+using ExpressionActionsPtr = std::shared_ptr;
+
+struct PrewhereActions
+{
+    /// Actions which are executed in order to alias columns are used for prewhere actions.
+    ExpressionActionsPtr alias_actions;
+    /// Actions for row level security filter. Applied separately before prewhere_actions.
+    /// These actions are separate because prewhere condition should not be executed over filtered rows.
+    ExpressionActionsPtr row_level_filter;
+    /// Actions which are executed on block in order to get filter column for prewhere step.
+    ExpressionActionsPtr prewhere_actions;
+    String row_level_column_name;
+    String prewhere_column_name;
+    bool remove_prewhere_column = false;
+    bool need_filter = false;
+};
+
 /// MergeTreeReader iterator which allows sequential reading for arbitrary number of rows between pairs of marks in the same part.
 /// Stores reading state, which can be inside granule. Can skip rows in current granule and start reading from next mark.
 /// Used generally for reading number of rows less than index granularity to decrease cache misses for fat blocks.
@@ -24,7 +42,7 @@ public:
     MergeTreeRangeReader(
         IMergeTreeReader * merge_tree_reader_,
         MergeTreeRangeReader * prev_reader_,
-        const PrewhereInfoPtr & prewhere_info_,
+        const PrewhereActions * prewhere_info_,
         bool last_reader_in_chain_);
 
     MergeTreeRangeReader() = default;
@@ -217,7 +235,7 @@ private:
     IMergeTreeReader * merge_tree_reader = nullptr;
     const MergeTreeIndexGranularity * index_granularity = nullptr;
     MergeTreeRangeReader * prev_reader = nullptr; /// If not nullptr, read from prev_reader firstly.
-    PrewhereInfoPtr prewhere_info;
+    const PrewhereActions * prewhere_info;
 
     Stream stream;
diff --git a/src/Storages/MergeTree/MergeTreeReverseSelectProcessor.cpp b/src/Storages/MergeTree/MergeTreeReverseSelectProcessor.cpp
index e9527efaa4a..81833b76735 100644
--- a/src/Storages/MergeTree/MergeTreeReverseSelectProcessor.cpp
+++ b/src/Storages/MergeTree/MergeTreeReverseSelectProcessor.cpp
@@ -23,6 +23,7 @@ MergeTreeReverseSelectProcessor::MergeTreeReverseSelectProcessor(
     MarkRanges mark_ranges_,
     bool use_uncompressed_cache_,
     const PrewhereInfoPtr & prewhere_info_,
+    ExpressionActionsSettings actions_settings,
     bool check_columns,
     const MergeTreeReaderSettings & reader_settings_,
     const Names & virt_column_names_,
@@ -31,7 +32,7 @@ MergeTreeReverseSelectProcessor::MergeTreeReverseSelectProcessor(
     :
     MergeTreeBaseSelectProcessor{
         metadata_snapshot_->getSampleBlockForColumns(required_columns_, storage_.getVirtuals(), storage_.getStorageID()),
-        storage_, metadata_snapshot_, prewhere_info_, max_block_size_rows_,
+        storage_, metadata_snapshot_, prewhere_info_, std::move(actions_settings), max_block_size_rows_,
         preferred_block_size_bytes_, preferred_max_column_in_block_size_bytes_,
         reader_settings_, use_uncompressed_cache_, virt_column_names_},
     required_columns{std::move(required_columns_)},
diff --git a/src/Storages/MergeTree/MergeTreeReverseSelectProcessor.h b/src/Storages/MergeTree/MergeTreeReverseSelectProcessor.h
index c9fd06c5534..b807c2d912c 100644
--- a/src/Storages/MergeTree/MergeTreeReverseSelectProcessor.h
+++ b/src/Storages/MergeTree/MergeTreeReverseSelectProcessor.h
@@ -27,6 +27,7 @@ public:
         MarkRanges mark_ranges,
         bool use_uncompressed_cache,
         const PrewhereInfoPtr & prewhere_info,
+        ExpressionActionsSettings actions_settings,
         bool check_columns,
         const MergeTreeReaderSettings & reader_settings,
         const Names & virt_column_names = {},
diff --git a/src/Storages/MergeTree/MergeTreeSelectProcessor.cpp b/src/Storages/MergeTree/MergeTreeSelectProcessor.cpp
index 980afa170e9..ce342a69fe0 100644
--- a/src/Storages/MergeTree/MergeTreeSelectProcessor.cpp
+++ b/src/Storages/MergeTree/MergeTreeSelectProcessor.cpp
@@ -23,6 +23,7 @@ MergeTreeSelectProcessor::MergeTreeSelectProcessor(
     MarkRanges mark_ranges_,
     bool use_uncompressed_cache_,
     const PrewhereInfoPtr & prewhere_info_,
+    ExpressionActionsSettings actions_settings,
     bool check_columns_,
     const MergeTreeReaderSettings & reader_settings_,
     const Names & virt_column_names_,
@@ -31,7 +32,7 @@ MergeTreeSelectProcessor::MergeTreeSelectProcessor(
     :
     MergeTreeBaseSelectProcessor{
        metadata_snapshot_->getSampleBlockForColumns(required_columns_, storage_.getVirtuals(), storage_.getStorageID()),
-        storage_, metadata_snapshot_, prewhere_info_, max_block_size_rows_,
+        storage_, metadata_snapshot_, prewhere_info_, std::move(actions_settings), max_block_size_rows_,
         preferred_block_size_bytes_, preferred_max_column_in_block_size_bytes_,
         reader_settings_, use_uncompressed_cache_, virt_column_names_},
     required_columns{std::move(required_columns_)},
diff --git a/src/Storages/MergeTree/MergeTreeSelectProcessor.h b/src/Storages/MergeTree/MergeTreeSelectProcessor.h
index 925c437f1ce..b63107b6dbf 100644
--- a/src/Storages/MergeTree/MergeTreeSelectProcessor.h
+++ b/src/Storages/MergeTree/MergeTreeSelectProcessor.h
@@ -27,6 +27,7 @@ public:
         MarkRanges mark_ranges,
         bool use_uncompressed_cache,
         const PrewhereInfoPtr & prewhere_info,
+        ExpressionActionsSettings actions_settings,
        bool check_columns,
         const MergeTreeReaderSettings & reader_settings,
         const Names & virt_column_names = {},
diff --git a/src/Storages/MergeTree/MergeTreeThreadSelectBlockInputProcessor.cpp b/src/Storages/MergeTree/MergeTreeThreadSelectBlockInputProcessor.cpp
index 2f0aad77d96..daefb17038a 100644
--- a/src/Storages/MergeTree/MergeTreeThreadSelectBlockInputProcessor.cpp
+++ b/src/Storages/MergeTree/MergeTreeThreadSelectBlockInputProcessor.cpp
@@ -19,11 +19,12 @@ MergeTreeThreadSelectBlockInputProcessor::MergeTreeThreadSelectBlockInputProcess
     const StorageMetadataPtr & metadata_snapshot_,
     const bool use_uncompressed_cache_,
     const PrewhereInfoPtr & prewhere_info_,
+    ExpressionActionsSettings actions_settings,
     const MergeTreeReaderSettings & reader_settings_,
     const Names & virt_column_names_)
     :
     MergeTreeBaseSelectProcessor{
-        pool_->getHeader(), storage_, metadata_snapshot_, prewhere_info_, max_block_size_rows_,
+        pool_->getHeader(), storage_, metadata_snapshot_, prewhere_info_, std::move(actions_settings), max_block_size_rows_,
         preferred_block_size_bytes_, preferred_max_column_in_block_size_bytes_,
         reader_settings_, use_uncompressed_cache_, virt_column_names_},
     thread{thread_},
diff --git a/src/Storages/MergeTree/MergeTreeThreadSelectBlockInputProcessor.h b/src/Storages/MergeTree/MergeTreeThreadSelectBlockInputProcessor.h
index 2b2ed36fc18..30c551eede0 100644
--- a/src/Storages/MergeTree/MergeTreeThreadSelectBlockInputProcessor.h
+++ b/src/Storages/MergeTree/MergeTreeThreadSelectBlockInputProcessor.h
@@ -25,7 +25,9 @@ public:
         const StorageMetadataPtr & metadata_snapshot_,
         const bool use_uncompressed_cache_,
         const PrewhereInfoPtr & prewhere_info_,
+        ExpressionActionsSettings actions_settings,
         const MergeTreeReaderSettings & reader_settings_,
+
         const Names & virt_column_names_);
 
     String getName() const override { return "MergeTreeThread"; }
diff --git a/src/Storages/SelectQueryInfo.h b/src/Storages/SelectQueryInfo.h
index 73cf3893a89..a7d2ae3e7dd 100644
--- a/src/Storages/SelectQueryInfo.h
+++ b/src/Storages/SelectQueryInfo.h
@@ -21,9 +21,6 @@ using ActionsDAGPtr = std::shared_ptr;
 struct PrewhereInfo;
 using PrewhereInfoPtr = std::shared_ptr;
 
-struct PrewhereDAGInfo;
-using PrewhereDAGInfoPtr = std::shared_ptr;
-
 struct FilterInfo;
 using FilterInfoPtr = std::shared_ptr;
 
@@ -45,34 +42,19 @@ using ClusterPtr = std::shared_ptr;
 struct PrewhereInfo
 {
     /// Actions which are executed in order to alias columns are used for prewhere actions.
-    ExpressionActionsPtr alias_actions;
+    ActionsDAGPtr alias_actions;
     /// Actions for row level security filter. Applied separately before prewhere_actions.
     /// These actions are separate because prewhere condition should not be executed over filtered rows.
-    ExpressionActionsPtr row_level_filter;
+    ActionsDAGPtr row_level_filter;
     /// Actions which are executed on block in order to get filter column for prewhere step.
-    ExpressionActionsPtr prewhere_actions;
-    /// Actions which are executed after reading from storage in order to remove unused columns.
-    ExpressionActionsPtr remove_columns_actions;
-    String row_level_column_name;
-    String prewhere_column_name;
-    bool remove_prewhere_column = false;
-    bool need_filter = false;
-};
-
-/// Same as PrewhereInfo, but with ActionsDAG.
-struct PrewhereDAGInfo
-{
-    ActionsDAGPtr alias_actions;
-    ActionsDAGPtr row_level_filter_actions;
     ActionsDAGPtr prewhere_actions;
-    ActionsDAGPtr remove_columns_actions;
     String row_level_column_name;
     String prewhere_column_name;
     bool remove_prewhere_column = false;
     bool need_filter = false;
 
-    PrewhereDAGInfo() = default;
-    explicit PrewhereDAGInfo(ActionsDAGPtr prewhere_actions_, String prewhere_column_name_)
+    PrewhereInfo() = default;
+    explicit PrewhereInfo(ActionsDAGPtr prewhere_actions_, String prewhere_column_name_)
         : prewhere_actions(std::move(prewhere_actions_)), prewhere_column_name(std::move(prewhere_column_name_)) {}
 
     std::string dump() const;
diff --git a/src/Storages/StorageBuffer.cpp b/src/Storages/StorageBuffer.cpp
index 55dc2d12c9d..a433cd248c7 100644
--- a/src/Storages/StorageBuffer.cpp
+++ b/src/Storages/StorageBuffer.cpp
@@ -369,13 +369,14 @@ void StorageBuffer::read(
         {
             if (query_info.prewhere_info)
             {
+                auto actions_settings = ExpressionActionsSettings::fromContext(local_context);
                 if (query_info.prewhere_info->alias_actions)
                 {
                     pipe_from_buffers.addSimpleTransform([&](const Block & header)
                     {
                         return std::make_shared(
                             header,
-                            query_info.prewhere_info->alias_actions);
+                            std::make_shared(query_info.prewhere_info->alias_actions, actions_settings));
                     });
                 }
 
@@ -385,7 +386,7 @@
                     {
                         return std::make_shared(
                             header,
-                            query_info.prewhere_info->row_level_filter,
+                            std::make_shared(query_info.prewhere_info->row_level_filter, actions_settings),
                             query_info.prewhere_info->row_level_column_name,
                             false);
                     });
@@ -395,7 +396,7 @@
                 {
                     return std::make_shared(
                         header,
-                        query_info.prewhere_info->prewhere_actions,
+                        std::make_shared(query_info.prewhere_info->prewhere_actions, actions_settings),
                         query_info.prewhere_info->prewhere_column_name,
                         query_info.prewhere_info->remove_prewhere_column);
                 });
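The refactoring above keeps prewhere actions as an ActionsDAG in PrewhereInfo and materializes ExpressionActions (the new PrewhereActions struct) only inside the MergeTree reading code; user-visible PREWHERE semantics do not change. For reference, a SQL sketch of the construct it serves (the table and column names are placeholders):

SELECT key, value FROM t PREWHERE value > 0;  -- the filter column is read and applied before the remaining columns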
From 61550f91f2c15744e1eb96a284b104c75433117d Mon Sep 17 00:00:00 2001
From: feng lv
Date: Fri, 25 Jun 2021 15:01:57 +0000
Subject: [PATCH 394/931] fix and update test

---
 src/Common/StringUtils/StringUtils.h          | 1 -
 src/Interpreters/DatabaseCatalog.cpp          | 32 ++++++++---------
 src/Interpreters/DatabaseCatalog.h            | 1 -
 .../evaluateConstantExpression.cpp            | 6 ++--
 src/Storages/StorageMerge.cpp                 | 7 ++--
 ...902_table_function_merge_db_repr.reference | 31 ++++++++++++++---
 .../01902_table_function_merge_db_repr.sql    | 34 ++++++++++++-------
 7 files changed, 70 insertions(+), 42 deletions(-)

diff --git a/src/Common/StringUtils/StringUtils.h b/src/Common/StringUtils/StringUtils.h
index b37e447082b..20c0a5ca380 100644
--- a/src/Common/StringUtils/StringUtils.h
+++ b/src/Common/StringUtils/StringUtils.h
@@ -283,4 +283,3 @@ inline void trim(std::string_view & str, char c = ' ')
     trimLeft(str, c);
     trimRight(str, c);
 }
-}
diff --git a/src/Interpreters/DatabaseCatalog.cpp b/src/Interpreters/DatabaseCatalog.cpp
index 82f800f2c9b..4ed4f258b29 100644
--- a/src/Interpreters/DatabaseCatalog.cpp
+++ b/src/Interpreters/DatabaseCatalog.cpp
@@ -1,25 +1,23 @@
-#include
-#include
-#include
+#include
+#include
+#include
+#include
+#include
 #include
 #include
-#include
 #include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
 #include
+#include
+#include
+#include
+#include
+#include
+#include
 #include
+#include
 #include
-#include
-#include
-#include
 
 #if !defined(ARCADIA_BUILD)
 #    include "config_core.h"
diff --git a/src/Interpreters/DatabaseCatalog.h b/src/Interpreters/DatabaseCatalog.h
index 2a8aa742f0c..74bfb814ce4 100644
--- a/src/Interpreters/DatabaseCatalog.h
+++ b/src/Interpreters/DatabaseCatalog.h
@@ -1,6 +1,5 @@
 #pragma once
 
-#include
 #include
 #include
 #include
diff --git a/src/Interpreters/evaluateConstantExpression.cpp b/src/Interpreters/evaluateConstantExpression.cpp
index 13d9ef47894..93cc1b4516c 100644
--- a/src/Interpreters/evaluateConstantExpression.cpp
+++ b/src/Interpreters/evaluateConstantExpression.cpp
@@ -102,12 +102,12 @@
 
 std::tuple evaluateDatabaseNameForMergeEngine(const ASTPtr & node, ContextPtr context)
 {
-    if (const auto * func = node->as(); func->name == "REGEXP")
+    if (const auto * func = node->as(); func && func->name == "REGEXP")
     {
-        if (func->children.size() != 1)
+        if (func->arguments->children.size() != 1)
             throw Exception("Arguments for REGEXP in Merge ENGINE should be 1", ErrorCodes::BAD_ARGUMENTS);
 
-        auto * literal = func->children[0]->as();
+        auto * literal = func->arguments->children[0]->as();
         if (!literal || literal->value.safeGet().empty())
             throw Exception("Argument for REGEXP in Merge ENGINE should be a non empty String Literal", ErrorCodes::BAD_ARGUMENTS);
 
diff --git a/src/Storages/StorageMerge.cpp b/src/Storages/StorageMerge.cpp
index 93cf224feb8..7af0f5a71b3 100644
--- a/src/Storages/StorageMerge.cpp
+++ b/src/Storages/StorageMerge.cpp
@@ -670,14 +670,13 @@ void registerStorageMerge(StorageFactory & factory)
                 " - name of source database and regexp for table names.",
                 ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH);
 
-        engine_args[0] = evaluateConstantExpressionForDatabaseName(engine_args[0], args.getLocalContext());
-        engine_args[1] = evaluateConstantExpressionAsLiteral(engine_args[1], args.getLocalContext());
+        auto [is_regexp, source_database_name_or_regexp] = evaluateDatabaseNameForMergeEngine(engine_args[0], args.getLocalContext());
 
-        String source_database_regexp = engine_args[0]->as().value.safeGet();
+        engine_args[1] = evaluateConstantExpressionAsLiteral(engine_args[1], args.getLocalContext());
         String table_name_regexp = engine_args[1]->as().value.safeGet();
 
         return StorageMerge::create(
-            args.table_id, args.columns, args.comment, source_database_regexp, table_name_regexp, args.getContext());
+            args.table_id, args.columns, args.comment, source_database_name_or_regexp, is_regexp, table_name_regexp, args.getContext());
     });
 }
diff --git a/tests/queries/0_stateless/01902_table_function_merge_db_repr.reference b/tests/queries/0_stateless/01902_table_function_merge_db_repr.reference
index f87f140a985..18cd6b85306 100644
--- a/tests/queries/0_stateless/01902_table_function_merge_db_repr.reference
+++ b/tests/queries/0_stateless/01902_table_function_merge_db_repr.reference
@@ -1,4 +1,4 @@
-CREATE TABLE t_merge as 01902_db.t ENGINE=Merge(^01902_db.*, ^t.*)
+CREATE TABLE t_merge as 01902_db.t ENGINE=Merge(REGEXP(^01902_db), ^t)
 SELECT _database, _table, n FROM 01902_db.t_merge ORDER BY _database, _table, n
 01902_db t 0
 01902_db t 1
@@ -40,7 +40,7 @@ SELECT _database, _table, n FROM 01902_db.t_merge ORDER BY _database, _table, n
 01902_db3 t3 7
 01902_db3 t3 8
 01902_db3 t3 9
-SELECT _database, _table, n FROM merge(^db, ^t) ORDER BY _database, _table, n
+SELECT _database, _table, n FROM merge(REGEXP(^01902_db), ^t) ORDER BY _database, _table, n
 01902_db t 0
 01902_db t 1
 01902_db t 2
@@ -143,7 +143,30 @@ SELECT _database, _table, n FROM 01902_db.t_merge WHERE _table = t1 ORDER BY _da
 01902_db1 t1 7
 01902_db1 t1 8
 01902_db1 t1 9
-CREATE TABLE t_merge_1 as 01902_db.t ENGINE=Merge(currentDatabase(), ^t.*)
+CREATE TABLE t_merge1 as 01902_db.t ENGINE=Merge(01902_db, ^t$)
+SELECT _database, _table, n FROM 01902_db.t_merge1 ORDER BY _database, _table, n
+01902_db t 0
+01902_db t 1
+01902_db t 2
+01902_db t 3
+01902_db t 4
+01902_db t 5
+01902_db t 6
+01902_db t 7
+01902_db t 8
+01902_db t 9
+SELECT _database, _table, n FROM merge(01902_db, ^t$) ORDER BY _database, _table, n
+01902_db t 0
+01902_db t 1
+01902_db t 2
+01902_db t 3
+01902_db t 4
+01902_db t 5
+01902_db t 6
+01902_db t 7
+01902_db t 8
+01902_db t 9
+CREATE TABLE t_merge_1 as 01902_db.t ENGINE=Merge(currentDatabase(), ^t)
 SELECT _database, _table, n FROM 01902_db.t_merge_1 ORDER BY _database, _table, n
 01902_db1 t1 0
@@ -155,7 +178,7 @@ SELECT _database, _table, n FROM 01902_db.t_merge_1 ORDER BY _database, _table,
 01902_db1 t1 7
 01902_db1 t1 8
 01902_db1 t1 9
-SELECT _database, _table, n FROM merge(currentDatabase(), ^t.*) ORDER BY _database, _table, n
+SELECT _database, _table, n FROM merge(currentDatabase(), ^t) ORDER BY _database, _table, n
 01902_db1 t1 0
 01902_db1 t1 1
 01902_db1 t1 2
diff --git a/tests/queries/0_stateless/01902_table_function_merge_db_repr.sql b/tests/queries/0_stateless/01902_table_function_merge_db_repr.sql
index 230ea2b8b1a..22b8f6879c8 100644
--- a/tests/queries/0_stateless/01902_table_function_merge_db_repr.sql
+++ b/tests/queries/0_stateless/01902_table_function_merge_db_repr.sql
@@ -18,14 +18,14 @@ INSERT INTO 01902_db1.t1 SELECT * FROM numbers(10);
 INSERT INTO 01902_db2.t2 SELECT * FROM numbers(10);
 INSERT INTO 01902_db3.t3 SELECT * FROM numbers(10);
 
-SELECT 'CREATE TABLE t_merge as 01902_db.t ENGINE=Merge(^01902_db.*, ^t.*)';
-CREATE TABLE 01902_db.t_merge as 01902_db.t ENGINE=Merge('^01902_db.*', '^t.*');
+SELECT 'CREATE TABLE t_merge as 01902_db.t ENGINE=Merge(REGEXP(^01902_db), ^t)';
+CREATE TABLE 01902_db.t_merge as 01902_db.t ENGINE=Merge(REGEXP('^01902_db'), '^t');
 
 SELECT 'SELECT _database, _table, n FROM 01902_db.t_merge ORDER BY _database, _table, n';
 SELECT _database, _table, n FROM 01902_db.t_merge ORDER BY _database, _table, n;
 
-SELECT 'SELECT _database, _table, n FROM merge(^db, ^t) ORDER BY _database, _table, n';
-SELECT _database, _table, n FROM merge('^01902_db.*', '^t.*') ORDER BY _database, _table, n;
+SELECT 'SELECT _database, _table, n FROM merge(REGEXP(^01902_db), ^t) ORDER BY _database, _table, n';
+SELECT _database, _table, n FROM merge(REGEXP('^01902_db'), '^t') ORDER BY _database, _table, n;
 
 SELECT 'SELECT _database, _table, n FROM 01902_db.t_merge WHERE _database = 01902_db1 ORDER BY _database, _table, n';
 SELECT _database, _table, n FROM 01902_db.t_merge WHERE _database = '01902_db1' ORDER BY _database, _table, n;
@@ -33,18 +33,28 @@
 SELECT 'SELECT _database, _table, n FROM 01902_db.t_merge WHERE _table = t1 ORDER BY _database, _table, n';
 SELECT _database, _table, n FROM 01902_db.t_merge WHERE _table = 't1' ORDER BY _database, _table, n;
 
+-- not regexp
+SELECT 'CREATE TABLE t_merge1 as 01902_db.t ENGINE=Merge(01902_db, ^t$)';
+CREATE TABLE 01902_db.t_merge1 as 01902_db.t ENGINE=Merge('01902_db', '^t$');
+
+SELECT 'SELECT _database, _table, n FROM 01902_db.t_merge1 ORDER BY _database, _table, n';
+SELECT _database, _table, n FROM 01902_db.t_merge1 ORDER BY _database, _table, n;
+
+SELECT 'SELECT _database, _table, n FROM merge(01902_db, ^t$) ORDER BY _database, _table, n';
+SELECT _database, _table, n FROM merge('01902_db', '^t$') ORDER BY _database, _table, n;
+
 USE 01902_db1;
 
-SELECT 'CREATE TABLE t_merge_1 as 01902_db.t ENGINE=Merge(currentDatabase(), ^t.*)';
-CREATE TABLE 01902_db.t_merge_1 as 01902_db.t ENGINE=Merge(currentDatabase(), '^t.*');
+SELECT 'CREATE TABLE t_merge_1 as 01902_db.t ENGINE=Merge(currentDatabase(), ^t)';
+CREATE TABLE 01902_db.t_merge_1 as 01902_db.t ENGINE=Merge(currentDatabase(), '^t');
 
 SELECT 'SELECT _database, _table, n FROM 01902_db.t_merge_1 ORDER BY _database, _table, n';
 SELECT _database, _table, n FROM 01902_db.t_merge_1 ORDER BY _database, _table, n;
 
-SELECT 'SELECT _database, _table, n FROM merge(currentDatabase(), ^t.*) ORDER BY _database, _table, n';
-SELECT _database, _table, n FROM merge(currentDatabase(), '^t.*') ORDER BY _database, _table, n;
+SELECT 'SELECT _database, _table, n FROM merge(currentDatabase(), ^t) ORDER BY _database, _table, n';
+SELECT _database, _table, n FROM merge(currentDatabase(), '^t') ORDER BY _database, _table, n;
 
-DROP DATABASE 01902_db;
-DROP DATABASE 01902_db1;
-DROP DATABASE 01902_db2;
-DROP DATABASE 01902_db3;
+-- DROP DATABASE 01902_db;
+-- DROP DATABASE 01902_db1;
+-- DROP DATABASE 01902_db2;
+-- DROP DATABASE 01902_db3;

From a947565de7be2acb5384e90165bb622978b84151 Mon Sep 17 00:00:00 2001
From: feng lv
Date: Fri, 25 Jun 2021 15:05:45 +0000
Subject: [PATCH 395/931] fix

---
 tests/queries/skip_list.json | 1 -
 1 file changed, 1 deletion(-)

diff --git a/tests/queries/skip_list.json b/tests/queries/skip_list.json
index ae86684de25..52c2d468498 100644
--- a/tests/queries/skip_list.json
+++ b/tests/queries/skip_list.json
@@ -843,7 +843,6 @@
         "01870_buffer_flush", // creates database
         "01889_postgresql_protocol_null_fields",
         "01889_check_row_policy_defined_using_user_function",
-        "01902_table_function_merge_db_repr ",
         "01921_concurrent_ttl_and_normal_merges_zookeeper_long" // heavy test, better to run sequentially
     ]
 }

From 14e43cfafaa0a41e04da897ef3cccc0749ed8c46 Mon Sep 17 00:00:00 2001
From: Ivan <5627721+abyss7@users.noreply.github.com>
Date: Fri, 25 Jun 2021 18:21:56 +0300
Subject: [PATCH 396/931] Add CI check for darwin-aarch64 (#25560)

* Add support for darwin-aarch64
* Fix PVS warnings
* Add build configuration
* Fix packager args
---
 base/common/DateLUT.h                          | 2 +-
 cmake/embed_binary.cmake                       | 13 ++++++++++---
 cmake/linux/toolchain-aarch64.cmake            | 1 -
 contrib/cctz-cmake/CMakeLists.txt              | 2 +-
 docker/packager/binary/Dockerfile              | 15 ++++++++++++---
 docker/packager/binary/build.sh                | 4 +++-
 docker/packager/packager                       | 15 ++++++++++++---
 src/Functions/FunctionsCoding.h                | 2 +-
 src/IO/ReadHelpers.h                           | 4 ++--
 src/Interpreters/ExpressionActions.h           | 2 +-
 src/Parsers/IParser.h                          | 4 ++--
 src/Parsers/IParserBase.h                      | 2 +-
 .../MySQL/tests/gtest_alter_command_parser.cpp | 2 +-
 src/Storages/MergeTree/MergeTreePartInfo.h     | 2 +-
 tests/ci/ci_config.json                        | 10 ++++++++++
 15 files changed, 58 insertions(+), 22 deletions(-)

diff --git a/base/common/DateLUT.h b/base/common/DateLUT.h
index 378b4360f3b..31fc6b1e24b 100644
--- a/base/common/DateLUT.h
+++ b/base/common/DateLUT.h
@@ -17,7 +17,7 @@ class DateLUT : private boost::noncopyable
 {
 public:
     /// Return singleton DateLUTImpl instance for the default time zone.
- static ALWAYS_INLINE const DateLUTImpl & instance() + static ALWAYS_INLINE const DateLUTImpl & instance() // -V1071 { const auto & date_lut = getInstance(); return *date_lut.default_impl.load(std::memory_order_acquire); diff --git a/cmake/embed_binary.cmake b/cmake/embed_binary.cmake index d520de1ab6f..e132c590520 100644 --- a/cmake/embed_binary.cmake +++ b/cmake/embed_binary.cmake @@ -33,10 +33,16 @@ macro(clickhouse_embed_binaries) message(FATAL_ERROR "The list of binary resources to embed may not be empty") endif() - # If cross-compiling, ensure we use the toolchain file and target the - # actual target architecture + # If cross-compiling, ensure we use the toolchain file and target the actual target architecture if (CMAKE_CROSSCOMPILING) - set(CROSS_COMPILE_FLAGS "--target=${CMAKE_C_COMPILER_TARGET} --gcc-toolchain=${TOOLCHAIN_FILE}") + set(CROSS_COMPILE_FLAGS --target=${CMAKE_C_COMPILER_TARGET}) + + # FIXME: find a way to properly pass all cross-compile flags to custom command in CMake + if (CMAKE_SYSTEM_NAME STREQUAL "Darwin") + list(APPEND CROSS_COMPILE_FLAGS -isysroot ${CMAKE_OSX_SYSROOT} -mmacosx-version-min=${CMAKE_OSX_DEPLOYMENT_TARGET}) + else () + list(APPEND CROSS_COMPILE_FLAGS -isysroot ${CMAKE_SYSROOT}) + endif () else() set(CROSS_COMPILE_FLAGS "") endif() @@ -67,6 +73,7 @@ macro(clickhouse_embed_binaries) ${CMAKE_C_COMPILER} "${CROSS_COMPILE_FLAGS}" -c -o "${CMAKE_CURRENT_BINARY_DIR}/${RESOURCE_OBJ}" "${CMAKE_CURRENT_BINARY_DIR}/${ASSEMBLY_FILE_NAME}" + COMMAND_EXPAND_LISTS ) set_source_files_properties("${RESOURCE_OBJ}" PROPERTIES EXTERNAL_OBJECT true GENERATED true) endforeach() diff --git a/cmake/linux/toolchain-aarch64.cmake b/cmake/linux/toolchain-aarch64.cmake index 7bda3484101..e3924fdc537 100644 --- a/cmake/linux/toolchain-aarch64.cmake +++ b/cmake/linux/toolchain-aarch64.cmake @@ -4,7 +4,6 @@ set (CMAKE_C_COMPILER_TARGET "aarch64-linux-gnu") set (CMAKE_CXX_COMPILER_TARGET "aarch64-linux-gnu") set (CMAKE_ASM_COMPILER_TARGET "aarch64-linux-gnu") set (CMAKE_SYSROOT "${CMAKE_CURRENT_LIST_DIR}/../toolchain/linux-aarch64/aarch64-linux-gnu/libc") -get_filename_component (TOOLCHAIN_FILE "${CMAKE_TOOLCHAIN_FILE}" REALPATH) # We don't use compiler from toolchain because it's gcc-8, and we provide support only for gcc-9. set (CMAKE_AR "${CMAKE_CURRENT_LIST_DIR}/../toolchain/linux-aarch64/bin/aarch64-linux-gnu-ar" CACHE FILEPATH "" FORCE) diff --git a/contrib/cctz-cmake/CMakeLists.txt b/contrib/cctz-cmake/CMakeLists.txt index 96e2af5fb03..d6697fd5d78 100644 --- a/contrib/cctz-cmake/CMakeLists.txt +++ b/contrib/cctz-cmake/CMakeLists.txt @@ -26,7 +26,7 @@ if (NOT USE_INTERNAL_CCTZ_LIBRARY) set_property (TARGET cctz PROPERTY IMPORTED_LOCATION ${LIBRARY_CCTZ}) set_property (TARGET cctz PROPERTY INTERFACE_INCLUDE_DIRECTORIES ${INCLUDE_CCTZ}) endif() - + set(SYSTEM_STORAGE_TZ_FILE "${CMAKE_BINARY_DIR}/src/Storages/System/StorageSystemTimeZones.generated.cpp") file(REMOVE ${SYSTEM_STORAGE_TZ_FILE}) file(APPEND ${SYSTEM_STORAGE_TZ_FILE} "// autogenerated by ClickHouse/contrib/cctz-cmake/CMakeLists.txt\n") diff --git a/docker/packager/binary/Dockerfile b/docker/packager/binary/Dockerfile index 56b2af5cf84..29225bbfeb8 100644 --- a/docker/packager/binary/Dockerfile +++ b/docker/packager/binary/Dockerfile @@ -72,7 +72,7 @@ RUN git clone https://github.com/tpoechtrager/apple-libtapi.git \ && cd .. 
\ && rm -rf apple-libtapi -# Build and install tools for cross-linking to Darwin +# Build and install tools for cross-linking to Darwin (x86-64) RUN git clone https://github.com/tpoechtrager/cctools-port.git \ && cd cctools-port/cctools \ && ./configure --prefix=/cctools --with-libtapi=/cctools \ @@ -81,8 +81,17 @@ RUN git clone https://github.com/tpoechtrager/cctools-port.git \ && cd ../.. \ && rm -rf cctools-port -# Download toolchain for Darwin -RUN wget -nv https://github.com/phracker/MacOSX-SDKs/releases/download/10.15/MacOSX10.15.sdk.tar.xz +# Build and install tools for cross-linking to Darwin (aarch64) +RUN git clone https://github.com/tpoechtrager/cctools-port.git \ + && cd cctools-port/cctools \ + && ./configure --prefix=/cctools --with-libtapi=/cctools \ + --target=aarch64-apple-darwin \ + && make install \ + && cd ../.. \ + && rm -rf cctools-port + +# Download toolchain and SDK for Darwin +RUN wget -nv https://github.com/phracker/MacOSX-SDKs/releases/download/11.3/MacOSX11.0.sdk.tar.xz # Download toolchain for ARM # It contains all required headers and libraries. Note that it's named as "gcc" but actually we are using clang for cross compiling. diff --git a/docker/packager/binary/build.sh b/docker/packager/binary/build.sh index cf74105fbbb..d6614bbb9e2 100755 --- a/docker/packager/binary/build.sh +++ b/docker/packager/binary/build.sh @@ -3,7 +3,9 @@ set -x -e mkdir -p build/cmake/toolchain/darwin-x86_64 -tar xJf MacOSX10.15.sdk.tar.xz -C build/cmake/toolchain/darwin-x86_64 --strip-components=1 +tar xJf MacOSX11.0.sdk.tar.xz -C build/cmake/toolchain/darwin-x86_64 --strip-components=1 + +ln -sf darwin-x86_64 build/cmake/toolchain/darwin-aarch64 mkdir -p build/cmake/toolchain/linux-aarch64 tar xJf gcc-arm-8.3-2019.03-x86_64-aarch64-linux-gnu.tar.xz -C build/cmake/toolchain/linux-aarch64 --strip-components=1 diff --git a/docker/packager/packager b/docker/packager/packager index 81474166cc9..c05c85d3e28 100755 --- a/docker/packager/packager +++ b/docker/packager/packager @@ -58,6 +58,7 @@ def run_docker_image_with_env(image_name, output, env_variables, ch_root, ccache def parse_env_variables(build_type, compiler, sanitizer, package_type, image_type, cache, distcc_hosts, unbundled, split_binary, clang_tidy, version, author, official, alien_pkgs, with_coverage, with_binaries): CLANG_PREFIX = "clang" DARWIN_SUFFIX = "-darwin" + DARWIN_ARM_SUFFIX = "-darwin-aarch64" ARM_SUFFIX = "-aarch64" FREEBSD_SUFFIX = "-freebsd" @@ -66,9 +67,10 @@ def parse_env_variables(build_type, compiler, sanitizer, package_type, image_typ is_clang = compiler.startswith(CLANG_PREFIX) is_cross_darwin = compiler.endswith(DARWIN_SUFFIX) + is_cross_darwin_arm = compiler.endswith(DARWIN_ARM_SUFFIX) is_cross_arm = compiler.endswith(ARM_SUFFIX) is_cross_freebsd = compiler.endswith(FREEBSD_SUFFIX) - is_cross_compile = is_cross_darwin or is_cross_arm or is_cross_freebsd + is_cross_compile = is_cross_darwin or is_cross_darwin_arm or is_cross_arm or is_cross_freebsd # Explicitly use LLD with Clang by default. # Don't force linker for cross-compilation. 
@@ -82,6 +84,13 @@ def parse_env_variables(build_type, compiler, sanitizer, package_type, image_typ cmake_flags.append("-DCMAKE_RANLIB:FILEPATH=/cctools/bin/x86_64-apple-darwin-ranlib") cmake_flags.append("-DLINKER_NAME=/cctools/bin/x86_64-apple-darwin-ld") cmake_flags.append("-DCMAKE_TOOLCHAIN_FILE=/build/cmake/darwin/toolchain-x86_64.cmake") + elif is_cross_darwin_arm: + cc = compiler[:-len(DARWIN_ARM_SUFFIX)] + cmake_flags.append("-DCMAKE_AR:FILEPATH=/cctools/bin/aarch64-apple-darwin-ar") + cmake_flags.append("-DCMAKE_INSTALL_NAME_TOOL=/cctools/bin/aarch64-apple-darwin-install_name_tool") + cmake_flags.append("-DCMAKE_RANLIB:FILEPATH=/cctools/bin/aarch64-apple-darwin-ranlib") + cmake_flags.append("-DLINKER_NAME=/cctools/bin/aarch64-apple-darwin-ld") + cmake_flags.append("-DCMAKE_TOOLCHAIN_FILE=/build/cmake/darwin/toolchain-aarch64.cmake") elif is_cross_arm: cc = compiler[:-len(ARM_SUFFIX)] cmake_flags.append("-DCMAKE_TOOLCHAIN_FILE=/build/cmake/linux/toolchain-aarch64.cmake") @@ -185,8 +194,8 @@ if __name__ == "__main__": parser.add_argument("--clickhouse-repo-path", default=os.path.join(os.path.dirname(os.path.abspath(__file__)), os.pardir, os.pardir)) parser.add_argument("--output-dir", required=True) parser.add_argument("--build-type", choices=("debug", ""), default="") - parser.add_argument("--compiler", choices=("clang-11", "clang-11-darwin", "clang-11-aarch64", "clang-11-freebsd", - "gcc-10"), default="clang-11") + parser.add_argument("--compiler", choices=("clang-11", "clang-11-darwin", "clang-11-darwin-aarch64", "clang-11-aarch64", + "clang-11-freebsd", "gcc-10"), default="clang-11") parser.add_argument("--sanitizer", choices=("address", "thread", "memory", "undefined", ""), default="") parser.add_argument("--unbundled", action="store_true") parser.add_argument("--split-binary", action="store_true") diff --git a/src/Functions/FunctionsCoding.h b/src/Functions/FunctionsCoding.h index f8cc0a21ebe..da667bfc691 100644 --- a/src/Functions/FunctionsCoding.h +++ b/src/Functions/FunctionsCoding.h @@ -992,7 +992,7 @@ public: UInt8 byte = x >> offset; /// Leading zeros. 
- if (byte == 0 && !was_nonzero && offset) + if (byte == 0 && !was_nonzero && offset) // -V560 continue; was_nonzero = true; diff --git a/src/IO/ReadHelpers.h b/src/IO/ReadHelpers.h index ffcfeea3827..d4e2db0b553 100644 --- a/src/IO/ReadHelpers.h +++ b/src/IO/ReadHelpers.h @@ -184,7 +184,7 @@ inline bool checkString(const String & s, ReadBuffer & buf) return checkString(s.c_str(), buf); } -inline bool checkChar(char c, ReadBuffer & buf) +inline bool checkChar(char c, ReadBuffer & buf) // -V1071 { if (buf.eof() || *buf.position() != c) return false; @@ -393,7 +393,7 @@ void readIntText(T & x, ReadBuffer & buf) } template -bool tryReadIntText(T & x, ReadBuffer & buf) +bool tryReadIntText(T & x, ReadBuffer & buf) // -V1071 { return readIntTextImpl(x, buf); } diff --git a/src/Interpreters/ExpressionActions.h b/src/Interpreters/ExpressionActions.h index c446b339072..7699e82a73b 100644 --- a/src/Interpreters/ExpressionActions.h +++ b/src/Interpreters/ExpressionActions.h @@ -253,7 +253,7 @@ struct ExpressionActionsChain : WithContext steps.clear(); } - ActionsDAGPtr getLastActions(bool allow_empty = false) + ActionsDAGPtr getLastActions(bool allow_empty = false) // -V1071 { if (steps.empty()) { diff --git a/src/Parsers/IParser.h b/src/Parsers/IParser.h index 7dc31e4c1eb..bf9fcb407ee 100644 --- a/src/Parsers/IParser.h +++ b/src/Parsers/IParser.h @@ -88,13 +88,13 @@ public: */ virtual bool parse(Pos & pos, ASTPtr & node, Expected & expected) = 0; - bool ignore(Pos & pos, Expected & expected) + bool ignore(Pos & pos, Expected & expected) // -V1071 { ASTPtr ignore_node; return parse(pos, ignore_node, expected); } - bool ignore(Pos & pos) + bool ignore(Pos & pos) // -V1071 { Expected expected; return ignore(pos, expected); diff --git a/src/Parsers/IParserBase.h b/src/Parsers/IParserBase.h index 100450263a4..cf69e5f2dfa 100644 --- a/src/Parsers/IParserBase.h +++ b/src/Parsers/IParserBase.h @@ -35,7 +35,7 @@ public: return res; } - bool parse(Pos & pos, ASTPtr & node, Expected & expected) override; + bool parse(Pos & pos, ASTPtr & node, Expected & expected) override; // -V1071 protected: virtual bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected) = 0; diff --git a/src/Parsers/MySQL/tests/gtest_alter_command_parser.cpp b/src/Parsers/MySQL/tests/gtest_alter_command_parser.cpp index b2b40ddaba2..ba8f90f2427 100644 --- a/src/Parsers/MySQL/tests/gtest_alter_command_parser.cpp +++ b/src/Parsers/MySQL/tests/gtest_alter_command_parser.cpp @@ -9,7 +9,7 @@ using namespace DB; using namespace DB::MySQLParser; -static inline ASTPtr tryParserQuery(IParser & parser, const String & query) +static inline ASTPtr tryParserQuery(IParser & parser, const String & query) // -V1071 { return parseQuery(parser, query.data(), query.data() + query.size(), "", 0, 0); } diff --git a/src/Storages/MergeTree/MergeTreePartInfo.h b/src/Storages/MergeTree/MergeTreePartInfo.h index 66d5342b67f..338fe7549aa 100644 --- a/src/Storages/MergeTree/MergeTreePartInfo.h +++ b/src/Storages/MergeTree/MergeTreePartInfo.h @@ -85,7 +85,7 @@ struct MergeTreePartInfo return static_cast(max_block - min_block + 1); } - static MergeTreePartInfo fromPartName(const String & part_name, MergeTreeDataFormatVersion format_version); + static MergeTreePartInfo fromPartName(const String & part_name, MergeTreeDataFormatVersion format_version); // -V1071 static bool tryParsePartName(const String & part_name, MergeTreePartInfo * part_info, MergeTreeDataFormatVersion format_version); diff --git a/tests/ci/ci_config.json b/tests/ci/ci_config.json index 
03bc013138d..2efa6ec6fef 100644 --- a/tests/ci/ci_config.json +++ b/tests/ci/ci_config.json @@ -152,6 +152,16 @@ "splitted": "unsplitted", "tidy": "disable", "with_coverage": false + }, + { + "compiler": "clang-11-darwin-aarch64", + "build-type": "", + "sanitizer": "", + "package-type": "binary", + "bundled": "bundled", + "splitted": "unsplitted", + "tidy": "disable", + "with_coverage": false } ], "tests_config": { From cec2399015274a9f67f7b59dad5fdb7a7aee0581 Mon Sep 17 00:00:00 2001 From: alesapin Date: Fri, 25 Jun 2021 18:30:49 +0300 Subject: [PATCH 397/931] Add working test --- ..._different_expression_name_alias.reference | 2 ++ .../01923_different_expression_name_alias.sql | 36 +++++++++++++++++++ 2 files changed, 38 insertions(+) create mode 100644 tests/queries/0_stateless/01923_different_expression_name_alias.reference create mode 100644 tests/queries/0_stateless/01923_different_expression_name_alias.sql diff --git a/tests/queries/0_stateless/01923_different_expression_name_alias.reference b/tests/queries/0_stateless/01923_different_expression_name_alias.reference new file mode 100644 index 00000000000..b261da18d51 --- /dev/null +++ b/tests/queries/0_stateless/01923_different_expression_name_alias.reference @@ -0,0 +1,2 @@ +1 +0 diff --git a/tests/queries/0_stateless/01923_different_expression_name_alias.sql b/tests/queries/0_stateless/01923_different_expression_name_alias.sql new file mode 100644 index 00000000000..09108cef483 --- /dev/null +++ b/tests/queries/0_stateless/01923_different_expression_name_alias.sql @@ -0,0 +1,36 @@ +DROP TABLE IF EXISTS distributed_tbl; +DROP TABLE IF EXISTS merge_tree_table; + +CREATE TABLE merge_tree_table +( + Date Date, + SomeType UInt8, + Alternative1 UInt64, + Alternative2 UInt64, + User UInt32, + CharID UInt64 ALIAS multiIf(SomeType IN (3, 4, 11), 0, SomeType IN (7, 8), Alternative1, Alternative2) +) +ENGINE = MergeTree() +ORDER BY tuple(); + +INSERT INTO merge_tree_table VALUES(toDate('2016-03-01'), 4, 0, 0, 1486392); + +SELECT count() FROM merge_tree_table; + +CREATE TABLE distributed_tbl +( + Date Date, + SomeType UInt8, + Alternative1 UInt64, + Alternative2 UInt64, + CharID UInt64, + User UInt32 +) +ENGINE = Distributed(test_shard_localhost, currentDatabase(), merge_tree_table); + +SELECT identity(CharID) AS x +FROM distributed_tbl +WHERE (Date = toDate('2016-03-01')) AND (User = 1486392) AND (x = 0); + +DROP TABLE IF EXISTS distributed_tbl; +DROP TABLE IF EXISTS merge_tree_table; From f8b1a6d185ddfedfd0d8b54bbc2c1d6eaebe38a9 Mon Sep 17 00:00:00 2001 From: l1tsolaiki Date: Fri, 25 Jun 2021 18:33:31 +0300 Subject: [PATCH 398/931] WriteBuffer is canceled 😳 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/Functions/DummyJSONParser.h | 6 ++---- src/Functions/FunctionSQLJSON.h | 27 ++++++++++++--------------- src/Functions/SimdJSONParser.h | 7 +------ 3 files changed, 15 insertions(+), 25 deletions(-) diff --git a/src/Functions/DummyJSONParser.h b/src/Functions/DummyJSONParser.h index 128ee88e0ca..01fdab1abb6 100644 --- a/src/Functions/DummyJSONParser.h +++ b/src/Functions/DummyJSONParser.h @@ -2,8 +2,6 @@ #include #include -#include -#include namespace DB { @@ -42,7 +40,7 @@ struct DummyJSONParser Array getArray() const { return {}; } Object getObject() const { return {}; } - ALWAYS_INLINE Element getUnderlyingElement() const { return {}; } + Element getElement() { return {}; } }; /// References an array in a JSON document.
@@ -101,7 +99,7 @@ struct DummyJSONParser #endif }; -inline ALWAYS_INLINE WriteBufferFromString& operator<<(WriteBufferFromString& out, const DB::DummyJSONParser::Element &) +inline ALWAYS_INLINE std::ostream& operator<<(std::ostream& out, DummyJSONParser::Element) { return out; } diff --git a/src/Functions/FunctionSQLJSON.h b/src/Functions/FunctionSQLJSON.h index 1e9b25ee508..dc31ee2d3ff 100644 --- a/src/Functions/FunctionSQLJSON.h +++ b/src/Functions/FunctionSQLJSON.h @@ -8,8 +8,6 @@ #include #include #include -#include -#include #include #include #include @@ -30,10 +28,10 @@ namespace DB { namespace ErrorCodes { - extern const int ILLEGAL_COLUMN; - extern const int ILLEGAL_TYPE_OF_ARGUMENT; - extern const int TOO_FEW_ARGUMENTS_FOR_FUNCTION; - extern const int BAD_ARGUMENTS; +extern const int ILLEGAL_COLUMN; +extern const int ILLEGAL_TYPE_OF_ARGUMENT; +extern const int TOO_FEW_ARGUMENTS_FOR_FUNCTION; +extern const int BAD_ARGUMENTS; } class FunctionSQLJSONHelpers @@ -281,11 +279,11 @@ public: return false; } - String result; - WriteBufferFromString out(result); - out << current_element.getUnderlyingElement(); + std::stringstream out; // STYLE_CHECK_ALLOW_STD_STRING_STREAM + out << current_element.getElement(); + auto output_str = out.str(); ColumnString & col_str = assert_cast(dest); - col_str.insertData(result.data(), result.size()); + col_str.insertData(output_str.data(), output_str.size()); return true; } }; @@ -309,9 +307,7 @@ public: GeneratorJSONPath generator_json_path(query_ptr); Element current_element = root; VisitorStatus status; - String result; - WriteBufferFromString out(result); - + std::stringstream out; // STYLE_CHECK_ALLOW_STD_STRING_STREAM /// Create json array of results: [res1, res2, ...] out << "["; bool success = false; @@ -324,7 +320,7 @@ public: out << ", "; } success = true; - out << current_element.getUnderlyingElement(); + out << current_element.getElement(); } else if (status == VisitorStatus::Error) { @@ -338,7 +334,8 @@ public: return false; } ColumnString & col_str = assert_cast(dest); - col_str.insertData(reinterpret_cast(result.data()), result.size()); + auto output_str = out.str(); + col_str.insertData(output_str.data(), output_str.size()); return true; } }; diff --git a/src/Functions/SimdJSONParser.h b/src/Functions/SimdJSONParser.h index c11fca3272c..c5793088baf 100644 --- a/src/Functions/SimdJSONParser.h +++ b/src/Functions/SimdJSONParser.h @@ -50,7 +50,7 @@ struct SimdJSONParser ALWAYS_INLINE Array getArray() const; ALWAYS_INLINE Object getObject() const; - ALWAYS_INLINE simdjson::dom::element getUnderlyingElement() const { return element; } + ALWAYS_INLINE simdjson::dom::element getElement() const { return element; } private: simdjson::dom::element element; @@ -165,11 +165,6 @@ inline ALWAYS_INLINE SimdJSONParser::Object SimdJSONParser::Element::getObject() return element.get_object().value_unsafe(); } -inline ALWAYS_INLINE WriteBuffer& operator<<(WriteBuffer& out, const DB::SimdJSONParser::Element & element) -{ - return out << element.getUnderlyingElement(); -} - } #endif From 6981eb64ac9e641070c167fff598a4172e75948c Mon Sep 17 00:00:00 2001 From: l1tsolaiki Date: Fri, 25 Jun 2021 19:24:22 +0300 Subject: [PATCH 399/931] Fixes --- src/Functions/FunctionSQLJSON.h | 30 +++++++------------ .../JSONPath/ASTs/ASTJSONPathRange.h | 4 +-- .../JSONPath/Generators/GeneratorJSONPath.h | 9 +++--- 3 files changed, 17 insertions(+), 26 deletions(-) diff --git a/src/Functions/FunctionSQLJSON.h b/src/Functions/FunctionSQLJSON.h index dc31ee2d3ff..9e13d447b7d 
100644 --- a/src/Functions/FunctionSQLJSON.h +++ b/src/Functions/FunctionSQLJSON.h @@ -41,7 +41,7 @@ public: class Executor { public: - static ColumnPtr run(const ColumnsWithTypeAndName & arguments, const DataTypePtr & result_type, size_t input_rows_count) + static ColumnPtr run(const ColumnsWithTypeAndName & arguments, const DataTypePtr & result_type, size_t input_rows_count, uint32_t parse_depth) { MutableColumnPtr to{result_type->createColumn()}; to->reserve(input_rows_count); @@ -76,23 +76,10 @@ public: ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT); } - /// If argument is successfully cast to (ColumnConst *) then it is quoted string - /// Example: - /// SomeFunction('some string argument') - /// - /// Otherwise it is a column - /// Example: - /// SomeFunction(database.table.column) - const ColumnPtr & arg_jsonpath = first_column.column; const auto * arg_jsonpath_const = typeid_cast(arg_jsonpath.get()); const auto * arg_jsonpath_string = typeid_cast(arg_jsonpath_const->getDataColumnPtr().get()); - if (!arg_jsonpath_string) - { - throw Exception{"Illegal column " + arg_jsonpath->getName(), ErrorCodes::ILLEGAL_COLUMN}; - } - const ColumnPtr & arg_json = second_column.column; const auto * col_json_const = typeid_cast(arg_json.get()); const auto * col_json_string @@ -102,14 +89,14 @@ public: const ColumnString::Chars & chars_path = arg_jsonpath_string->getChars(); const ColumnString::Offsets & offsets_path = arg_jsonpath_string->getOffsets(); - /// Get data and offsets for 1 argument (JSON) + /// Prepare to parse 1 argument (JSONPath) const char * query_begin = reinterpret_cast(&chars_path[0]); const char * query_end = query_begin + offsets_path[0] - 1; /// Tokenize query Tokens tokens(query_begin, query_end); /// Max depth 0 indicates that depth is not limited - IParser::Pos token_iterator(tokens, 0); + IParser::Pos token_iterator(tokens, parse_depth); /// Parse query and create AST tree Expected expected; @@ -121,7 +108,7 @@ public: throw Exception{"Unable to parse JSONPath", ErrorCodes::BAD_ARGUMENTS}; } - /// Get data and offsets for 1 argument (JSON) + /// Get data and offsets for 2 argument (JSON) const ColumnString::Chars & chars_json = col_json_string->getChars(); const ColumnString::Offsets & offsets_json = col_json_string->getOffsets(); @@ -179,12 +166,13 @@ public: /// 1. Lexer(path) -> Tokens /// 2. Create ASTPtr /// 3. Parser(Tokens, ASTPtr) -> complete AST - /// 4. Execute functions, call interpreter for each json (in function) + /// 4. 
Execute functions: call getNextItem on generator and handle each item + uint32_t parse_depth = getContext()->getSettingsRef().max_parser_depth; #if USE_SIMDJSON if (getContext()->getSettingsRef().allow_simdjson) - return FunctionSQLJSONHelpers::Executor::run(arguments, result_type, input_rows_count); + return FunctionSQLJSONHelpers::Executor::run(arguments, result_type, input_rows_count, parse_depth); #endif - return FunctionSQLJSONHelpers::Executor::run(arguments, result_type, input_rows_count); + return FunctionSQLJSONHelpers::Executor::run(arguments, result_type, input_rows_count, parse_depth); } }; @@ -325,6 +313,8 @@ public: else if (status == VisitorStatus::Error) { /// ON ERROR + /// Here it is possible to handle errors with ON ERROR (as described in ISO/IEC TR 19075-6), + /// however this functionality is not implemented yet } current_element = root; } diff --git a/src/Functions/JSONPath/ASTs/ASTJSONPathRange.h b/src/Functions/JSONPath/ASTs/ASTJSONPathRange.h index 8a963d7fc6b..746c6211f29 100644 --- a/src/Functions/JSONPath/ASTs/ASTJSONPathRange.h +++ b/src/Functions/JSONPath/ASTs/ASTJSONPathRange.h @@ -14,8 +14,8 @@ public: public: /// Ranges to lookup in json array ($[0, 1, 2, 4 to 9]) - /// Range is represented as - /// Single index is represented as + /// Range is represented as + /// Single index is represented as std::vector> ranges; bool is_star = false; }; diff --git a/src/Functions/JSONPath/Generators/GeneratorJSONPath.h b/src/Functions/JSONPath/Generators/GeneratorJSONPath.h index 071a7ac3089..b918ceac003 100644 --- a/src/Functions/JSONPath/Generators/GeneratorJSONPath.h +++ b/src/Functions/JSONPath/Generators/GeneratorJSONPath.h @@ -73,7 +73,8 @@ public: { while (true) { - auto root = element; + /// element passed to us actually is root, so here we assign current to root + auto current = element; if (current_visitor < 0) { return VisitorStatus::Exhausted; @@ -81,13 +82,13 @@ public: for (int i = 0; i < current_visitor; ++i) { - visitors[i]->apply(root); + visitors[i]->apply(current); } VisitorStatus status = VisitorStatus::Error; for (size_t i = current_visitor; i < visitors.size(); ++i) { - status = visitors[i]->visit(root); + status = visitors[i]->visit(current); current_visitor = i; if (status == VisitorStatus::Error || status == VisitorStatus::Ignore) { @@ -98,7 +99,7 @@ public: if (status != VisitorStatus::Ignore) { - element = root; + element = current; return status; } } From 64652c3597c0afd55c9b7ec6c8778dd9923a4d8e Mon Sep 17 00:00:00 2001 From: sevirov <72220289+sevirov@users.noreply.github.com> Date: Fri, 25 Jun 2021 19:55:25 +0300 Subject: [PATCH 400/931] Update docs/en/sql-reference/table-functions/s3Cluster.md Co-authored-by: olgarev <56617294+olgarev@users.noreply.github.com> --- docs/en/sql-reference/table-functions/s3Cluster.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/en/sql-reference/table-functions/s3Cluster.md b/docs/en/sql-reference/table-functions/s3Cluster.md index 9e2291a346d..794d009f644 100644 --- a/docs/en/sql-reference/table-functions/s3Cluster.md +++ b/docs/en/sql-reference/table-functions/s3Cluster.md @@ -16,7 +16,7 @@ s3Cluster(cluster_name, source, [access_key_id, secret_access_key,] format, stru **Arguments** - `cluster_name` — Name of a cluster that is used to build a set of addresses and connection parameters to remote and local servers. -- `source` — Bucket url with path to file. 
Supports following wildcards in readonly mode: `*`, `?`, `{abc,def}` and `{N..M}` where `N`, `M` — numbers, `abc`, `def` — strings. For more information see [Wildcards In Path](../../engines/table-engines/integrations/s3.md#wildcards-in-path). +- `source` — Bucket url with path to file. Supports following wildcards in readonly mode: `*`, `?`, `{'abc','def'}` and `{N..M}` where `N`, `M` — numbers, `abc`, `def` — strings. For more information see [Wildcards In Path](../../engines/table-engines/integrations/s3.md#wildcards-in-path). - `access_key_id` and `secret_access_key` — Keys that specify credentials to use with given endpoint. Optional. - `format` — The [format](../../interfaces/formats.md#formats) of the file. - `structure` — Structure of the table. Format `'column1_name column1_type, column2_name column2_type, ...'`. From 15bc66111959619a7052b96c0c08fe7141a3a2e8 Mon Sep 17 00:00:00 2001 From: sevirov <72220289+sevirov@users.noreply.github.com> Date: Fri, 25 Jun 2021 19:55:48 +0300 Subject: [PATCH 401/931] Update docs/ru/sql-reference/table-functions/s3Cluster.md Co-authored-by: olgarev <56617294+olgarev@users.noreply.github.com> --- docs/ru/sql-reference/table-functions/s3Cluster.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/ru/sql-reference/table-functions/s3Cluster.md b/docs/ru/sql-reference/table-functions/s3Cluster.md index 0f3c8f68c9c..95f45066e71 100644 --- a/docs/ru/sql-reference/table-functions/s3Cluster.md +++ b/docs/ru/sql-reference/table-functions/s3Cluster.md @@ -16,7 +16,7 @@ s3Cluster(cluster_name, source, [access_key_id, secret_access_key,] format, stru **Аргументы** - `cluster_name` — имя кластера, используемое для создания набора адресов и параметров подключения к удаленным и локальным серверам. -- `source` — URL-адрес бакета с указанием пути к файлу. Поддерживает следующие подстановочные символы в режиме "только чтение": `*`, `?`, `{abc,def}` и `{N..M}`, где `N`, `M` — числа, `abc`, `def` — строки. Подробнее смотрите в разделе [Символы подстановки](../../engines/table-engines/integrations/s3.md#wildcards-in-path). +- `source` — URL-адрес бакета с указанием пути к файлу. Поддерживает следующие подстановочные символы в режиме "только чтение": `*`, `?`, `{'abc','def'}` и `{N..M}`, где `N`, `M` — числа, `abc`, `def` — строки. Подробнее смотрите в разделе [Символы подстановки](../../engines/table-engines/integrations/s3.md#wildcards-in-path). - `access_key_id` и `secret_access_key` — ключи, указывающие на учетные данные для использования с точкой приема запроса. Необязательные параметры. - `format` — [формат](../../interfaces/formats.md#formats) файла. - `structure` — структура таблицы. Формат `'column1_name column1_type, column2_name column2_type, ...'`. 
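A short illustration may help here: the wildcard forms listed for `source` (`*`, `?`, brace alternation and `{N..M}` numeric ranges) can be combined in a single URL. The sketch below is hedged, not text from the patches: the bucket layout and column list are invented for illustration, and only the `s3Cluster` signature, the `cluster_simple` cluster with its MinIO credentials, and the wildcard syntax come from the documentation changes above.

```sql
-- Hypothetical bucket layout; only the function signature and wildcard
-- syntax are taken from the s3Cluster documentation above.
SELECT count(*)
FROM s3Cluster(
    'cluster_simple',                                   -- cluster defined in the server config
    'http://minio1:9001/root/data/{clickhouse,database}/year=20{19..21}/part-*.csv',
    'minio', 'minio123',                                -- access_key_id, secret_access_key
    'CSV',                                              -- format
    'name String, value UInt32'                         -- structure
);
```

The query is fanned out across the nodes of `cluster_simple`, with each node processing a subset of the files matched by the wildcards.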
From 0b886acd8d06786d4bc650597469318bb85626fd Mon Sep 17 00:00:00 2001 From: sevirov <72220289+sevirov@users.noreply.github.com> Date: Fri, 25 Jun 2021 19:56:02 +0300 Subject: [PATCH 402/931] Update docs/ru/sql-reference/table-functions/s3Cluster.md Co-authored-by: olgarev <56617294+olgarev@users.noreply.github.com> --- docs/ru/sql-reference/table-functions/s3Cluster.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/ru/sql-reference/table-functions/s3Cluster.md b/docs/ru/sql-reference/table-functions/s3Cluster.md index 95f45066e71..1d9477f5787 100644 --- a/docs/ru/sql-reference/table-functions/s3Cluster.md +++ b/docs/ru/sql-reference/table-functions/s3Cluster.md @@ -3,7 +3,7 @@ toc_priority: 55 toc_title: s3Cluster --- -# Табличная Функция s3Cluster {#s3Cluster-table-function} +# Табличная функция s3Cluster {#s3Cluster-table-function} Позволяет обрабатывать файлы из [Amazon S3](https://aws.amazon.com/s3/) параллельно из многих узлов в указанном кластере. На узле-инициаторе функция создает соединение со всеми узлами в кластере, раскрывает звездочки в пути к файлу S3 и динамически отправляет каждый файл. На рабочем узле функция запрашивает у инициатора следующую задачу для обработки и обрабатывает ее. Это повторяется до тех пор, пока все задачи не будут завершены. From 638b2862b8873c0a742fdc596f2f9c77e7ae6d98 Mon Sep 17 00:00:00 2001 From: sevirov <72220289+sevirov@users.noreply.github.com> Date: Fri, 25 Jun 2021 19:56:11 +0300 Subject: [PATCH 403/931] Update docs/ru/sql-reference/table-functions/s3Cluster.md Co-authored-by: olgarev <56617294+olgarev@users.noreply.github.com> --- docs/ru/sql-reference/table-functions/s3Cluster.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/ru/sql-reference/table-functions/s3Cluster.md b/docs/ru/sql-reference/table-functions/s3Cluster.md index 1d9477f5787..8b4dae91fc7 100644 --- a/docs/ru/sql-reference/table-functions/s3Cluster.md +++ b/docs/ru/sql-reference/table-functions/s3Cluster.md @@ -5,7 +5,7 @@ toc_title: s3Cluster # Табличная функция s3Cluster {#s3Cluster-table-function} -Позволяет обрабатывать файлы из [Amazon S3](https://aws.amazon.com/s3/) параллельно из многих узлов в указанном кластере. На узле-инициаторе функция создает соединение со всеми узлами в кластере, раскрывает звездочки в пути к файлу S3 и динамически отправляет каждый файл. На рабочем узле функция запрашивает у инициатора следующую задачу для обработки и обрабатывает ее. Это повторяется до тех пор, пока все задачи не будут завершены. +Позволяет обрабатывать файлы из [Amazon S3](https://aws.amazon.com/s3/) параллельно из многих узлов в указанном кластере. На узле-инициаторе функция создает соединение со всеми узлами в кластере, заменяет символы '*' в пути к файлу S3 и динамически отправляет каждый файл. На рабочем узле функция запрашивает у инициатора следующую задачу и обрабатывает ее. Это повторяется до тех пор, пока все задачи не будут завершены. 
**Синтаксис** From 400e6855322edcc20f967b8e3f54379569e0b4fd Mon Sep 17 00:00:00 2001 From: sevirov <72220289+sevirov@users.noreply.github.com> Date: Fri, 25 Jun 2021 19:56:18 +0300 Subject: [PATCH 404/931] Update docs/ru/sql-reference/table-functions/s3Cluster.md Co-authored-by: olgarev <56617294+olgarev@users.noreply.github.com> --- docs/ru/sql-reference/table-functions/s3Cluster.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/ru/sql-reference/table-functions/s3Cluster.md b/docs/ru/sql-reference/table-functions/s3Cluster.md index 8b4dae91fc7..9ad20c4b5e1 100644 --- a/docs/ru/sql-reference/table-functions/s3Cluster.md +++ b/docs/ru/sql-reference/table-functions/s3Cluster.md @@ -33,7 +33,7 @@ s3Cluster(cluster_name, source, [access_key_id, secret_access_key,] format, stru SELECT * FROM s3Cluster('cluster_simple', 'http://minio1:9001/root/data/{clickhouse,database}/*', 'minio', 'minio123', 'CSV', 'name String, value UInt32, polygon Array(Array(Tuple(Float64, Float64)))') ORDER BY (name, value, polygon); ``` -Подсчитаем общее количество строк во всех файлах кластера `cluster_simple`: +Подсчет общего количества строк во всех файлах кластера `cluster_simple`: ``` sql SELECT count(*) FROM s3Cluster('cluster_simple', 'http://minio1:9001/root/data/{clickhouse,database}/*', 'minio', 'minio123', 'CSV', 'name String, value UInt32, polygon Array(Array(Tuple(Float64, Float64)))'); From 25c8e09dbb113e03158c7f0a8751ac47a509fcad Mon Sep 17 00:00:00 2001 From: sevirov <72220289+sevirov@users.noreply.github.com> Date: Fri, 25 Jun 2021 19:56:26 +0300 Subject: [PATCH 405/931] Update docs/ru/sql-reference/table-functions/s3Cluster.md Co-authored-by: olgarev <56617294+olgarev@users.noreply.github.com> --- docs/ru/sql-reference/table-functions/s3Cluster.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/ru/sql-reference/table-functions/s3Cluster.md b/docs/ru/sql-reference/table-functions/s3Cluster.md index 9ad20c4b5e1..32916a2b122 100644 --- a/docs/ru/sql-reference/table-functions/s3Cluster.md +++ b/docs/ru/sql-reference/table-functions/s3Cluster.md @@ -27,7 +27,7 @@ s3Cluster(cluster_name, source, [access_key_id, secret_access_key,] format, stru **Примеры** -Выведем данные из всех файлов кластера `cluster_simple`: +Вывод данных из всех файлов кластера `cluster_simple`: ``` sql SELECT * FROM s3Cluster('cluster_simple', 'http://minio1:9001/root/data/{clickhouse,database}/*', 'minio', 'minio123', 'CSV', 'name String, value UInt32, polygon Array(Array(Tuple(Float64, Float64)))') ORDER BY (name, value, polygon); From 7892e44467294d6f02b0bd73c8b8a6b450f17694 Mon Sep 17 00:00:00 2001 From: l1tsolaiki Date: Fri, 25 Jun 2021 21:14:08 +0300 Subject: [PATCH 406/931] Fix style yet again 😎 MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Выполнил перевод на русский язык. --- .../integrations/ExternalDistributed.md | 28 +++------- .../table-engines/integrations/mysql.md | 2 +- .../table-engines/integrations/postgresql.md | 2 +- .../integrations/ExternalDistributed.md | 54 +++++++++++++++++++ .../table-engines/integrations/mysql.md | 10 +++- .../table-engines/integrations/postgresql.md | 18 ++++--- .../ru/sql-reference/table-functions/mysql.md | 12 +++++ .../table-functions/postgresql.md | 12 +++++ 8 files changed, 106 insertions(+), 32 deletions(-) create mode 100644 docs/ru/engines/table-engines/integrations/ExternalDistributed.md diff --git a/docs/en/engines/table-engines/integrations/ExternalDistributed.md b/docs/en/engines/table-engines/integrations/ExternalDistributed.md index 12f12c2a7b0..19e0b9b9ceb 100644 --- a/docs/en/engines/table-engines/integrations/ExternalDistributed.md +++ b/docs/en/engines/table-engines/integrations/ExternalDistributed.md @@ -15,7 +15,7 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster] name1 [type1] [DEFAULT|MATERIALIZED|ALIAS expr1] [TTL expr1], name2 [type2] [DEFAULT|MATERIALIZED|ALIAS expr2] [TTL expr2], ... -) ENGINE = ExternalDistributed('engine', 'host:port', 'database', 'table', 'user', 'password'[, `schema`]); +) ENGINE = ExternalDistributed('engine', 'host:port', 'database', 'table', 'user', 'password'); ``` See a detailed description of the [CREATE TABLE](../../../sql-reference/statements/create/table.md#create-table-query) query. @@ -24,44 +24,28 @@ The table structure can differ from the original table structure: - Column names should be the same as in the original table, but you can use just some of these columns and in any order. - Column types may differ from those in the original table. ClickHouse tries to [cast](../../../sql-reference/functions/type-conversion-functions.md#type_conversion_function-cast) values to the ClickHouse data types. -- The `external_table_functions_use_nulls` setting defines how to handle Nullable columns. Default is 1, if 0 - table function will not make nullable columns and will insert default values instead of nulls. This is also applicable for null values inside array data types. +- The `external_table_functions_use_nulls` setting defines how to handle Nullable columns. Default value: 1. If 0, the table function will not make nullable columns and will insert default values instead of nulls. This is also applicable for null values inside array data types. **Engine Parameters** -- `engine` — The table engine: `MySQL` or `PostgreSQL`. +- `engine` — The table engine `MySQL` or `PostgreSQL`. - `host:port` — MySQL or PostgreSQL server address. - `database` — Remote database name. - `table` — Remote table name. - `user` — User name. - `password` — User password. -- `schema` — Non-default table schema. Optional. ## Implementation Details {#implementation-details} -Supports multiple replicas that must be listed by a character `|`. For example: +Supports multiple replicas that must be listed by `|` and shards must be listed by `,`. For example: ```sql CREATE TABLE test_shards (id UInt32, name String, age UInt32, money UInt32) ENGINE = ExternalDistributed('MySQL', `mysql{1|2}:3306,mysql{3|4}:3306`, 'clickhouse', 'test_replicas', 'root', 'clickhouse'); ``` -When specifying replicas, one of the available replicas will be selected for each of the shards when reading. 
You can configure the algorithm for load balancing (the preference for which replica to access) – see the [load_balancing](../../../operations/settings/settings.md#settings-load_balancing) setting. If the connection with the server is not established, there will be an attempt to connect with a short timeout. If the connection failed, the next replica will be selected, and so on for all the replicas. If the connection attempt failed for all the replicas, the attempt will be repeated the same way several times. This works in favor of resiliency, but does not provide complete fault tolerance: a remote server might accept the connection, but might not work, or work poorly. +When specifying replicas, one of the available replicas will be selected for each of the shards when reading. If the connection failed, the next replica will be selected, and so on for all the replicas. If the connection attempt failed for all the replicas, the attempt will be repeated the same way several times. -You can specify just one of the shards (in this case, query processing should be called remote, rather than distributed) or up to any number of shards. In each shard, you can specify from one to any number of replicas. You can specify a different number of replicas for each shard. - -Each shard can have a weight defined in the config file. By default, the weight is equal to one. Data is distributed across shards in the amount proportional to the shard weight. For example, if there are two shards and the first has a weight of 9 while the second has a weight of 10, the first will be sent 9 / 19 parts of the rows, and the second will be sent 10 / 19. - -To select the shard that a row of data is sent to, the sharding expression is analyzed, and its remainder is taken from dividing it by the total weight of the shards. The row is sent to the shard that corresponds to the half-interval of the remainders from `prev_weight` to `prev_weights + weight`, where `prev_weights` is the total weight of the shards with the smallest number, and `weight` is the weight of this shard. For example, if there are two shards, and the first has a weight of 9 while the second has a weight of 10, the row will be sent to the first shard for the remainders from the range \[0, 9), and to the second for the remainders from the range \[9, 19). - -The sharding expression can be any expression from constants and table columns that returns an integer. For example, you can use the expression `rand()` for random distribution of data, or `UserID` for distribution by the remainder from dividing the user’s ID (then the data of a single user will reside on a single shard, which simplifies running `IN` and `JOIN` by users). If one of the columns is not distributed evenly enough, you can wrap it in a hash function [intHash64](../../../sql-reference/functions/hash-functions.md#inthash64)(UserID). - -A simple reminder from the division is a limited solution for sharding and is not always appropriate. It works for medium and large volumes of data (dozens of servers), but not for very large volumes of data (hundreds of servers or more). In the latter case, use the sharding scheme required by the subject area, rather than using entries in Distributed tables. - -`SELECT` queries are sent to all the shards and work regardless of how data is distributed across the shards (they can be distributed completely randomly). When you add a new shard, you do not have to transfer the old data to it. 
You can write new data with a heavier weight – the data will be distributed slightly unevenly, but queries will work correctly and efficiently. - -You should be concerned about the sharding scheme in the following cases: - -- Queries are used that require joining data (`IN` or `JOIN`) by a specific key. If data is sharded by this key, you can use local `IN` or `JOIN` instead of `GLOBAL IN` or `GLOBAL JOIN`, which is much more efficient. -- A large number of servers is used (hundreds or more) with a large number of small queries (queries of individual clients - websites, advertisers, or partners). In order for the small queries to not affect the entire cluster, it makes sense to locate data for a single client on a single shard. Alternatively, as we have done in Yandex.Metrica, you can set up bi-level sharding: divide the entire cluster into "layers", where a layer may consist of multiple shards. Data for a single client is located on a single layer, but shards can be added to a layer as necessary, and data is randomly distributed within them. Distributed tables are created for each layer, and a single shared distributed table is created for global queries. +You can specify up to any number of shards and to any number of replicas for each shard. **See Also** diff --git a/docs/en/engines/table-engines/integrations/mysql.md b/docs/en/engines/table-engines/integrations/mysql.md index 42b5c2a9918..b4664e257a1 100644 --- a/docs/en/engines/table-engines/integrations/mysql.md +++ b/docs/en/engines/table-engines/integrations/mysql.md @@ -29,7 +29,7 @@ The table structure can differ from the original MySQL table structure: - Column names should be the same as in the original MySQL table, but you can use just some of these columns and in any order. - Column types may differ from those in the original MySQL table. ClickHouse tries to [cast](../../../sql-reference/functions/type-conversion-functions.md#type_conversion_function-cast) values to the ClickHouse data types. -- Setting `external_table_functions_use_nulls` defines how to handle Nullable columns. Default is true, if false - table function will not make nullable columns and will insert default values instead of nulls. This is also applicable for null values inside array data types. +- Setting `external_table_functions_use_nulls` defines how to handle Nullable columns. Default value: 1. If 0, the table function will not make nullable columns and will insert default values instead of nulls. This is also applicable for null values inside array data types. **Engine Parameters** diff --git a/docs/en/engines/table-engines/integrations/postgresql.md b/docs/en/engines/table-engines/integrations/postgresql.md index d0465ccbbea..88ed3613d01 100644 --- a/docs/en/engines/table-engines/integrations/postgresql.md +++ b/docs/en/engines/table-engines/integrations/postgresql.md @@ -24,7 +24,7 @@ The table structure can differ from the original PostgreSQL table structure: - Column names should be the same as in the original PostgreSQL table, but you can use just some of these columns and in any order. - Column types may differ from those in the original PostgreSQL table. ClickHouse tries to [cast](../../../sql-reference/functions/type-conversion-functions.md#type_conversion_function-cast) values to the ClickHouse data types. -- Setting `external_table_functions_use_nulls` defines how to handle Nullable columns. Default is 1, if 0 - table function will not make nullable columns and will insert default values instead of nulls. 
This is also applicable for null values inside array data types. +- Setting `external_table_functions_use_nulls` defines how to handle Nullable columns. Default value: 1. If 0, the table function will not make nullable columns and will insert default values instead of nulls. This is also applicable for null values inside array data types. **Engine Parameters** diff --git a/docs/ru/engines/table-engines/integrations/ExternalDistributed.md b/docs/ru/engines/table-engines/integrations/ExternalDistributed.md new file mode 100644 index 00000000000..f50f671d589 --- /dev/null +++ b/docs/ru/engines/table-engines/integrations/ExternalDistributed.md @@ -0,0 +1,54 @@ +--- +toc_priority: 12 +toc_title: ExternalDistributed +--- + +# ExternalDistributed {#externaldistributed} + +Движок `ExternalDistributed` позволяет выполнять запросы `SELECT` и `INSERT` для таблиц на удаленном сервере MySQL или PostgreSQL. Принимает в качестве аргумента табличные движки [MySQL](../../../engines/table-engines/integrations/mysql.md) или [PostgreSQL](../../../engines/table-engines/integrations/postgresql.md), поэтому возможно шардирование. + +## Создание таблицы {#creating-a-table} + +``` sql +CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster] +( + name1 [type1] [DEFAULT|MATERIALIZED|ALIAS expr1] [TTL expr1], + name2 [type2] [DEFAULT|MATERIALIZED|ALIAS expr2] [TTL expr2], + ... +) ENGINE = ExternalDistributed('engine', 'host:port', 'database', 'table', 'user', 'password'); +``` + +Смотрите подробное описание запроса [CREATE TABLE](../../../sql-reference/statements/create/table.md#create-table-query). + +Структура таблицы может отличаться от исходной структуры таблицы: + +- Имена столбцов должны быть такими же, как в исходной таблице, но вы можете использовать только некоторые из этих столбцов и в любом порядке. +- Типы столбцов могут отличаться от типов в исходной таблице. ClickHouse пытается [привести](../../../sql-reference/functions/type-conversion-functions.md#type_conversion_function-cast) значения к типам данных ClickHouse. +- Настройка `external_table_functions_use_nulls` определяет как обрабатывать Nullable столбцы. Значение по умолчанию: 1. Если значение 0, то табличная функция не будет делать nullable столбцы и будет вместо null выставлять значения по умолчанию для скалярного типа. Это также применимо для null значений внутри массивов. + +**Параметры движка** + +- `engine` — табличный движок `MySQL` или `PostgreSQL`. +- `host:port` — адрес сервера MySQL или PostgreSQL. +- `database` — имя базы данных на сервере. +- `table` — имя таблицы. +- `user` — имя пользователя. +- `password` — пароль пользователя. + +## Особенности реализации {#implementation-details} + +Поддерживает несколько реплик, которые должны быть перечислены через `|`, а шарды — через `,`. Например: + +```sql +CREATE TABLE test_shards (id UInt32, name String, age UInt32, money UInt32) ENGINE = ExternalDistributed('MySQL', `mysql{1|2}:3306,mysql{3|4}:3306`, 'clickhouse', 'test_replicas', 'root', 'clickhouse'); +``` + +При указании реплик для каждого из шардов при чтении будет выбрана одна из доступных реплик. Если соединиться не удалось, то будет выбрана следующая реплика, и так для всех реплик. Если попытка соединения для всех реплик не удалась, то будут снова произведены попытки соединения по кругу и так несколько раз. + +Вы можете указать любое количество шардов и любое количество реплик для каждого шарда. 
+ +**Смотрите также** + +- [Табличный движок MySQL](../../../engines/table-engines/integrations/mysql.md) +- [Табличный движок PostgreSQL](../../../engines/table-engines/integrations/postgresql.md) +- [Табличный движок Distributed](../../../engines/table-engines/special/distributed.md) diff --git a/docs/ru/engines/table-engines/integrations/mysql.md b/docs/ru/engines/table-engines/integrations/mysql.md index 784c941c173..0951077937e 100644 --- a/docs/ru/engines/table-engines/integrations/mysql.md +++ b/docs/ru/engines/table-engines/integrations/mysql.md @@ -23,8 +23,8 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster] Структура таблицы может отличаться от исходной структуры таблицы MySQL: - Имена столбцов должны быть такими же, как в исходной таблице MySQL, но вы можете использовать только некоторые из этих столбцов и в любом порядке. -- Типы столбцов могут отличаться от типов в исходной таблице MySQL. ClickHouse пытается [приводить](../../../sql-reference/functions/type-conversion-functions.md#type_conversion_function-cast) значения к типам данных ClickHouse. -- Настройка `external_table_functions_use_nulls` определяет как обрабатывать Nullable столбцы. По умолчанию 1, если 0 - табличная функция не будет делать nullable столбцы и будет вместо null выставлять значения по умолчанию для скалярного типа. Это также применимо для null значений внутри массивов. +- Типы столбцов могут отличаться от типов в исходной таблице MySQL. ClickHouse пытается [привести](../../../sql-reference/functions/type-conversion-functions.md#type_conversion_function-cast) значения к типам данных ClickHouse. +- Настройка `external_table_functions_use_nulls` определяет как обрабатывать Nullable столбцы. Значение по умолчанию: 1. Если значение 0, то табличная функция не будет делать nullable столбцы и будет вместо null выставлять значения по умолчанию для скалярного типа. Это также применимо для null значений внутри массивов. **Параметры движка** @@ -50,6 +50,12 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster] Остальные условия и ограничение выборки `LIMIT` будут выполнены в ClickHouse только после выполнения запроса к MySQL. +Поддерживает несколько реплик, которые должны быть перечислены через `|`. Например: + +```sql +CREATE TABLE test_replicas (id UInt32, name String, age UInt32, money UInt32) ENGINE = MySQL(`mysql{2|3|4}:3306`, 'clickhouse', 'test_replicas', 'root', 'clickhouse'); +``` + ## Пример использования {#primer-ispolzovaniia} Таблица в MySQL: diff --git a/docs/ru/engines/table-engines/integrations/postgresql.md b/docs/ru/engines/table-engines/integrations/postgresql.md index cb8e38ae5c9..c4d11a81f22 100644 --- a/docs/ru/engines/table-engines/integrations/postgresql.md +++ b/docs/ru/engines/table-engines/integrations/postgresql.md @@ -23,16 +23,16 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster] Структура таблицы может отличаться от исходной структуры таблицы PostgreSQL: - Имена столбцов должны быть такими же, как в исходной таблице PostgreSQL, но вы можете использовать только некоторые из этих столбцов и в любом порядке. -- Типы столбцов могут отличаться от типов в исходной таблице PostgreSQL. ClickHouse пытается [приводить](../../../sql-reference/functions/type-conversion-functions.md#type_conversion_function-cast) values to the ClickHouse data types. -- Настройка `external_table_functions_use_nulls` определяет как обрабатывать Nullable столбцы. 
По умолчанию 1, если 0 - табличная функция не будет делать nullable столбцы и будет вместо null выставлять значения по умолчанию для скалярного типа. Это также применимо для null значений внутри массивов. +- Типы столбцов могут отличаться от типов в исходной таблице PostgreSQL. ClickHouse пытается [привести](../../../sql-reference/functions/type-conversion-functions.md#type_conversion_function-cast) значения к типам данных ClickHouse. +- Настройка `external_table_functions_use_nulls` определяет как обрабатывать Nullable столбцы. Значение по умолчанию: 1. Если значение 0, то табличная функция не будет делать nullable столбцы и будет вместо null выставлять значения по умолчанию для скалярного типа. Это также применимо для null значений внутри массивов. **Параметры движка** - `host:port` — адрес сервера PostgreSQL. -- `database` — Имя базы данных на сервере PostgreSQL. -- `table` — Имя таблицы. -- `user` — Имя пользователя PostgreSQL. -- `password` — Пароль пользователя PostgreSQL. +- `database` — имя базы данных на сервере PostgreSQL. +- `table` — имя таблицы. +- `user` — имя пользователя PostgreSQL. +- `password` — пароль пользователя PostgreSQL. - `schema` — имя схемы, если не используется схема по умолчанию. Необязательный аргумент. ## Особенности реализации {#implementation-details} @@ -49,6 +49,12 @@ PostgreSQL массивы конвертируются в массивы ClickHo !!! info "Внимание" Будьте внимательны, в PostgreSQL массивы, созданные как `type_name[]`, являются многомерными и могут содержать в себе разное количество измерений в разных строках одной таблицы. Внутри ClickHouse допустимы только многомерные массивы с одинаковым кол-вом измерений во всех строках таблицы. + +Поддерживает несколько реплик, которые должны быть перечислены через `|`. Например: + +```sql +CREATE TABLE test_replicas (id UInt32, name String) ENGINE = PostgreSQL(`postgres{2|3|4}:5432`, 'clickhouse', 'test_replicas', 'postgres', 'mysecretpassword'); +``` При использовании словаря PostgreSQL поддерживается приоритет реплик. Чем больше номер реплики, тем ниже ее приоритет. Наивысший приоритет у реплики с номером `0`. diff --git a/docs/ru/sql-reference/table-functions/mysql.md b/docs/ru/sql-reference/table-functions/mysql.md index 665f1058ba2..4c5a913971d 100644 --- a/docs/ru/sql-reference/table-functions/mysql.md +++ b/docs/ru/sql-reference/table-functions/mysql.md @@ -38,6 +38,18 @@ mysql('host:port', 'database', 'table', 'user', 'password'[, replace_query, 'on_ Остальные условия и ограничение выборки `LIMIT` будут выполнены в ClickHouse только после выполнения запроса к MySQL. +Поддерживает несколько реплик, которые должны быть перечислены через `|`. Например: + +```sql +SELECT DISTINCT(name) FROM mysql(`mysql{1|2|3}:3306`, 'clickhouse', 'test_replicas', 'root', 'clickhouse'); +``` + +или + +```sql +SELECT DISTINCT(name) FROM mysql(`mysql1:3306|mysql2:3306|mysql3:3306`, 'clickhouse', 'test_replicas', 'root', 'clickhouse'); +``` + **Возвращаемое значение** Объект таблицы с теми же столбцами, что и в исходной таблице MySQL. diff --git a/docs/ru/sql-reference/table-functions/postgresql.md b/docs/ru/sql-reference/table-functions/postgresql.md index 2d8afe28f1e..7811033b9c9 100644 --- a/docs/ru/sql-reference/table-functions/postgresql.md +++ b/docs/ru/sql-reference/table-functions/postgresql.md @@ -43,6 +43,18 @@ PostgreSQL массивы конвертируются в массивы ClickHo !!! 
info "Примечание" Будьте внимательны, в PostgreSQL массивы, созданные как `type_name[]`, являются многомерными и могут содержать в себе разное количество измерений в разных строках одной таблицы. Внутри ClickHouse допустимы только многомерные массивы с одинаковым кол-вом измерений во всех строках таблицы. + +Поддерживает несколько реплик, которые должны быть перечислены через `|`. Например: + +```sql +SELECT DISTINCT(name) FROM postgresql(`postgres{1|2|3}:5432`, 'clickhouse', 'test_replicas', 'postgres', 'mysecretpassword'); +``` + +или + +```sql +SELECT DISTINCT(name) FROM postgresql(`postgres2:5431|postgres3:5432`, 'clickhouse', 'test_replicas', 'postgres', 'mysecretpassword'); +``` При использовании словаря PostgreSQL поддерживается приоритет реплик. Чем больше номер реплики, тем ниже ее приоритет. Наивысший приоритет у реплики с номером `0`. From db1c70975e3200d33547a72009426ebaaa8925d7 Mon Sep 17 00:00:00 2001 From: Maksim Kita Date: Fri, 25 Jun 2021 23:27:18 +0300 Subject: [PATCH 408/931] Compile expressions updated documentation --- docs/en/operations/settings/settings.md | 30 +++++++------------ docs/en/sql-reference/statements/system.md | 2 +- docs/ja/operations/settings/settings.md | 16 ---------- docs/ru/operations/settings/settings.md | 32 +++++++------------- docs/ru/sql-reference/statements/system.md | 34 +++++++++++----------- docs/zh/operations/settings/settings.md | 21 ++++++------- docs/zh/sql-reference/statements/system.md | 10 +++---- 7 files changed, 55 insertions(+), 90 deletions(-) diff --git a/docs/en/operations/settings/settings.md b/docs/en/operations/settings/settings.md index faaf13fb8df..08cf9daeb28 100644 --- a/docs/en/operations/settings/settings.md +++ b/docs/en/operations/settings/settings.md @@ -379,7 +379,7 @@ Default value: `1`. ## insert_null_as_default {#insert_null_as_default} -Enables or disables the insertion of [default values](../../sql-reference/statements/create/table.md#create-default-values) instead of [NULL](../../sql-reference/syntax.md#null-literal) into columns with not [nullable](../../sql-reference/data-types/nullable.md#data_type-nullable) data type. +Enables or disables the insertion of [default values](../../sql-reference/statements/create/table.md#create-default-values) instead of [NULL](../../sql-reference/syntax.md#null-literal) into columns with not [nullable](../../sql-reference/data-types/nullable.md#data_type-nullable) data type. If column type is not nullable and this setting is disabled, then inserting `NULL` causes an exception. If column type is nullable, then `NULL` values are inserted as is, regardless of this setting. This setting is applicable to [INSERT ... SELECT](../../sql-reference/statements/insert-into.md#insert_query_insert-select) queries. Note that `SELECT` subqueries may be concatenated with `UNION ALL` clause. @@ -1182,7 +1182,7 @@ Possible values: Default value: `1`. -**Additional Info** +**Additional Info** This setting is useful for replicated tables with a sampling key. A query may be processed faster if it is executed on several servers in parallel. But the query performance may degrade in the following cases: @@ -1194,14 +1194,7 @@ This setting is useful for replicated tables with a sampling key. A query may be !!! warning "Warning" This setting will produce incorrect results when joins or subqueries are involved and not all tables meet certain requirements. See [Distributed Subqueries and max_parallel_replicas](../../sql-reference/operators/in.md#max_parallel_replica-subqueries) for more details.
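As a quick, hedged illustration of the `max_parallel_replicas` behaviour described above (an editorial addition; the table name is made up):

```sql
-- A sketch only: `hits_replicated` is a hypothetical replicated table with a
-- sampling key; up to three replicas may serve parts of this read in parallel.
SELECT count()
FROM hits_replicated
SETTINGS max_parallel_replicas = 3;
```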
-## compile {#compile} - -Enable compilation of queries. By default, 0 (disabled). - -The compilation is only used for part of the query-processing pipeline: for the first stage of aggregation (GROUP BY). -If this portion of the pipeline was compiled, the query may run faster due to the deployment of short cycles and inlining aggregate function calls. The maximum performance improvement (up to four times faster in rare cases) is seen for queries with multiple simple aggregate functions. Typically, the performance gain is insignificant. In very rare cases, it may slow down query execution. - -## compile_expressions {#compile_expressions} +## compile_expressions {#compile-expressions} Enables or disables compilation of frequently used simple functions and operators to native code with LLVM at runtime. Possible values: Default value: `1`. -## min_count_to_compile {#min-count-to-compile} +## min_count_to_compile_expression {#min-count-to-compile-expression} -How many times to potentially use a compiled chunk of code before running compilation. By default, 3. -For testing, the value can be set to 0: compilation runs synchronously and the query waits for the end of the compilation process before continuing execution. For all other cases, use values starting with 1. Compilation normally takes about 5-10 seconds. -If the value is 1 or more, compilation occurs asynchronously in a separate thread. The result will be used as soon as it is ready, including queries that are currently running. +Minimum number of times the same expression is executed before it is compiled. -Compiled code is required for each different combination of aggregate functions used in the query and the type of keys in the GROUP BY clause. -The results of the compilation are saved in the build directory in the form of .so files. There is no restriction on the number of compilation results since they do not use very much space. Old results will be used after server restarts, except in the case of a server upgrade – in this case, the old results are deleted. +Default value: `3`. ## output_format_json_quote_64bit_integers {#session_settings-output_format_json_quote_64bit_integers} @@ -2683,7 +2673,7 @@ Default value: `0`. ## aggregate_functions_null_for_empty {#aggregate_functions_null_for_empty} Enables or disables rewriting all aggregate functions in a query, adding [-OrNull](../../sql-reference/aggregate-functions/combinators.md#agg-functions-combinator-ornull) suffix to them. Enable it for SQL standard compatibility. -It is implemented via query rewrite (similar to [count_distinct_implementation](#settings-count_distinct_implementation) setting) to get consistent results for distributed queries. +It is implemented via query rewrite (similar to [count_distinct_implementation](#settings-count_distinct_implementation) setting) to get consistent results for distributed queries. Possible values: @@ -2867,7 +2857,7 @@ Default value: `0`. ## database_atomic_wait_for_drop_and_detach_synchronously {#database_atomic_wait_for_drop_and_detach_synchronously} -Adds a modifier `SYNC` to all `DROP` and `DETACH` queries. +Adds a modifier `SYNC` to all `DROP` and `DETACH` queries. Possible values: @@ -2973,7 +2963,7 @@ Enables or disables using the original column names instead of aliases in query Possible values: - 0 — The column name is substituted with the alias. -- 1 — The column name is not substituted with the alias. +- 1 — The column name is not substituted with the alias. Default value: `0`.
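Stepping back to the two compilation settings renamed above — a minimal, hedged sketch of how they might be toggled per session (an editorial addition; the values are illustrative, not recommendations):

```sql
-- A sketch: turn on runtime compilation of frequently repeated expressions
-- and compile an expression once it has been executed twice.
SET compile_expressions = 1;
SET min_count_to_compile_expression = 2;
```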
@@ -3086,7 +3076,7 @@ SELECT sum(a), sumCount(b).1, sumCount(b).2, - (sumCount(b).1) / (sumCount(b).2) + (sumCount(b).1) / (sumCount(b).2) FROM fuse_tbl ``` diff --git a/docs/en/sql-reference/statements/system.md b/docs/en/sql-reference/statements/system.md index 1708d594641..d1526c10203 100644 --- a/docs/en/sql-reference/statements/system.md +++ b/docs/en/sql-reference/statements/system.md @@ -119,7 +119,7 @@ For manage uncompressed data cache parameters use following server level setting ## DROP COMPILED EXPRESSION CACHE {#query_language-system-drop-compiled-expression-cache} Reset the compiled expression cache. Used in development of ClickHouse and performance tests. -Complied expression cache used when query/user/profile enable option [compile](../../operations/settings/settings.md#compile) +The compiled expression cache is used when the option [compile-expressions](../../operations/settings/settings.md#compile-expressions) is enabled for a query, user or profile. ## FLUSH LOGS {#query_language-system-flush_logs} diff --git a/docs/ja/operations/settings/settings.md b/docs/ja/operations/settings/settings.md index 530edf780f0..8ba30f318ab 100644 --- a/docs/ja/operations/settings/settings.md +++ b/docs/ja/operations/settings/settings.md @@ -817,22 +817,6 @@ load_balancing = first_or_random のための一貫性を異なる部分に同じデータを分割)、このオプションにしているときだけサンプリングキーを設定します。 レプリカラグは制御されません。 -## コンパイル {#compile} - -を編集ます。 既定では、0(無効)です。 - -コンパイルは、クエリ処理パイプラインの一部にのみ使用されます。 -この部分のパイプラインのためのクエリを実行するアによる展開の短サイクルinlining集計機能。 複数の単純な集計関数を使用するクエリでは、最大のパフォーマンスの向上が見られます。 通常、性能は軽微であります。 非常に珍しい例で遅くなクエリを実行します。 -## min_count_to_compile {#min-count-to-compile} -り方を潜在的に利用コチャンクのコードの実行前に作成する。 デフォルトでは3. -For testing, the value can be set to 0: compilation runs synchronously and the query waits for the end of the compilation process before continuing execution. For all other cases, use values starting with 1. Compilation normally takes about 5-10 seconds. -値が1以上の場合、コンパイルは別のスレッドで非同期に実行されます。 結果は、現在実行中のクエリを含め、準備が整うとすぐに使用されます。 -コンパイルされたコードは、クエリで使用される集計関数とGROUP BY句内のキーの種類のそれぞれの異なる組み合わせに必要です。 -The results of the compilation are saved in the build directory in the form of .so files. There is no restriction on the number of compilation results since they don't use very much space. Old results will be used after server restarts, except in the case of a server upgrade – in this case, the old results are deleted. ## output_format_json_quote_64bit_integers {#session_settings-output_format_json_quote_64bit_integers} 値がtrueの場合、json\*Int64およびUInt64形式(ほとんどのJavaScript実装との互換性のため)を使用するときに整数が引用符で表示されます。 diff --git a/docs/ru/operations/settings/settings.md b/docs/ru/operations/settings/settings.md index a105316eab0..be3695badc5 100644 --- a/docs/ru/operations/settings/settings.md +++ b/docs/ru/operations/settings/settings.md @@ -348,7 +348,7 @@ INSERT INTO table_with_enum_column_for_tsv_insert FORMAT TSV 102 2; ## input_format_null_as_default {#settings-input-format-null-as-default} Включает или отключает инициализацию [значениями по умолчанию](../../sql-reference/statements/create/table.md#create-default-values) ячеек с [NULL](../../sql-reference/syntax.md#null-literal), если тип данных столбца не позволяет [хранить NULL](../../sql-reference/data-types/nullable.md#data_type-nullable). -Если столбец не позволяет хранить `NULL` и эта настройка отключена, то вставка `NULL` приведет к возникновению исключения. Если столбец позволяет хранить `NULL`, то значения `NULL` вставляются независимо от этой настройки.
+Если столбец не позволяет хранить `NULL` и эта настройка отключена, то вставка `NULL` приведет к возникновению исключения. Если столбец позволяет хранить `NULL`, то значения `NULL` вставляются независимо от этой настройки. Эта настройка используется для запросов [INSERT ... VALUES](../../sql-reference/statements/insert-into.md) для текстовых входных форматов. @@ -361,7 +361,7 @@ INSERT INTO table_with_enum_column_for_tsv_insert FORMAT TSV 102 2; ## insert_null_as_default {#insert_null_as_default} -Включает или отключает вставку [значений по умолчанию](../../sql-reference/statements/create/table.md#create-default-values) вместо [NULL](../../sql-reference/syntax.md#null-literal) в столбцы, которые не позволяют [хранить NULL](../../sql-reference/data-types/nullable.md#data_type-nullable). +Включает или отключает вставку [значений по умолчанию](../../sql-reference/statements/create/table.md#create-default-values) вместо [NULL](../../sql-reference/syntax.md#null-literal) в столбцы, которые не позволяют [хранить NULL](../../sql-reference/data-types/nullable.md#data_type-nullable). Если столбец не позволяет хранить `NULL` и эта настройка отключена, то вставка `NULL` приведет к возникновению исключения. Если столбец позволяет хранить `NULL`, то значения `NULL` вставляются независимо от этой настройки. Эта настройка используется для запросов [INSERT ... SELECT](../../sql-reference/statements/insert-into.md#insert_query_insert-select). При этом подзапросы `SELECT` могут объединяться с помощью `UNION ALL`. @@ -1181,17 +1181,9 @@ load_balancing = round_robin !!! warning "Предупреждение" Параллельное выполнение запроса может привести к неверному результату, если в запросе есть объединение или подзапросы и при этом таблицы не удовлетворяют определенным требованиям. Подробности смотрите в разделе [Распределенные подзапросы и max_parallel_replicas](../../sql-reference/operators/in.md#max_parallel_replica-subqueries). +## compile_expressions {#compile-expressions} -## compile {#compile} - -Включить компиляцию запросов. По умолчанию - 0 (выключено). - -Компиляция предусмотрена только для части конвейера обработки запроса - для первой стадии агрегации (GROUP BY). -В случае, если эта часть конвейера была скомпилирована, запрос может работать быстрее, за счёт разворачивания коротких циклов и инлайнинга вызовов агрегатных функций. Максимальный прирост производительности (до четырёх раз в редких случаях) достигается на запросах с несколькими простыми агрегатными функциями. Как правило, прирост производительности незначителен. В очень редких случаях возможно замедление выполнения запроса. - -## compile_expressions {#compile_expressions} - -Включает или выключает компиляцию часто используемых функций и операторов. Компиляция производится в нативный код платформы с помощью LLVM во время выполнения. +Включает или выключает компиляцию часто используемых функций и операторов. Компиляция производится в нативный код платформы с помощью LLVM во время выполнения. Возможные значения: @@ -1199,14 +1191,12 @@ load_balancing = round_robin - 1 — компиляция включена. Значение по умолчанию: `1`. -## min_count_to_compile {#min-count-to-compile} -После скольких раз, когда скомпилированный кусок кода мог пригодиться, выполнить его компиляцию. По умолчанию - 3. -Для тестирования можно установить значение 0: компиляция выполняется синхронно, и запрос ожидает окончания процесса компиляции перед продолжением выполнения. Во всех остальных случаях используйте значения, начинающиеся с 1. 
Как правило, компиляция занимает по времени около 5-10 секунд. -В случае, если значение равно 1 или больше, компиляция выполняется асинхронно, в отдельном потоке. При готовности результата, он сразу же будет использован, в том числе, уже выполняющимися в данный момент запросами. +## min_count_to_compile_expression {#min-count-to-compile-expression} -Скомпилированный код требуется для каждого разного сочетания используемых в запросе агрегатных функций и вида ключей в GROUP BY. -Результаты компиляции сохраняются в директории build в виде .so файлов. Количество результатов компиляции не ограничено, так как они не занимают много места. При перезапуске сервера, старые результаты будут использованы, за исключением случая обновления сервера - тогда старые результаты удаляются. +Минимальное количество выполнений одного и того же выражения до его компиляции. + +Значение по умолчанию: `3`. ## input_format_skip_unknown_fields {#input-format-skip-unknown-fields} @@ -2731,7 +2721,7 @@ SELECT * FROM test2; - 0 — запрос `INSERT` добавляет данные в конец файла после существующих. - 1 — `INSERT` удаляет имеющиеся в файле данные и замещает их новыми. -Значение по умолчанию: `0`. +Значение по умолчанию: `0`. ## allow_experimental_geo_types {#allow-experimental-geo-types} @@ -2745,7 +2735,7 @@ SELECT * FROM test2; ## database_atomic_wait_for_drop_and_detach_synchronously {#database_atomic_wait_for_drop_and_detach_synchronously} -Добавляет модификатор `SYNC` ко всем запросам `DROP` и `DETACH`. +Добавляет модификатор `SYNC` ко всем запросам `DROP` и `DETACH`. Возможные значения: @@ -2823,7 +2813,7 @@ SELECT * FROM test2; **Пример** -Какие изменения привносит включение и выключение настройки: +Какие изменения привносит включение и выключение настройки: Запрос: diff --git a/docs/ru/sql-reference/statements/system.md b/docs/ru/sql-reference/statements/system.md index 2589408b8fa..634343d112f 100644 --- a/docs/ru/sql-reference/statements/system.md +++ b/docs/ru/sql-reference/statements/system.md @@ -5,14 +5,14 @@ toc_title: SYSTEM # Запросы SYSTEM {#query-language-system} -- [RELOAD EMBEDDED DICTIONARIES](#query_language-system-reload-emdedded-dictionaries) +- [RELOAD EMBEDDED DICTIONARIES](#query_language-system-reload-emdedded-dictionaries) - [RELOAD DICTIONARIES](#query_language-system-reload-dictionaries) - [RELOAD DICTIONARY](#query_language-system-reload-dictionary) - [RELOAD MODELS](#query_language-system-reload-models) - [RELOAD MODEL](#query_language-system-reload-model) - [DROP DNS CACHE](#query_language-system-drop-dns-cache) - [DROP MARK CACHE](#query_language-system-drop-mark-cache) -- [DROP UNCOMPRESSED CACHE](#query_language-system-drop-uncompressed-cache) +- [DROP UNCOMPRESSED CACHE](#query_language-system-drop-uncompressed-cache) - [DROP COMPILED EXPRESSION CACHE](#query_language-system-drop-compiled-expression-cache) - [DROP REPLICA](#query_language-system-drop-replica) - [FLUSH LOGS](#query_language-system-flush_logs) @@ -24,10 +24,10 @@ toc_title: SYSTEM - [START DISTRIBUTED SENDS](#query_language-system-start-distributed-sends) - [STOP MERGES](#query_language-system-stop-merges) - [START MERGES](#query_language-system-start-merges) -- [STOP TTL MERGES](#query_language-stop-ttl-merges) -- [START TTL MERGES](#query_language-start-ttl-merges) -- [STOP MOVES](#query_language-stop-moves) -- [START MOVES](#query_language-start-moves) +- [STOP TTL MERGES](#query_language-stop-ttl-merges) +- [START TTL MERGES](#query_language-start-ttl-merges) +- [STOP MOVES](#query_language-stop-moves) +- [START 
MOVES](#query_language-start-moves) - [STOP FETCHES](#query_language-system-stop-fetches) - [START FETCHES](#query_language-system-start-fetches) - [STOP REPLICATED SENDS](#query_language-system-start-replicated-sends) - [START REPLICATED SENDS](#query_language-system-start-replicated-sends) - [STOP REPLICATION QUEUES](#query_language-system-stop-replication-queues) - [START REPLICATION QUEUES](#query_language-system-start-replication-queues) - [SYNC REPLICA](#query_language-system-sync-replica) - [RESTART REPLICA](#query_language-system-restart-replica) -- [RESTART REPLICAS](#query_language-system-restart-replicas) +- [RESTART REPLICAS](#query_language-system-restart-replicas) -## RELOAD EMBEDDED DICTIONARIES] {#query_language-system-reload-emdedded-dictionaries} +## RELOAD EMBEDDED DICTIONARIES] {#query_language-system-reload-emdedded-dictionaries} Перегружает все [Встроенные словари](../dictionaries/internal-dicts.md). -По умолчанию встроенные словари выключены. +По умолчанию встроенные словари выключены. Всегда возвращает `Ok.`, вне зависимости от результата обновления встроенных словарей. - + ## RELOAD DICTIONARIES {#query_language-system-reload-dictionaries} Перегружает все словари, которые были успешно загружены до этого. @@ -115,7 +115,7 @@ SYSTEM DROP REPLICA 'replica_name' FROM ZKPATH '/path/to/table/in/zk'; ## DROP COMPILED EXPRESSION CACHE {#query_language-system-drop-compiled-expression-cache} Сбрасывает кеш скомпилированных выражений. Используется при разработке ClickHouse и тестах производительности. -Компилированные выражения используются когда включена настройка уровня запрос/пользователь/профиль [compile](../../operations/settings/settings.md#compile) +Скомпилированные выражения используются когда включена настройка уровня запрос/пользователь/профиль [compile-expressions](../../operations/settings/settings.md#compile-expressions) ## FLUSH LOGS {#query_language-system-flush_logs} @@ -194,7 +194,7 @@ SYSTEM START MERGES [ON VOLUME | [db.]merge_tree_family_table_name SYSTEM STOP TTL MERGES [[db.]merge_tree_family_table_name] ``` -### START TTL MERGES {#query_language-start-ttl-merges} +### START TTL MERGES {#query_language-start-ttl-merges} Запускает фоновые процессы удаления старых данных основанные на [выражениях TTL](../../engines/table-engines/mergetree-family/mergetree.md#table_engine-mergetree-ttl) для таблиц семейства MergeTree: Возвращает `Ok.` даже если указана несуществующая таблица или таблица имеет тип отличный от MergeTree. Возвращает ошибку если указана не существующая база данных: ``` sql SYSTEM START TTL MERGES [[db.]merge_tree_family_table_name] ``` -### STOP MOVES {#query_language-stop-moves} +### STOP MOVES {#query_language-stop-moves} Позволяет остановить фоновые процессы переноса данных основанные [табличных выражениях TTL с использованием TO VOLUME или TO DISK](../../engines/table-engines/mergetree-family/mergetree.md#mergetree-table-ttl) for tables in the MergeTree family: Возвращает `Ok.` даже если указана несуществующая таблица или таблица имеет тип отличный от MergeTree.
Возвращает ошибку если указана не существующая база данных: @@ -212,7 +212,7 @@ SYSTEM START TTL MERGES [[db.]merge_tree_family_table_name] SYSTEM STOP MOVES [[db.]merge_tree_family_table_name] ``` -### START MOVES {#query_language-start-moves} +### START MOVES {#query_language-start-moves} Запускает фоновые процессы переноса данных основанные [табличных выражениях TTL с использованием TO VOLUME или TO DISK](../../engines/table-engines/mergetree-family/mergetree.md#mergetree-table-ttl) for tables in the MergeTree family: Возвращает `Ok.` даже если указана несуществующая таблица или таблица имеет тип отличный от MergeTree. Возвращает ошибку если указана не существующая база данных: @@ -261,7 +261,7 @@ SYSTEM START REPLICATED SENDS [[db.]replicated_merge_tree_family_table_name] ### STOP REPLICATION QUEUES {#query_language-system-stop-replication-queues} -Останавливает фоновые процессы разбора заданий из очереди репликации которая хранится в Zookeeper для таблиц семейства `ReplicatedMergeTree`. Возможные типы заданий - merges, fetches, mutation, DDL запросы с ON CLUSTER: +Останавливает фоновые процессы разбора заданий из очереди репликации которая хранится в Zookeeper для таблиц семейства `ReplicatedMergeTree`. Возможные типы заданий - merges, fetches, mutation, DDL запросы с ON CLUSTER: ``` sql SYSTEM STOP REPLICATION QUEUES [[db.]replicated_merge_tree_family_table_name] @@ -269,7 +269,7 @@ SYSTEM STOP REPLICATION QUEUES [[db.]replicated_merge_tree_family_table_name] ### START REPLICATION QUEUES {#query_language-system-start-replication-queues} -Запускает фоновые процессы разбора заданий из очереди репликации которая хранится в Zookeeper для таблиц семейства `ReplicatedMergeTree`. Возможные типы заданий - merges, fetches, mutation, DDL запросы с ON CLUSTER: +Запускает фоновые процессы разбора заданий из очереди репликации которая хранится в Zookeeper для таблиц семейства `ReplicatedMergeTree`. 
Возможные типы заданий - merges, fetches, mutation, DDL запросы с ON CLUSTER: ``` sql SYSTEM START REPLICATION QUEUES [[db.]replicated_merge_tree_family_table_name] @@ -277,7 +277,7 @@ SYSTEM START REPLICATION QUEUES [[db.]replicated_merge_tree_family_table_name] ### SYNC REPLICA {#query_language-system-sync-replica} -Ждет когда таблица семейства `ReplicatedMergeTree` будет синхронизирована с другими репликами в кластере, будет работать до достижения `receive_timeout`, если синхронизация для таблицы отключена в настоящий момент времени: +Ждет, когда таблица семейства `ReplicatedMergeTree` будет синхронизирована с другими репликами в кластере; будет работать до достижения `receive_timeout`, если синхронизация для таблицы отключена в настоящий момент времени: ``` sql SYSTEM SYNC REPLICA [db.]replicated_merge_tree_family_table_name diff --git a/docs/zh/operations/settings/settings.md b/docs/zh/operations/settings/settings.md index 720b822ce29..ef4f4f86d01 100644 --- a/docs/zh/operations/settings/settings.md +++ b/docs/zh/operations/settings/settings.md @@ -817,21 +817,22 @@ load_balancing = first_or_random 为了保持一致性(以获取相同数据拆分的不同部分),此选项仅在设置了采样键时有效。 副本滞后不受控制。 -## 编译 {#compile} +## compile_expressions {#compile-expressions} -启用查询的编译。 默认情况下,0(禁用)。 +启用或禁用在运行时使用 LLVM 将常用的简单函数和运算符编译为本机代码。 -编译仅用于查询处理管道的一部分:用于聚合的第一阶段(GROUP BY)。 -如果编译了管道的这一部分,则由于部署周期较短和内联聚合函数调用,查询可能运行得更快。 对于具有多个简单聚合函数的查询,可以看到最大的性能改进(在极少数情况下可快四倍)。 通常,性能增益是微不足道的。 在极少数情况下,它可能会减慢查询执行速度。 +可能的值: -## min_count_to_compile {#min-count-to-compile} +- 0 — 禁用。 +- 1 — 启用。 -在运行编译之前可能使用已编译代码块的次数。 默认情况下,3。 +默认值:`1`。 -For testing, the value can be set to 0: compilation runs synchronously and the query waits for the end of the compilation process before continuing execution. For all other cases, use values starting with 1. Compilation normally takes about 5-10 seconds. -如果该值为1或更大,则编译在单独的线程中异步进行。 结果将在准备就绪后立即使用,包括当前正在运行的查询。 -对于查询中使用的聚合函数的每个不同组合以及GROUP BY子句中的键类型,都需要编译代码。 -The results of the compilation are saved in the build directory in the form of .so files. There is no restriction on the number of compilation results since they don't use very much space. Old results will be used after server restarts, except in the case of a server upgrade – in this case, the old results are deleted.
+ +## min_count_to_compile_expression {#min-count-to-compile-expression} + +在编译之前执行相同表达式的最小计数。 + +默认值:`3`。 ## output_format_json_quote_64bit_integers {#session_settings-output_format_json_quote_64bit_integers} diff --git a/docs/zh/sql-reference/statements/system.md b/docs/zh/sql-reference/statements/system.md index 9952f383236..7f2b7ae1082 100644 --- a/docs/zh/sql-reference/statements/system.md +++ b/docs/zh/sql-reference/statements/system.md @@ -95,7 +95,7 @@ SYSTEM DROP REPLICA 'replica_name' FROM ZKPATH '/path/to/table/in/zk'; ## DROP COMPILED EXPRESSION CACHE {#query_language-system-drop-compiled-expression-cache} 重置已编译的表达式缓存。用于ClickHouse开发和性能测试。 -当 `query/user/profile` 启用配置项 [compile](../../operations/settings/settings.md#compile)时,编译的表达式缓存开启。 +当 `query/user/profile` 启用配置项 [compile-expressions](../../operations/settings/settings.md#compile-expressions)时,编译的表达式缓存开启。 ## FLUSH LOGS {#query_language-system-flush_logs} @@ -209,7 +209,7 @@ SYSTEM STOP MOVES [[db.]merge_tree_family_table_name] ### STOP FETCHES {#query_language-system-stop-fetches} -停止后台获取 `ReplicatedMergeTree`系列引擎表中插入的数据块。 +停止后台获取 `ReplicatedMergeTree`系列引擎表中插入的数据块。 不管表引擎类型如何或表/数据库是否存在,都返回 `OK.`。 ``` sql SYSTEM STOP FETCHES [[db.]replicated_merge_tree_family_table_name] ``` ### START FETCHES {#query_language-system-start-fetches} -启动后台获取 `ReplicatedMergeTree`系列引擎表中插入的数据块。 +启动后台获取 `ReplicatedMergeTree`系列引擎表中插入的数据块。 不管表引擎类型如何或表/数据库是否存在,都返回 `OK.`。 ``` sql SYSTEM START FETCHES [[db.]replicated_merge_tree_family_table_name] ``` ### STOP REPLICATED SENDS {#query_language-system-start-replicated-sends} -停止通过后台分发 `ReplicatedMergeTree`系列引擎表中新插入的数据块到集群的其它副本节点。 +停止通过后台分发 `ReplicatedMergeTree`系列引擎表中新插入的数据块到集群的其它副本节点。 ``` sql SYSTEM STOP REPLICATED SENDS [[db.]replicated_merge_tree_family_table_name] ``` ### START REPLICATED SENDS {#query_language-system-start-replicated-sends} -启动通过后台分发 `ReplicatedMergeTree`系列引擎表中新插入的数据块到集群的其它副本节点。 +启动通过后台分发 `ReplicatedMergeTree`系列引擎表中新插入的数据块到集群的其它副本节点。 ``` sql SYSTEM START REPLICATED SENDS [[db.]replicated_merge_tree_family_table_name] From 67a6721c25d8520fee8d92b4710333f4e9a8c4eb Mon Sep 17 00:00:00 2001 From: alexey-milovidov Date: Sat, 26 Jun 2021 00:14:49 +0300 Subject: [PATCH 409/931] Ruin syntax highlighting (it was harmful), continuation of #25682 --- website/css/highlight.css | 68 +-------------------------------------- 1 file changed, 1 insertion(+), 67 deletions(-) diff --git a/website/css/highlight.css b/website/css/highlight.css index be5fc1025b1..52f65bfc74e 100644 --- a/website/css/highlight.css +++ b/website/css/highlight.css @@ -1,76 +1,10 @@ /* - Name: Base16 Eighties Dark + Ocean light + Name: Base16 Eighties Dark Author: Chris Kempson (http://chriskempson.com) Pygments template by Jan T.
Sott (https://github.com/idleberg) Created with Base16 Builder by Chris Kempson (https://github.com/chriskempson/base16-builder) */ -.syntax .hll { background-color: #e0e0e0 } -.syntax { background: #ffffff; color: #1d1f21 } -.syntax .c { color: #b4b7b4 } /* Comment */ -.syntax .err { color: #cc6666 } /* Error */ -.syntax .k { color: #b294bb } /* Keyword */ -.syntax .l { color: #de935f } /* Literal */ -.syntax .n { color: #1d1f21 } /* Name */ -.syntax .o { color: #8abeb7 } /* Operator */ -.syntax .p { color: #1d1f21 } /* Punctuation */ -.syntax .cm { color: #b4b7b4 } /* Comment.Multiline */ -.syntax .cp { color: #b4b7b4 } /* Comment.Preproc */ -.syntax .c1 { color: #b4b7b4 } /* Comment.Single */ -.syntax .cs { color: #b4b7b4 } /* Comment.Special */ -.syntax .gd { color: #cc6666 } /* Generic.Deleted */ -.syntax .ge { font-style: italic } /* Generic.Emph */ -.syntax .gh { color: #1d1f21; font-weight: bold } /* Generic.Heading */ -.syntax .gi { color: #b5bd68 } /* Generic.Inserted */ -.syntax .gp { color: #b4b7b4; font-weight: bold } /* Generic.Prompt */ -.syntax .gs { font-weight: bold } /* Generic.Strong */ -.syntax .gu { color: #8abeb7; font-weight: bold } /* Generic.Subheading */ -.syntax .kc { color: #b294bb } /* Keyword.Constant */ -.syntax .kd { color: #b294bb } /* Keyword.Declaration */ -.syntax .kn { color: #8abeb7 } /* Keyword.Namespace */ -.syntax .kp { color: #b294bb } /* Keyword.Pseudo */ -.syntax .kr { color: #b294bb } /* Keyword.Reserved */ -.syntax .kt { color: #f0c674 } /* Keyword.Type */ -.syntax .ld { color: #b5bd68 } /* Literal.Date */ -.syntax .m { color: #de935f } /* Literal.Number */ -.syntax .s { color: #b5bd68 } /* Literal.String */ -.syntax .na { color: #81a2be } /* Name.Attribute */ -.syntax .nb { color: #1d1f21 } /* Name.Builtin */ -.syntax .nc { color: #f0c674 } /* Name.Class */ -.syntax .no { color: #cc6666 } /* Name.Constant */ -.syntax .nd { color: #8abeb7 } /* Name.Decorator */ -.syntax .ni { color: #1d1f21 } /* Name.Entity */ -.syntax .ne { color: #cc6666 } /* Name.Exception */ -.syntax .nf { color: #81a2be } /* Name.Function */ -.syntax .nl { color: #1d1f21 } /* Name.Label */ -.syntax .nn { color: #f0c674 } /* Name.Namespace */ -.syntax .nx { color: #81a2be } /* Name.Other */ -.syntax .py { color: #1d1f21 } /* Name.Property */ -.syntax .nt { color: #8abeb7 } /* Name.Tag */ -.syntax .nv { color: #cc6666 } /* Name.Variable */ -.syntax .ow { color: #8abeb7 } /* Operator.Word */ -.syntax .w { color: #1d1f21 } /* Text.Whitespace */ -.syntax .mf { color: #de935f } /* Literal.Number.Float */ -.syntax .mh { color: #de935f } /* Literal.Number.Hex */ -.syntax .mi { color: #de935f } /* Literal.Number.Integer */ -.syntax .mo { color: #de935f } /* Literal.Number.Oct */ -.syntax .sb { color: #b5bd68 } /* Literal.String.Backtick */ -.syntax .sc { color: #1d1f21 } /* Literal.String.Char */ -.syntax .sd { color: #b4b7b4 } /* Literal.String.Doc */ -.syntax .s2 { color: #b5bd68 } /* Literal.String.Double */ -.syntax .se { color: #de935f } /* Literal.String.Escape */ -.syntax .sh { color: #b5bd68 } /* Literal.String.Heredoc */ -.syntax .si { color: #de935f } /* Literal.String.Interpol */ -.syntax .sx { color: #b5bd68 } /* Literal.String.Other */ -.syntax .sr { color: #b5bd68 } /* Literal.String.Regex */ -.syntax .s1 { color: #b5bd68 } /* Literal.String.Single */ -.syntax .ss { color: #b5bd68 } /* Literal.String.Symbol */ -.syntax .bp { color: #1d1f21 } /* Name.Builtin.Pseudo */ -.syntax .vc { color: #cc6666 } /* Name.Variable.Class */ -.syntax .vg { color: #cc6666 } /* 
Name.Variable.Global */ -.syntax .vi { color: #cc6666 } /* Name.Variable.Instance */ -.syntax .il { color: #de935f } /* Literal.Number.Integer.Long */ - @media (prefers-color-scheme: dark) { .syntax .hll { background-color: #515151 } From e766d2cb1f246495f5dde6eb56d4907f4a02226d Mon Sep 17 00:00:00 2001 From: alexey-milovidov Date: Sat, 26 Jun 2021 03:33:59 +0300 Subject: [PATCH 410/931] Update 01917_system_data_skipping_indices.sql --- .../0_stateless/01917_system_data_skipping_indices.sql | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/tests/queries/0_stateless/01917_system_data_skipping_indices.sql b/tests/queries/0_stateless/01917_system_data_skipping_indices.sql index 768863a630f..bfe9d6398b3 100644 --- a/tests/queries/0_stateless/01917_system_data_skipping_indices.sql +++ b/tests/queries/0_stateless/01917_system_data_skipping_indices.sql @@ -23,12 +23,12 @@ CREATE TABLE data_01917_2 Engine=MergeTree() ORDER BY name; -SELECT * FROM system.data_skipping_indices; +SELECT * FROM system.data_skipping_indices WHERE database = currentDatabase(); -SELECT count(*) FROM system.data_skipping_indices WHERE table = 'data_01917'; -SELECT count(*) FROM system.data_skipping_indices WHERE table = 'data_01917_2'; +SELECT count(*) FROM system.data_skipping_indices WHERE table = 'data_01917' AND database = currentDatabase(); +SELECT count(*) FROM system.data_skipping_indices WHERE table = 'data_01917_2' AND database = currentDatabase(); -SELECT name FROM system.data_skipping_indices WHERE type = 'minmax'; +SELECT name FROM system.data_skipping_indices WHERE type = 'minmax' AND database = currentDatabase(); DROP TABLE data_01917; DROP TABLE data_01917_2; From fdc965c5796dfa101b9ecb8a2512be115e24e32b Mon Sep 17 00:00:00 2001 From: alexey-milovidov Date: Sat, 26 Jun 2021 04:07:25 +0300 Subject: [PATCH 411/931] Update recipes.md --- docs/en/getting-started/example-datasets/recipes.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/en/getting-started/example-datasets/recipes.md b/docs/en/getting-started/example-datasets/recipes.md index 0f4e81c8470..22c068af358 100644 --- a/docs/en/getting-started/example-datasets/recipes.md +++ b/docs/en/getting-started/example-datasets/recipes.md @@ -65,7 +65,7 @@ By checking the row count: Query: -``` sq; +``` sql SELECT count() FROM recipes; ``` From a29bd2c4b3b2c7dc6c9d424a0be8b7bbc90fa1f7 Mon Sep 17 00:00:00 2001 From: "Matwey V. Kornilov" Date: Sat, 26 Jun 2021 11:25:54 +0300 Subject: [PATCH 412/931] Add missed #include Signed-off-by: Matwey V. 
Kornilov --- src/Storages/MergeTree/MergeTreePartInfo.h | 1 + 1 file changed, 1 insertion(+) diff --git a/src/Storages/MergeTree/MergeTreePartInfo.h b/src/Storages/MergeTree/MergeTreePartInfo.h index 66d5342b67f..e928ee92cfd 100644 --- a/src/Storages/MergeTree/MergeTreePartInfo.h +++ b/src/Storages/MergeTree/MergeTreePartInfo.h @@ -1,5 +1,6 @@ #pragma once +#include #include #include #include From 008adabec2eb9b905473f33381dd4af1eed38a11 Mon Sep 17 00:00:00 2001 From: Maksim Kita Date: Fri, 18 Jun 2021 01:29:41 +0300 Subject: [PATCH 413/931] Support REPLACE DICTIONARY, CREATE OR REPLACE DICTIONARY queries --- src/Interpreters/InterpreterCreateQuery.cpp | 5 ++ src/Parsers/ASTCreateQuery.cpp | 10 +++- src/Parsers/ParserCreateQuery.cpp | 24 +++++++-- src/Storages/StorageDictionary.cpp | 17 +++++-- src/Storages/StorageDictionary.h | 4 +- .../01913_replace_dictionary.reference | 2 + .../0_stateless/01913_replace_dictionary.sql | 50 +++++++++++++++++++ 7 files changed, 99 insertions(+), 13 deletions(-) create mode 100644 tests/queries/0_stateless/01913_replace_dictionary.reference create mode 100644 tests/queries/0_stateless/01913_replace_dictionary.sql diff --git a/src/Interpreters/InterpreterCreateQuery.cpp b/src/Interpreters/InterpreterCreateQuery.cpp index 28d88bdd8df..38e5b266e13 100644 --- a/src/Interpreters/InterpreterCreateQuery.cpp +++ b/src/Interpreters/InterpreterCreateQuery.cpp @@ -1101,6 +1101,7 @@ BlockIO InterpreterCreateQuery::doCreateOrReplaceTable(ASTCreateQuery & create, [[maybe_unused]] bool done = doCreateTable(create, properties); assert(done); ast_drop->table = create.table; + ast_drop->is_dictionary = create.is_dictionary; ast_drop->database = create.database; ast_drop->kind = ASTDropQuery::Drop; created = true; @@ -1113,14 +1114,18 @@ BlockIO InterpreterCreateQuery::doCreateOrReplaceTable(ASTCreateQuery & create, ASTRenameQuery::Table{create.database, create.table}, ASTRenameQuery::Table{create.database, table_to_replace_name} }; + ast_rename->elements.push_back(std::move(elem)); ast_rename->exchange = true; + ast_rename->dictionary = create.is_dictionary; + InterpreterRenameQuery(ast_rename, getContext()).execute(); replaced = true; InterpreterDropQuery(ast_drop, getContext()).execute(); create.table = table_to_replace_name; + return fillTableIfNeeded(create); } catch (...) diff --git a/src/Parsers/ASTCreateQuery.cpp b/src/Parsers/ASTCreateQuery.cpp index d6d424beb3a..8d8a0a1840a 100644 --- a/src/Parsers/ASTCreateQuery.cpp +++ b/src/Parsers/ASTCreateQuery.cpp @@ -305,8 +305,16 @@ void ASTCreateQuery::formatQueryImpl(const FormatSettings & settings, FormatStat } else { + String action = "CREATE"; + if (attach) + action = "ATTACH"; + else if (replace_table && create_or_replace) + action = "CREATE OR REPLACE"; + else if (replace_table) + action = "REPLACE"; + /// Always DICTIONARY - settings.ostr << (settings.hilite ? hilite_keyword : "") << (attach ? "ATTACH " : "CREATE ") << "DICTIONARY " + settings.ostr << (settings.hilite ? hilite_keyword : "") << action << "DICTIONARY " << (if_not_exists ? "IF NOT EXISTS " : "") << (settings.hilite ? hilite_none : "") << (!database.empty() ? backQuoteIfNeed(database) + "." 
: "") << backQuoteIfNeed(table); if (uuid != UUIDHelpers::Nil) diff --git a/src/Parsers/ParserCreateQuery.cpp b/src/Parsers/ParserCreateQuery.cpp index 23a53ed3244..d4525883e36 100644 --- a/src/Parsers/ParserCreateQuery.cpp +++ b/src/Parsers/ParserCreateQuery.cpp @@ -971,6 +971,8 @@ bool ParserCreateDictionaryQuery::parseImpl(IParser::Pos & pos, ASTPtr & node, E { ParserKeyword s_create("CREATE"); ParserKeyword s_attach("ATTACH"); + ParserKeyword s_replace("REPLACE"); + ParserKeyword s_or_replace("OR REPLACE"); ParserKeyword s_dictionary("DICTIONARY"); ParserKeyword s_if_not_exists("IF NOT EXISTS"); ParserKeyword s_on("ON"); @@ -982,6 +984,8 @@ bool ParserCreateDictionaryQuery::parseImpl(IParser::Pos & pos, ASTPtr & node, E ParserDictionary dictionary_p; bool if_not_exists = false; + bool replace = false; + bool or_replace = false; ASTPtr name; ASTPtr attributes; @@ -989,13 +993,21 @@ bool ParserCreateDictionaryQuery::parseImpl(IParser::Pos & pos, ASTPtr & node, E String cluster_str; bool attach = false; - if (!s_create.ignore(pos, expected)) + + if (s_create.ignore(pos, expected)) { - if (s_attach.ignore(pos, expected)) - attach = true; - else - return false; + if (s_or_replace.ignore(pos, expected)) + { + replace = true; + or_replace = true; + } } + else if (s_attach.ignore(pos, expected)) + attach = true; + else if (s_replace.ignore(pos, expected)) + replace = true; + else + return false; if (!s_dictionary.ignore(pos, expected)) return false; @@ -1031,6 +1043,8 @@ bool ParserCreateDictionaryQuery::parseImpl(IParser::Pos & pos, ASTPtr & node, E node = query; query->is_dictionary = true; query->attach = attach; + query->create_or_replace = or_replace; + query->replace_table = replace; auto dict_id = name->as()->getTableId(); query->database = dict_id.database_name; diff --git a/src/Storages/StorageDictionary.cpp b/src/Storages/StorageDictionary.cpp index 5aeaff590e1..2c37f0ef641 100644 --- a/src/Storages/StorageDictionary.cpp +++ b/src/Storages/StorageDictionary.cpp @@ -167,7 +167,7 @@ Pipe StorageDictionary::read( const size_t max_block_size, const unsigned /*threads*/) { - auto dictionary = getContext()->getExternalDictionariesLoader().getDictionary(dictionary_name, local_context); + auto dictionary = getContext()->getExternalDictionariesLoader().getDictionary(getStorageID().getInternalDictionaryName(), local_context); auto stream = dictionary->getBlockInputStream(column_names, max_block_size); /// TODO: update dictionary interface for processors. 
return Pipe(std::make_shared(stream)); @@ -215,23 +215,30 @@ LoadablesConfigurationPtr StorageDictionary::getConfiguration() const void StorageDictionary::renameInMemory(const StorageID & new_table_id) { + auto previous_table_id = getStorageID(); + auto previous_dictionary_name = getStorageID().getInternalDictionaryName(); + auto new_dictionary_name = new_table_id.getInternalDictionaryName(); + + IStorage::renameInMemory(new_table_id); + + dictionary_name = new_dictionary_name; + if (configuration) { configuration->setString("dictionary.database", new_table_id.database_name); configuration->setString("dictionary.name", new_table_id.table_name); const auto & external_dictionaries_loader = getContext()->getExternalDictionariesLoader(); - external_dictionaries_loader.reloadConfig(getStorageID().getInternalDictionaryName()); + external_dictionaries_loader.reloadConfig(previous_dictionary_name); + + auto result = external_dictionaries_loader.getLoadResult(new_dictionary_name); - auto result = external_dictionaries_loader.getLoadResult(getStorageID().getInternalDictionaryName()); if (!result.object) return; const auto dictionary = std::static_pointer_cast(result.object); dictionary->updateDictionaryName(new_table_id); } - - IStorage::renameInMemory(new_table_id); } void registerStorageDictionary(StorageFactory & factory) diff --git a/src/Storages/StorageDictionary.h b/src/Storages/StorageDictionary.h index e2ba2964b1d..d074dec2c34 100644 --- a/src/Storages/StorageDictionary.h +++ b/src/Storages/StorageDictionary.h @@ -45,7 +45,7 @@ public: Poco::Timestamp getUpdateTime() const; LoadablesConfigurationPtr getConfiguration() const; - const String & getDictionaryName() const { return dictionary_name; } + String getDictionaryName() const { return dictionary_name; } /// Specifies where the table is located relative to the dictionary. 
enum class Location @@ -66,7 +66,7 @@ public: }; private: - const String dictionary_name; + String dictionary_name; const Location location; mutable std::mutex dictionary_config_mutex; diff --git a/tests/queries/0_stateless/01913_replace_dictionary.reference b/tests/queries/0_stateless/01913_replace_dictionary.reference new file mode 100644 index 00000000000..2d33c16ccc2 --- /dev/null +++ b/tests/queries/0_stateless/01913_replace_dictionary.reference @@ -0,0 +1,2 @@ +0 Value0 +0 Value1 diff --git a/tests/queries/0_stateless/01913_replace_dictionary.sql b/tests/queries/0_stateless/01913_replace_dictionary.sql new file mode 100644 index 00000000000..22b0bd002ae --- /dev/null +++ b/tests/queries/0_stateless/01913_replace_dictionary.sql @@ -0,0 +1,50 @@ +DROP DATABASE IF EXISTS 01913_db; +CREATE DATABASE 01913_db ENGINE=Atomic; + +DROP TABLE IF EXISTS 01913_db.test_source_table_1; +CREATE TABLE 01913_db.test_source_table_1 +( + id UInt64, + value String +) ENGINE=TinyLog; + +INSERT INTO 01913_db.test_source_table_1 VALUES (0, 'Value0'); + +DROP DICTIONARY IF EXISTS 01913_db.test_dictionary; +CREATE DICTIONARY 01913_db.test_dictionary +( + id UInt64, + value String +) +PRIMARY KEY id +LAYOUT(DIRECT()) +SOURCE(CLICKHOUSE(DB '01913_db' TABLE 'test_source_table_1')); + +SELECT * FROM 01913_db.test_dictionary; + +DROP TABLE IF EXISTS 01913_db.test_source_table_2; +CREATE TABLE 01913_db.test_source_table_2 +( + id UInt64, + value_1 String +) ENGINE=TinyLog; + +INSERT INTO 01913_db.test_source_table_2 VALUES (0, 'Value1'); + +REPLACE DICTIONARY 01913_db.test_dictionary +( + id UInt64, + value_1 String +) +PRIMARY KEY id +LAYOUT(HASHED()) +SOURCE(CLICKHOUSE(DB '01913_db' TABLE 'test_source_table_2')) +LIFETIME(0); + +SELECT * FROM 01913_db.test_dictionary; + +DROP DICTIONARY 01913_db.test_dictionary; +DROP TABLE 01913_db.test_source_table_1; +DROP TABLE 01913_db.test_source_table_2; + +DROP DATABASE 01913_db; From 7b03150b90fa7b238ea5c0427db789db97ee64f9 Mon Sep 17 00:00:00 2001 From: Maksim Kita Date: Fri, 18 Jun 2021 12:05:21 +0300 Subject: [PATCH 414/931] Fixed tests --- src/Parsers/ASTCreateQuery.cpp | 2 +- src/Storages/StorageDictionary.cpp | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/Parsers/ASTCreateQuery.cpp b/src/Parsers/ASTCreateQuery.cpp index 8d8a0a1840a..be973a988bd 100644 --- a/src/Parsers/ASTCreateQuery.cpp +++ b/src/Parsers/ASTCreateQuery.cpp @@ -314,7 +314,7 @@ void ASTCreateQuery::formatQueryImpl(const FormatSettings & settings, FormatStat action = "REPLACE"; /// Always DICTIONARY - settings.ostr << (settings.hilite ? hilite_keyword : "") << action << "DICTIONARY " + settings.ostr << (settings.hilite ? hilite_keyword : "") << action << " DICTIONARY " << (if_not_exists ? "IF NOT EXISTS " : "") << (settings.hilite ? hilite_none : "") << (!database.empty() ? backQuoteIfNeed(database) + "." 
: "") << backQuoteIfNeed(table); if (uuid != UUIDHelpers::Nil) diff --git a/src/Storages/StorageDictionary.cpp b/src/Storages/StorageDictionary.cpp index 2c37f0ef641..79c6247e358 100644 --- a/src/Storages/StorageDictionary.cpp +++ b/src/Storages/StorageDictionary.cpp @@ -167,7 +167,7 @@ Pipe StorageDictionary::read( const size_t max_block_size, const unsigned /*threads*/) { - auto dictionary = getContext()->getExternalDictionariesLoader().getDictionary(getStorageID().getInternalDictionaryName(), local_context); + auto dictionary = getContext()->getExternalDictionariesLoader().getDictionary(dictionary_name, local_context); auto stream = dictionary->getBlockInputStream(column_names, max_block_size); /// TODO: update dictionary interface for processors. return Pipe(std::make_shared(stream)); From 3c9ae7b5ba4b25573194a3c24aa702a9f03c3129 Mon Sep 17 00:00:00 2001 From: Maksim Kita Date: Fri, 18 Jun 2021 23:59:35 +0300 Subject: [PATCH 415/931] Added more tests --- src/Databases/DatabaseAtomic.cpp | 2 +- src/Storages/StorageDictionary.cpp | 22 ++++---- .../01914_exchange_dictionaries.reference | 4 ++ .../01914_exchange_dictionaries.sql | 34 +++++++++++++ ...915_create_or_replace_dictionary.reference | 2 + .../01915_create_or_replace_dictionary.sql | 50 +++++++++++++++++++ 6 files changed, 100 insertions(+), 14 deletions(-) create mode 100644 tests/queries/0_stateless/01914_exchange_dictionaries.reference create mode 100644 tests/queries/0_stateless/01914_exchange_dictionaries.sql create mode 100644 tests/queries/0_stateless/01915_create_or_replace_dictionary.reference create mode 100644 tests/queries/0_stateless/01915_create_or_replace_dictionary.sql diff --git a/src/Databases/DatabaseAtomic.cpp b/src/Databases/DatabaseAtomic.cpp index 6b8c470861d..7a529187c84 100644 --- a/src/Databases/DatabaseAtomic.cpp +++ b/src/Databases/DatabaseAtomic.cpp @@ -210,7 +210,7 @@ void DatabaseAtomic::renameTable(ContextPtr local_context, const String & table_ std::unique_lock other_db_lock; if (inside_database) db_lock = std::unique_lock{mutex}; - else if (this < &other_db) + else if (this < &other_db) { db_lock = std::unique_lock{mutex}; other_db_lock = std::unique_lock{other_db.mutex}; diff --git a/src/Storages/StorageDictionary.cpp b/src/Storages/StorageDictionary.cpp index 79c6247e358..4c31f62b21f 100644 --- a/src/Storages/StorageDictionary.cpp +++ b/src/Storages/StorageDictionary.cpp @@ -215,29 +215,25 @@ LoadablesConfigurationPtr StorageDictionary::getConfiguration() const void StorageDictionary::renameInMemory(const StorageID & new_table_id) { - auto previous_table_id = getStorageID(); - auto previous_dictionary_name = getStorageID().getInternalDictionaryName(); - auto new_dictionary_name = new_table_id.getInternalDictionaryName(); - + auto old_table_id = getStorageID(); IStorage::renameInMemory(new_table_id); - dictionary_name = new_dictionary_name; - if (configuration) { configuration->setString("dictionary.database", new_table_id.database_name); configuration->setString("dictionary.name", new_table_id.table_name); const auto & external_dictionaries_loader = getContext()->getExternalDictionariesLoader(); - external_dictionaries_loader.reloadConfig(previous_dictionary_name); + auto result = external_dictionaries_loader.getLoadResult(old_table_id.getInternalDictionaryName()); - auto result = external_dictionaries_loader.getLoadResult(new_dictionary_name); + if (result.object) + { + const auto dictionary = std::static_pointer_cast(result.object); + dictionary->updateDictionaryName(new_table_id); + } - if 
(!result.object) - return; - - const auto dictionary = std::static_pointer_cast(result.object); - dictionary->updateDictionaryName(new_table_id); + external_dictionaries_loader.reloadConfig(old_table_id.getInternalDictionaryName()); + dictionary_name = new_table_id.getFullNameNotQuoted(); } } diff --git a/tests/queries/0_stateless/01914_exchange_dictionaries.reference b/tests/queries/0_stateless/01914_exchange_dictionaries.reference new file mode 100644 index 00000000000..9278d0abeed --- /dev/null +++ b/tests/queries/0_stateless/01914_exchange_dictionaries.reference @@ -0,0 +1,4 @@ +1 Table1 +2 Table2 +2 Table2 +1 Table1 diff --git a/tests/queries/0_stateless/01914_exchange_dictionaries.sql b/tests/queries/0_stateless/01914_exchange_dictionaries.sql new file mode 100644 index 00000000000..ba0c70d13be --- /dev/null +++ b/tests/queries/0_stateless/01914_exchange_dictionaries.sql @@ -0,0 +1,34 @@ +DROP TABLE IF EXISTS table_1; +CREATE TABLE table_1 (id UInt64, value String) ENGINE=TinyLog; + +DROP TABLE IF EXISTS table_2; +CREATE TABLE table_2 (id UInt64, value String) ENGINE=TinyLog; + +INSERT INTO table_1 VALUES (1, 'Table1'); +INSERT INTO table_2 VALUES (2, 'Table2'); + +DROP DICTIONARY IF EXISTS dictionary_1; +CREATE DICTIONARY dictionary_1 (id UInt64, value String) +PRIMARY KEY id +LAYOUT(DIRECT()) +SOURCE(CLICKHOUSE(TABLE 'table_1')); + +DROP DICTIONARY IF EXISTS dictionary_2; +CREATE DICTIONARY dictionary_2 (id UInt64, value String) +PRIMARY KEY id +LAYOUT(DIRECT()) +SOURCE(CLICKHOUSE(TABLE 'table_2')); + +SELECT * FROM dictionary_1; +SELECT * FROM dictionary_2; + +EXCHANGE DICTIONARIES dictionary_1 AND dictionary_2; + +SELECT * FROM dictionary_1; +SELECT * FROM dictionary_2; + +DROP DICTIONARY dictionary_1; +DROP DICTIONARY dictionary_2; + +DROP TABLE table_1; +DROP TABLE table_2; diff --git a/tests/queries/0_stateless/01915_create_or_replace_dictionary.reference b/tests/queries/0_stateless/01915_create_or_replace_dictionary.reference new file mode 100644 index 00000000000..2d33c16ccc2 --- /dev/null +++ b/tests/queries/0_stateless/01915_create_or_replace_dictionary.reference @@ -0,0 +1,2 @@ +0 Value0 +0 Value1 diff --git a/tests/queries/0_stateless/01915_create_or_replace_dictionary.sql b/tests/queries/0_stateless/01915_create_or_replace_dictionary.sql new file mode 100644 index 00000000000..5d5515f4f8a --- /dev/null +++ b/tests/queries/0_stateless/01915_create_or_replace_dictionary.sql @@ -0,0 +1,50 @@ +DROP DATABASE IF EXISTS 01915_db; +CREATE DATABASE 01915_db ENGINE=Atomic; + +DROP TABLE IF EXISTS 01915_db.test_source_table_1; +CREATE TABLE 01915_db.test_source_table_1 +( + id UInt64, + value String +) ENGINE=TinyLog; + +INSERT INTO 01915_db.test_source_table_1 VALUES (0, 'Value0'); + +DROP DICTIONARY IF EXISTS 01915_db.test_dictionary; +CREATE OR REPLACE DICTIONARY 01915_db.test_dictionary +( + id UInt64, + value String +) +PRIMARY KEY id +LAYOUT(DIRECT()) +SOURCE(CLICKHOUSE(DB '01915_db' TABLE 'test_source_table_1')); + +SELECT * FROM 01915_db.test_dictionary; + +DROP TABLE IF EXISTS 01915_db.test_source_table_2; +CREATE TABLE 01915_db.test_source_table_2 +( + id UInt64, + value_1 String +) ENGINE=TinyLog; + +INSERT INTO 01915_db.test_source_table_2 VALUES (0, 'Value1'); + +CREATE OR REPLACE DICTIONARY 01915_db.test_dictionary +( + id UInt64, + value_1 String +) +PRIMARY KEY id +LAYOUT(HASHED()) +SOURCE(CLICKHOUSE(DB '01915_db' TABLE 'test_source_table_2')) +LIFETIME(0); + +SELECT * FROM 01915_db.test_dictionary; + +DROP DICTIONARY 01915_db.test_dictionary; +DROP TABLE 
01915_db.test_source_table_1; +DROP TABLE 01915_db.test_source_table_2; + +DROP DATABASE 01915_db; From 44122118de6bd119b8766e9c28b42fb54f70e920 Mon Sep 17 00:00:00 2001 From: Maksim Kita Date: Fri, 25 Jun 2021 23:06:07 +0300 Subject: [PATCH 416/931] Fixed ASTRenameQuery for EXCHANGE DICTIONARIES --- src/Parsers/ASTRenameQuery.h | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/src/Parsers/ASTRenameQuery.h b/src/Parsers/ASTRenameQuery.h index 8d300f430b3..611f81dc9e9 100644 --- a/src/Parsers/ASTRenameQuery.h +++ b/src/Parsers/ASTRenameQuery.h @@ -75,12 +75,15 @@ protected: } settings.ostr << (settings.hilite ? hilite_keyword : ""); - if (exchange) + if (exchange && dictionary) + settings.ostr << "EXCHANGE DICTIONARIES "; + else if (exchange) settings.ostr << "EXCHANGE TABLES "; else if (dictionary) settings.ostr << "RENAME DICTIONARY "; else settings.ostr << "RENAME TABLE "; + settings.ostr << (settings.hilite ? hilite_none : ""); for (auto it = elements.cbegin(); it != elements.cend(); ++it) From 99689484aa98a2ac76eb219d949ffa801228141d Mon Sep 17 00:00:00 2001 From: Maksim Kita Date: Sat, 26 Jun 2021 14:13:30 +0300 Subject: [PATCH 417/931] Fixed ANTRL tests --- tests/queries/skip_list.json | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/tests/queries/skip_list.json b/tests/queries/skip_list.json index 52c2d468498..fcfd5192ce9 100644 --- a/tests/queries/skip_list.json +++ b/tests/queries/skip_list.json @@ -516,7 +516,10 @@ "01913_if_int_decimal", "01913_join_push_down_bug", "01921_with_fill_with_totals", - "01924_argmax_bitmap_state" + "01924_argmax_bitmap_state", + "01913_replace_dictionary", + "01914_exchange_dictionaries", + "01915_create_or_replace_dictionary" ], "parallel": [ From 28c5a14922a2e578a0fbc616189b5cc439efc237 Mon Sep 17 00:00:00 2001 From: Kseniia Sumarokova <54203879+kssenii@users.noreply.github.com> Date: Sat, 26 Jun 2021 15:50:15 +0300 Subject: [PATCH 418/931] Fix clang-tidy --- src/Storages/StorageMerge.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Storages/StorageMerge.h b/src/Storages/StorageMerge.h index a9f5b4b8a86..068008170ca 100644 --- a/src/Storages/StorageMerge.h +++ b/src/Storages/StorageMerge.h @@ -99,7 +99,7 @@ protected: const QueryProcessingStage::Enum & processed_stage, UInt64 max_block_size, const Block & header, - const Aliases & aliaes, + const Aliases & aliases, const StorageWithLockAndName & storage_with_lock, Names & real_column_names, ContextMutablePtr modified_context, From 9788b0e38a9a6f93f3f618fe4ce4684bc33f00bf Mon Sep 17 00:00:00 2001 From: Denis Glazachev Date: Sat, 26 Jun 2021 17:39:02 +0400 Subject: [PATCH 419/931] Fix locating objcopy in macOS Rework clickhouse_embed_binaries() to compile asm files properly and avoid duplicate symbols when linking in macOS --- CMakeLists.txt | 21 +++++++++-- cmake/embed_binary.cmake | 61 +++++++++++++------------------- docs/en/development/build-osx.md | 2 +- 3 files changed, 44 insertions(+), 40 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 9cf8188cc8e..d23e5f540d3 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -184,10 +184,27 @@ endif () set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -rdynamic") find_program (OBJCOPY_PATH NAMES "llvm-objcopy" "llvm-objcopy-12" "llvm-objcopy-11" "llvm-objcopy-10" "llvm-objcopy-9" "llvm-objcopy-8" "objcopy") + +if (NOT OBJCOPY_PATH AND OS_DARWIN) + find_program (BREW_PATH NAMES "brew") + if (BREW_PATH) + execute_process (COMMAND ${BREW_PATH} --prefix llvm ERROR_QUIET 
OUTPUT_STRIP_TRAILING_WHITESPACE OUTPUT_VARIABLE LLVM_PREFIX) + if (LLVM_PREFIX) + find_program (OBJCOPY_PATH NAMES "llvm-objcopy" PATHS "${LLVM_PREFIX}/bin" NO_DEFAULT_PATH) + endif () + if (NOT OBJCOPY_PATH) + execute_process (COMMAND ${BREW_PATH} --prefix binutils ERROR_QUIET OUTPUT_STRIP_TRAILING_WHITESPACE OUTPUT_VARIABLE BINUTILS_PREFIX) + if (BINUTILS_PREFIX) + find_program (OBJCOPY_PATH NAMES "objcopy" PATHS "${BINUTILS_PREFIX}/bin" NO_DEFAULT_PATH) + endif () + endif () + endif () +endif () + if (OBJCOPY_PATH) - message(STATUS "Using objcopy: ${OBJCOPY_PATH}.") + message (STATUS "Using objcopy: ${OBJCOPY_PATH}") else () - message(FATAL_ERROR "Cannot find objcopy.") + message (FATAL_ERROR "Cannot find objcopy.") endif () if (OS_DARWIN) diff --git a/cmake/embed_binary.cmake b/cmake/embed_binary.cmake index e132c590520..a87c63d714e 100644 --- a/cmake/embed_binary.cmake +++ b/cmake/embed_binary.cmake @@ -33,51 +33,38 @@ macro(clickhouse_embed_binaries) message(FATAL_ERROR "The list of binary resources to embed may not be empty") endif() - # If cross-compiling, ensure we use the toolchain file and target the actual target architecture - if (CMAKE_CROSSCOMPILING) - set(CROSS_COMPILE_FLAGS --target=${CMAKE_C_COMPILER_TARGET}) - - # FIXME: find a way to properly pass all cross-compile flags to custom command in CMake - if (CMAKE_SYSTEM_NAME STREQUAL "Darwin") - list(APPEND CROSS_COMPILE_FLAGS -isysroot ${CMAKE_OSX_SYSROOT} -mmacosx-version-min=${CMAKE_OSX_DEPLOYMENT_TARGET}) - else () - list(APPEND CROSS_COMPILE_FLAGS -isysroot ${CMAKE_SYSROOT}) - endif () - else() - set(CROSS_COMPILE_FLAGS "") - endif() + add_library("${EMBED_TARGET}" STATIC) + set_target_properties("${EMBED_TARGET}" PROPERTIES LINKER_LANGUAGE C) set(EMBED_TEMPLATE_FILE "${PROJECT_SOURCE_DIR}/programs/embed_binary.S.in") - set(RESOURCE_OBJS) - foreach(RESOURCE_FILE ${EMBED_RESOURCES}) - set(RESOURCE_OBJ "${RESOURCE_FILE}.o") - list(APPEND RESOURCE_OBJS "${RESOURCE_OBJ}") - # Normalize the name of the resource + foreach(RESOURCE_FILE ${EMBED_RESOURCES}) + set(ASSEMBLY_FILE_NAME "${RESOURCE_FILE}.S") set(BINARY_FILE_NAME "${RESOURCE_FILE}") + + # Normalize the name of the resource. string(REGEX REPLACE "[\./-]" "_" SYMBOL_NAME "${RESOURCE_FILE}") # - must be last in regex string(REPLACE "+" "_PLUS_" SYMBOL_NAME "${SYMBOL_NAME}") - set(ASSEMBLY_FILE_NAME "${RESOURCE_FILE}.S") - # Put the configured assembly file in the output directory. - # This is so we can clean it up as usual, and we CD to the - # source directory before compiling, so that the assembly - # `.incbin` directive can find the file. + # Generate the configured assembly file in the output directory. configure_file("${EMBED_TEMPLATE_FILE}" "${CMAKE_CURRENT_BINARY_DIR}/${ASSEMBLY_FILE_NAME}" @ONLY) - # Generate the output object file by compiling the assembly, in the directory of - # the sources so that the resource file may also be found - add_custom_command( - OUTPUT ${RESOURCE_OBJ} - COMMAND cd "${EMBED_RESOURCE_DIR}" && - ${CMAKE_C_COMPILER} "${CROSS_COMPILE_FLAGS}" -c -o - "${CMAKE_CURRENT_BINARY_DIR}/${RESOURCE_OBJ}" - "${CMAKE_CURRENT_BINARY_DIR}/${ASSEMBLY_FILE_NAME}" - COMMAND_EXPAND_LISTS - ) - set_source_files_properties("${RESOURCE_OBJ}" PROPERTIES EXTERNAL_OBJECT true GENERATED true) - endforeach() + # If cross-compiling, ensure we use the toolchain file and target the actual target architecture. 
+ if(CMAKE_CROSSCOMPILING) + set_property(SOURCE "${CMAKE_CURRENT_BINARY_DIR}/${ASSEMBLY_FILE_NAME}" APPEND PROPERTY COMPILE_FLAGS "--target=${CMAKE_C_COMPILER_TARGET}") - add_library("${EMBED_TARGET}" STATIC ${RESOURCE_OBJS}) - set_target_properties("${EMBED_TARGET}" PROPERTIES LINKER_LANGUAGE C) + # FIXME: find a way to properly pass all cross-compile flags. + if(OS_DARWIN) + set_property(SOURCE "${CMAKE_CURRENT_BINARY_DIR}/${ASSEMBLY_FILE_NAME}" APPEND PROPERTY COMPILE_FLAGS "-isysroot ${CMAKE_OSX_SYSROOT}") + set_property(SOURCE "${CMAKE_CURRENT_BINARY_DIR}/${ASSEMBLY_FILE_NAME}" APPEND PROPERTY COMPILE_FLAGS "-mmacosx-version-min=${CMAKE_OSX_DEPLOYMENT_TARGET}") + else() + set_property(SOURCE "${CMAKE_CURRENT_BINARY_DIR}/${ASSEMBLY_FILE_NAME}" APPEND PROPERTY COMPILE_FLAGS "-isysroot ${CMAKE_SYSROOT}") + endif() + endif() + + # Set the include directory for relative paths specified for `.incbin` directive. + set_property(SOURCE "${CMAKE_CURRENT_BINARY_DIR}/${ASSEMBLY_FILE_NAME}" APPEND PROPERTY INCLUDE_DIRECTORIES "${EMBED_RESOURCE_DIR}") + + target_sources("${EMBED_TARGET}" PRIVATE "${CMAKE_CURRENT_BINARY_DIR}/${ASSEMBLY_FILE_NAME}") + endforeach() endmacro() diff --git a/docs/en/development/build-osx.md b/docs/en/development/build-osx.md index a862bdeb299..687e0179e07 100644 --- a/docs/en/development/build-osx.md +++ b/docs/en/development/build-osx.md @@ -33,7 +33,7 @@ Reboot. ``` bash brew update -brew install cmake ninja libtool gettext llvm gcc +brew install cmake ninja libtool gettext llvm gcc binutils ``` ## Checkout ClickHouse Sources {#checkout-clickhouse-sources} From 4da1b8154ae16a67893fd699670c3eeb30be0eb9 Mon Sep 17 00:00:00 2001 From: Amos Bird Date: Sat, 26 Jun 2021 22:15:57 +0800 Subject: [PATCH 420/931] Fix data race in getClusters() --- src/Interpreters/Context.cpp | 14 +++++++------- src/Interpreters/Context.h | 2 +- src/Storages/System/StorageSystemClusters.cpp | 2 +- .../System/StorageSystemDDLWorkerQueue.cpp | 4 ++-- 4 files changed, 11 insertions(+), 11 deletions(-) diff --git a/src/Interpreters/Context.cpp b/src/Interpreters/Context.cpp index dcddeef2811..899550bffec 100644 --- a/src/Interpreters/Context.cpp +++ b/src/Interpreters/Context.cpp @@ -394,7 +394,7 @@ struct ContextSharedPart /// Clusters for distributed tables /// Initialized on demand (on distributed storages initialization) since Settings should be initialized - std::unique_ptr clusters; + std::shared_ptr clusters; ConfigurationPtr clusters_config; /// Stores updated configs mutable std::mutex clusters_mutex; /// Guards clusters and clusters_config @@ -1882,7 +1882,7 @@ std::optional Context::getTCPPortSecure() const std::shared_ptr Context::getCluster(const std::string & cluster_name) const { - auto res = getClusters().getCluster(cluster_name); + auto res = getClusters()->getCluster(cluster_name); if (res) return res; @@ -1896,7 +1896,7 @@ std::shared_ptr Context::getCluster(const std::string & cluster_name) c std::shared_ptr Context::tryGetCluster(const std::string & cluster_name) const { - return getClusters().getCluster(cluster_name); + return getClusters()->getCluster(cluster_name); } @@ -1911,7 +1911,7 @@ void Context::reloadClusterConfig() const } const auto & config = cluster_config ? 
*cluster_config : getConfigRef(); - auto new_clusters = std::make_unique(config, settings); + auto new_clusters = std::make_shared(config, settings); { std::lock_guard lock(shared->clusters_mutex); @@ -1927,16 +1927,16 @@ void Context::reloadClusterConfig() const } -Clusters & Context::getClusters() const +std::shared_ptr Context::getClusters() const { std::lock_guard lock(shared->clusters_mutex); if (!shared->clusters) { const auto & config = shared->clusters_config ? *shared->clusters_config : getConfigRef(); - shared->clusters = std::make_unique(config, settings); + shared->clusters = std::make_shared(config, settings); } - return *shared->clusters; + return shared->clusters; } diff --git a/src/Interpreters/Context.h b/src/Interpreters/Context.h index c673eb0d408..7990bd7420b 100644 --- a/src/Interpreters/Context.h +++ b/src/Interpreters/Context.h @@ -676,7 +676,7 @@ public: void setDDLWorker(std::unique_ptr ddl_worker); DDLWorker & getDDLWorker() const; - Clusters & getClusters() const; + std::shared_ptr getClusters() const; std::shared_ptr getCluster(const std::string & cluster_name) const; std::shared_ptr tryGetCluster(const std::string & cluster_name) const; void setClustersConfig(const ConfigurationPtr & config, const String & config_name = "remote_servers"); diff --git a/src/Storages/System/StorageSystemClusters.cpp b/src/Storages/System/StorageSystemClusters.cpp index 8a3227aafdb..1f5def6d6b4 100644 --- a/src/Storages/System/StorageSystemClusters.cpp +++ b/src/Storages/System/StorageSystemClusters.cpp @@ -31,7 +31,7 @@ NamesAndTypesList StorageSystemClusters::getNamesAndTypes() void StorageSystemClusters::fillData(MutableColumns & res_columns, ContextPtr context, const SelectQueryInfo &) const { - for (const auto & name_and_cluster : context->getClusters().getContainer()) + for (const auto & name_and_cluster : context->getClusters()->getContainer()) writeCluster(res_columns, name_and_cluster); const auto databases = DatabaseCatalog::instance().getDatabases(); diff --git a/src/Storages/System/StorageSystemDDLWorkerQueue.cpp b/src/Storages/System/StorageSystemDDLWorkerQueue.cpp index 98b15bfa6e2..5b9ed938e22 100644 --- a/src/Storages/System/StorageSystemDDLWorkerQueue.cpp +++ b/src/Storages/System/StorageSystemDDLWorkerQueue.cpp @@ -130,8 +130,8 @@ void StorageSystemDDLWorkerQueue::fillData(MutableColumns & res_columns, Context if (code != Coordination::Error::ZOK && code != Coordination::Error::ZNONODE) zk_exception_code = code; - const auto & clusters = context->getClusters(); - for (const auto & name_and_cluster : clusters.getContainer()) + const auto clusters = context->getClusters(); + for (const auto & name_and_cluster : clusters->getContainer()) { const ClusterPtr & cluster = name_and_cluster.second; const auto & shards_info = cluster->getShardsInfo(); From ffe49589a1d8fc2d97acdf3524b7679052206b94 Mon Sep 17 00:00:00 2001 From: Alexey Date: Sat, 26 Jun 2021 16:08:23 +0000 Subject: [PATCH 421/931] Description draft --- .../reference/quantilebfloat16.md | 68 +++++++++++++++++++ 1 file changed, 68 insertions(+) create mode 100644 docs/en/sql-reference/aggregate-functions/reference/quantilebfloat16.md diff --git a/docs/en/sql-reference/aggregate-functions/reference/quantilebfloat16.md b/docs/en/sql-reference/aggregate-functions/reference/quantilebfloat16.md new file mode 100644 index 00000000000..00bec8e00db --- /dev/null +++ b/docs/en/sql-reference/aggregate-functions/reference/quantilebfloat16.md @@ -0,0 +1,68 @@ +--- +toc_priority: 209 +--- + +# quantileBFloat16 
{#quantilebfloat16} + + + +# Is there a BFloat16 data type in ClickHouse? + # How conversion to BFloat16 is made? +# Does quantile calculations have general implementation? And some methods are implemented to support BFloat16? +# Is quantile calculation is really table based as stated somewhere in PR + +# Perhaps add quantilesBFloat16 to page quantiles.md +# Add alias to page median.md + +Calculates a [quantile](https://en.wikipedia.org/wiki/Quantile#Estimating_quantiles_from_a_sample) of a sample consisting of [bfloat16](https://en.wikipedia.org/wiki/Bfloat16_floating-point_format) numbers. +bfloat16 is a floating point data type with 1 sign bit, 8 exponent bits and 7 fraction bits. The function converts input values to 32-bit floats and then take the most significant 16 bits. Then it calculates the histogram of these values. Calculated bfloat16 value is converted to 64-bit float data type by appending zero bits. +The function is a fast quantile estimator with a relative error no more than 0.390625%. + +**Syntax** + +``` sql +quantileBFloat16[(level)](expr) +``` + +Alias: `medianBFloat16` + +**Arguments** + +- `expr` — sample data. [Integer](../../../sql-reference/data-types/int-uint.md), [Float](../../../sql-reference/data-types/float.md). + +**Parameters** + +- `level` — Level of quantile. Optional. Possible values are in a range from 0 to 1. Default value: 0.5. [Float](../../../sql-reference/data-types/float.md). + +**Returned value** + +- Approximate quantile of the specified level. + +Type: [Float64](../../../sql-reference/data-types/float.md#float32-float64). + +**Example** + +Input table has an integer and a float columns: + +``` text +┌─a─┬─────b─┐ +│ 1 │ 1.001 │ +│ 2 │ 1.002 │ +│ 3 │ 1.003 │ +└───┴───────┘ +``` + +Query: + +``` sql +SELECT quantilesBFloat16(0.75)(a), quantilesBFloat16(0.75)(b) FROM example_table; +``` + +Result: + +``` text +┌─quantilesBFloat16(0.75)(a)─┬─quantilesBFloat16(0.75)(b)─┐ +│ [3] │ [1] │ +└────────────────────────────┴────────────────────────────┘ +``` +Note that all floating point values were truncated to 1.0 when converting to bfloat16. From 27ba48ebe76482ed11691db0c24d5836af6b5252 Mon Sep 17 00:00:00 2001 From: Alexey Date: Sat, 26 Jun 2021 16:13:55 +0000 Subject: [PATCH 422/931] Aliases added --- .../aggregate-functions/reference/median.md | 1 + .../aggregate-functions/reference/quantiles.md | 10 +++++++++- 2 files changed, 10 insertions(+), 1 deletion(-) diff --git a/docs/en/sql-reference/aggregate-functions/reference/median.md b/docs/en/sql-reference/aggregate-functions/reference/median.md index b4f38a9b562..b15a97f0992 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/median.md +++ b/docs/en/sql-reference/aggregate-functions/reference/median.md @@ -12,6 +12,7 @@ Functions: - `medianTimingWeighted` — Alias for [quantileTimingWeighted](#quantiletimingweighted). - `medianTDigest` — Alias for [quantileTDigest](#quantiletdigest). - `medianTDigestWeighted` — Alias for [quantileTDigestWeighted](#quantiletdigestweighted). +- `medianBFloat16` — Alias for [quantileBFloat16](#quantilebfloat16). 
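To illustrate the bfloat16 rounding the new quantileBFloat16 page describes (keep the 16 most significant bits of a 32-bit float, then widen back with zero bits), here is a minimal C++ sketch; it is illustrative only and not code from this patch series:

```cpp
#include <cstdint>
#include <cstring>
#include <initializer_list>
#include <iostream>

/// Keep only the 16 most significant bits of a float32 - the bfloat16
/// truncation described above - then widen back by appending zero bits.
static float roundToBFloat16(float x)
{
    uint32_t bits;
    std::memcpy(&bits, &x, sizeof(bits));
    bits &= 0xFFFF0000u;              /// drop the 16 low fraction bits
    std::memcpy(&x, &bits, sizeof(bits));
    return x;
}

int main()
{
    for (float v : {1.001f, 1.002f, 1.003f})
        std::cout << roundToBFloat16(v) << '\n';   /// each prints 1
}
```

This is why the example values 1.001, 1.002 and 1.003 in the doc's result table all collapse to 1 after conversion to bfloat16.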
**Example** diff --git a/docs/en/sql-reference/aggregate-functions/reference/quantiles.md b/docs/en/sql-reference/aggregate-functions/reference/quantiles.md index abce6a9e7f0..49a2df57bf5 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/quantiles.md +++ b/docs/en/sql-reference/aggregate-functions/reference/quantiles.md @@ -6,4 +6,12 @@ toc_priority: 201 Syntax: `quantiles(level1, level2, …)(x)` -All the quantile functions also have corresponding quantiles functions: `quantiles`, `quantilesDeterministic`, `quantilesTiming`, `quantilesTimingWeighted`, `quantilesExact`, `quantilesExactWeighted`, `quantilesTDigest`. These functions calculate all the quantiles of the listed levels in one pass, and return an array of the resulting values. +All the quantile functions also have corresponding quantiles functions. They calculate quantiles of all listed levels in one pass, and return them as an array. + +- `quantiles`; +- `quantilesDeterministic`; +- `quantilesTiming`; +- `quantilesTimingWeighted`; +- `quantilesExact`; +- `quantilesExactWeighted`; +- `quantilesTDigest`. From ccddb60f453a9020e533b457710b1e77414d6e7c Mon Sep 17 00:00:00 2001 From: Alexey Date: Sat, 26 Jun 2021 16:15:37 +0000 Subject: [PATCH 423/931] Removed comments --- .../aggregate-functions/reference/quantilebfloat16.md | 10 ---------- 1 file changed, 10 deletions(-) diff --git a/docs/en/sql-reference/aggregate-functions/reference/quantilebfloat16.md b/docs/en/sql-reference/aggregate-functions/reference/quantilebfloat16.md index 00bec8e00db..9e18dff423e 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/quantilebfloat16.md +++ b/docs/en/sql-reference/aggregate-functions/reference/quantilebfloat16.md @@ -4,16 +4,6 @@ toc_priority: 209 # quantileBFloat16 {#quantilebfloat16} - - -# Is there a BFloat16 data type in ClickHouse? - # How conversion to BFloat16 is made? -# Does quantile calculations have general implementation? And some methods are implemented to support BFloat16? -# Is quantile calculation is really table based as stated somewhere in PR - -# Perhaps add quantilesBFloat16 to page quantiles.md -# Add alias to page median.md - Calculates a [quantile](https://en.wikipedia.org/wiki/Quantile#Estimating_quantiles_from_a_sample) of a sample consisting of [bfloat16](https://en.wikipedia.org/wiki/Bfloat16_floating-point_format) numbers. bfloat16 is a floating point data type with 1 sign bit, 8 exponent bits and 7 fraction bits. The function converts input values to 32-bit floats and then take the most significant 16 bits. Then it calculates the histogram of these values. Calculated bfloat16 value is converted to 64-bit float data type by appending zero bits. The function is a fast quantile estimator with a relative error no more than 0.390625%. From 7b3996c603888b1999750613cf7cc49829893d98 Mon Sep 17 00:00:00 2001 From: Denis Glazachev Date: Sat, 26 Jun 2021 21:52:37 +0400 Subject: [PATCH 424/931] Remove manual flag adjusting - cross compilations should be configured automatically --- cmake/embed_binary.cmake | 13 ------------- 1 file changed, 13 deletions(-) diff --git a/cmake/embed_binary.cmake b/cmake/embed_binary.cmake index a87c63d714e..d15962c05d4 100644 --- a/cmake/embed_binary.cmake +++ b/cmake/embed_binary.cmake @@ -49,19 +49,6 @@ macro(clickhouse_embed_binaries) # Generate the configured assembly file in the output directory. 
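To make the reworked macro easier to follow, a hedged sketch of a call site; the TARGET/RESOURCE_DIR/RESOURCES keyword names are inferred from the EMBED_* variables in the macro body, and the target and resource names are hypothetical:

```cmake
# Hypothetical invocation: embed two XML files into a static library and link it.
# The generated .S files use `.incbin`, which resolves the resource names
# relative to RESOURCE_DIR, which the macro registers as an include directory.
clickhouse_embed_binaries(
    TARGET example_embedded_configs
    RESOURCE_DIR "${CMAKE_CURRENT_SOURCE_DIR}/configs"
    RESOURCES config.xml users.xml
)
target_link_libraries(example_server PRIVATE example_embedded_configs)
```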
configure_file("${EMBED_TEMPLATE_FILE}" "${CMAKE_CURRENT_BINARY_DIR}/${ASSEMBLY_FILE_NAME}" @ONLY) - # If cross-compiling, ensure we use the toolchain file and target the actual target architecture. - if(CMAKE_CROSSCOMPILING) - set_property(SOURCE "${CMAKE_CURRENT_BINARY_DIR}/${ASSEMBLY_FILE_NAME}" APPEND PROPERTY COMPILE_FLAGS "--target=${CMAKE_C_COMPILER_TARGET}") - - # FIXME: find a way to properly pass all cross-compile flags. - if(OS_DARWIN) - set_property(SOURCE "${CMAKE_CURRENT_BINARY_DIR}/${ASSEMBLY_FILE_NAME}" APPEND PROPERTY COMPILE_FLAGS "-isysroot ${CMAKE_OSX_SYSROOT}") - set_property(SOURCE "${CMAKE_CURRENT_BINARY_DIR}/${ASSEMBLY_FILE_NAME}" APPEND PROPERTY COMPILE_FLAGS "-mmacosx-version-min=${CMAKE_OSX_DEPLOYMENT_TARGET}") - else() - set_property(SOURCE "${CMAKE_CURRENT_BINARY_DIR}/${ASSEMBLY_FILE_NAME}" APPEND PROPERTY COMPILE_FLAGS "-isysroot ${CMAKE_SYSROOT}") - endif() - endif() - # Set the include directory for relative paths specified for `.incbin` directive. set_property(SOURCE "${CMAKE_CURRENT_BINARY_DIR}/${ASSEMBLY_FILE_NAME}" APPEND PROPERTY INCLUDE_DIRECTORIES "${EMBED_RESOURCE_DIR}") From d48f5227ea3182957dae190bb58177933d7a408d Mon Sep 17 00:00:00 2001 From: Alexey Date: Sat, 26 Jun 2021 20:04:29 +0000 Subject: [PATCH 425/931] Fixes and updates --- .../reference/quantilebfloat16.md | 15 ++++++++++----- .../aggregate-functions/reference/quantiles.md | 3 ++- 2 files changed, 12 insertions(+), 6 deletions(-) diff --git a/docs/en/sql-reference/aggregate-functions/reference/quantilebfloat16.md b/docs/en/sql-reference/aggregate-functions/reference/quantilebfloat16.md index 9e18dff423e..87b7e96dd7e 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/quantilebfloat16.md +++ b/docs/en/sql-reference/aggregate-functions/reference/quantilebfloat16.md @@ -4,8 +4,8 @@ toc_priority: 209 # quantileBFloat16 {#quantilebfloat16} -Calculates a [quantile](https://en.wikipedia.org/wiki/Quantile#Estimating_quantiles_from_a_sample) of a sample consisting of [bfloat16](https://en.wikipedia.org/wiki/Bfloat16_floating-point_format) numbers. -bfloat16 is a floating point data type with 1 sign bit, 8 exponent bits and 7 fraction bits. The function converts input values to 32-bit floats and then take the most significant 16 bits. Then it calculates the histogram of these values. Calculated bfloat16 value is converted to 64-bit float data type by appending zero bits. +Calculates a [quantile](https://en.wikipedia.org/wiki/Quantile#Estimating_quantiles_from_a_sample) of a sample consisting of [bfloat16](https://en.wikipedia.org/wiki/Bfloat16_floating-point_format) numbers. bfloat16 is a floating-point data type with 1 sign bit, 8 exponent bits and 7 fraction bits. +The function converts input values to 32-bit floats and takes the most significant 16 bits. Then it calculates the histogram of these values. Resulting value is converted to 64-bit float by appending zero bits. The function is a fast quantile estimator with a relative error no more than 0.390625%. **Syntax** @@ -18,11 +18,11 @@ Alias: `medianBFloat16` **Arguments** -- `expr` — sample data. [Integer](../../../sql-reference/data-types/int-uint.md), [Float](../../../sql-reference/data-types/float.md). +- `expr` — Column with numeric data. [Integer](../../../sql-reference/data-types/int-uint.md), [Float](../../../sql-reference/data-types/float.md). **Parameters** -- `level` — Level of quantile. Optional. Possible values are in a range from 0 to 1. Default value: 0.5. [Float](../../../sql-reference/data-types/float.md). 
+- `level` — Level of quantile. Optional. Possible values are in the range from 0 to 1. Default value: 0.5. [Float](../../../sql-reference/data-types/float.md). **Returned value** @@ -55,4 +55,9 @@ Result: │ [3] │ [1] │ └────────────────────────────┴────────────────────────────┘ ``` -Note that all floating point values were truncated to 1.0 when converting to bfloat16. +Note that all floating point values in the example are truncated to 1.0 when converting to bfloat16. + +**See Also** + +- [median](../../../sql-reference/aggregate-functions/reference/median.md#median) +- [quantiles](../../../sql-reference/aggregate-functions/reference/quantiles.md#quantiles) diff --git a/docs/en/sql-reference/aggregate-functions/reference/quantiles.md b/docs/en/sql-reference/aggregate-functions/reference/quantiles.md index 49a2df57bf5..766766d2f94 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/quantiles.md +++ b/docs/en/sql-reference/aggregate-functions/reference/quantiles.md @@ -14,4 +14,5 @@ All the quantile functions also have corresponding quantiles functions. They cal - `quantilesTimingWeighted`; - `quantilesExact`; - `quantilesExactWeighted`; -- `quantilesTDigest`. +- `quantilesTDigest`; +- `quantilesBFloat16`; From 024cf55252da50dc7cba93370f0185089871a55e Mon Sep 17 00:00:00 2001 From: Alexey Date: Sat, 26 Jun 2021 20:10:24 +0000 Subject: [PATCH 426/931] toc_priority set on median page to place it after quantiles --- docs/en/sql-reference/aggregate-functions/reference/median.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/docs/en/sql-reference/aggregate-functions/reference/median.md b/docs/en/sql-reference/aggregate-functions/reference/median.md index b15a97f0992..b309b20fd5f 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/median.md +++ b/docs/en/sql-reference/aggregate-functions/reference/median.md @@ -1,3 +1,7 @@ +--- +toc_priority: 212 +--- + # median {#median} The `median*` functions are the aliases for the corresponding `quantile*` functions. They calculate median of a numeric data sample. From 5e5aa409eed0f2301582ffbac9c352bd08739f14 Mon Sep 17 00:00:00 2001 From: gyuton <40863448+gyuton@users.noreply.github.com> Date: Sun, 27 Jun 2021 01:00:39 +0300 Subject: [PATCH 427/931] Apply suggestions from code review Co-authored-by: olgarev <56617294+olgarev@users.noreply.github.com> --- docs/en/operations/settings/settings.md | 24 ++++++++--------- .../aggregate-functions/reference/count.md | 2 +- docs/en/sql-reference/data-types/map.md | 26 +++---------------- .../functions/array-functions.md | 6 ++--- .../functions/tuple-map-functions.md | 4 +-- docs/en/sql-reference/operators/index.md | 4 +-- 6 files changed, 24 insertions(+), 42 deletions(-) diff --git a/docs/en/operations/settings/settings.md b/docs/en/operations/settings/settings.md index 6190b9b030b..5cb10720cf9 100644 --- a/docs/en/operations/settings/settings.md +++ b/docs/en/operations/settings/settings.md @@ -1729,23 +1729,23 @@ Default value: 0. ## optimize_functions_to_subcolumns {#optimize-functions-to-subcolumns} -Optimizes functions (if possible) by transforming them to read the subcolumns. This reduces the amount of read data. +Enables or disables optimization by transforming some functions to reading subcolumns. This reduces the amount of data to read. -These function can be tranformed: +These functions can be transformed: -- [length](../../sql-reference/functions/array-functions.md#array_functions-length) to read subcolumn [size0](../../sql-reference/data-types/array.md#array-size). 
-- [empty](../../sql-reference/functions/array-functions.md#function-empty) to read subcolumn [size0](../../sql-reference/data-types/array.md#array-size). -- [notEmpty](../../sql-reference/functions/array-functions.md#function-notempty) to read subcolumn [size0](../../sql-reference/data-types/array.md#array-size). -- [isNull](../../sql-reference/operators/index.md#operator-is-null) to read subcolumn [null](../../sql-reference/data-types/nullable.md#finding-null). -- [isNotNull](../../sql-reference/operators/index.md#is-not-null) to read subcolumn [null](../../sql-reference/data-types/nullable.md#finding-null). -- [count](../../sql-reference/aggregate-functions/reference/count.md) to read subcolumn [null](../../sql-reference/data-types/nullable.md#finding-null). -- [mapKeys](../../sql-reference/functions/tuple-map-functions.md#mapkeys) to read subcolumn [keys](../../sql-reference/data-types/map.md#subcolumn-keys). -- [mapValues](../../sql-reference/functions/tuple-map-functions.md#mapvalues) to read subcolumn [values](../../sql-reference/data-types/map.md#subcolumn-values). +- [length](../../sql-reference/functions/array-functions.md#array_functions-length) to read the [size0](../../sql-reference/data-types/array.md#array-size) subcolumn. +- [empty](../../sql-reference/functions/array-functions.md#function-empty) to read the [size0](../../sql-reference/data-types/array.md#array-size) subcolumn. +- [notEmpty](../../sql-reference/functions/array-functions.md#function-notempty) to read the [size0](../../sql-reference/data-types/array.md#array-size) subcolumn. +- [isNull](../../sql-reference/operators/index.md#operator-is-null) to read the [null](../../sql-reference/data-types/nullable.md#finding-null) subcolumn. +- [isNotNull](../../sql-reference/operators/index.md#is-not-null) to read the [null](../../sql-reference/data-types/nullable.md#finding-null) subcolumn. +- [count](../../sql-reference/aggregate-functions/reference/count.md) to read the [null](../../sql-reference/data-types/nullable.md#finding-null) subcolumn. +- [mapKeys](../../sql-reference/functions/tuple-map-functions.md#mapkeys) to read the [keys](../../sql-reference/data-types/map.md#subcolumn-keys) subcolumn. +- [mapValues](../../sql-reference/functions/tuple-map-functions.md#mapvalues) to read the [values](../../sql-reference/data-types/map.md#subcolumn-values) subcolumn. Possible values: -- 0 — Disabled. -- 1 — Enabled. +- 0 — Optimization disabled. +- 1 — Optimization enabled. Default value: `0`. diff --git a/docs/en/sql-reference/aggregate-functions/reference/count.md b/docs/en/sql-reference/aggregate-functions/reference/count.md index 6f55d3b5cee..9356d0aab46 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/count.md +++ b/docs/en/sql-reference/aggregate-functions/reference/count.md @@ -31,7 +31,7 @@ ClickHouse supports the `COUNT(DISTINCT ...)` syntax. The behavior of this const The `SELECT count() FROM table` query is not optimized, because the number of entries in the table is not stored separately. It chooses a small column from the table and counts the number of values in it. -Can be optimized by the setting [optimize_functions_to_subcolumns](../../../operations/settings/settings.md#optimize-functions-to-subcolumns). With `optimize_functions_to_subcolumns = 1` the function reads subcolumn [null](../../../sql-reference/data-types/nullable.md#finding-null). 
+The `SELECT count() FROM table` query can be optimized by enabling the [optimize_functions_to_subcolumns](../../../operations/settings/settings.md#optimize-functions-to-subcolumns) setting. With `optimize_functions_to_subcolumns = 1` the function reads only [null](../../../sql-reference/data-types/nullable.md#finding-null) subcolumn instead of reading and processing the whole table data. **Examples** diff --git a/docs/en/sql-reference/data-types/map.md b/docs/en/sql-reference/data-types/map.md index dc1a9846d22..dff74b0cef4 100644 --- a/docs/en/sql-reference/data-types/map.md +++ b/docs/en/sql-reference/data-types/map.md @@ -76,9 +76,9 @@ SELECT CAST(([1, 2, 3], ['Ready', 'Steady', 'Go']), 'Map(UInt8, String)') AS map └───────────────────────────────┘ ``` -## Subcolumn Map.keys {#subcolumn-keys} +## Map.keys and Map.values Subcolumns {#map-subcolumns} -To read all keys of a `Map` you can use the subcolumn `keys`, which doesn't read the whole column. +To optimize `Map` column processing, in some cases you can use the `keys` and 'values' subcolumns instead of reading the whole column. **Example** @@ -90,6 +90,8 @@ CREATE TABLE t_map (`a` Map(String, UInt64)) ENGINE = Memory; INSERT INTO t_map VALUES (map('key1', 1, 'key2', 2, 'key3', 3)); SELECT a.keys FROM t_map; + +SELECT a.values FROM t_map; ``` Result: @@ -98,27 +100,7 @@ Result: ┌─a.keys─────────────────┐ │ ['key1','key2','key3'] │ └────────────────────────┘ -``` -## Subcolumn Map.values {#subcolumn-keys} - -To read all values of a `Map` you can use the subcolumn `values`, which doesn't read the whole column. - -**Example** - -Query: - -``` sql -CREATE TABLE t_map (`a` Map(String, UInt64)) ENGINE = Memory; - -INSERT INTO t_map VALUES (map('key1', 1, 'key2', 2, 'key3', 3)) - -SELECT a.values FROM t_map; -``` - -Result: - -``` text ┌─a.values─┐ │ [1,2,3] │ └──────────┘ diff --git a/docs/en/sql-reference/functions/array-functions.md b/docs/en/sql-reference/functions/array-functions.md index 10b8500b571..6f7a2d63318 100644 --- a/docs/en/sql-reference/functions/array-functions.md +++ b/docs/en/sql-reference/functions/array-functions.md @@ -11,7 +11,7 @@ Returns 1 for an empty array, or 0 for a non-empty array. The result type is UInt8. The function also works for strings. -Can be optimized by the setting [optimize_functions_to_subcolumns](../../operations/settings/settings.md#optimize-functions-to-subcolumns). With `optimize_functions_to_subcolumns = 1` the function reads subcolumn [size0](../../sql-reference/data-types/array.md#array-size). +Can be optimized by enabling the [optimize_functions_to_subcolumns](../../operations/settings/settings.md#optimize-functions-to-subcolumns) setting. With `optimize_functions_to_subcolumns = 1` the function reads only [size0](../../sql-reference/data-types/array.md#array-size) subcolumn instead of reading and processing the whole array column. ## notEmpty {#function-notempty} @@ -19,7 +19,7 @@ Returns 0 for an empty array, or 1 for a non-empty array. The result type is UInt8. The function also works for strings. -Can be optimized by the setting [optimize_functions_to_subcolumns](../../operations/settings/settings.md#optimize-functions-to-subcolumns). With `optimize_functions_to_subcolumns = 1` the function reads subcolumn [size0](../../sql-reference/data-types/array.md#array-size). +Can be optimized by enabling the [optimize_functions_to_subcolumns](../../operations/settings/settings.md#optimize-functions-to-subcolumns) setting. 
With `optimize_functions_to_subcolumns = 1` the function reads only [size0](../../sql-reference/data-types/array.md#array-size) subcolumn instead of reading and processing the whole array column. ## length {#array_functions-length} @@ -27,7 +27,7 @@ Returns the number of items in the array. The result type is UInt64. The function also works for strings. -Can be optimized by the setting [optimize_functions_to_subcolumns](../../operations/settings/settings.md#optimize-functions-to-subcolumns). With `optimize_functions_to_subcolumns = 1` the function reads subcolumn [size0](../../sql-reference/data-types/array.md#array-size). +Can be optimized by enabling the [optimize_functions_to_subcolumns](../../operations/settings/settings.md#optimize-functions-to-subcolumns) setting. With `optimize_functions_to_subcolumns = 1` the function reads only [size0](../../sql-reference/data-types/array.md#array-size) subcolumn instead of reading and processing the whole array column. ## emptyArrayUInt8, emptyArrayUInt16, emptyArrayUInt32, emptyArrayUInt64 {#emptyarrayuint8-emptyarrayuint16-emptyarrayuint32-emptyarrayuint64} diff --git a/docs/en/sql-reference/functions/tuple-map-functions.md b/docs/en/sql-reference/functions/tuple-map-functions.md index 2deb9323cff..b506ce7f190 100644 --- a/docs/en/sql-reference/functions/tuple-map-functions.md +++ b/docs/en/sql-reference/functions/tuple-map-functions.md @@ -220,7 +220,7 @@ Result: Returns all keys from the `map` parameter. -Can be optimized by setting the [optimize_functions_to_subcolumns](../../operations/settings/settings.md#optimize-functions-to-subcolumns). With `optimize_functions_to_subcolumns = 1` the function reads subcolumn [keys](../../sql-reference/data-types/map.md#subcolumn-keys). +Can be optimized by enabling the [optimize_functions_to_subcolumns](../../operations/settings/settings.md#optimize-functions-to-subcolumns) setting. With `optimize_functions_to_subcolumns = 1` the function reads only [keys](../../sql-reference/data-types/map.md#map-subcolumns) subcolumn instead of reading and processing the whole column data. **Syntax** @@ -263,7 +263,7 @@ Result: Returns all values from the `map` parameter. -Can be optimized by the setting [optimize_functions_to_subcolumns](../../operations/settings/settings.md#optimize-functions-to-subcolumns). With `optimize_functions_to_subcolumns = 1` the function reads subcolumn [values](../../sql-reference/data-types/map.md#subcolumn-values). +Can be optimized by enabling the [optimize_functions_to_subcolumns](../../operations/settings/settings.md#optimize-functions-to-subcolumns) setting. With `optimize_functions_to_subcolumns = 1` the function reads only [values](../../sql-reference/data-types/map.md#map-subcolumns) subcolumn instead of reading and processing the whole column data. **Syntax** diff --git a/docs/en/sql-reference/operators/index.md b/docs/en/sql-reference/operators/index.md index 1cb7936969c..9fa84c9eaae 100644 --- a/docs/en/sql-reference/operators/index.md +++ b/docs/en/sql-reference/operators/index.md @@ -283,7 +283,7 @@ ClickHouse supports the `IS NULL` and `IS NOT NULL` operators. - `0` otherwise. - For other values, the `IS NULL` operator always returns `0`. -Can be optimized by the setting [optimize_functions_to_subcolumns](../../operations/settings/settings.md#optimize-functions-to-subcolumns). With `optimize_functions_to_subcolumns = 1` the function reads subcolumn [null](../../sql-reference/data-types/nullable.md#finding-null). 
+Can be optimized by enabling the [optimize_functions_to_subcolumns](../../operations/settings/settings.md#optimize-functions-to-subcolumns) setting. With `optimize_functions_to_subcolumns = 1` the function reads only [null](../../sql-reference/data-types/nullable.md#finding-null) subcolumn instead of reading and processing the whole column data. @@ -316,4 +316,4 @@ SELECT * FROM t_null WHERE y IS NOT NULL └───┴───┘ ``` -Can be optimized by the setting [optimize_functions_to_subcolumns](../../operations/settings/settings.md#optimize-functions-to-subcolumns). With `optimize_functions_to_subcolumns = 1` the function reads subcolumn [null](../../sql-reference/data-types/nullable.md#finding-null). +Can be optimized by enabling the [optimize_functions_to_subcolumns](../../operations/settings/settings.md#optimize-functions-to-subcolumns) setting. With `optimize_functions_to_subcolumns = 1` the function reads only [null](../../sql-reference/data-types/nullable.md#finding-null) subcolumn instead of reading and processing the whole column data. From 159de92197ec6426588763190679caa3ec4186d3 Mon Sep 17 00:00:00 2001 From: kssenii Date: Sat, 26 Jun 2021 22:05:20 +0000 Subject: [PATCH 428/931] Uncomment test --- .../test.py | 174 +++++++++--------- 1 file changed, 87 insertions(+), 87 deletions(-) diff --git a/tests/integration/test_postgresql_replica_database_engine/test.py b/tests/integration/test_postgresql_replica_database_engine/test.py index f19a5cf2467..685ed85d8f4 100644 --- a/tests/integration/test_postgresql_replica_database_engine/test.py +++ b/tests/integration/test_postgresql_replica_database_engine/test.py @@ -637,93 +637,93 @@ def test_virtual_columns(started_cluster): drop_materialized_db() -#def test_multiple_databases(started_cluster): -# instance.query("DROP DATABASE IF EXISTS test_database_1") -# instance.query("DROP DATABASE IF EXISTS test_database_2") -# NUM_TABLES = 5 -# -# conn = get_postgres_conn() -# cursor = conn.cursor() -# create_postgres_db(cursor, 'postgres_database_1') -# create_postgres_db(cursor, 'postgres_database_2') -# -# conn1 = get_postgres_conn(True, True, 'postgres_database_1') -# conn2 = get_postgres_conn(True, True, 'postgres_database_2') -# -# cursor1 = conn1.cursor() -# cursor2 = conn2.cursor() -# -# create_clickhouse_postgres_db('postgres_database_1') -# create_clickhouse_postgres_db('postgres_database_2') -# -# cursors = [cursor1, cursor2] -# for cursor_id in range(len(cursors)): -# for i in range(NUM_TABLES): -# table_name = 'postgresql_replica_{}'.format(i) -# create_postgres_table(cursors[cursor_id], table_name); -# instance.query("INSERT INTO postgres_database_{}.{} SELECT number, number from numbers(50)".format(cursor_id + 1, table_name)) -# print('database 1 tables: ', instance.query('''SELECT name FROM system.tables WHERE database = 'postgres_database_1';''')) -# print('database 2 tables: ', instance.query('''SELECT name FROM system.tables WHERE database = 'postgres_database_2';''')) -# -# create_materialized_db('test_database_1', 'postgres_database_1') -# create_materialized_db('test_database_2', 'postgres_database_2') -# -# cursors = [cursor1, cursor2] -# for cursor_id in range(len(cursors)): -# for i in range(NUM_TABLES): -# table_name = 'postgresql_replica_{}'.format(i) -# instance.query("INSERT INTO postgres_database_{}.{} SELECT 50 + number, number from numbers(50)".format(cursor_id + 1, table_name)) -# -# for cursor_id in range(len(cursors)): -# for i in range(NUM_TABLES): -# table_name = 'postgresql_replica_{}'.format(i) -# 
check_tables_are_synchronized( -# table_name, 'key', 'postgres_database_{}'.format(cursor_id + 1), 'test_database_{}'.format(cursor_id + 1)); -# -# drop_clickhouse_postgres_db('postgres_database_1') -# drop_clickhouse_postgres_db('postgres_database_2') -# drop_materialized_db('test_database_1') -# drop_materialized_db('test_database_2') -# -# -#@pytest.mark.timeout(320) -#def test_concurrent_transactions(started_cluster): -# instance.query("DROP DATABASE IF EXISTS test_database") -# conn = get_postgres_conn(True) -# cursor = conn.cursor() -# NUM_TABLES = 6 -# -# for i in range(NUM_TABLES): -# create_postgres_table(cursor, 'postgresql_replica_{}'.format(i)); -# -# def transaction(thread_id): -# conn_ = get_postgres_conn(True, auto_commit=False) -# cursor_ = conn.cursor() -# for query in queries: -# cursor_.execute(query.format(thread_id)) -# print('thread {}, query {}'.format(thread_id, query)) -# conn_.commit() -# -# threads = [] -# threads_num = 6 -# for i in range(threads_num): -# threads.append(threading.Thread(target=transaction, args=(i,))) -# -# create_materialized_db() -# -# for thread in threads: -# time.sleep(random.uniform(0, 0.5)) -# thread.start() -# for thread in threads: -# thread.join() -# -# for i in range(NUM_TABLES): -# check_tables_are_synchronized('postgresql_replica_{}'.format(i)); -# count1 = instance.query('SELECT count() FROM postgres_database.postgresql_replica_{}'.format(i)) -# count2 = instance.query('SELECT count() FROM (SELECT * FROM test_database.postgresql_replica_{})'.format(i)) -# print(int(count1), int(count2), sep=' ') -# assert(int(count1) == int(count2)) -# drop_materialized_db() +def test_multiple_databases(started_cluster): + instance.query("DROP DATABASE IF EXISTS test_database_1") + instance.query("DROP DATABASE IF EXISTS test_database_2") + NUM_TABLES = 5 + + conn = get_postgres_conn() + cursor = conn.cursor() + create_postgres_db(cursor, 'postgres_database_1') + create_postgres_db(cursor, 'postgres_database_2') + + conn1 = get_postgres_conn(True, True, 'postgres_database_1') + conn2 = get_postgres_conn(True, True, 'postgres_database_2') + + cursor1 = conn1.cursor() + cursor2 = conn2.cursor() + + create_clickhouse_postgres_db('postgres_database_1') + create_clickhouse_postgres_db('postgres_database_2') + + cursors = [cursor1, cursor2] + for cursor_id in range(len(cursors)): + for i in range(NUM_TABLES): + table_name = 'postgresql_replica_{}'.format(i) + create_postgres_table(cursors[cursor_id], table_name); + instance.query("INSERT INTO postgres_database_{}.{} SELECT number, number from numbers(50)".format(cursor_id + 1, table_name)) + print('database 1 tables: ', instance.query('''SELECT name FROM system.tables WHERE database = 'postgres_database_1';''')) + print('database 2 tables: ', instance.query('''SELECT name FROM system.tables WHERE database = 'postgres_database_2';''')) + + create_materialized_db('test_database_1', 'postgres_database_1') + create_materialized_db('test_database_2', 'postgres_database_2') + + cursors = [cursor1, cursor2] + for cursor_id in range(len(cursors)): + for i in range(NUM_TABLES): + table_name = 'postgresql_replica_{}'.format(i) + instance.query("INSERT INTO postgres_database_{}.{} SELECT 50 + number, number from numbers(50)".format(cursor_id + 1, table_name)) + + for cursor_id in range(len(cursors)): + for i in range(NUM_TABLES): + table_name = 'postgresql_replica_{}'.format(i) + check_tables_are_synchronized( + table_name, 'key', 'postgres_database_{}'.format(cursor_id + 1), 'test_database_{}'.format(cursor_id + 
1));

+    drop_clickhouse_postgres_db('postgres_database_1')
+    drop_clickhouse_postgres_db('postgres_database_2')
+    drop_materialized_db('test_database_1')
+    drop_materialized_db('test_database_2')
+
+
+@pytest.mark.timeout(320)
+def test_concurrent_transactions(started_cluster):
+    instance.query("DROP DATABASE IF EXISTS test_database")
+    conn = get_postgres_conn(True)
+    cursor = conn.cursor()
+    NUM_TABLES = 6
+
+    for i in range(NUM_TABLES):
+        create_postgres_table(cursor, 'postgresql_replica_{}'.format(i));
+
+    def transaction(thread_id):
+        conn_ = get_postgres_conn(True, auto_commit=False)
+        cursor_ = conn_.cursor()
+        for query in queries:
+            cursor_.execute(query.format(thread_id))
+            print('thread {}, query {}'.format(thread_id, query))
+        conn_.commit()
+
+    threads = []
+    threads_num = 6
+    for i in range(threads_num):
+        threads.append(threading.Thread(target=transaction, args=(i,)))
+
+    create_materialized_db()
+
+    for thread in threads:
+        time.sleep(random.uniform(0, 0.5))
+        thread.start()
+    for thread in threads:
+        thread.join()
+
+    for i in range(NUM_TABLES):
+        check_tables_are_synchronized('postgresql_replica_{}'.format(i));
+        count1 = instance.query('SELECT count() FROM postgres_database.postgresql_replica_{}'.format(i))
+        count2 = instance.query('SELECT count() FROM (SELECT * FROM test_database.postgresql_replica_{})'.format(i))
+        print(int(count1), int(count2), sep=' ')
+        assert(int(count1) == int(count2))
+    drop_materialized_db()
 
 
 if __name__ == '__main__':

From e9e941f6efc410a7c6e3728b8eb01c4de21bd9bb Mon Sep 17 00:00:00 2001
From: Nikita Mikhaylov
Date: Sat, 26 Jun 2021 22:23:14 +0000
Subject: [PATCH 429/931] fix

---
 tests/queries/0_stateless/arcadia_skip_list.txt | 1 +
 1 file changed, 1 insertion(+)

diff --git a/tests/queries/0_stateless/arcadia_skip_list.txt b/tests/queries/0_stateless/arcadia_skip_list.txt
index 2b03f32dccc..9bd0163b58a 100644
--- a/tests/queries/0_stateless/arcadia_skip_list.txt
+++ b/tests/queries/0_stateless/arcadia_skip_list.txt
@@ -239,3 +239,4 @@
 01870_modulo_partition_key
 01880_remote_ipv6
 01882_check_max_parts_to_merge_at_once
+01914_exchange_dictionaries

From e7af8ee31b482eb29bb6ccf28c8a460a62c99776 Mon Sep 17 00:00:00 2001
From: gyuton <40863448+gyuton@users.noreply.github.com>
Date: Sun, 27 Jun 2021 02:09:30 +0300
Subject: [PATCH 430/931] Apply suggestions from code review

Co-authored-by: olgarev <56617294+olgarev@users.noreply.github.com>
---
 docs/en/sql-reference/aggregate-functions/reference/count.md | 2 --
 1 file changed, 2 deletions(-)

diff --git a/docs/en/sql-reference/aggregate-functions/reference/count.md b/docs/en/sql-reference/aggregate-functions/reference/count.md
index 9356d0aab46..78ab20151b7 100644
--- a/docs/en/sql-reference/aggregate-functions/reference/count.md
+++ b/docs/en/sql-reference/aggregate-functions/reference/count.md
@@ -29,8 +29,6 @@ In both cases the type of the returned value is [UInt64](../../../sql-reference/
 
 ClickHouse supports the `COUNT(DISTINCT ...)` syntax. The behavior of this construction depends on the [count_distinct_implementation](../../../operations/settings/settings.md#settings-count_distinct_implementation) setting. It defines which of the [uniq\*](../../../sql-reference/aggregate-functions/reference/uniq.md#agg_function-uniq) functions is used to perform the operation. The default is the [uniqExact](../../../sql-reference/aggregate-functions/reference/uniqexact.md#agg_function-uniqexact) function.
-The `SELECT count() FROM table` query is not optimized, because the number of entries in the table is not stored separately. It chooses a small column from the table and counts the number of values in it. - The `SELECT count() FROM table` query can be optimized by enabling the [optimize_functions_to_subcolumns](../../../operations/settings/settings.md#optimize-functions-to-subcolumns) setting. With `optimize_functions_to_subcolumns = 1` the function reads only [null](../../../sql-reference/data-types/nullable.md#finding-null) subcolumn instead of reading and processing the whole table data. **Examples** From 96b2c054da05e9f496488af1431b897e677165cd Mon Sep 17 00:00:00 2001 From: George Date: Sun, 27 Jun 2021 02:37:34 +0300 Subject: [PATCH 431/931] Added translation --- docs/en/operations/settings/settings.md | 4 +-- docs/en/sql-reference/data-types/map.md | 2 +- docs/ru/operations/settings/settings.md | 22 +++++++++++++ .../aggregate-functions/reference/count.md | 2 +- docs/ru/sql-reference/data-types/map.md | 31 +++++++++++++++++++ .../functions/array-functions.md | 6 ++++ .../functions/tuple-map-functions.md | 4 +++ docs/ru/sql-reference/operators/index.md | 4 +++ 8 files changed, 71 insertions(+), 4 deletions(-) diff --git a/docs/en/operations/settings/settings.md b/docs/en/operations/settings/settings.md index 5cb10720cf9..52556c581a0 100644 --- a/docs/en/operations/settings/settings.md +++ b/docs/en/operations/settings/settings.md @@ -1739,8 +1739,8 @@ These functions can be transformed: - [isNull](../../sql-reference/operators/index.md#operator-is-null) to read the [null](../../sql-reference/data-types/nullable.md#finding-null) subcolumn. - [isNotNull](../../sql-reference/operators/index.md#is-not-null) to read the [null](../../sql-reference/data-types/nullable.md#finding-null) subcolumn. - [count](../../sql-reference/aggregate-functions/reference/count.md) to read the [null](../../sql-reference/data-types/nullable.md#finding-null) subcolumn. -- [mapKeys](../../sql-reference/functions/tuple-map-functions.md#mapkeys) to read the [keys](../../sql-reference/data-types/map.md#subcolumn-keys) subcolumn. -- [mapValues](../../sql-reference/functions/tuple-map-functions.md#mapvalues) to read the [values](../../sql-reference/data-types/map.md#subcolumn-values) subcolumn. +- [mapKeys](../../sql-reference/functions/tuple-map-functions.md#mapkeys) to read the [keys](../../sql-reference/data-types/map.md#map-subcolumns) subcolumn. +- [mapValues](../../sql-reference/functions/tuple-map-functions.md#mapvalues) to read the [values](../../sql-reference/data-types/map.md#map-subcolumns) subcolumn. Possible values: diff --git a/docs/en/sql-reference/data-types/map.md b/docs/en/sql-reference/data-types/map.md index dff74b0cef4..86ea55004fd 100644 --- a/docs/en/sql-reference/data-types/map.md +++ b/docs/en/sql-reference/data-types/map.md @@ -78,7 +78,7 @@ SELECT CAST(([1, 2, 3], ['Ready', 'Steady', 'Go']), 'Map(UInt8, String)') AS map ## Map.keys and Map.values Subcolumns {#map-subcolumns} -To optimize `Map` column processing, in some cases you can use the `keys` and 'values' subcolumns instead of reading the whole column. +To optimize `Map` column processing, in some cases you can use the `keys` and `values` subcolumns instead of reading the whole column. 
 **Example**
 
diff --git a/docs/ru/operations/settings/settings.md b/docs/ru/operations/settings/settings.md
index fd5c9dba43a..d1904dc8617 100644
--- a/docs/ru/operations/settings/settings.md
+++ b/docs/ru/operations/settings/settings.md
@@ -1606,6 +1606,28 @@ ClickHouse генерирует исключение
 
 Значение по умолчанию: 0.
 
+## optimize_functions_to_subcolumns {#optimize-functions-to-subcolumns}
+
+Включает или отключает оптимизацию преобразованием некоторых функций к чтению подстолбцов, таким образом уменьшая количество данных для чтения.
+
+Могут быть преобразованы следующие функции:
+
+- [length](../../sql-reference/functions/array-functions.md#array_functions-length) к чтению подстолбца [size0](../../sql-reference/data-types/array.md#array-size).
+- [empty](../../sql-reference/functions/array-functions.md#function-empty) к чтению подстолбца [size0](../../sql-reference/data-types/array.md#array-size).
+- [notEmpty](../../sql-reference/functions/array-functions.md#function-notempty) к чтению подстолбца [size0](../../sql-reference/data-types/array.md#array-size).
+- [isNull](../../sql-reference/operators/index.md#operator-is-null) к чтению подстолбца [null](../../sql-reference/data-types/nullable.md#finding-null).
+- [isNotNull](../../sql-reference/operators/index.md#is-not-null) к чтению подстолбца [null](../../sql-reference/data-types/nullable.md#finding-null).
+- [count](../../sql-reference/aggregate-functions/reference/count.md) к чтению подстолбца [null](../../sql-reference/data-types/nullable.md#finding-null).
+- [mapKeys](../../sql-reference/functions/tuple-map-functions.md#mapkeys) к чтению подстолбца [keys](../../sql-reference/data-types/map.md#map-subcolumns).
+- [mapValues](../../sql-reference/functions/tuple-map-functions.md#mapvalues) к чтению подстолбца [values](../../sql-reference/data-types/map.md#map-subcolumns).
+
+Возможные значения:
+
+- 0 — оптимизация отключена.
+- 1 — оптимизация включена.
+
+Значение по умолчанию: `0`.
+
 ## distributed_replica_error_half_life {#settings-distributed_replica_error_half_life}
 
 - Тип: секунды
diff --git a/docs/ru/sql-reference/aggregate-functions/reference/count.md b/docs/ru/sql-reference/aggregate-functions/reference/count.md
index 06cf66bd8bd..da882621085 100644
--- a/docs/ru/sql-reference/aggregate-functions/reference/count.md
+++ b/docs/ru/sql-reference/aggregate-functions/reference/count.md
@@ -29,7 +29,7 @@ ClickHouse поддерживает следующие виды синтакси
 
 ClickHouse поддерживает синтаксис `COUNT(DISTINCT ...)`. Поведение этой конструкции зависит от настройки [count_distinct_implementation](../../../operations/settings/settings.md#settings-count_distinct_implementation). Она определяет, какая из функций [uniq\*](../../../sql-reference/aggregate-functions/reference/uniq.md#agg_function-uniq) используется для выполнения операции. По умолчанию — функция [uniqExact](../../../sql-reference/aggregate-functions/reference/uniqexact.md#agg_function-uniqexact).
 
-Запрос `SELECT count() FROM table` не оптимизирован, поскольку количество записей в таблице не хранится отдельно. Он выбирает небольшой столбец из таблицы и подсчитывает количество значений в нём.
+Запрос `SELECT count() FROM table` может быть оптимизирован включением настройки [optimize_functions_to_subcolumns](../../../operations/settings/settings.md#optimize-functions-to-subcolumns). При `optimize_functions_to_subcolumns = 1` функция читает только подстолбец [null](../../../sql-reference/data-types/nullable.md#finding-null) вместо чтения всех данных таблицы.
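To make the count() optimization documented above concrete, a hedged SQL illustration (the table and column names are invented for the example):

```sql
-- With the setting enabled, count(x) over a Nullable column can be answered
-- from the x.null subcolumn alone, without reading the column's values.
SET optimize_functions_to_subcolumns = 1;

CREATE TABLE t_nullable (x Nullable(UInt64)) ENGINE = MergeTree ORDER BY tuple();
INSERT INTO t_nullable VALUES (1), (NULL), (3);

SELECT count(x) FROM t_nullable; -- returns 2, since NULLs are not counted
```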
**Примеры** diff --git a/docs/ru/sql-reference/data-types/map.md b/docs/ru/sql-reference/data-types/map.md index 6cb8ccf1143..a703eb1b0ac 100644 --- a/docs/ru/sql-reference/data-types/map.md +++ b/docs/ru/sql-reference/data-types/map.md @@ -8,6 +8,7 @@ toc_title: Map(key, value) Тип данных `Map(key, value)` хранит пары `ключ:значение`. **Параметры** + - `key` — ключ. [String](../../sql-reference/data-types/string.md) или [Integer](../../sql-reference/data-types/int-uint.md). - `value` — значение. [String](../../sql-reference/data-types/string.md), [Integer](../../sql-reference/data-types/int-uint.md) или [Array](../../sql-reference/data-types/array.md). @@ -61,6 +62,36 @@ SELECT a['key3'] FROM table_map; └─────────────────────────┘ ``` +## Подстолбцы Map.keys и Map.values {#map-subcolumns} + +Для оптимизации обработки столбцов `Map` в некоторых случаях можно использовать подстолбцы `keys` и `values` вместо чтения всего столбца. + +**Пример** + +Запрос: + +``` sql +CREATE TABLE t_map (`a` Map(String, UInt64)) ENGINE = Memory; + +INSERT INTO t_map VALUES (map('key1', 1, 'key2', 2, 'key3', 3)); + +SELECT a.keys FROM t_map; + +SELECT a.values FROM t_map; +``` + +Результат: + +``` text +┌─a.keys─────────────────┐ +│ ['key1','key2','key3'] │ +└────────────────────────┘ + +┌─a.values─┐ +│ [1,2,3] │ +└──────────┘ +``` + **См. также** - функция [map()](../../sql-reference/functions/tuple-map-functions.md#function-map) diff --git a/docs/ru/sql-reference/functions/array-functions.md b/docs/ru/sql-reference/functions/array-functions.md index 0dfad45605a..cdf1a1a9bbf 100644 --- a/docs/ru/sql-reference/functions/array-functions.md +++ b/docs/ru/sql-reference/functions/array-functions.md @@ -11,18 +11,24 @@ toc_title: "Массивы" Тип результата - UInt8. Функция также работает для строк. +Функцию можно оптимизировать, если включить настройку [optimize_functions_to_subcolumns](../../operations/settings/settings.md#optimize-functions-to-subcolumns). При `optimize_functions_to_subcolumns = 1` функция читает только подстолбец [size0](../../sql-reference/data-types/array.md#array-size) вместо чтения и обработки всего столбца массива. + ## notEmpty {#function-notempty} Возвращает 0 для пустого массива, и 1 для непустого массива. Тип результата - UInt8. Функция также работает для строк. +Функцию можно оптимизировать, если включить настройку [optimize_functions_to_subcolumns](../../operations/settings/settings.md#optimize-functions-to-subcolumns). При `optimize_functions_to_subcolumns = 1` функция читает только подстолбец [size0](../../sql-reference/data-types/array.md#array-size) вместо чтения и обработки всего столбца массива. + ## length {#array_functions-length} Возвращает количество элементов в массиве. Тип результата - UInt64. Функция также работает для строк. +Функцию можно оптимизировать, если включить настройку [optimize_functions_to_subcolumns](../../operations/settings/settings.md#optimize-functions-to-subcolumns). При `optimize_functions_to_subcolumns = 1` функция читает только подстолбец [size0](../../sql-reference/data-types/array.md#array-size) вместо чтения и обработки всего столбца массива. 
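Similarly for the array functions documented above, a hedged SQL sketch with a hypothetical table:

```sql
-- length/empty/notEmpty can be served from the arr.size0 subcolumn,
-- so the array elements themselves are never read.
SET optimize_functions_to_subcolumns = 1;

CREATE TABLE t_arr (arr Array(UInt64)) ENGINE = MergeTree ORDER BY tuple();
INSERT INTO t_arr VALUES ([1, 2, 3]), ([]);

SELECT length(arr), empty(arr), notEmpty(arr) FROM t_arr;
```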
+
 ## emptyArrayUInt8, emptyArrayUInt16, emptyArrayUInt32, emptyArrayUInt64 {#emptyarrayuint8-emptyarrayuint16-emptyarrayuint32-emptyarrayuint64}
 
 ## emptyArrayInt8, emptyArrayInt16, emptyArrayInt32, emptyArrayInt64 {#emptyarrayint8-emptyarrayint16-emptyarrayint32-emptyarrayint64}
 
diff --git a/docs/ru/sql-reference/functions/tuple-map-functions.md b/docs/ru/sql-reference/functions/tuple-map-functions.md
index c385dbd8f87..7c3cd706d3c 100644
--- a/docs/ru/sql-reference/functions/tuple-map-functions.md
+++ b/docs/ru/sql-reference/functions/tuple-map-functions.md
@@ -224,6 +224,8 @@ SELECT mapContains(a, 'name') FROM test;
 
 Возвращает все ключи контейнера `map`.
 
+Функцию можно оптимизировать, если включить настройку [optimize_functions_to_subcolumns](../../operations/settings/settings.md#optimize-functions-to-subcolumns). При `optimize_functions_to_subcolumns = 1` функция читает только подстолбец [keys](../../sql-reference/data-types/map.md#map-subcolumns) вместо чтения и обработки данных всего столбца.
+
 **Синтаксис**
 
 ```sql
@@ -265,6 +267,8 @@ SELECT mapKeys(a) FROM test;
 
 Возвращает все значения контейнера `map`.
 
+Функцию можно оптимизировать, если включить настройку [optimize_functions_to_subcolumns](../../operations/settings/settings.md#optimize-functions-to-subcolumns). При `optimize_functions_to_subcolumns = 1` функция читает только подстолбец [values](../../sql-reference/data-types/map.md#map-subcolumns) вместо чтения и обработки данных всего столбца.
+
 **Синтаксис**
 
 ```sql
diff --git a/docs/ru/sql-reference/operators/index.md b/docs/ru/sql-reference/operators/index.md
index 5cf21b64079..80507f7529b 100644
--- a/docs/ru/sql-reference/operators/index.md
+++ b/docs/ru/sql-reference/operators/index.md
@@ -283,6 +283,8 @@ ClickHouse поддерживает операторы `IS NULL` и `IS NOT NULL
 - `0` в обратном случае.
 - Для прочих значений оператор `IS NULL` всегда возвращает `0`.
 
+Оператор можно оптимизировать, если включить настройку [optimize_functions_to_subcolumns](../../operations/settings/settings.md#optimize-functions-to-subcolumns). При `optimize_functions_to_subcolumns = 1` читается только подстолбец [null](../../sql-reference/data-types/nullable.md#finding-null) вместо чтения и обработки данных всего столбца.
+ ``` sql From 1e55b9376ae7951861a79350d81d9a8e8d79c495 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sun, 27 Jun 2021 02:48:11 +0300 Subject: [PATCH 432/931] Silent ANTLR parser test (non production) --- tests/queries/skip_list.json | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tests/queries/skip_list.json b/tests/queries/skip_list.json index fcfd5192ce9..ab988d3e543 100644 --- a/tests/queries/skip_list.json +++ b/tests/queries/skip_list.json @@ -519,7 +519,8 @@ "01924_argmax_bitmap_state", "01913_replace_dictionary", "01914_exchange_dictionaries", - "01915_create_or_replace_dictionary" + "01915_create_or_replace_dictionary", + "01913_names_of_tuple_literal" ], "parallel": [ From 930a67da13e0b1db9f65c58a7d5ee6bfc123052f Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sun, 27 Jun 2021 02:49:25 +0300 Subject: [PATCH 433/931] Fix the annoying Arcadia --- tests/queries/0_stateless/arcadia_skip_list.txt | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tests/queries/0_stateless/arcadia_skip_list.txt b/tests/queries/0_stateless/arcadia_skip_list.txt index 82d054a223b..afd11cb5a7d 100644 --- a/tests/queries/0_stateless/arcadia_skip_list.txt +++ b/tests/queries/0_stateless/arcadia_skip_list.txt @@ -249,3 +249,5 @@ 01824_prefer_global_in_and_join 01576_alias_column_rewrite 01924_argmax_bitmap_state +01914_exchange_dictionaries +01923_different_expression_name_alias From ba67097c0f67c7d3c58fa50de7e7664b86d122a7 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sun, 27 Jun 2021 02:54:22 +0300 Subject: [PATCH 434/931] Fix test in database Ordinary --- tests/queries/skip_list.json | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tests/queries/skip_list.json b/tests/queries/skip_list.json index ab988d3e543..f010efcf916 100644 --- a/tests/queries/skip_list.json +++ b/tests/queries/skip_list.json @@ -112,7 +112,8 @@ "00738_lock_for_inner_table", "01153_attach_mv_uuid", /// Sometimes cannot lock file most likely due to concurrent or adjacent tests, but we don't care how it works in Ordinary database. 
- "rocksdb" + "rocksdb", + "01914_exchange_dictionaries" /// Requires Atomic database ], "database-replicated": [ /// Unclassified From 686bf75f78345cfa2d560581c65e5a97e7e1b605 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sun, 27 Jun 2021 03:11:48 +0300 Subject: [PATCH 435/931] This performance test does not run in CI - remove --- tests/performance/nyc_taxi.xml | 13 ------------- 1 file changed, 13 deletions(-) delete mode 100644 tests/performance/nyc_taxi.xml diff --git a/tests/performance/nyc_taxi.xml b/tests/performance/nyc_taxi.xml deleted file mode 100644 index b8d9621e3eb..00000000000 --- a/tests/performance/nyc_taxi.xml +++ /dev/null @@ -1,13 +0,0 @@ - - - - - - trips_mergetree - - - SELECT cab_type, count(*) FROM trips_mergetree GROUP BY cab_type - SELECT passenger_count, avg(total_amount) FROM trips_mergetree GROUP BY passenger_count - SELECT passenger_count, toYear(pickup_date) AS year, count(*) FROM trips_mergetree GROUP BY passenger_count, year - SELECT passenger_count, toYear(pickup_date) AS year, round(trip_distance) AS distance, count(*) FROM trips_mergetree GROUP BY passenger_count, year, distance ORDER BY year, count(*) DESC - From ebc2fbfd63471c62f26bb83784cfa21d8243aba4 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sun, 27 Jun 2021 03:14:42 +0300 Subject: [PATCH 436/931] Performance test: be more generous --- docker/test/performance-comparison/report.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docker/test/performance-comparison/report.py b/docker/test/performance-comparison/report.py index 13b18cda326..35e1008e0d7 100755 --- a/docker/test/performance-comparison/report.py +++ b/docker/test/performance-comparison/report.py @@ -561,7 +561,7 @@ if args.report == 'main': # Don't show mildly unstable queries, only the very unstable ones we # treat as errors. if very_unstable_queries: - if very_unstable_queries > 3: + if very_unstable_queries > 5: error_tests += very_unstable_queries status = 'failure' message_array.append(str(very_unstable_queries) + ' unstable') From 80d9346080c3ac064f11bf8f16c647f62f7aa4cb Mon Sep 17 00:00:00 2001 From: alexey-milovidov Date: Sun, 27 Jun 2021 03:20:28 +0300 Subject: [PATCH 437/931] Update StorageSystemDataSkippingIndices.h --- src/Storages/System/StorageSystemDataSkippingIndices.h | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/src/Storages/System/StorageSystemDataSkippingIndices.h b/src/Storages/System/StorageSystemDataSkippingIndices.h index de8d7de706f..8497f16606f 100644 --- a/src/Storages/System/StorageSystemDataSkippingIndices.h +++ b/src/Storages/System/StorageSystemDataSkippingIndices.h @@ -1,10 +1,13 @@ #pragma once #include -#include +#include + namespace DB { + +/// For system.data_skipping_indices table - describes the data skipping indices in tables, similar to system.columns. 
class StorageSystemDataSkippingIndices : public shared_ptr_helper, public IStorage { friend struct shared_ptr_helper; @@ -21,6 +24,7 @@ public: unsigned num_streams) override; protected: - StorageSystemDataSkippingIndices(const StorageID& table_id_); + StorageSystemDataSkippingIndices(const StorageID & table_id_); }; + } From ec738c6170ed96f572bd8c332018b42bf20b08bc Mon Sep 17 00:00:00 2001 From: alexey-milovidov Date: Sun, 27 Jun 2021 03:25:04 +0300 Subject: [PATCH 438/931] Update StorageSystemDataSkippingIndices.cpp --- .../System/StorageSystemDataSkippingIndices.cpp | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/src/Storages/System/StorageSystemDataSkippingIndices.cpp b/src/Storages/System/StorageSystemDataSkippingIndices.cpp index 4629fb36b27..5229c685e41 100644 --- a/src/Storages/System/StorageSystemDataSkippingIndices.cpp +++ b/src/Storages/System/StorageSystemDataSkippingIndices.cpp @@ -87,15 +87,15 @@ protected: if (check_access_for_tables && !access->isGranted(AccessType::SHOW_TABLES, database_name, table_name)) continue; - auto const table = tables_it->table(); + const auto table = tables_it->table(); if (!table) continue; StorageMetadataPtr metadata_snapshot = table->getInMemoryMetadataPtr(); if (!metadata_snapshot) continue; - auto const indices = metadata_snapshot->getSecondaryIndices(); + const auto indices = metadata_snapshot->getSecondaryIndices(); - for (auto const& index : indices) + for (const auto & index : indices) { ++rows_count; @@ -175,14 +175,19 @@ Pipe StorageSystemDataSkippingIndices::read( { if (database_name == DatabaseCatalog::TEMPORARY_DATABASE) continue; + + /// Lazy database can contain only very primitive tables, + /// it cannot contain tables with data skipping indices. + /// Skip it to avoid unnecessary tables loading in the Lazy database. if (database->getEngineName() != "Lazy") column->insert(database_name); } + /// Condition on "database" in a query acts like an index. Block block { ColumnWithTypeAndName(std::move(column), std::make_shared(), "database") }; VirtualColumnUtils::filterBlockWithQuery(query_info.query, block, context); - ColumnPtr& filtered_databases = block.getByPosition(0).column; + ColumnPtr & filtered_databases = block.getByPosition(0).column; return Pipe(std::make_shared( std::move(columns_mask), std::move(header), max_block_size, std::move(filtered_databases), context)); } From 0e001329363284c1eb004fe5de2ee9dd654611df Mon Sep 17 00:00:00 2001 From: George Date: Sun, 27 Jun 2021 03:44:26 +0300 Subject: [PATCH 439/931] Small fix --- docs/ru/operations/settings/settings.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/ru/operations/settings/settings.md b/docs/ru/operations/settings/settings.md index d1904dc8617..346971e2a11 100644 --- a/docs/ru/operations/settings/settings.md +++ b/docs/ru/operations/settings/settings.md @@ -1608,7 +1608,7 @@ ClickHouse генерирует исключение ## optimize_functions_to_subcolumns {#optimize-functions-to-subcolumns} -Включает или отключает оптимизацию преобразованием некоторых функций к чтению подстобцов, таким образом уменьшая количество данных для чтения. +Включает или отключает оптимизацию путем преобразования некоторых функций к чтению подстобцов, таким образом уменьшая количество данных для чтения. 
Могут быть преобразованы следующие функции: From 06242f85e63849f14c4763f7497484d40548b465 Mon Sep 17 00:00:00 2001 From: feng lv Date: Sun, 27 Jun 2021 06:09:23 +0000 Subject: [PATCH 440/931] fix --- src/Interpreters/evaluateConstantExpression.cpp | 6 +++--- src/Interpreters/evaluateConstantExpression.h | 2 +- src/Storages/StorageMerge.cpp | 6 +++++- src/TableFunctions/TableFunctionMerge.cpp | 7 +++++-- .../01902_table_function_merge_db_repr.reference | 4 ++++ .../01902_table_function_merge_db_repr.sql | 14 ++++++++++---- 6 files changed, 28 insertions(+), 11 deletions(-) diff --git a/src/Interpreters/evaluateConstantExpression.cpp b/src/Interpreters/evaluateConstantExpression.cpp index 93cc1b4516c..2c7740a4036 100644 --- a/src/Interpreters/evaluateConstantExpression.cpp +++ b/src/Interpreters/evaluateConstantExpression.cpp @@ -100,7 +100,7 @@ ASTPtr evaluateConstantExpressionForDatabaseName(const ASTPtr & node, ContextPtr return res; } -std::tuple evaluateDatabaseNameForMergeEngine(const ASTPtr & node, ContextPtr context) +std::tuple evaluateDatabaseNameForMergeEngine(const ASTPtr & node, ContextPtr context) { if (const auto * func = node->as(); func && func->name == "REGEXP") { @@ -111,11 +111,11 @@ std::tuple evaluateDatabaseNameForMergeEngine(const ASTPtr & node, if (!literal || literal->value.safeGet().empty()) throw Exception("Argument for REGEXP in Merge ENGINE should be a non empty String Literal", ErrorCodes::BAD_ARGUMENTS); - return std::tuple{true, literal->value.safeGet()}; + return std::tuple{true, func->arguments->children[0]}; } auto ast = evaluateConstantExpressionForDatabaseName(node, context); - return std::tuple{false, ast->as()->value.safeGet()}; + return std::tuple{false, ast}; } namespace diff --git a/src/Interpreters/evaluateConstantExpression.h b/src/Interpreters/evaluateConstantExpression.h index 70f7bb9bd86..3b817080fe0 100644 --- a/src/Interpreters/evaluateConstantExpression.h +++ b/src/Interpreters/evaluateConstantExpression.h @@ -54,5 +54,5 @@ ASTPtr evaluateConstantExpressionForDatabaseName(const ASTPtr & node, ContextPtr std::optional evaluateExpressionOverConstantCondition(const ASTPtr & node, const ExpressionActionsPtr & target_expr, size_t & limit); // Evaluate database name or regexp for StorageMerge and TableFunction merge -std::tuple evaluateDatabaseNameForMergeEngine(const ASTPtr & node, ContextPtr context); +std::tuple evaluateDatabaseNameForMergeEngine(const ASTPtr & node, ContextPtr context); } diff --git a/src/Storages/StorageMerge.cpp b/src/Storages/StorageMerge.cpp index 7af0f5a71b3..3875193938b 100644 --- a/src/Storages/StorageMerge.cpp +++ b/src/Storages/StorageMerge.cpp @@ -670,7 +670,11 @@ void registerStorageMerge(StorageFactory & factory) " - name of source database and regexp for table names.", ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH); - auto [is_regexp, source_database_name_or_regexp] = evaluateDatabaseNameForMergeEngine(engine_args[0], args.getLocalContext()); + auto [is_regexp, database_ast] = evaluateDatabaseNameForMergeEngine(engine_args[0], args.getLocalContext()); + + if (!is_regexp) + engine_args[0] = database_ast; + String source_database_name_or_regexp = database_ast->as().value.safeGet(); engine_args[1] = evaluateConstantExpressionAsLiteral(engine_args[1], args.getLocalContext()); String table_name_regexp = engine_args[1]->as().value.safeGet(); diff --git a/src/TableFunctions/TableFunctionMerge.cpp b/src/TableFunctions/TableFunctionMerge.cpp index 85e338a3a8c..eb93386e2e9 100644 --- 
a/src/TableFunctions/TableFunctionMerge.cpp +++ b/src/TableFunctions/TableFunctionMerge.cpp @@ -49,10 +49,13 @@ void TableFunctionMerge::parseArguments(const ASTPtr & ast_function, ContextPtr " - name of source database and regexp for table names.", ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH); - auto [is_regexp, source_database_name_or_regexp_] = evaluateDatabaseNameForMergeEngine(args[0], context); + auto [is_regexp, database_ast] = evaluateDatabaseNameForMergeEngine(args[0], context); database_is_regexp = is_regexp; - source_database_name_or_regexp = source_database_name_or_regexp_; + + if (!is_regexp) + args[0] = database_ast; + source_database_name_or_regexp = database_ast->as().value.safeGet(); args[1] = evaluateConstantExpressionAsLiteral(args[1], context); source_table_regexp = args[1]->as().value.safeGet(); diff --git a/tests/queries/0_stateless/01902_table_function_merge_db_repr.reference b/tests/queries/0_stateless/01902_table_function_merge_db_repr.reference index 18cd6b85306..4fd27ceec77 100644 --- a/tests/queries/0_stateless/01902_table_function_merge_db_repr.reference +++ b/tests/queries/0_stateless/01902_table_function_merge_db_repr.reference @@ -40,6 +40,8 @@ SELECT _database, _table, n FROM 01902_db.t_merge ORDER BY _database, _table, n 01902_db3 t3 7 01902_db3 t3 8 01902_db3 t3 9 +SHOW CREATE TABLE 01902_db.t_merge +CREATE TABLE `01902_db`.t_merge\n(\n `n` Int8\n)\nENGINE = Merge(REGEXP(\'^01902_db\'), \'^t\') SELECT _database, _table, n FROM merge(REGEXP(^01902_db), ^t) ORDER BY _database, _table, n 01902_db t 0 01902_db t 1 @@ -178,6 +180,8 @@ SELECT _database, _table, n FROM 01902_db.t_merge_1 ORDER BY _database, _table, 01902_db1 t1 7 01902_db1 t1 8 01902_db1 t1 9 +SHOW CREATE TABLE 01902_db.t_merge_1 +CREATE TABLE `01902_db`.t_merge_1\n(\n `n` Int8\n)\nENGINE = Merge(\'01902_db1\', \'^t\') SELECT _database, _table, n FROM merge(currentDatabase(), ^t) ORDER BY _database, _table, n 01902_db1 t1 0 01902_db1 t1 1 diff --git a/tests/queries/0_stateless/01902_table_function_merge_db_repr.sql b/tests/queries/0_stateless/01902_table_function_merge_db_repr.sql index 22b8f6879c8..3aabf1a1f36 100644 --- a/tests/queries/0_stateless/01902_table_function_merge_db_repr.sql +++ b/tests/queries/0_stateless/01902_table_function_merge_db_repr.sql @@ -24,6 +24,9 @@ CREATE TABLE 01902_db.t_merge as 01902_db.t ENGINE=Merge(REGEXP('^01902_db'), '^ SELECT 'SELECT _database, _table, n FROM 01902_db.t_merge ORDER BY _database, _table, n'; SELECT _database, _table, n FROM 01902_db.t_merge ORDER BY _database, _table, n; +SELECT 'SHOW CREATE TABLE 01902_db.t_merge'; +SHOW CREATE TABLE 01902_db.t_merge; + SELECT 'SELECT _database, _table, n FROM merge(REGEXP(^01902_db), ^t) ORDER BY _database, _table, n'; SELECT _database, _table, n FROM merge(REGEXP('^01902_db'), '^t') ORDER BY _database, _table, n; @@ -51,10 +54,13 @@ CREATE TABLE 01902_db.t_merge_1 as 01902_db.t ENGINE=Merge(currentDatabase(), '^ SELECT 'SELECT _database, _table, n FROM 01902_db.t_merge_1 ORDER BY _database, _table, n'; SELECT _database, _table, n FROM 01902_db.t_merge_1 ORDER BY _database, _table, n; +SELECT 'SHOW CREATE TABLE 01902_db.t_merge_1'; +SHOW CREATE TABLE 01902_db.t_merge_1; + SELECT 'SELECT _database, _table, n FROM merge(currentDatabase(), ^t) ORDER BY _database, _table, n'; SELECT _database, _table, n FROM merge(currentDatabase(), '^t') ORDER BY _database, _table, n; --- DROP DATABASE 01902_db; --- DROP DATABASE 01902_db1; --- DROP DATABASE 01902_db2; --- DROP DATABASE 01902_db3; +DROP DATABASE 01902_db; +DROP 
DATABASE 01902_db1; +DROP DATABASE 01902_db2; +DROP DATABASE 01902_db3; From e87c2c31477f8cd35556db01d80f790bc2343406 Mon Sep 17 00:00:00 2001 From: sevirov <72220289+sevirov@users.noreply.github.com> Date: Sun, 27 Jun 2021 12:25:16 +0300 Subject: [PATCH 441/931] Update docs/en/engines/table-engines/integrations/ExternalDistributed.md Co-authored-by: olgarev <56617294+olgarev@users.noreply.github.com> --- .../engines/table-engines/integrations/ExternalDistributed.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/en/engines/table-engines/integrations/ExternalDistributed.md b/docs/en/engines/table-engines/integrations/ExternalDistributed.md index 19e0b9b9ceb..9f882fb7b36 100644 --- a/docs/en/engines/table-engines/integrations/ExternalDistributed.md +++ b/docs/en/engines/table-engines/integrations/ExternalDistributed.md @@ -43,7 +43,7 @@ Supports multiple replicas that must be listed by `|` and shards must be listed CREATE TABLE test_shards (id UInt32, name String, age UInt32, money UInt32) ENGINE = ExternalDistributed('MySQL', `mysql{1|2}:3306,mysql{3|4}:3306`, 'clickhouse', 'test_replicas', 'root', 'clickhouse'); ``` -When specifying replicas, one of the available replicas will be selected for each of the shards when reading. If the connection failed, the next replica will be selected, and so on for all the replicas. If the connection attempt failed for all the replicas, the attempt will be repeated the same way several times. +When specifying replicas, one of the available replicas is selected for each of the shards when reading. If the connection fails, the next replica is selected, and so on for all the replicas. If the connection attempt fails for all the replicas, the attempt is repeated the same way several times. You can specify up to any number of shards and to any number of replicas for each shard. From 020a82b96f4dd667967b07bb066a96b5ec046fc0 Mon Sep 17 00:00:00 2001 From: sevirov <72220289+sevirov@users.noreply.github.com> Date: Sun, 27 Jun 2021 12:25:34 +0300 Subject: [PATCH 442/931] Update docs/en/engines/table-engines/integrations/ExternalDistributed.md Co-authored-by: olgarev <56617294+olgarev@users.noreply.github.com> --- .../engines/table-engines/integrations/ExternalDistributed.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/en/engines/table-engines/integrations/ExternalDistributed.md b/docs/en/engines/table-engines/integrations/ExternalDistributed.md index 9f882fb7b36..6f83bbe0cfc 100644 --- a/docs/en/engines/table-engines/integrations/ExternalDistributed.md +++ b/docs/en/engines/table-engines/integrations/ExternalDistributed.md @@ -45,7 +45,7 @@ CREATE TABLE test_shards (id UInt32, name String, age UInt32, money UInt32) ENGI When specifying replicas, one of the available replicas is selected for each of the shards when reading. If the connection fails, the next replica is selected, and so on for all the replicas. If the connection attempt fails for all the replicas, the attempt is repeated the same way several times. -You can specify up to any number of shards and to any number of replicas for each shard. +You can specify any number of shards and any number of replicas for each shard. 
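Returning to PATCH 440 above: the updated test reference makes the observable effect of the fix concrete. A constant database argument of the Merge engine is now evaluated once and persisted as a literal in the stored table definition, while a `REGEXP(...)` argument is kept verbatim. A condensed sketch of that behavior (database and table names are illustrative):

```sql
CREATE TABLE t_merge AS t ENGINE = Merge(currentDatabase(), '^t');
SHOW CREATE TABLE t_merge;
-- ... ENGINE = Merge('db', '^t')            -- currentDatabase() folded to a literal

CREATE TABLE t_merge_re AS t ENGINE = Merge(REGEXP('^db'), '^t');
SHOW CREATE TABLE t_merge_re;
-- ... ENGINE = Merge(REGEXP('^db'), '^t')   -- the regexp form is preserved as written
```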
**See Also** From 775454bfe0912e4f534d6ce569ba3b2f56abe437 Mon Sep 17 00:00:00 2001 From: sevirov <72220289+sevirov@users.noreply.github.com> Date: Sun, 27 Jun 2021 12:25:52 +0300 Subject: [PATCH 443/931] Update docs/en/engines/table-engines/integrations/mysql.md Co-authored-by: olgarev <56617294+olgarev@users.noreply.github.com> --- docs/en/engines/table-engines/integrations/mysql.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/en/engines/table-engines/integrations/mysql.md b/docs/en/engines/table-engines/integrations/mysql.md index b4664e257a1..5db2104a6c4 100644 --- a/docs/en/engines/table-engines/integrations/mysql.md +++ b/docs/en/engines/table-engines/integrations/mysql.md @@ -55,7 +55,7 @@ Simple `WHERE` clauses such as `=, !=, >, >=, <, <=` are executed on the MySQL s The rest of the conditions and the `LIMIT` sampling constraint are executed in ClickHouse only after the query to MySQL finishes. -Supports multiple replicas that must be listed by a character `|`. For example: +Supports multiple replicas that must be listed by `|`. For example: ```sql CREATE TABLE test_replicas (id UInt32, name String, age UInt32, money UInt32) ENGINE = MySQL(`mysql{2|3|4}:3306`, 'clickhouse', 'test_replicas', 'root', 'clickhouse'); From 6efe22d67ef70e999b875ce335df21ba5dd17e7b Mon Sep 17 00:00:00 2001 From: sevirov <72220289+sevirov@users.noreply.github.com> Date: Sun, 27 Jun 2021 12:26:03 +0300 Subject: [PATCH 444/931] Update docs/en/engines/table-engines/integrations/postgresql.md Co-authored-by: olgarev <56617294+olgarev@users.noreply.github.com> --- docs/en/engines/table-engines/integrations/postgresql.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/en/engines/table-engines/integrations/postgresql.md b/docs/en/engines/table-engines/integrations/postgresql.md index 88ed3613d01..81849052357 100644 --- a/docs/en/engines/table-engines/integrations/postgresql.md +++ b/docs/en/engines/table-engines/integrations/postgresql.md @@ -50,7 +50,7 @@ PostgreSQL `Array` types are converted into ClickHouse arrays. !!! info "Note" Be careful - in PostgreSQL an array data, created like a `type_name[]`, may contain multi-dimensional arrays of different dimensions in different table rows in same column. But in ClickHouse it is only allowed to have multidimensional arrays of the same count of dimensions in all table rows in same column. -Supports multiple replicas that must be listed by a character `|`. For example: +Supports multiple replicas that must be listed by `|`. 
For example: ```sql CREATE TABLE test_replicas (id UInt32, name String) ENGINE = PostgreSQL(`postgres{2|3|4}:5432`, 'clickhouse', 'test_replicas', 'postgres', 'mysecretpassword'); From 92726cf8e2b04af6dbfb72d973a4420af7d71204 Mon Sep 17 00:00:00 2001 From: sevirov <72220289+sevirov@users.noreply.github.com> Date: Sun, 27 Jun 2021 12:26:12 +0300 Subject: [PATCH 445/931] Update docs/en/sql-reference/table-functions/mysql.md Co-authored-by: olgarev <56617294+olgarev@users.noreply.github.com> --- docs/en/sql-reference/table-functions/mysql.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/en/sql-reference/table-functions/mysql.md b/docs/en/sql-reference/table-functions/mysql.md index 016e78de68b..22e87e78cb1 100644 --- a/docs/en/sql-reference/table-functions/mysql.md +++ b/docs/en/sql-reference/table-functions/mysql.md @@ -39,7 +39,7 @@ Simple `WHERE` clauses such as `=, !=, >, >=, <, <=` are currently executed on t The rest of the conditions and the `LIMIT` sampling constraint are executed in ClickHouse only after the query to MySQL finishes. -Supports multiple replicas that must be listed by a character `|`. For example: +Supports multiple replicas that must be listed by `|`. For example: ```sql SELECT DISTINCT(name) FROM mysql(`mysql{1|2|3}:3306`, 'clickhouse', 'test_replicas', 'root', 'clickhouse'); From 8766a981b23f86ceb9a33cbc610a3d17797918d4 Mon Sep 17 00:00:00 2001 From: sevirov <72220289+sevirov@users.noreply.github.com> Date: Sun, 27 Jun 2021 12:26:20 +0300 Subject: [PATCH 446/931] Update docs/en/sql-reference/table-functions/postgresql.md Co-authored-by: olgarev <56617294+olgarev@users.noreply.github.com> --- docs/en/sql-reference/table-functions/postgresql.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/en/sql-reference/table-functions/postgresql.md b/docs/en/sql-reference/table-functions/postgresql.md index 313093dccf5..7ffe97d98af 100644 --- a/docs/en/sql-reference/table-functions/postgresql.md +++ b/docs/en/sql-reference/table-functions/postgresql.md @@ -44,7 +44,7 @@ PostgreSQL Array types converts into ClickHouse arrays. !!! info "Note" Be careful, in PostgreSQL an array data type column like Integer[] may contain arrays of different dimensions in different rows, but in ClickHouse it is only allowed to have multidimensional arrays of the same dimension in all rows. -Supports multiple replicas that must be listed by a character `|`. For example: +Supports multiple replicas that must be listed by `|`. 
For example: ```sql SELECT DISTINCT(name) FROM postgresql(`postgres{1|2|3}:5432`, 'clickhouse', 'test_replicas', 'postgres', 'mysecretpassword'); From 18b918f2adecd21a9372307b66093681cd6049e3 Mon Sep 17 00:00:00 2001 From: sevirov <72220289+sevirov@users.noreply.github.com> Date: Sun, 27 Jun 2021 12:26:34 +0300 Subject: [PATCH 447/931] Update docs/ru/engines/table-engines/integrations/ExternalDistributed.md Co-authored-by: olgarev <56617294+olgarev@users.noreply.github.com> --- .../engines/table-engines/integrations/ExternalDistributed.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/ru/engines/table-engines/integrations/ExternalDistributed.md b/docs/ru/engines/table-engines/integrations/ExternalDistributed.md index f50f671d589..21d3920066a 100644 --- a/docs/ru/engines/table-engines/integrations/ExternalDistributed.md +++ b/docs/ru/engines/table-engines/integrations/ExternalDistributed.md @@ -20,7 +20,7 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster] Смотрите подробное описание запроса [CREATE TABLE](../../../sql-reference/statements/create/table.md#create-table-query). -Структура таблицы может отличаться от исходной структуры таблицы: +Структура таблицы может отличаться от структуры исходной таблицы: - Имена столбцов должны быть такими же, как в исходной таблице, но вы можете использовать только некоторые из этих столбцов и в любом порядке. - Типы столбцов могут отличаться от типов в исходной таблице. ClickHouse пытается [привести](../../../sql-reference/functions/type-conversion-functions.md#type_conversion_function-cast) значения к типам данных ClickHouse. From f89d396568f8f21df6cdc40d74ea6cc5637bbb37 Mon Sep 17 00:00:00 2001 From: sevirov <72220289+sevirov@users.noreply.github.com> Date: Sun, 27 Jun 2021 12:26:49 +0300 Subject: [PATCH 448/931] Update docs/ru/engines/table-engines/integrations/ExternalDistributed.md Co-authored-by: olgarev <56617294+olgarev@users.noreply.github.com> --- .../engines/table-engines/integrations/ExternalDistributed.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/ru/engines/table-engines/integrations/ExternalDistributed.md b/docs/ru/engines/table-engines/integrations/ExternalDistributed.md index 21d3920066a..6793df683a8 100644 --- a/docs/ru/engines/table-engines/integrations/ExternalDistributed.md +++ b/docs/ru/engines/table-engines/integrations/ExternalDistributed.md @@ -22,7 +22,7 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster] Структура таблицы может отличаться от структуры исходной таблицы: -- Имена столбцов должны быть такими же, как в исходной таблице, но вы можете использовать только некоторые из этих столбцов и в любом порядке. +- Имена столбцов должны быть такими же, как в исходной таблице, но можно использовать только некоторые из этих столбцов и в любом порядке. - Типы столбцов могут отличаться от типов в исходной таблице. ClickHouse пытается [привести](../../../sql-reference/functions/type-conversion-functions.md#type_conversion_function-cast) значения к типам данных ClickHouse. - Настройка `external_table_functions_use_nulls` определяет как обрабатывать Nullable столбцы. Значение по умолчанию: 1. Если значение 0, то табличная функция не будет делать nullable столбцы и будет вместо null выставлять значения по умолчанию для скалярного типа. Это также применимо для null значений внутри массивов. 
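As the structure notes above state, the ClickHouse-side table only has to name a subset of the source columns, in any order, with values cast to the declared types. A sketch with hypothetical hosts and credentials, assuming a source MySQL table `users(id, name, age, money)`:

```sql
-- Exposes two of the four source columns, in a different order than the source.
CREATE TABLE users_subset (age UInt32, name String)
ENGINE = ExternalDistributed('MySQL', 'mysql{1|2}:3306,mysql{3|4}:3306',
                             'clickhouse', 'users', 'root', 'password');

SELECT name, age FROM users_subset WHERE age > 30;
```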
From fa0d65a4d1560553f5bab3167338d7c2891933e4 Mon Sep 17 00:00:00 2001 From: sevirov <72220289+sevirov@users.noreply.github.com> Date: Sun, 27 Jun 2021 12:27:23 +0300 Subject: [PATCH 449/931] Update docs/ru/engines/table-engines/integrations/ExternalDistributed.md Co-authored-by: olgarev <56617294+olgarev@users.noreply.github.com> --- .../engines/table-engines/integrations/ExternalDistributed.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/ru/engines/table-engines/integrations/ExternalDistributed.md b/docs/ru/engines/table-engines/integrations/ExternalDistributed.md index 6793df683a8..72d511d912f 100644 --- a/docs/ru/engines/table-engines/integrations/ExternalDistributed.md +++ b/docs/ru/engines/table-engines/integrations/ExternalDistributed.md @@ -24,7 +24,7 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster] - Имена столбцов должны быть такими же, как в исходной таблице, но можно использовать только некоторые из этих столбцов и в любом порядке. - Типы столбцов могут отличаться от типов в исходной таблице. ClickHouse пытается [привести](../../../sql-reference/functions/type-conversion-functions.md#type_conversion_function-cast) значения к типам данных ClickHouse. -- Настройка `external_table_functions_use_nulls` определяет как обрабатывать Nullable столбцы. Значение по умолчанию: 1. Если значение 0, то табличная функция не будет делать nullable столбцы и будет вместо null выставлять значения по умолчанию для скалярного типа. Это также применимо для null значений внутри массивов. +- Настройка `external_table_functions_use_nulls` определяет как обрабатывать Nullable столбцы. Значение по умолчанию: 1. Если значение 0, то табличная функция не будет делать столбцы Nullable и будет вместо NULL выставлять значения по умолчанию для скалярного типа. Это также применимо для значений NULL внутри массивов. **Параметры движка** From 6319175dea5fd9a6e44ad2fe6cb6c43662f0a6a2 Mon Sep 17 00:00:00 2001 From: sevirov <72220289+sevirov@users.noreply.github.com> Date: Sun, 27 Jun 2021 12:27:54 +0300 Subject: [PATCH 450/931] Update docs/ru/engines/table-engines/integrations/ExternalDistributed.md Co-authored-by: olgarev <56617294+olgarev@users.noreply.github.com> --- .../engines/table-engines/integrations/ExternalDistributed.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/ru/engines/table-engines/integrations/ExternalDistributed.md b/docs/ru/engines/table-engines/integrations/ExternalDistributed.md index 72d511d912f..22aae75ebbf 100644 --- a/docs/ru/engines/table-engines/integrations/ExternalDistributed.md +++ b/docs/ru/engines/table-engines/integrations/ExternalDistributed.md @@ -43,7 +43,7 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster] CREATE TABLE test_shards (id UInt32, name String, age UInt32, money UInt32) ENGINE = ExternalDistributed('MySQL', `mysql{1|2}:3306,mysql{3|4}:3306`, 'clickhouse', 'test_replicas', 'root', 'clickhouse'); ``` -При указании реплик для каждого из шардов при чтении будет выбрана одна из доступных реплик. Если соединиться не удалось, то будет выбрана следующая реплика, и так для всех реплик. Если попытка соединения для всех реплик не удалась, то будут снова произведены попытки соединения по кругу и так несколько раз. +При указании реплик для каждого из шардов при чтении выбирается одна из доступных реплик. Если соединиться не удалось, то выбирается следующая реплика, и так для всех реплик. 
Если попытка соединения не удалась для всех реплик, то сервер ClickHouse снова пытается соединиться с одной из реплик, перебирая их по кругу, и так несколько раз. Вы можете указать любое количество шардов и любое количество реплик для каждого шарда. From ed262d423f5f8f387d565a1288a9cf7d1dd793ff Mon Sep 17 00:00:00 2001 From: gyuton <40863448+gyuton@users.noreply.github.com> Date: Sun, 27 Jun 2021 13:22:49 +0300 Subject: [PATCH 451/931] Update docs/ru/sql-reference/aggregate-functions/reference/count.md Co-authored-by: olgarev <56617294+olgarev@users.noreply.github.com> --- docs/ru/sql-reference/aggregate-functions/reference/count.md | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/docs/ru/sql-reference/aggregate-functions/reference/count.md b/docs/ru/sql-reference/aggregate-functions/reference/count.md index da882621085..9b753c03d24 100644 --- a/docs/ru/sql-reference/aggregate-functions/reference/count.md +++ b/docs/ru/sql-reference/aggregate-functions/reference/count.md @@ -29,7 +29,7 @@ ClickHouse поддерживает следующие виды синтакси ClickHouse поддерживает синтаксис `COUNT(DISTINCT ...)`. Поведение этой конструкции зависит от настройки [count_distinct_implementation](../../../operations/settings/settings.md#settings-count_distinct_implementation). Она определяет, какая из функций [uniq\*](../../../sql-reference/aggregate-functions/reference/uniq.md#agg_function-uniq) используется для выполнения операции. По умолчанию — функция [uniqExact](../../../sql-reference/aggregate-functions/reference/uniqexact.md#agg_function-uniqexact). -Запрос `SELECT count() FROM table` может быть оптимизирован включением настройки optimize_functions_to_subcolumns](../../../operations/settings/settings.md#optimize-functions-to-subcolumns). При `optimize_functions_to_subcolumns = 1` функция читает только подстолбец [null](../../../sql-reference/data-types/nullable.md#finding-null) вместо чтения всех данных таблицы. +Запрос `SELECT count() FROM table` может быть оптимизирован включением настройки [optimize_functions_to_subcolumns](../../../operations/settings/settings.md#optimize-functions-to-subcolumns). При `optimize_functions_to_subcolumns = 1` функция читает только подстолбец [null](../../../sql-reference/data-types/nullable.md#finding-null) вместо чтения всех данных таблицы. **Примеры** @@ -68,4 +68,3 @@ SELECT count(DISTINCT num) FROM t ``` Этот пример показывает, что `count(DISTINCT num)` выполняется с помощью функции `uniqExact` в соответствии со значением настройки `count_distinct_implementation`. - From 9674669726d9dfa9159368e74fd5de4c8f131d24 Mon Sep 17 00:00:00 2001 From: George Date: Sun, 27 Jun 2021 13:25:30 +0300 Subject: [PATCH 452/931] typo --- docs/ru/operations/settings/settings.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/ru/operations/settings/settings.md b/docs/ru/operations/settings/settings.md index 346971e2a11..eb78f37d87f 100644 --- a/docs/ru/operations/settings/settings.md +++ b/docs/ru/operations/settings/settings.md @@ -1608,7 +1608,7 @@ ClickHouse генерирует исключение ## optimize_functions_to_subcolumns {#optimize-functions-to-subcolumns} -Включает или отключает оптимизацию путем преобразования некоторых функций к чтению подстобцов, таким образом уменьшая количество данных для чтения. +Включает или отключает оптимизацию путем преобразования некоторых функций к чтению подстолбцов, таким образом уменьшая объем данных для чтения. 
Могут быть преобразованы следующие функции: From 45a4ea7a686240d06d2c9c1f0a716c30cf911248 Mon Sep 17 00:00:00 2001 From: Dmitriy Date: Sun, 27 Jun 2021 14:01:51 +0300 Subject: [PATCH 453/931] Delete the external_table_functions_use_nulls setting MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Удалил настройку external_table_functions_use_nulls из описания движка ExternalDistributed. --- .../en/engines/table-engines/integrations/ExternalDistributed.md | 1 - .../ru/engines/table-engines/integrations/ExternalDistributed.md | 1 - 2 files changed, 2 deletions(-) diff --git a/docs/en/engines/table-engines/integrations/ExternalDistributed.md b/docs/en/engines/table-engines/integrations/ExternalDistributed.md index 6f83bbe0cfc..39fea30f98f 100644 --- a/docs/en/engines/table-engines/integrations/ExternalDistributed.md +++ b/docs/en/engines/table-engines/integrations/ExternalDistributed.md @@ -24,7 +24,6 @@ The table structure can differ from the original table structure: - Column names should be the same as in the original table, but you can use just some of these columns and in any order. - Column types may differ from those in the original table. ClickHouse tries to [cast](../../../sql-reference/functions/type-conversion-functions.md#type_conversion_function-cast) values to the ClickHouse data types. -- The `external_table_functions_use_nulls` setting defines how to handle Nullable columns. Default value: 1. If 0, the table function will not make nullable columns and will insert default values instead of nulls. This is also applicable for null values inside array data types. **Engine Parameters** diff --git a/docs/ru/engines/table-engines/integrations/ExternalDistributed.md b/docs/ru/engines/table-engines/integrations/ExternalDistributed.md index 22aae75ebbf..4c81f242f65 100644 --- a/docs/ru/engines/table-engines/integrations/ExternalDistributed.md +++ b/docs/ru/engines/table-engines/integrations/ExternalDistributed.md @@ -24,7 +24,6 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster] - Имена столбцов должны быть такими же, как в исходной таблице, но можно использовать только некоторые из этих столбцов и в любом порядке. - Типы столбцов могут отличаться от типов в исходной таблице. ClickHouse пытается [привести](../../../sql-reference/functions/type-conversion-functions.md#type_conversion_function-cast) значения к типам данных ClickHouse. -- Настройка `external_table_functions_use_nulls` определяет как обрабатывать Nullable столбцы. Значение по умолчанию: 1. Если значение 0, то табличная функция не будет делать столбцы Nullable и будет вместо NULL выставлять значения по умолчанию для скалярного типа. Это также применимо для значений NULL внутри массивов. **Параметры движка** From 07afd0746b1fe8be43ffd8b9ab579b239b45b547 Mon Sep 17 00:00:00 2001 From: Dmitriy Date: Sun, 27 Jun 2021 14:07:24 +0300 Subject: [PATCH 454/931] Replace links MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Заменил линки в cast. 
--- docs/en/engines/table-engines/integrations/mysql.md | 2 +- docs/en/engines/table-engines/integrations/postgresql.md | 2 +- docs/ru/engines/table-engines/integrations/mysql.md | 2 +- docs/ru/engines/table-engines/integrations/postgresql.md | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/docs/en/engines/table-engines/integrations/mysql.md b/docs/en/engines/table-engines/integrations/mysql.md index 5db2104a6c4..1b6d1893cc2 100644 --- a/docs/en/engines/table-engines/integrations/mysql.md +++ b/docs/en/engines/table-engines/integrations/mysql.md @@ -28,7 +28,7 @@ See a detailed description of the [CREATE TABLE](../../../sql-reference/statemen The table structure can differ from the original MySQL table structure: - Column names should be the same as in the original MySQL table, but you can use just some of these columns and in any order. -- Column types may differ from those in the original MySQL table. ClickHouse tries to [cast](../../../sql-reference/functions/type-conversion-functions.md#type_conversion_function-cast) values to the ClickHouse data types. +- Column types may differ from those in the original MySQL table. ClickHouse tries to [cast](../../../engines/database-engines/mysql.md#data_types-support) values to the ClickHouse data types. - Setting `external_table_functions_use_nulls` defines how to handle Nullable columns. Default value: 1. If 0, the table function will not make nullable columns and will insert default values instead of nulls. This is also applicable for null values inside array data types. **Engine Parameters** diff --git a/docs/en/engines/table-engines/integrations/postgresql.md b/docs/en/engines/table-engines/integrations/postgresql.md index 81849052357..5d9516aeefc 100644 --- a/docs/en/engines/table-engines/integrations/postgresql.md +++ b/docs/en/engines/table-engines/integrations/postgresql.md @@ -23,7 +23,7 @@ See a detailed description of the [CREATE TABLE](../../../sql-reference/statemen The table structure can differ from the original PostgreSQL table structure: - Column names should be the same as in the original PostgreSQL table, but you can use just some of these columns and in any order. -- Column types may differ from those in the original PostgreSQL table. ClickHouse tries to [cast](../../../sql-reference/functions/type-conversion-functions.md#type_conversion_function-cast) values to the ClickHouse data types. +- Column types may differ from those in the original PostgreSQL table. ClickHouse tries to [cast](../../../engines/database-engines/postgresql.md#data_types-support) values to the ClickHouse data types. - Setting `external_table_functions_use_nulls` defines how to handle Nullable columns. Default value: 1. If 0, the table function will not make nullable columns and will insert default values instead of nulls. This is also applicable for null values inside array data types. **Engine Parameters** diff --git a/docs/ru/engines/table-engines/integrations/mysql.md b/docs/ru/engines/table-engines/integrations/mysql.md index 0951077937e..e27f63b9a62 100644 --- a/docs/ru/engines/table-engines/integrations/mysql.md +++ b/docs/ru/engines/table-engines/integrations/mysql.md @@ -23,7 +23,7 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster] Структура таблицы может отличаться от исходной структуры таблицы MySQL: - Имена столбцов должны быть такими же, как в исходной таблице MySQL, но вы можете использовать только некоторые из этих столбцов и в любом порядке. 
-- Типы столбцов могут отличаться от типов в исходной таблице MySQL. ClickHouse пытается [привести](../../../sql-reference/functions/type-conversion-functions.md#type_conversion_function-cast) значения к типам данных ClickHouse. +- Типы столбцов могут отличаться от типов в исходной таблице MySQL. ClickHouse пытается [привести](../../../engines/database-engines/mysql.md#data_types-support) значения к типам данных ClickHouse. - Настройка `external_table_functions_use_nulls` определяет как обрабатывать Nullable столбцы. Значение по умолчанию: 1. Если значение 0, то табличная функция не будет делать nullable столбцы и будет вместо null выставлять значения по умолчанию для скалярного типа. Это также применимо для null значений внутри массивов. **Параметры движка** diff --git a/docs/ru/engines/table-engines/integrations/postgresql.md b/docs/ru/engines/table-engines/integrations/postgresql.md index c4d11a81f22..4cc93beaf08 100644 --- a/docs/ru/engines/table-engines/integrations/postgresql.md +++ b/docs/ru/engines/table-engines/integrations/postgresql.md @@ -23,7 +23,7 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster] Структура таблицы может отличаться от исходной структуры таблицы PostgreSQL: - Имена столбцов должны быть такими же, как в исходной таблице PostgreSQL, но вы можете использовать только некоторые из этих столбцов и в любом порядке. -- Типы столбцов могут отличаться от типов в исходной таблице PostgreSQL. ClickHouse пытается [привести](../../../sql-reference/functions/type-conversion-functions.md#type_conversion_function-cast) значения к типам данных ClickHouse. +- Типы столбцов могут отличаться от типов в исходной таблице PostgreSQL. ClickHouse пытается [привести](../../../engines/database-engines/postgresql.md#data_types-support) значения к типам данных ClickHouse. - Настройка `external_table_functions_use_nulls` определяет как обрабатывать Nullable столбцы. Значение по умолчанию: 1. Если значение 0, то табличная функция не будет делать nullable столбцы и будет вместо null выставлять значения по умолчанию для скалярного типа. Это также применимо для null значений внутри массивов. 
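The retargeted links point to the type-mapping tables of the corresponding database engines, which is the cast these bullet points refer to. A sketch of what that means in practice, assuming a hypothetical PostgreSQL table `visits(user_id bigint, ts timestamp)`:

```sql
-- Declaring narrower ClickHouse types than the source is allowed; values are
-- cast on read according to the linked mapping rules (assumed to fit UInt32 here).
CREATE TABLE pg_visits (user_id UInt32, ts DateTime)
ENGINE = PostgreSQL('postgres1:5432', 'clickhouse', 'visits', 'postgres', 'password');
```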
**Параметры движка** From 40095a9bfe6333923fc8c6df81e57e426c5fb8f5 Mon Sep 17 00:00:00 2001 From: alexey-milovidov Date: Sun, 27 Jun 2021 15:03:56 +0300 Subject: [PATCH 455/931] Update StorageSystemDataSkippingIndices.cpp --- src/Storages/System/StorageSystemDataSkippingIndices.cpp | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/Storages/System/StorageSystemDataSkippingIndices.cpp b/src/Storages/System/StorageSystemDataSkippingIndices.cpp index 5229c685e41..7a6ce4ec519 100644 --- a/src/Storages/System/StorageSystemDataSkippingIndices.cpp +++ b/src/Storages/System/StorageSystemDataSkippingIndices.cpp @@ -8,6 +8,8 @@ #include #include #include +#include + namespace DB { From 4b994fc3c75334380838bd3457136d7e743ae7c4 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sun, 27 Jun 2021 15:38:08 +0300 Subject: [PATCH 456/931] Change error code in LIVE VIEW --- src/Storages/LiveView/StorageLiveView.cpp | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/src/Storages/LiveView/StorageLiveView.cpp b/src/Storages/LiveView/StorageLiveView.cpp index 19c3992276f..e1da02c5243 100644 --- a/src/Storages/LiveView/StorageLiveView.cpp +++ b/src/Storages/LiveView/StorageLiveView.cpp @@ -49,7 +49,6 @@ namespace DB namespace ErrorCodes { - extern const int LOGICAL_ERROR; extern const int INCORRECT_QUERY; extern const int TABLE_WAS_NOT_DROPPED; extern const int QUERY_IS_NOT_SUPPORTED_IN_LIVE_VIEW; @@ -84,7 +83,7 @@ static StorageID extractDependentTable(ASTPtr & query, ContextPtr context, const if (!ast_select) throw Exception("Logical error while creating StorageLiveView." " Could not retrieve table name from select query.", - DB::ErrorCodes::LOGICAL_ERROR); + DB::ErrorCodes::QUERY_IS_NOT_SUPPORTED_IN_LIVE_VIEW); if (ast_select->list_of_selects->children.size() != 1) throw Exception("UNION is not supported for LIVE VIEW", ErrorCodes::QUERY_IS_NOT_SUPPORTED_IN_LIVE_VIEW); From 4dbd659d64218733234a0ce98424fab5c8a4b9f9 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sun, 27 Jun 2021 15:40:07 +0300 Subject: [PATCH 457/931] Change error code in LIVE VIEW --- src/Storages/LiveView/StorageLiveView.cpp | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/src/Storages/LiveView/StorageLiveView.cpp b/src/Storages/LiveView/StorageLiveView.cpp index e1da02c5243..f54abda6d7f 100644 --- a/src/Storages/LiveView/StorageLiveView.cpp +++ b/src/Storages/LiveView/StorageLiveView.cpp @@ -81,8 +81,7 @@ static StorageID extractDependentTable(ASTPtr & query, ContextPtr context, const { auto * ast_select = subquery->as(); if (!ast_select) - throw Exception("Logical error while creating StorageLiveView." - " Could not retrieve table name from select query.", + throw Exception("LIVE VIEWs are only supported for queries from tables, but there is no table name in select query.", DB::ErrorCodes::QUERY_IS_NOT_SUPPORTED_IN_LIVE_VIEW); if (ast_select->list_of_selects->children.size() != 1) throw Exception("UNION is not supported for LIVE VIEW", ErrorCodes::QUERY_IS_NOT_SUPPORTED_IN_LIVE_VIEW); From 1129b85f3c443b655fc7eb1beb6cb46de52f60e1 Mon Sep 17 00:00:00 2001 From: Dmitriy Date: Sun, 27 Jun 2021 15:40:19 +0300 Subject: [PATCH 458/931] Create the external_table_functions_use_nulls setting MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Задокументировал настройку external_table_functions_use_nulls. 
---
 docs/en/operations/settings/settings.md | 15 +++++++++++++++
 1 file changed, 15 insertions(+)

diff --git a/docs/en/operations/settings/settings.md b/docs/en/operations/settings/settings.md
index 08cf9daeb28..c584776a3b2 100644
--- a/docs/en/operations/settings/settings.md
+++ b/docs/en/operations/settings/settings.md
@@ -3145,4 +3145,19 @@ SETTINGS index_granularity = 8192 │
 └────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┘
 ```
+## external_table_functions_use_nulls {#external-table-functions-use-nulls}
+
+Defines how the [mysql](../../sql-reference/table-functions/mysql.md), [postgresql](../../sql-reference/table-functions/postgresql.md) and [odbc](../../sql-reference/table-functions/odbc.md) table functions use Nullable columns.
+
+Possible values:
+
+- 0 — The table function explicitly uses Nullable columns.
+- 1 — The table function implicitly uses Nullable columns.
+
+Default value: `1`.
+
+**Usage**
+
+If the setting is set to `0`, the table function does not make Nullable columns and inserts default values instead of NULL. This is also applicable for NULL values inside arrays.
+
 [Original article](https://clickhouse.tech/docs/en/operations/settings/settings/)

From a21577925692de75b08eea22c911947e31a1dc62 Mon Sep 17 00:00:00 2001
From: Dmitriy
Date: Sun, 27 Jun 2021 16:04:35 +0300
Subject: [PATCH 459/931] Translate to Russian
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Перевел на русский язык.
From 9ab463efddc1fb4df584a1a44e1dc5e88ec95e9f Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sun, 27 Jun 2021 17:49:26 +0300 Subject: [PATCH 460/931] utils/generate-ya-make/generate-ya-make.sh --- src/Storages/ya.make | 1 + 1 file changed, 1 insertion(+) diff --git a/src/Storages/ya.make b/src/Storages/ya.make index d8692524210..6e412cddba7 100644 --- a/src/Storages/ya.make +++ b/src/Storages/ya.make @@ -158,6 +158,7 @@ SRCS( System/StorageSystemContributors.generated.cpp System/StorageSystemCurrentRoles.cpp System/StorageSystemDDLWorkerQueue.cpp + System/StorageSystemDataSkippingIndices.cpp System/StorageSystemDataTypeFamilies.cpp System/StorageSystemDatabases.cpp System/StorageSystemDetachedParts.cpp From f6e67d3dc1ed06df521dbd5ff1838d5ee836dbe0 Mon Sep 17 00:00:00 2001 From: alexey-milovidov Date: Sun, 27 Jun 2021 18:22:34 +0300 Subject: [PATCH 461/931] Update StorageDistributed.cpp --- src/Storages/StorageDistributed.cpp | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/Storages/StorageDistributed.cpp b/src/Storages/StorageDistributed.cpp index d43fd1532a1..15b1421c635 100644 --- a/src/Storages/StorageDistributed.cpp +++ b/src/Storages/StorageDistributed.cpp @@ -801,6 +801,8 @@ void StorageDistributed::startup() return; const auto & disks = data_volume->getDisks(); + + /// Make initialization for large number of disks parallel. ThreadPool pool(disks.size()); for (const DiskPtr & disk : disks) From 45c4b62c3b4485494d0d9d33d0a8ff09fca9d5c1 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sun, 27 Jun 2021 18:41:25 +0300 Subject: [PATCH 462/931] Separate log files for separate runs in stress test --- docker/test/stress/run.sh | 17 ++++++++++++++++- 1 file changed, 16 insertions(+), 1 deletion(-) diff --git a/docker/test/stress/run.sh b/docker/test/stress/run.sh index 8016b2c59f3..a2bcc7cde09 100755 --- a/docker/test/stress/run.sh +++ b/docker/test/stress/run.sh @@ -37,6 +37,17 @@ function stop() function start() { + # Rename existing log file - it will be more convenient to read separate files for separate server runs. 
+ if [ -f '/var/log/clickhouse-server/clickhouse-server.log' ] + then + log_file_counter=1 + while [ -f "/var/log/clickhouse-server/clickhouse-server.log.${log_file_counter}" ] + do + log_file_counter=$((log_file_counter + 1)) + done + mv '/var/log/clickhouse-server/clickhouse-server.log' "/var/log/clickhouse-server/clickhouse-server.log.${log_file_counter}" + fi + counter=0 until clickhouse-client --query "SELECT 1" do @@ -140,7 +151,11 @@ zgrep -Fa "########################################" /test_output/* > /dev/null && echo -e 'Killed by signal (output files)\tFAIL' >> /test_output/test_results.tsv # Put logs into /test_output/ -pigz < /var/log/clickhouse-server/clickhouse-server.log > /test_output/clickhouse-server.log.gz +for log_file in /var/log/clickhouse-server/clickhouse-server.log* +do + pigz < $log_file > /test_output/$(basename $log_file).gz +done + tar -chf /test_output/coordination.tar /var/lib/clickhouse/coordination ||: mv /var/log/clickhouse-server/stderr.log /test_output/ tar -chf /test_output/query_log_dump.tar /var/lib/clickhouse/data/system/query_log ||: From 887be640f90c84308754170c131ecb83f6da05db Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sun, 27 Jun 2021 18:54:21 +0300 Subject: [PATCH 463/931] Fix slow performance test --- tests/performance/quantile.xml | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/tests/performance/quantile.xml b/tests/performance/quantile.xml index 4718c129db6..120166b600d 100644 --- a/tests/performance/quantile.xml +++ b/tests/performance/quantile.xml @@ -1,6 +1,7 @@ hits_100m_single + hits_10m_single @@ -10,9 +11,17 @@ SearchEngineID RegionID SearchPhrase + + + + + + key_slow + ClientIP + func @@ -26,4 +35,5 @@ SELECT {key} AS k, {func}(ResolutionWidth) FROM hits_100m_single GROUP BY k FORMAT Null + SELECT {key_slow} AS k, {func}(ResolutionWidth) FROM hits_10m_single GROUP BY k FORMAT Null From f6fa720b4fea2fc1b3d4263f691f7a094c18793b Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sun, 27 Jun 2021 19:06:24 +0300 Subject: [PATCH 464/931] Better test --- .../0_stateless/01923_network_receive_time_metric_insert.sh | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tests/queries/0_stateless/01923_network_receive_time_metric_insert.sh b/tests/queries/0_stateless/01923_network_receive_time_metric_insert.sh index 8d66cfddb3e..e8b7cda0dff 100755 --- a/tests/queries/0_stateless/01923_network_receive_time_metric_insert.sh +++ b/tests/queries/0_stateless/01923_network_receive_time_metric_insert.sh @@ -11,7 +11,8 @@ seq 1 1000 | pv --quiet --rate-limit 3893 | ${CLICKHOUSE_CLIENT} --query "INSERT # We check that the value of NetworkReceiveElapsedMicroseconds correctly includes the time spent waiting data from the client. ${CLICKHOUSE_CLIENT} --multiquery --query "SYSTEM FLUSH LOGS; - SELECT ProfileEvents.Values[indexOf(ProfileEvents.Names, 'NetworkReceiveElapsedMicroseconds')] >= 1000000 FROM system.query_log + WITH ProfileEvents.Values[indexOf(ProfileEvents.Names, 'NetworkReceiveElapsedMicroseconds')] AS time + SELECT time >= 1000000 ? 
1 : time FROM system.query_log WHERE current_database = currentDatabase() AND query_kind = 'Insert' AND event_date >= yesterday() AND type = 2 ORDER BY event_time DESC LIMIT 1;" ${CLICKHOUSE_CLIENT} --query "DROP TABLE t" From c977c33d6d60f042d3c1b7452cc982be17b01d14 Mon Sep 17 00:00:00 2001 From: alesapin Date: Sun, 27 Jun 2021 19:18:15 +0300 Subject: [PATCH 465/931] Fix bug in execution of TTL GROUP BY --- src/DataStreams/TTLAggregationAlgorithm.cpp | 154 ++++++++++-------- src/DataStreams/TTLColumnAlgorithm.cpp | 2 + src/DataStreams/TTLDeleteAlgorithm.cpp | 2 + .../MergeTree/MergeTreeDataPartTTLInfo.cpp | 51 ++++++ .../MergeTree/MergeTreeDataPartTTLInfo.h | 9 + src/Storages/MergeTree/TTLMergeSelector.cpp | 3 + src/Storages/StorageReplicatedMergeTree.cpp | 4 + tests/integration/test_ttl_replicated/test.py | 13 +- .../test.py | 5 +- 9 files changed, 174 insertions(+), 69 deletions(-) diff --git a/src/DataStreams/TTLAggregationAlgorithm.cpp b/src/DataStreams/TTLAggregationAlgorithm.cpp index 9a1cf45772f..6d5c234a074 100644 --- a/src/DataStreams/TTLAggregationAlgorithm.cpp +++ b/src/DataStreams/TTLAggregationAlgorithm.cpp @@ -36,88 +36,110 @@ TTLAggregationAlgorithm::TTLAggregationAlgorithm( storage_.getContext()->getTemporaryVolume(), settings.max_threads, settings.min_free_disk_space_for_temporary_data); aggregator = std::make_unique(params); + + if (isMinTTLExpired()) + new_ttl_info.finished = true; } void TTLAggregationAlgorithm::execute(Block & block) { - if (!block) - { - if (!aggregation_result.empty()) - { - MutableColumns result_columns = header.cloneEmptyColumns(); - finalizeAggregates(result_columns); - block = header.cloneWithColumns(std::move(result_columns)); - } - return; - } - - const auto & column_names = header.getNames(); + bool some_rows_were_aggregated = false; MutableColumns result_columns = header.cloneEmptyColumns(); - MutableColumns aggregate_columns = header.cloneEmptyColumns(); - auto ttl_column = executeExpressionAndGetColumn(description.expression, block, description.result_column); - auto where_column = executeExpressionAndGetColumn(description.where_expression, block, description.where_result_column); - - size_t rows_aggregated = 0; - size_t current_key_start = 0; - size_t rows_with_current_key = 0; - - for (size_t i = 0; i < block.rows(); ++i) + if (!block) /// Empty block -- no more data, but we may still have some accumulated rows { - UInt32 cur_ttl = getTimestampByIndex(ttl_column.get(), i); - bool where_filter_passed = !where_column || where_column->getBool(i); - bool ttl_expired = isTTLExpired(cur_ttl) && where_filter_passed; - - bool same_as_current = true; - for (size_t j = 0; j < description.group_by_keys.size(); ++j) + if (!aggregation_result.empty()) /// Still have some aggregated data, let's update TTL { - const String & key_column = description.group_by_keys[j]; - const IColumn * values_column = block.getByName(key_column).column.get(); - if (!same_as_current || (*values_column)[i] != current_key_value[j]) - { - values_column->get(i, current_key_value[j]); - same_as_current = false; - } - } - - if (!same_as_current) - { - if (rows_with_current_key) - calculateAggregates(aggregate_columns, current_key_start, rows_with_current_key); finalizeAggregates(result_columns); - - current_key_start = rows_aggregated; - rows_with_current_key = 0; + some_rows_were_aggregated = true; } - - if (ttl_expired) + else /// No block, all aggregated, just finish { - ++rows_with_current_key; - ++rows_aggregated; - for (const auto & name : column_names) - { - const 
IColumn * values_column = block.getByName(name).column.get(); - auto & column = aggregate_columns[header.getPositionByName(name)]; - column->insertFrom(*values_column, i); - } - } - else - { - new_ttl_info.update(cur_ttl); - for (const auto & name : column_names) - { - const IColumn * values_column = block.getByName(name).column.get(); - auto & column = result_columns[header.getPositionByName(name)]; - column->insertFrom(*values_column, i); - } + return; } } + else + { + const auto & column_names = header.getNames(); + MutableColumns aggregate_columns = header.cloneEmptyColumns(); - if (rows_with_current_key) - calculateAggregates(aggregate_columns, current_key_start, rows_with_current_key); + auto ttl_column = executeExpressionAndGetColumn(description.expression, block, description.result_column); + auto where_column = executeExpressionAndGetColumn(description.where_expression, block, description.where_result_column); + + size_t rows_aggregated = 0; + size_t current_key_start = 0; + size_t rows_with_current_key = 0; + + for (size_t i = 0; i < block.rows(); ++i) + { + UInt32 cur_ttl = getTimestampByIndex(ttl_column.get(), i); + bool where_filter_passed = !where_column || where_column->getBool(i); + bool ttl_expired = isTTLExpired(cur_ttl) && where_filter_passed; + + bool same_as_current = true; + for (size_t j = 0; j < description.group_by_keys.size(); ++j) + { + const String & key_column = description.group_by_keys[j]; + const IColumn * values_column = block.getByName(key_column).column.get(); + if (!same_as_current || (*values_column)[i] != current_key_value[j]) + { + values_column->get(i, current_key_value[j]); + same_as_current = false; + } + } + + if (!same_as_current) + { + if (rows_with_current_key) + { + some_rows_were_aggregated = true; + calculateAggregates(aggregate_columns, current_key_start, rows_with_current_key); + } + finalizeAggregates(result_columns); + + current_key_start = rows_aggregated; + rows_with_current_key = 0; + } + + if (ttl_expired) + { + ++rows_with_current_key; + ++rows_aggregated; + for (const auto & name : column_names) + { + const IColumn * values_column = block.getByName(name).column.get(); + auto & column = aggregate_columns[header.getPositionByName(name)]; + column->insertFrom(*values_column, i); + } + } + else + { + for (const auto & name : column_names) + { + const IColumn * values_column = block.getByName(name).column.get(); + auto & column = result_columns[header.getPositionByName(name)]; + column->insertFrom(*values_column, i); + } + } + } + + if (rows_with_current_key) + { + some_rows_were_aggregated = true; + calculateAggregates(aggregate_columns, current_key_start, rows_with_current_key); + } + } block = header.cloneWithColumns(std::move(result_columns)); + + /// If some rows were aggregated we have to recalculate ttl info's + if (some_rows_were_aggregated) + { + auto ttl_column_after_aggregation = executeExpressionAndGetColumn(description.expression, block, description.result_column); + for (size_t i = 0; i < block.rows(); ++i) + new_ttl_info.update(getTimestampByIndex(ttl_column_after_aggregation.get(), i)); + } } void TTLAggregationAlgorithm::calculateAggregates(const MutableColumns & aggregate_columns, size_t start_pos, size_t length) @@ -133,6 +155,7 @@ void TTLAggregationAlgorithm::calculateAggregates(const MutableColumns & aggrega aggregator->executeOnBlock(aggregate_chunk, length, aggregation_result, key_columns, columns_for_aggregator, no_more_keys); + } void TTLAggregationAlgorithm::finalizeAggregates(MutableColumns & result_columns) 
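For context: the rewritten `execute()` above implements ClickHouse's `TTL ... GROUP BY` feature. A minimal, illustrative table definition that exercises this code path (names are invented for the example; note that the `GROUP BY` keys must form a prefix of the sorting key):

``` sql
CREATE TABLE ttl_group_by_example
(
    date DateTime,
    id UInt32,
    val UInt64
)
ENGINE = MergeTree
ORDER BY (toStartOfDay(date), id)
TTL date + INTERVAL 1 MONTH GROUP BY toStartOfDay(date), id SET val = sum(val);
```

Expired rows are rolled up into one row per group instead of being deleted, which is why the algorithm has to finalize `aggregation_result` whenever the group key changes, and still flush it when an empty (final) block arrives.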
@@ -140,6 +163,7 @@ void TTLAggregationAlgorithm::finalizeAggregates(MutableColumns & result_columns
     if (!aggregation_result.empty())
     {
         auto aggregated_res = aggregator->convertToBlocks(aggregation_result, true, 1);
+
         for (auto & agg_block : aggregated_res)
         {
             for (const auto & it : description.set_parts)
diff --git a/src/DataStreams/TTLColumnAlgorithm.cpp b/src/DataStreams/TTLColumnAlgorithm.cpp
index 140631ac0bf..5c0a5e1ae83 100644
--- a/src/DataStreams/TTLColumnAlgorithm.cpp
+++ b/src/DataStreams/TTLColumnAlgorithm.cpp
@@ -21,6 +21,8 @@ TTLColumnAlgorithm::TTLColumnAlgorithm(
         new_ttl_info = old_ttl_info;
         is_fully_empty = false;
     }
+    else
+        new_ttl_info.finished = true;
 }

 void TTLColumnAlgorithm::execute(Block & block)
diff --git a/src/DataStreams/TTLDeleteAlgorithm.cpp b/src/DataStreams/TTLDeleteAlgorithm.cpp
index c364bb06f3e..f1bbe6d4b7d 100644
--- a/src/DataStreams/TTLDeleteAlgorithm.cpp
+++ b/src/DataStreams/TTLDeleteAlgorithm.cpp
@@ -9,6 +9,8 @@ TTLDeleteAlgorithm::TTLDeleteAlgorithm(
 {
     if (!isMinTTLExpired())
         new_ttl_info = old_ttl_info;
+    else
+        new_ttl_info.finished = true;
 }

 void TTLDeleteAlgorithm::execute(Block & block)
diff --git a/src/Storages/MergeTree/MergeTreeDataPartTTLInfo.cpp b/src/Storages/MergeTree/MergeTreeDataPartTTLInfo.cpp
index e130fbc1798..f1beb09c482 100644
--- a/src/Storages/MergeTree/MergeTreeDataPartTTLInfo.cpp
+++ b/src/Storages/MergeTree/MergeTreeDataPartTTLInfo.cpp
@@ -55,6 +55,10 @@ void MergeTreeDataPartTTLInfos::read(ReadBuffer & in)
             MergeTreeDataPartTTLInfo ttl_info;
             ttl_info.min = col["min"].getUInt();
             ttl_info.max = col["max"].getUInt();
+
+            if (col.has("finished"))
+                ttl_info.finished = col["finished"].getUInt();
+
             String name = col["name"].getString();
             columns_ttl.emplace(name, ttl_info);

@@ -67,6 +71,9 @@ void MergeTreeDataPartTTLInfos::read(ReadBuffer & in)
         table_ttl.min = table["min"].getUInt();
         table_ttl.max = table["max"].getUInt();

+        if (table.has("finished"))
+            table_ttl.finished = table["finished"].getUInt();
+
         updatePartMinMaxTTL(table_ttl.min, table_ttl.max);
     }

@@ -77,6 +84,10 @@ void MergeTreeDataPartTTLInfos::read(ReadBuffer & in)
             MergeTreeDataPartTTLInfo ttl_info;
             ttl_info.min = elem["min"].getUInt();
             ttl_info.max = elem["max"].getUInt();
+
+            if (elem.has("finished"))
+                ttl_info.finished = elem["finished"].getUInt();
+
             String expression = elem["expression"].getString();
             ttl_info_map.emplace(expression, ttl_info);

@@ -126,6 +137,8 @@ void MergeTreeDataPartTTLInfos::write(WriteBuffer & out) const
             writeIntText(it->second.min, out);
             writeString(",\"max\":", out);
             writeIntText(it->second.max, out);
+            writeString(R"(,"finished":)", out);
+            writeIntText(static_cast<UInt8>(it->second.finished), out);
             writeString("}", out);
         }
         writeString("]", out);
@@ -138,6 +151,8 @@ void MergeTreeDataPartTTLInfos::write(WriteBuffer & out) const
         writeIntText(table_ttl.min, out);
         writeString(R"(,"max":)", out);
         writeIntText(table_ttl.max, out);
+        writeString(R"(,"finished":)", out);
+        writeIntText(static_cast<UInt8>(table_ttl.finished), out);
         writeString("}", out);
     }

@@ -159,6 +174,8 @@ void MergeTreeDataPartTTLInfos::write(WriteBuffer & out) const
             writeIntText(it->second.min, out);
             writeString(R"(,"max":)", out);
             writeIntText(it->second.max, out);
+            writeString(R"(,"finished":)", out);
+            writeIntText(static_cast<UInt8>(it->second.finished), out);
             writeString("}", out);
         }
         writeString("]", out);
@@ -202,6 +219,39 @@ time_t MergeTreeDataPartTTLInfos::getMinimalMaxRecompressionTTL() const
     return max;
 }

+bool MergeTreeDataPartTTLInfos::hasAnyNonFinishedTTLs() const
+{
+    auto has_non_finished_ttl = [] (const TTLInfoMap & map) -> bool
+    {
+        for (const auto & [name, info] : map)
+        {
+            if (!info.finished)
+                return true;
+        }
+        return false;
+    };
+
+    if (!table_ttl.finished)
+        return true;
+
+    if (has_non_finished_ttl(columns_ttl))
+        return true;
+
+    if (has_non_finished_ttl(rows_where_ttl))
+        return true;
+
+    if (has_non_finished_ttl(moves_ttl))
+        return true;
+
+    if (has_non_finished_ttl(recompression_ttl))
+        return true;
+
+    if (has_non_finished_ttl(group_by_ttl))
+        return true;
+
+    return false;
+}
+
 std::optional<TTLDescription> selectTTLDescriptionForTTLInfos(const TTLDescriptions & descriptions, const TTLInfoMap & ttl_info_map, time_t current_time, bool use_max)
 {
     time_t best_ttl_time = 0;
@@ -232,4 +282,5 @@ std::optional<TTLDescription> selectTTLDescriptionForTTLInfos(const TTLDescripti
     return best_ttl_time ? *best_entry_it : std::optional<TTLDescription>();
 }
+
 }
diff --git a/src/Storages/MergeTree/MergeTreeDataPartTTLInfo.h b/src/Storages/MergeTree/MergeTreeDataPartTTLInfo.h
index 9d1606ee44a..2b79ad1aac5 100644
--- a/src/Storages/MergeTree/MergeTreeDataPartTTLInfo.h
+++ b/src/Storages/MergeTree/MergeTreeDataPartTTLInfo.h
@@ -14,6 +14,11 @@ struct MergeTreeDataPartTTLInfo
     time_t min = 0;
     time_t max = 0;

+    /// This TTL was computed on a completely expired part. It doesn't make sense
+    /// to select such parts for TTL again. But it makes sense to recalculate TTL
+    /// again when merging multiple parts.
+    bool finished = false;
+
     void update(time_t time)
     {
         if (time && (!min || time < min))
@@ -28,6 +33,7 @@ struct MergeTreeDataPartTTLInfo
             min = other_info.min;

         max = std::max(other_info.max, max);
+        finished &= other_info.finished;
     }
 };

@@ -60,6 +66,9 @@ struct MergeTreeDataPartTTLInfos
     void write(WriteBuffer & out) const;
     void update(const MergeTreeDataPartTTLInfos & other_infos);

+    /// Whether there are any TTLs which were not computed on completely expired parts.
+    bool hasAnyNonFinishedTTLs() const;
+
     void updatePartMinMaxTTL(time_t time_min, time_t time_max)
     {
         if (time_min && (!part_min_ttl || time_min < part_min_ttl))
diff --git a/src/Storages/MergeTree/TTLMergeSelector.cpp b/src/Storages/MergeTree/TTLMergeSelector.cpp
index fc7aa93e129..ab686c9952d 100644
--- a/src/Storages/MergeTree/TTLMergeSelector.cpp
+++ b/src/Storages/MergeTree/TTLMergeSelector.cpp
@@ -111,6 +111,9 @@ bool TTLDeleteMergeSelector::isTTLAlreadySatisfied(const IMergeSelector::Part &
     if (only_drop_parts)
         return false;

+    if (!part.ttl_infos->hasAnyNonFinishedTTLs())
+        return false;
+
     return !part.shall_participate_in_merges;
 }

diff --git a/src/Storages/StorageReplicatedMergeTree.cpp b/src/Storages/StorageReplicatedMergeTree.cpp
index e91f3d9554e..ea4376a56ec 100644
--- a/src/Storages/StorageReplicatedMergeTree.cpp
+++ b/src/Storages/StorageReplicatedMergeTree.cpp
@@ -551,6 +551,10 @@ void StorageReplicatedMergeTree::waitMutationToFinishOnReplicas(
             break;
         }

+        /// This replica is inactive, don't check anything
+        if (!inactive_replicas.empty() && inactive_replicas.count(replica))
+            break;
+
         /// It maybe already removed from zk, but local in-memory mutations
         /// state was not updated. 
if (!getZooKeeper()->exists(fs::path(zookeeper_path) / "mutations" / mutation_id)) diff --git a/tests/integration/test_ttl_replicated/test.py b/tests/integration/test_ttl_replicated/test.py index de5e5984082..f37c28b2a80 100644 --- a/tests/integration/test_ttl_replicated/test.py +++ b/tests/integration/test_ttl_replicated/test.py @@ -351,6 +351,7 @@ def test_ttl_compatibility(started_cluster, node_left, node_right, num_run): ENGINE = ReplicatedMergeTree('/clickhouse/tables/test/test_ttl_delete_{suff}', '{replica}') ORDER BY id PARTITION BY toDayOfMonth(date) TTL date + INTERVAL 3 SECOND + SETTINGS max_number_of_merges_with_ttl_in_pool=100, max_replicated_merges_with_ttl_in_queue=100 '''.format(suff=num_run, replica=node.name)) node.query( @@ -359,6 +360,7 @@ def test_ttl_compatibility(started_cluster, node_left, node_right, num_run): ENGINE = ReplicatedMergeTree('/clickhouse/tables/test/test_ttl_group_by_{suff}', '{replica}') ORDER BY id PARTITION BY toDayOfMonth(date) TTL date + INTERVAL 3 SECOND GROUP BY id SET val = sum(val) + SETTINGS max_number_of_merges_with_ttl_in_pool=100, max_replicated_merges_with_ttl_in_queue=100 '''.format(suff=num_run, replica=node.name)) node.query( @@ -367,6 +369,7 @@ def test_ttl_compatibility(started_cluster, node_left, node_right, num_run): ENGINE = ReplicatedMergeTree('/clickhouse/tables/test/test_ttl_where_{suff}', '{replica}') ORDER BY id PARTITION BY toDayOfMonth(date) TTL date + INTERVAL 3 SECOND DELETE WHERE id % 2 = 1 + SETTINGS max_number_of_merges_with_ttl_in_pool=100, max_replicated_merges_with_ttl_in_queue=100 '''.format(suff=num_run, replica=node.name)) node_left.query("INSERT INTO test_ttl_delete VALUES (now(), 1)") @@ -397,9 +400,9 @@ def test_ttl_compatibility(started_cluster, node_left, node_right, num_run): node_right.query("OPTIMIZE TABLE test_ttl_group_by FINAL") node_right.query("OPTIMIZE TABLE test_ttl_where FINAL") - exec_query_with_retry(node_left, "SYSTEM SYNC REPLICA test_ttl_delete") - node_left.query("SYSTEM SYNC REPLICA test_ttl_group_by", timeout=20) - node_left.query("SYSTEM SYNC REPLICA test_ttl_where", timeout=20) + exec_query_with_retry(node_left, "OPTIMIZE TABLE test_ttl_delete FINAL") + node_left.query("OPTIMIZE TABLE test_ttl_group_by FINAL", timeout=20) + node_left.query("OPTIMIZE TABLE test_ttl_where FINAL", timeout=20) # After OPTIMIZE TABLE, it is not guaranteed that everything is merged. 
# Possible scenario (for test_ttl_group_by):
@@ -414,6 +417,10 @@ def test_ttl_compatibility(started_cluster, node_left, node_right, num_run):
     node_right.query("SYSTEM SYNC REPLICA test_ttl_group_by", timeout=20)
     node_right.query("SYSTEM SYNC REPLICA test_ttl_where", timeout=20)

+    exec_query_with_retry(node_left, "SYSTEM SYNC REPLICA test_ttl_delete")
+    node_left.query("SYSTEM SYNC REPLICA test_ttl_group_by", timeout=20)
+    node_left.query("SYSTEM SYNC REPLICA test_ttl_where", timeout=20)
+
     assert node_left.query("SELECT id FROM test_ttl_delete ORDER BY id") == "2\n4\n"
     assert node_right.query("SELECT id FROM test_ttl_delete ORDER BY id") == "2\n4\n"

diff --git a/tests/integration/test_version_update_after_mutation/test.py b/tests/integration/test_version_update_after_mutation/test.py
index dd8e1bc7a9e..03387b0be67 100644
--- a/tests/integration/test_version_update_after_mutation/test.py
+++ b/tests/integration/test_version_update_after_mutation/test.py
@@ -79,7 +79,10 @@ def test_upgrade_while_mutation(start_cluster):

     node3.restart_with_latest_version(signal=9)

-    exec_query_with_retry(node3, "ALTER TABLE mt1 DELETE WHERE id > 100000", settings={"mutations_sync": "2"})
+    # wait until the replica becomes active
+    exec_query_with_retry(node3, "SYSTEM RESTART REPLICA mt1")
+
+    node3.query("ALTER TABLE mt1 DELETE WHERE id > 100000", settings={"mutations_sync": "2"}) # will delete nothing, but previous async mutation will finish with this query

     assert_eq_with_retry(node3, "SELECT COUNT() from mt1", "50000\n")

From db998c3f6caa7f4f20442caeacc818bbfec4bacd Mon Sep 17 00:00:00 2001
From: kssenii
Date: Sun, 27 Jun 2021 16:15:28 +0000
Subject: [PATCH 466/931] More robust

---
 src/Core/PostgreSQL/Connection.cpp            | 26 ++++++++-
 src/Core/PostgreSQL/Connection.h              |  6 +-
 .../MaterializePostgreSQLConsumer.cpp         | 58 +++++++++++++------
 .../PostgreSQLReplicationHandler.cpp          | 35 +++++++----
 .../test.py                                   | 40 ++++++++-----
 5 files changed, 117 insertions(+), 48 deletions(-)

diff --git a/src/Core/PostgreSQL/Connection.cpp b/src/Core/PostgreSQL/Connection.cpp
index ff6197d1390..71dfa7ea305 100644
--- a/src/Core/PostgreSQL/Connection.cpp
+++ b/src/Core/PostgreSQL/Connection.cpp
@@ -4,8 +4,8 @@
 namespace postgres
 {

-Connection::Connection(const ConnectionInfo & connection_info_, bool replication_)
-    : connection_info(connection_info_), replication(replication_)
+Connection::Connection(const ConnectionInfo & connection_info_, bool replication_, size_t num_tries_)
+    : connection_info(connection_info_), replication(replication_), num_tries(num_tries_)
 {
     if (replication)
     {
@@ -14,10 +14,30 @@ Connection::Connection(const ConnectionInfo & connection_info_, bool replication
     }
 }

+void Connection::execWithRetry(const std::function<void(pqxx::nontransaction &)> & exec)
+{
+    for (size_t try_no = 0; try_no < num_tries; ++try_no)
+    {
+        try
+        {
+            pqxx::nontransaction tx(getRef());
+            exec(tx);
+            break; /// success, no retry needed
+        }
+        catch (const pqxx::broken_connection & e)
+        {
+            LOG_DEBUG(&Poco::Logger::get("PostgreSQLReplicaConnection"),
+                "Cannot execute query due to connection failure, attempt: {}/{}. 
(Message: {})",
+                try_no, num_tries, e.what());

+            if (try_no + 1 == num_tries)
+                throw;
+        }
+    }
+}
+
 pqxx::connection & Connection::getRef()
 {
     connect();
-    assert(connection != nullptr);
     return *connection;
 }
diff --git a/src/Core/PostgreSQL/Connection.h b/src/Core/PostgreSQL/Connection.h
index 46646ea6f35..00cf0c737f6 100644
--- a/src/Core/PostgreSQL/Connection.h
+++ b/src/Core/PostgreSQL/Connection.h
@@ -13,7 +13,9 @@ using ConnectionPtr = std::unique_ptr<pqxx::connection>;
 class Connection : private boost::noncopyable
 {
 public:
-    Connection(const ConnectionInfo & connection_info_, bool replication_ = false);
+    Connection(const ConnectionInfo & connection_info_, bool replication_ = false, size_t num_tries = 3);
+
+    void execWithRetry(const std::function<void(pqxx::nontransaction &)> & exec);

     pqxx::connection & getRef();

@@ -24,6 +26,8 @@ public:
 private:
     ConnectionPtr connection;
     ConnectionInfo connection_info;
+
+    bool replication;
+    size_t num_tries;
 };

 }
diff --git a/src/Storages/PostgreSQL/MaterializePostgreSQLConsumer.cpp b/src/Storages/PostgreSQL/MaterializePostgreSQLConsumer.cpp
index b693dad6a68..b1325d9ca57 100644
--- a/src/Storages/PostgreSQL/MaterializePostgreSQLConsumer.cpp
+++ b/src/Storages/PostgreSQL/MaterializePostgreSQLConsumer.cpp
@@ -252,7 +252,7 @@ void MaterializePostgreSQLConsumer::processReplicationMessage(const char * repli
     /// Skip '\x'
     size_t pos = 2;
     char type = readInt8(replication_message, pos, size);
-    //LOG_DEBUG(log, "Message type: {}, lsn string: {}, lsn value {}", type, current_lsn, lsn_value);
+    // LOG_DEBUG(log, "Message type: {}, lsn string: {}, lsn value {}", type, current_lsn, lsn_value);

     switch (type)
     {
@@ -352,9 +352,9 @@ void MaterializePostgreSQLConsumer::processReplicationMessage(const char * repli
             constexpr size_t transaction_commit_timestamp_len = 8;
             pos += unused_flags_len + commit_lsn_len + transaction_end_lsn_len + transaction_commit_timestamp_len;

-            final_lsn = current_lsn;
-            LOG_DEBUG(log, "Commit lsn: {}", getLSNValue(current_lsn)); /// Will be removed
+            LOG_DEBUG(log, "Current lsn: {} = {}", current_lsn, getLSNValue(current_lsn)); /// Will be removed

+            final_lsn = current_lsn;
             break;
         }
         case 'R': // Relation
@@ -458,9 +458,9 @@ void MaterializePostgreSQLConsumer::processReplicationMessage(const char * repli

 void MaterializePostgreSQLConsumer::syncTables(std::shared_ptr<pqxx::nontransaction> tx)
 {
-    for (const auto & table_name : tables_to_sync)
+    try
     {
-        try
+        for (const auto & table_name : tables_to_sync)
         {
             auto & buffer = buffers.find(table_name)->second;
             Block result_rows = buffer.description.sample_block.cloneWithColumns(std::move(buffer.columns));
@@ -483,19 +483,20 @@ void MaterializePostgreSQLConsumer::syncTables(std::shared_ptr<pqxx::nontransaction> tx)
             assertBlocksHaveEqualStructure(input.getHeader(), block_io.out->getHeader(), "postgresql replica table sync");
             copyData(input, *block_io.out);

-            current_lsn = advanceLSN(tx);
             buffer.columns = buffer.description.sample_block.cloneEmptyColumns();
         }
     }
-    catch (...)
-    {
-        tryLogCurrentException(__PRETTY_FUNCTION__);
-    }
-    }

-    LOG_DEBUG(log, "Table sync end for {} tables", tables_to_sync.size());
-    tables_to_sync.clear();
-    tx->commit();
+        LOG_DEBUG(log, "Table sync end for {} tables, last lsn: {} = {}, (attempted lsn {})", tables_to_sync.size(), current_lsn, getLSNValue(current_lsn), getLSNValue(final_lsn));
+
+        current_lsn = advanceLSN(tx);
+        tables_to_sync.clear();
+        tx->commit();
+    }
+    catch (...)
+    {
+        tryLogCurrentException(__PRETTY_FUNCTION__);
+    }
 }

@@ -507,6 +508,7 @@ String MaterializePostgreSQLConsumer::advanceLSN(std::shared_ptr<pqxx::nontransaction> tx)
     final_lsn = result[0][0].as<std::string>();
+    LOG_TRACE(log, "Advanced LSN up to: {}", final_lsn);

     return final_lsn;
 }

@@ -622,14 +624,34 @@ bool MaterializePostgreSQLConsumer::readFromReplicationSlot()
         return false;
     }
-    catch (const Exception & e)
+    catch (const pqxx::conversion_error & e)
+    {
+        LOG_ERROR(log, "Conversion error: {}", e.what());
+        return false;
+    }
+    catch (const pqxx::broken_connection & e)
+    {
+        LOG_ERROR(log, "Connection error: {}", e.what());
+        return false;
+    }
+    catch (const Exception &)
     {
-        if (e.code() == ErrorCodes::UNKNOWN_TABLE)
-            throw;
-
         tryLogCurrentException(__PRETTY_FUNCTION__);
         return false;
     }
+    catch (...)
+    {
+        /// Since reading is done from a background task, it is important to catch any possible error
+        /// in order to understand why something does not work.
+        try
+        {
+            std::rethrow_exception(std::current_exception());
+        }
+        catch (const std::exception & e)
+        {
+            LOG_ERROR(log, "Unexpected error: {}", e.what());
+        }
+    }

     if (!tables_to_sync.empty())
     {
diff --git a/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp b/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp
index 33d5c49ec09..46fedb99b62 100644
--- a/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp
+++ b/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp
@@ -15,7 +15,7 @@ namespace DB
 {

 static const auto RESCHEDULE_MS = 500;
-static const auto BACKOFF_TRESHOLD = 32000;
+static const auto BACKOFF_TRESHOLD = 5000;

 namespace ErrorCodes
 {
@@ -255,18 +255,25 @@ void PostgreSQLReplicationHandler::consumerFunc()
     }

     if (stop_synchronization)
+    {
+        LOG_TRACE(log, "Replication thread is stopped");
         return;
+    }

     if (schedule_now)
     {
-        consumer_task->schedule();
         milliseconds_to_wait = RESCHEDULE_MS;
+        consumer_task->schedule();
+
+        LOG_DEBUG(log, "Scheduling replication thread: now");
     }
     else
     {
         consumer_task->scheduleAfter(milliseconds_to_wait);
         if (milliseconds_to_wait < BACKOFF_TRESHOLD)
             milliseconds_to_wait *= 2;
+
+        LOG_TRACE(log, "Scheduling replication thread: after {} ms", milliseconds_to_wait);
     }
 }

@@ -397,16 +404,24 @@ void PostgreSQLReplicationHandler::dropPublication(pqxx::nontransaction & tx)
 void PostgreSQLReplicationHandler::shutdownFinal()
 {
-    pqxx::nontransaction tx(connection->getRef());
-    dropPublication(tx);
-    String last_committed_lsn;
     try
     {
-        if (isReplicationSlotExist(tx, last_committed_lsn, /* temporary */false))
-            dropReplicationSlot(tx, /* temporary */false);
-        if (isReplicationSlotExist(tx, last_committed_lsn, /* temporary */true))
-            dropReplicationSlot(tx, /* temporary */true);
-        tx.commit();
+        shutdown();
+
+        connection->execWithRetry([&](pqxx::nontransaction & tx){ dropPublication(tx); });
+        String last_committed_lsn;
+
+        connection->execWithRetry([&](pqxx::nontransaction & tx)
+        {
+            if (isReplicationSlotExist(tx, last_committed_lsn, /* temporary */false))
+                dropReplicationSlot(tx, /* temporary */false);
+        });
+
+        connection->execWithRetry([&](pqxx::nontransaction & tx)
+        {
+            if (isReplicationSlotExist(tx, last_committed_lsn, /* temporary */true))
+                dropReplicationSlot(tx, /* temporary */true);
+        });
     }
     catch (Exception & e)
     {
diff --git a/tests/integration/test_postgresql_replica_database_engine/test.py b/tests/integration/test_postgresql_replica_database_engine/test.py
index 685ed85d8f4..d1e590704fd 100644
--- a/tests/integration/test_postgresql_replica_database_engine/test.py
+++ b/tests/integration/test_postgresql_replica_database_engine/test.py
@@ -145,11 
+145,6 @@ def started_cluster(): cluster.shutdown() -@pytest.fixture(autouse=True) -def postgresql_setup_teardown(): - yield # run test - - @pytest.mark.timeout(120) def test_load_and_sync_all_database_tables(started_cluster): instance.query("DROP DATABASE IF EXISTS test_database") @@ -642,19 +637,25 @@ def test_multiple_databases(started_cluster): instance.query("DROP DATABASE IF EXISTS test_database_2") NUM_TABLES = 5 - conn = get_postgres_conn() + conn = get_postgres_conn(ip=started_cluster.postgres_ip, + port=started_cluster.postgres_port, + database=False) cursor = conn.cursor() create_postgres_db(cursor, 'postgres_database_1') create_postgres_db(cursor, 'postgres_database_2') - conn1 = get_postgres_conn(True, True, 'postgres_database_1') - conn2 = get_postgres_conn(True, True, 'postgres_database_2') + conn1 = get_postgres_conn(ip=started_cluster.postgres_ip, + port=started_cluster.postgres_port, + database=True, database_name='postgres_database_1') + conn2 = get_postgres_conn(ip=started_cluster.postgres_ip, + port=started_cluster.postgres_port, + database=True, database_name='postgres_database_2') cursor1 = conn1.cursor() cursor2 = conn2.cursor() - create_clickhouse_postgres_db('postgres_database_1') - create_clickhouse_postgres_db('postgres_database_2') + create_clickhouse_postgres_db(cluster.postgres_ip, cluster.postgres_port, 'postgres_database_1') + create_clickhouse_postgres_db(cluster.postgres_ip, cluster.postgres_port, 'postgres_database_2') cursors = [cursor1, cursor2] for cursor_id in range(len(cursors)): @@ -665,8 +666,10 @@ def test_multiple_databases(started_cluster): print('database 1 tables: ', instance.query('''SELECT name FROM system.tables WHERE database = 'postgres_database_1';''')) print('database 2 tables: ', instance.query('''SELECT name FROM system.tables WHERE database = 'postgres_database_2';''')) - create_materialized_db('test_database_1', 'postgres_database_1') - create_materialized_db('test_database_2', 'postgres_database_2') + create_materialized_db(started_cluster.postgres_ip, started_cluster.postgres_port, + 'test_database_1', 'postgres_database_1') + create_materialized_db(started_cluster.postgres_ip, started_cluster.postgres_port, + 'test_database_2', 'postgres_database_2') cursors = [cursor1, cursor2] for cursor_id in range(len(cursors)): @@ -689,7 +692,9 @@ def test_multiple_databases(started_cluster): @pytest.mark.timeout(320) def test_concurrent_transactions(started_cluster): instance.query("DROP DATABASE IF EXISTS test_database") - conn = get_postgres_conn(True) + conn = get_postgres_conn(ip=started_cluster.postgres_ip, + port=started_cluster.postgres_port, + database=True) cursor = conn.cursor() NUM_TABLES = 6 @@ -697,19 +702,22 @@ def test_concurrent_transactions(started_cluster): create_postgres_table(cursor, 'postgresql_replica_{}'.format(i)); def transaction(thread_id): - conn_ = get_postgres_conn(True, auto_commit=False) + conn = get_postgres_conn(ip=started_cluster.postgres_ip, + port=started_cluster.postgres_port, + database=True, auto_commit=False) cursor_ = conn.cursor() for query in queries: cursor_.execute(query.format(thread_id)) print('thread {}, query {}'.format(thread_id, query)) - conn_.commit() + conn.commit() threads = [] threads_num = 6 for i in range(threads_num): threads.append(threading.Thread(target=transaction, args=(i,))) - create_materialized_db() + create_materialized_db(ip=started_cluster.postgres_ip, + port=started_cluster.postgres_port) for thread in threads: time.sleep(random.uniform(0, 0.5)) From 
ec834fe213fb37d9af5cc91a03f88e23877f928b Mon Sep 17 00:00:00 2001 From: alexey-milovidov Date: Sun, 27 Jun 2021 19:31:55 +0300 Subject: [PATCH 467/931] Update 01917_prewhere_column_type.sql --- tests/queries/0_stateless/01917_prewhere_column_type.sql | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/queries/0_stateless/01917_prewhere_column_type.sql b/tests/queries/0_stateless/01917_prewhere_column_type.sql index 4046eb4d891..5147e6093a9 100644 --- a/tests/queries/0_stateless/01917_prewhere_column_type.sql +++ b/tests/queries/0_stateless/01917_prewhere_column_type.sql @@ -1,6 +1,6 @@ DROP TABLE IF EXISTS t1; -CREATE TABLE t1 ( s String, f Float32, e UInt16 ) ENGINE = MergeTree ORDER BY tuple(); +CREATE TABLE t1 ( s String, f Float32, e UInt16 ) ENGINE = MergeTree ORDER BY tuple() SETTINGS min_bytes_for_wide_part = '100G'; INSERT INTO t1 VALUES ('111', 1, 1); From 534d33fb9d9b9b7499fb5bd79374e74f4a8d4d75 Mon Sep 17 00:00:00 2001 From: Kseniia Sumarokova <54203879+kssenii@users.noreply.github.com> Date: Sun, 27 Jun 2021 20:21:40 +0300 Subject: [PATCH 468/931] Update settings.md --- docs/en/operations/settings/settings.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/en/operations/settings/settings.md b/docs/en/operations/settings/settings.md index d5ba2d5a653..056682a824e 100644 --- a/docs/en/operations/settings/settings.md +++ b/docs/en/operations/settings/settings.md @@ -3158,4 +3158,4 @@ Default value: `1`. **Usage** -If the setting is set to `0`, the table function does not make Nullable columns and insert default values instead of NULL. This is also applicable for NULL values inside arrays. +If the setting is set to `0`, the table function does not make Nullable columns and inserts default values instead of NULL. This is also applicable for NULL values inside arrays. From c379a0b226983f399871ceddea7c46a4f39a9d3c Mon Sep 17 00:00:00 2001 From: Dmitriy Date: Sun, 27 Jun 2021 20:35:53 +0300 Subject: [PATCH 469/931] Update mysql and postgresql engines MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Внес небольшие поправки. --- docs/en/engines/table-engines/integrations/mysql.md | 2 +- docs/en/engines/table-engines/integrations/postgresql.md | 2 +- docs/ru/engines/table-engines/integrations/mysql.md | 6 +++--- docs/ru/engines/table-engines/integrations/postgresql.md | 6 +++--- 4 files changed, 8 insertions(+), 8 deletions(-) diff --git a/docs/en/engines/table-engines/integrations/mysql.md b/docs/en/engines/table-engines/integrations/mysql.md index 1b6d1893cc2..0815afb89c5 100644 --- a/docs/en/engines/table-engines/integrations/mysql.md +++ b/docs/en/engines/table-engines/integrations/mysql.md @@ -29,7 +29,7 @@ The table structure can differ from the original MySQL table structure: - Column names should be the same as in the original MySQL table, but you can use just some of these columns and in any order. - Column types may differ from those in the original MySQL table. ClickHouse tries to [cast](../../../engines/database-engines/mysql.md#data_types-support) values to the ClickHouse data types. -- Setting `external_table_functions_use_nulls` defines how to handle Nullable columns. Default value: 1. If 0, the table function will not make nullable columns and will insert default values instead of nulls. This is also applicable for null values inside array data types. +- Setting `external_table_functions_use_nulls` defines how to handle Nullable columns. Default value: 1. 
If 0, the table function does not make Nullable columns and insert default values instead of nulls. This is also applicable for NULL values inside arrays. **Engine Parameters** diff --git a/docs/en/engines/table-engines/integrations/postgresql.md b/docs/en/engines/table-engines/integrations/postgresql.md index 5d9516aeefc..7cf36447049 100644 --- a/docs/en/engines/table-engines/integrations/postgresql.md +++ b/docs/en/engines/table-engines/integrations/postgresql.md @@ -24,7 +24,7 @@ The table structure can differ from the original PostgreSQL table structure: - Column names should be the same as in the original PostgreSQL table, but you can use just some of these columns and in any order. - Column types may differ from those in the original PostgreSQL table. ClickHouse tries to [cast](../../../engines/database-engines/postgresql.md#data_types-support) values to the ClickHouse data types. -- Setting `external_table_functions_use_nulls` defines how to handle Nullable columns. Default value: 1. If 0, the table function will not make nullable columns and will insert default values instead of nulls. This is also applicable for null values inside array data types. +- Setting `external_table_functions_use_nulls` defines how to handle Nullable columns. Default value: 1. If 0, the table function does not make Nullable columns and insert default values instead of nulls. This is also applicable for NULL values inside arrays. **Engine Parameters** diff --git a/docs/ru/engines/table-engines/integrations/mysql.md b/docs/ru/engines/table-engines/integrations/mysql.md index e27f63b9a62..0e840d207a2 100644 --- a/docs/ru/engines/table-engines/integrations/mysql.md +++ b/docs/ru/engines/table-engines/integrations/mysql.md @@ -20,11 +20,11 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster] Смотрите подробное описание запроса [CREATE TABLE](../../../sql-reference/statements/create/table.md#create-table-query). -Структура таблицы может отличаться от исходной структуры таблицы MySQL: +Структура таблицы может отличаться от структуры исходной таблицы MySQL: -- Имена столбцов должны быть такими же, как в исходной таблице MySQL, но вы можете использовать только некоторые из этих столбцов и в любом порядке. +- Имена столбцов должны быть такими же, как в исходной таблице MySQL, но можно использовать только некоторые из этих столбцов и в любом порядке. - Типы столбцов могут отличаться от типов в исходной таблице MySQL. ClickHouse пытается [привести](../../../engines/database-engines/mysql.md#data_types-support) значения к типам данных ClickHouse. -- Настройка `external_table_functions_use_nulls` определяет как обрабатывать Nullable столбцы. Значение по умолчанию: 1. Если значение 0, то табличная функция не будет делать nullable столбцы и будет вместо null выставлять значения по умолчанию для скалярного типа. Это также применимо для null значений внутри массивов. +- Настройка `external_table_functions_use_nulls` определяет как обрабатывать Nullable столбцы. Значение по умолчанию: 1. Если значение 0, то табличная функция не делает Nullable столбцы, а вместо NULL выставляет значения по умолчанию для скалярного типа. Это также применимо для значений NULL внутри массивов. 
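To make the documented behavior concrete, here is a hypothetical session against the `mysql` table function (server address, table, and values are invented; assume `x` is a nullable integer column holding one NULL on the MySQL side):

``` sql
SELECT x, toTypeName(x) FROM mysql('mysql1:3306', 'db', 't', 'user', 'password');
-- NULL    Nullable(Int32)

SET external_table_functions_use_nulls = 0;
SELECT x, toTypeName(x) FROM mysql('mysql1:3306', 'db', 't', 'user', 'password');
-- 0       Int32
```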
**Параметры движка** diff --git a/docs/ru/engines/table-engines/integrations/postgresql.md b/docs/ru/engines/table-engines/integrations/postgresql.md index 4cc93beaf08..c293b6f5736 100644 --- a/docs/ru/engines/table-engines/integrations/postgresql.md +++ b/docs/ru/engines/table-engines/integrations/postgresql.md @@ -20,11 +20,11 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster] Смотрите подробное описание запроса [CREATE TABLE](../../../sql-reference/statements/create/table.md#create-table-query). -Структура таблицы может отличаться от исходной структуры таблицы PostgreSQL: +Структура таблицы может отличаться от структуры исходной таблицы PostgreSQL: -- Имена столбцов должны быть такими же, как в исходной таблице PostgreSQL, но вы можете использовать только некоторые из этих столбцов и в любом порядке. +- Имена столбцов должны быть такими же, как в исходной таблице PostgreSQL, но можно использовать только некоторые из этих столбцов и в любом порядке. - Типы столбцов могут отличаться от типов в исходной таблице PostgreSQL. ClickHouse пытается [привести](../../../engines/database-engines/postgresql.md#data_types-support) значения к типам данных ClickHouse. -- Настройка `external_table_functions_use_nulls` определяет как обрабатывать Nullable столбцы. Значение по умолчанию: 1. Если значение 0, то табличная функция не будет делать nullable столбцы и будет вместо null выставлять значения по умолчанию для скалярного типа. Это также применимо для null значений внутри массивов. +- Настройка `external_table_functions_use_nulls` определяет как обрабатывать Nullable столбцы. Значение по умолчанию: 1. Если значение 0, то табличная функция не делает Nullable столбцы, а вместо NULL выставляет значения по умолчанию для скалярного типа. Это также применимо для значений NULL внутри массивов. 
**Параметры движка** From 2b46006774c9e90125633046e542736c16e50268 Mon Sep 17 00:00:00 2001 From: Maksim Kita Date: Sun, 27 Jun 2021 20:44:31 +0300 Subject: [PATCH 470/931] DatabaseAtomic EXCHANGE DICTIONARIES fix test --- .../0_stateless/01913_replace_dictionary.sql | 1 + .../01914_exchange_dictionaries.reference | 2 +- .../01914_exchange_dictionaries.sql | 47 ++++++++++--------- .../01915_create_or_replace_dictionary.sql | 1 + 4 files changed, 29 insertions(+), 22 deletions(-) diff --git a/tests/queries/0_stateless/01913_replace_dictionary.sql b/tests/queries/0_stateless/01913_replace_dictionary.sql index 22b0bd002ae..43dd460707a 100644 --- a/tests/queries/0_stateless/01913_replace_dictionary.sql +++ b/tests/queries/0_stateless/01913_replace_dictionary.sql @@ -44,6 +44,7 @@ LIFETIME(0); SELECT * FROM 01913_db.test_dictionary; DROP DICTIONARY 01913_db.test_dictionary; + DROP TABLE 01913_db.test_source_table_1; DROP TABLE 01913_db.test_source_table_2; diff --git a/tests/queries/0_stateless/01914_exchange_dictionaries.reference b/tests/queries/0_stateless/01914_exchange_dictionaries.reference index 9278d0abeed..d176a0d7396 100644 --- a/tests/queries/0_stateless/01914_exchange_dictionaries.reference +++ b/tests/queries/0_stateless/01914_exchange_dictionaries.reference @@ -1,4 +1,4 @@ 1 Table1 2 Table2 -2 Table2 1 Table1 +2 Table2 diff --git a/tests/queries/0_stateless/01914_exchange_dictionaries.sql b/tests/queries/0_stateless/01914_exchange_dictionaries.sql index ba0c70d13be..840fb43b8b2 100644 --- a/tests/queries/0_stateless/01914_exchange_dictionaries.sql +++ b/tests/queries/0_stateless/01914_exchange_dictionaries.sql @@ -1,34 +1,39 @@ -DROP TABLE IF EXISTS table_1; -CREATE TABLE table_1 (id UInt64, value String) ENGINE=TinyLog; +DROP DATABASE IF EXISTS 01915_db; +CREATE DATABASE 01915_db ENGINE=Atomic; -DROP TABLE IF EXISTS table_2; -CREATE TABLE table_2 (id UInt64, value String) ENGINE=TinyLog; +DROP TABLE IF EXISTS 01915_db.table_1; +CREATE TABLE 01915_db.table_1 (id UInt64, value String) ENGINE=TinyLog; -INSERT INTO table_1 VALUES (1, 'Table1'); -INSERT INTO table_2 VALUES (2, 'Table2'); +DROP TABLE IF EXISTS 01915_db.table_2; +CREATE TABLE 01915_db.table_2 (id UInt64, value String) ENGINE=TinyLog; -DROP DICTIONARY IF EXISTS dictionary_1; -CREATE DICTIONARY dictionary_1 (id UInt64, value String) +INSERT INTO 01915_db.table_1 VALUES (1, 'Table1'); +INSERT INTO 01915_db.table_2 VALUES (2, 'Table2'); + +DROP DICTIONARY IF EXISTS 01915_db.dictionary_1; +CREATE DICTIONARY 01915_db.dictionary_1 (id UInt64, value String) PRIMARY KEY id LAYOUT(DIRECT()) -SOURCE(CLICKHOUSE(TABLE 'table_1')); +SOURCE(CLICKHOUSE(DB '01915_db' TABLE 'table_1')); -DROP DICTIONARY IF EXISTS dictionary_2; -CREATE DICTIONARY dictionary_2 (id UInt64, value String) +DROP DICTIONARY IF EXISTS 01915_db.dictionary_2; +CREATE DICTIONARY 01915_db.dictionary_2 (id UInt64, value String) PRIMARY KEY id LAYOUT(DIRECT()) -SOURCE(CLICKHOUSE(TABLE 'table_2')); +SOURCE(CLICKHOUSE(DB '01915_db' TABLE 'table_2')); -SELECT * FROM dictionary_1; -SELECT * FROM dictionary_2; +SELECT * FROM 01915_db.dictionary_1; +SELECT * FROM 01915_db.dictionary_2; -EXCHANGE DICTIONARIES dictionary_1 AND dictionary_2; +EXCHANGE DICTIONARIES 01915_db.dictionary_1 AND 01915_db.dictionary_2; -SELECT * FROM dictionary_1; -SELECT * FROM dictionary_2; +SELECT * FROM 01915_db.dictionary_1; +SELECT * FROM 01915_db.dictionary_2; -DROP DICTIONARY dictionary_1; -DROP DICTIONARY dictionary_2; +DROP DICTIONARY 01915_db.dictionary_1; +DROP DICTIONARY 
01915_db.dictionary_2; -DROP TABLE table_1; -DROP TABLE table_2; +DROP TABLE 01915_db.table_1; +DROP TABLE 01915_db.table_2; + +DROP DATABASE 01915_db; diff --git a/tests/queries/0_stateless/01915_create_or_replace_dictionary.sql b/tests/queries/0_stateless/01915_create_or_replace_dictionary.sql index 5d5515f4f8a..c9df6114ec9 100644 --- a/tests/queries/0_stateless/01915_create_or_replace_dictionary.sql +++ b/tests/queries/0_stateless/01915_create_or_replace_dictionary.sql @@ -44,6 +44,7 @@ LIFETIME(0); SELECT * FROM 01915_db.test_dictionary; DROP DICTIONARY 01915_db.test_dictionary; + DROP TABLE 01915_db.test_source_table_1; DROP TABLE 01915_db.test_source_table_2; From e6adc405d823a1a9be8edd0bb6107836d1c190cc Mon Sep 17 00:00:00 2001 From: Maksim Kita Date: Fri, 14 May 2021 23:07:08 +0300 Subject: [PATCH 471/931] DateTime timezone fix --- src/DataTypes/getLeastSupertype.cpp | 15 ++++++++++++++- src/Functions/DateTimeTransforms.h | 18 +++++++++++------- 2 files changed, 25 insertions(+), 8 deletions(-) diff --git a/src/DataTypes/getLeastSupertype.cpp b/src/DataTypes/getLeastSupertype.cpp index a04d16ef7cd..4614d65ed8a 100644 --- a/src/DataTypes/getLeastSupertype.cpp +++ b/src/DataTypes/getLeastSupertype.cpp @@ -288,9 +288,18 @@ DataTypePtr getLeastSupertype(const DataTypes & types) ErrorCodes::NO_COMMON_TYPE); if (have_datetime64 == 0) + { + for (const auto & t : types) + { + if (const auto * data_type = typeid_cast(t.get())) + return std::make_shared(data_type->getTimeZone().getTimeZone()); + } + return std::make_shared(); + } UInt8 max_scale = 0; + const DataTypeDateTime64 * max_scale_date_time = nullptr; for (const auto & t : types) { @@ -298,11 +307,15 @@ DataTypePtr getLeastSupertype(const DataTypes & types) { const auto scale = dt64->getScale(); if (scale > max_scale) + { + max_scale_date_time = dt64; max_scale = scale; + } } } - return std::make_shared(max_scale); + assert(max_scale_date_time); + return std::make_shared(max_scale, max_scale_date_time->getTimeZone().getTimeZone()); } } diff --git a/src/Functions/DateTimeTransforms.h b/src/Functions/DateTimeTransforms.h index 03f35333150..d4e1ad25084 100644 --- a/src/Functions/DateTimeTransforms.h +++ b/src/Functions/DateTimeTransforms.h @@ -4,6 +4,7 @@ #include #include #include +#include #include #include #include @@ -863,19 +864,22 @@ struct DateTimeTransformImpl { using Op = Transformer; - size_t time_zone_argument_position = 1; - if constexpr (std::is_same_v) - time_zone_argument_position = 2; - - const DateLUTImpl & time_zone = extractTimeZoneFromFunctionArguments(arguments, time_zone_argument_position, 0); - const ColumnPtr source_col = arguments[0].column; if (const auto * sources = checkAndGetColumn(source_col.get())) { auto mutable_result_col = result_type->createColumn(); auto * col_to = assert_cast(mutable_result_col.get()); - Op::vector(sources->getData(), col_to->getData(), time_zone, transform); + WhichDataType result_data_type(result_type); + if (result_data_type.isDateOrDateTime()) + { + const auto & time_zone = dynamic_cast(*result_type).getTimeZone(); + Op::vector(sources->getData(), col_to->getData(), time_zone, transform); + } + else + { + Op::vector(sources->getData(), col_to->getData(), DateLUT::instance(), transform); + } return mutable_result_col; } From 61b116332d44839523f41bd8dedd40151b5a80c0 Mon Sep 17 00:00:00 2001 From: Maksim Kita Date: Sat, 15 May 2021 00:46:17 +0300 Subject: [PATCH 472/931] Fixed tests --- src/Functions/DateTimeTransforms.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff 
--git a/src/Functions/DateTimeTransforms.h b/src/Functions/DateTimeTransforms.h index d4e1ad25084..70035cdda30 100644 --- a/src/Functions/DateTimeTransforms.h +++ b/src/Functions/DateTimeTransforms.h @@ -871,7 +871,7 @@ struct DateTimeTransformImpl auto * col_to = assert_cast(mutable_result_col.get()); WhichDataType result_data_type(result_type); - if (result_data_type.isDateOrDateTime()) + if (result_data_type.isDateTime() || result_data_type.isDateTime64()) { const auto & time_zone = dynamic_cast(*result_type).getTimeZone(); Op::vector(sources->getData(), col_to->getData(), time_zone, transform); From 253f249f3d543e1c9c35095e81d07bf3cdab57c4 Mon Sep 17 00:00:00 2001 From: Dmitriy Date: Sun, 27 Jun 2021 20:57:46 +0300 Subject: [PATCH 473/931] Add links MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Добавил ссылку на настройку external_table_functions_use_nulls. --- docs/en/engines/table-engines/integrations/mysql.md | 2 +- docs/en/engines/table-engines/integrations/odbc.md | 2 +- docs/en/engines/table-engines/integrations/postgresql.md | 2 +- docs/ru/engines/table-engines/integrations/mysql.md | 2 +- docs/ru/engines/table-engines/integrations/odbc.md | 2 +- docs/ru/engines/table-engines/integrations/postgresql.md | 2 +- 6 files changed, 6 insertions(+), 6 deletions(-) diff --git a/docs/en/engines/table-engines/integrations/mysql.md b/docs/en/engines/table-engines/integrations/mysql.md index 0815afb89c5..a6402e00bc9 100644 --- a/docs/en/engines/table-engines/integrations/mysql.md +++ b/docs/en/engines/table-engines/integrations/mysql.md @@ -29,7 +29,7 @@ The table structure can differ from the original MySQL table structure: - Column names should be the same as in the original MySQL table, but you can use just some of these columns and in any order. - Column types may differ from those in the original MySQL table. ClickHouse tries to [cast](../../../engines/database-engines/mysql.md#data_types-support) values to the ClickHouse data types. -- Setting `external_table_functions_use_nulls` defines how to handle Nullable columns. Default value: 1. If 0, the table function does not make Nullable columns and insert default values instead of nulls. This is also applicable for NULL values inside arrays. +- The [external_table_functions_use_nulls](../../../operations/settings/settings.md#external-table-functions-use-nulls) setting defines how to handle Nullable columns. Default value: 1. If 0, the table function does not make Nullable columns and inserts default values instead of nulls. This is also applicable for NULL values inside arrays. **Engine Parameters** diff --git a/docs/en/engines/table-engines/integrations/odbc.md b/docs/en/engines/table-engines/integrations/odbc.md index 26bfb6aeb0d..ab39fb7a811 100644 --- a/docs/en/engines/table-engines/integrations/odbc.md +++ b/docs/en/engines/table-engines/integrations/odbc.md @@ -29,7 +29,7 @@ The table structure can differ from the source table structure: - Column names should be the same as in the source table, but you can use just some of these columns and in any order. - Column types may differ from those in the source table. ClickHouse tries to [cast](../../../sql-reference/functions/type-conversion-functions.md#type_conversion_function-cast) values to the ClickHouse data types. -- Setting `external_table_functions_use_nulls` defines how to handle Nullable columns. Default is true, if false - table function will not make nullable columns and will insert default values instead of nulls. 
This is also applicable for null values inside array data types. +- The [external_table_functions_use_nulls](../../../operations/settings/settings.md#external-table-functions-use-nulls) setting defines how to handle Nullable columns. Default value: 1. If 0, the table function does not make Nullable columns and inserts default values instead of nulls. This is also applicable for NULL values inside arrays. **Engine Parameters** diff --git a/docs/en/engines/table-engines/integrations/postgresql.md b/docs/en/engines/table-engines/integrations/postgresql.md index 7cf36447049..1a8f2c4b758 100644 --- a/docs/en/engines/table-engines/integrations/postgresql.md +++ b/docs/en/engines/table-engines/integrations/postgresql.md @@ -24,7 +24,7 @@ The table structure can differ from the original PostgreSQL table structure: - Column names should be the same as in the original PostgreSQL table, but you can use just some of these columns and in any order. - Column types may differ from those in the original PostgreSQL table. ClickHouse tries to [cast](../../../engines/database-engines/postgresql.md#data_types-support) values to the ClickHouse data types. -- Setting `external_table_functions_use_nulls` defines how to handle Nullable columns. Default value: 1. If 0, the table function does not make Nullable columns and insert default values instead of nulls. This is also applicable for NULL values inside arrays. +- The [external_table_functions_use_nulls](../../../operations/settings/settings.md#external-table-functions-use-nulls) setting defines how to handle Nullable columns. Default value: 1. If 0, the table function does not make Nullable columns and inserts default values instead of nulls. This is also applicable for NULL values inside arrays. **Engine Parameters** diff --git a/docs/ru/engines/table-engines/integrations/mysql.md b/docs/ru/engines/table-engines/integrations/mysql.md index 0e840d207a2..486a432d86c 100644 --- a/docs/ru/engines/table-engines/integrations/mysql.md +++ b/docs/ru/engines/table-engines/integrations/mysql.md @@ -24,7 +24,7 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster] - Имена столбцов должны быть такими же, как в исходной таблице MySQL, но можно использовать только некоторые из этих столбцов и в любом порядке. - Типы столбцов могут отличаться от типов в исходной таблице MySQL. ClickHouse пытается [привести](../../../engines/database-engines/mysql.md#data_types-support) значения к типам данных ClickHouse. -- Настройка `external_table_functions_use_nulls` определяет как обрабатывать Nullable столбцы. Значение по умолчанию: 1. Если значение 0, то табличная функция не делает Nullable столбцы, а вместо NULL выставляет значения по умолчанию для скалярного типа. Это также применимо для значений NULL внутри массивов. +- Настройка [external_table_functions_use_nulls](../../../operations/settings/settings.md#external-table-functions-use-nulls) определяет как обрабатывать Nullable столбцы. Значение по умолчанию: 1. Если значение 0, то табличная функция не делает Nullable столбцы, а вместо NULL выставляет значения по умолчанию для скалярного типа. Это также применимо для значений NULL внутри массивов. 
**Параметры движка** diff --git a/docs/ru/engines/table-engines/integrations/odbc.md b/docs/ru/engines/table-engines/integrations/odbc.md index 669977ff531..27b8578a22c 100644 --- a/docs/ru/engines/table-engines/integrations/odbc.md +++ b/docs/ru/engines/table-engines/integrations/odbc.md @@ -29,7 +29,7 @@ ENGINE = ODBC(connection_settings, external_database, external_table) - Имена столбцов должны быть такими же, как в исходной таблице, но вы можете использовать только некоторые из этих столбцов и в любом порядке. - Типы столбцов могут отличаться от типов аналогичных столбцов в исходной таблице. ClickHouse пытается [приводить](../../../engines/table-engines/integrations/odbc.md#type_conversion_function-cast) значения к типам данных ClickHouse. -- Настройка `external_table_functions_use_nulls` определяет как обрабатывать Nullable столбцы. По умолчанию 1, если 0 - табличная функция не будет делать nullable столбцы и будет вместо null выставлять значения по умолчанию для скалярного типа. Это также применимо для null значений внутри массивов. +- Настройка [external_table_functions_use_nulls](../../../operations/settings/settings.md#external-table-functions-use-nulls) определяет как обрабатывать Nullable столбцы. Значение по умолчанию: 1. Если значение 0, то табличная функция не делает Nullable столбцы, а вместо NULL выставляет значения по умолчанию для скалярного типа. Это также применимо для значений NULL внутри массивов. **Параметры движка** diff --git a/docs/ru/engines/table-engines/integrations/postgresql.md b/docs/ru/engines/table-engines/integrations/postgresql.md index c293b6f5736..caf3bb8c69a 100644 --- a/docs/ru/engines/table-engines/integrations/postgresql.md +++ b/docs/ru/engines/table-engines/integrations/postgresql.md @@ -24,7 +24,7 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster] - Имена столбцов должны быть такими же, как в исходной таблице PostgreSQL, но можно использовать только некоторые из этих столбцов и в любом порядке. - Типы столбцов могут отличаться от типов в исходной таблице PostgreSQL. ClickHouse пытается [привести](../../../engines/database-engines/postgresql.md#data_types-support) значения к типам данных ClickHouse. -- Настройка `external_table_functions_use_nulls` определяет как обрабатывать Nullable столбцы. Значение по умолчанию: 1. Если значение 0, то табличная функция не делает Nullable столбцы, а вместо NULL выставляет значения по умолчанию для скалярного типа. Это также применимо для значений NULL внутри массивов. +- Настройка [external_table_functions_use_nulls](../../../operations/settings/settings.md#external-table-functions-use-nulls) определяет как обрабатывать Nullable столбцы. Значение по умолчанию: 1. Если значение 0, то табличная функция не делает Nullable столбцы, а вместо NULL выставляет значения по умолчанию для скалярного типа. Это также применимо для значений NULL внутри массивов. 
**Параметры движка** From c3a2fc05846255a8b947c806b334930fbb6af42e Mon Sep 17 00:00:00 2001 From: kssenii Date: Sun, 27 Jun 2021 19:09:17 +0000 Subject: [PATCH 474/931] Review fixes --- ...stgresql.md => materialized-postgresql.md} | 23 +-- ...stgresql.md => materialized-postgresql.md} | 13 +- src/Core/Settings.h | 2 +- src/Databases/DatabaseFactory.cpp | 14 +- ...cpp => DatabaseMaterializedPostgreSQL.cpp} | 58 +++---- ...SQL.h => DatabaseMaterializedPostgreSQL.h} | 12 +- src/Interpreters/InterpreterCreateQuery.cpp | 6 +- src/Interpreters/InterpreterDropQuery.cpp | 4 +- .../MaterializePostgreSQLSettings.h | 30 ---- ...cpp => MaterializedPostgreSQLConsumer.cpp} | 47 +++--- ...mer.h => MaterializedPostgreSQLConsumer.h} | 4 +- ...cpp => MaterializedPostgreSQLSettings.cpp} | 6 +- .../MaterializedPostgreSQLSettings.h | 30 ++++ .../PostgreSQLReplicationHandler.cpp | 153 ++++++++++-------- .../PostgreSQL/PostgreSQLReplicationHandler.h | 20 +-- ....cpp => StorageMaterializedPostgreSQL.cpp} | 117 ++++++++------ ...eSQL.h => StorageMaterializedPostgreSQL.h} | 55 ++++--- src/Storages/registerStorages.cpp | 4 +- .../configs/users.xml | 2 +- .../test.py | 24 +-- .../test_storage_postgresql_replica/test.py | 2 +- 21 files changed, 330 insertions(+), 296 deletions(-) rename docs/en/engines/database-engines/{materialize-postgresql.md => materialized-postgresql.md} (59%) rename docs/en/engines/table-engines/integrations/{materialize-postgresql.md => materialized-postgresql.md} (54%) rename src/Databases/PostgreSQL/{DatabaseMaterializePostgreSQL.cpp => DatabaseMaterializedPostgreSQL.cpp} (66%) rename src/Databases/PostgreSQL/{DatabaseMaterializePostgreSQL.h => DatabaseMaterializedPostgreSQL.h} (83%) delete mode 100644 src/Storages/PostgreSQL/MaterializePostgreSQLSettings.h rename src/Storages/PostgreSQL/{MaterializePostgreSQLConsumer.cpp => MaterializedPostgreSQLConsumer.cpp} (91%) rename src/Storages/PostgreSQL/{MaterializePostgreSQLConsumer.h => MaterializedPostgreSQLConsumer.h} (98%) rename src/Storages/PostgreSQL/{MaterializePostgreSQLSettings.cpp => MaterializedPostgreSQLSettings.cpp} (77%) create mode 100644 src/Storages/PostgreSQL/MaterializedPostgreSQLSettings.h rename src/Storages/PostgreSQL/{StorageMaterializePostgreSQL.cpp => StorageMaterializedPostgreSQL.cpp} (78%) rename src/Storages/PostgreSQL/{StorageMaterializePostgreSQL.h => StorageMaterializedPostgreSQL.h} (73%) diff --git a/docs/en/engines/database-engines/materialize-postgresql.md b/docs/en/engines/database-engines/materialized-postgresql.md similarity index 59% rename from docs/en/engines/database-engines/materialize-postgresql.md rename to docs/en/engines/database-engines/materialized-postgresql.md index f657035d050..dde3886f694 100644 --- a/docs/en/engines/database-engines/materialize-postgresql.md +++ b/docs/en/engines/database-engines/materialized-postgresql.md @@ -1,15 +1,15 @@ --- toc_priority: 30 -toc_title: MaterializePostgreSQL +toc_title: MaterializedPostgreSQL --- -# MaterializePostgreSQL {#materialize-postgresql} +# MaterializedPostgreSQL {#materialize-postgresql} ## Creating a Database {#creating-a-database} ``` sql CREATE DATABASE test_database -ENGINE = MaterializePostgreSQL('postgres1:5432', 'postgres_database', 'postgres_user', 'postgres_password' +ENGINE = MaterializedPostgreSQL('postgres1:5432', 'postgres_database', 'postgres_user', 'postgres_password' SELECT * FROM test_database.postgres_table; ``` @@ -17,17 +17,17 @@ SELECT * FROM test_database.postgres_table; ## Settings {#settings} -1. 
`materialize_postgresql_max_block_size` - Number of rows collected before flushing data into table. Default: `65536`.
+1. `materialized_postgresql_max_block_size` - Number of rows collected before flushing data into table. Default: `65536`.

-2. `materialize_postgresql_tables_list` - List of tables for MaterializePostgreSQL database engine. Default: `whole database`.
+2. `materialized_postgresql_tables_list` - List of tables for MaterializedPostgreSQL database engine. Default: `whole database`.

-3. `materialize_postgresql_allow_automatic_update` - Allow to reload table in the background, when schema changes are detected. Default: `0` (`false`).
+3. `materialized_postgresql_allow_automatic_update` - Allow to reload table in the background, when schema changes are detected. Default: `0` (`false`).

 ``` sql
 CREATE DATABASE test_database
-ENGINE = MaterializePostgreSQL('postgres1:5432', 'postgres_database', 'postgres_user', 'postgres_password'
-SETTINGS materialize_postgresql_max_block_size = 65536,
-         materialize_postgresql_tables_list = 'table1,table2,table3';
+ENGINE = MaterializedPostgreSQL('postgres1:5432', 'postgres_database', 'postgres_user', 'postgres_password'
+SETTINGS materialized_postgresql_max_block_size = 65536,
+         materialized_postgresql_tables_list = 'table1,table2,table3';

 SELECT * FROM test_database.table1;
 ```
@@ -64,3 +64,8 @@ postgres# SELECT CASE relreplident
 FROM pg_class
 WHERE oid = 'postgres_table'::regclass;
 ```
+
+
+## WARNINGS {#warnings}
+
+1. **TOAST** values conversion is not supported. Default value for the data type will be used.
diff --git a/docs/en/engines/table-engines/integrations/materialize-postgresql.md b/docs/en/engines/table-engines/integrations/materialized-postgresql.md
similarity index 54%
rename from docs/en/engines/table-engines/integrations/materialize-postgresql.md
rename to docs/en/engines/table-engines/integrations/materialized-postgresql.md
index aba1a370792..8c645d03e13 100644
--- a/docs/en/engines/table-engines/integrations/materialize-postgresql.md
+++ b/docs/en/engines/table-engines/integrations/materialized-postgresql.md
@@ -3,13 +3,13 @@ toc_priority: 12
 toc_title: MateriaziePostgreSQL
 ---

-# MaterializePostgreSQL {#materialize-postgresql}
+# MaterializedPostgreSQL {#materialize-postgresql}

 ## Creating a Table {#creating-a-table}

 ``` sql
 CREATE TABLE test.postgresql_replica (key UInt64, value UInt64)
-ENGINE = MaterializePostgreSQL('postgres1:5432', 'postgres_database', 'postgresql_replica', 'postgres_user', 'postgres_password')
+ENGINE = MaterializedPostgreSQL('postgres1:5432', 'postgres_database', 'postgresql_replica', 'postgres_user', 'postgres_password')
 PRIMARY KEY key;
 ```

@@ -18,7 +18,7 @@ PRIMARY KEY key;

 - Setting `wal_level`to `logical` and `max_replication_slots` to at least `2` in the postgresql config file.

-- A table with engine `MaterializePostgreSQL` must have a primary key - the same as a replica identity index (default: primary key) of a postgres table (See [details on replica identity index](../../database-engines/materialize-postgresql.md#requirements)).
+- A table with engine `MaterializedPostgreSQL` must have a primary key - the same as a replica identity index (default: primary key) of a postgres table (See [details on replica identity index](../../database-engines/materialize-postgresql.md#requirements)).

 - Only database `Atomic` is allowed.

@@ -34,8 +34,13 @@ These columns do not need to be added, when table is created. 
They are always accessible. ``` sql CREATE TABLE test.postgresql_replica (key UInt64, value UInt64) -ENGINE = MaterializePostgreSQL('postgres1:5432', 'postgres_database', 'postgresql_replica', 'postgres_user', 'postgres_password') +ENGINE = MaterializedPostgreSQL('postgres1:5432', 'postgres_database', 'postgresql_replica', 'postgres_user', 'postgres_password') PRIMARY KEY key; SELECT key, value, _version FROM test.postgresql_replica; ``` + + +## WARNINGS {#warnings} + +1. Conversion of **TOAST** values is not supported. The default value for the data type will be used. diff --git a/src/Core/Settings.h b/src/Core/Settings.h index 69b9e8bc10f..2bf5aeaeed3 100644 --- a/src/Core/Settings.h +++ b/src/Core/Settings.h @@ -428,7 +428,7 @@ class IColumn; M(Bool, cast_keep_nullable, false, "CAST operator keep Nullable for result data type", 0) \ M(Bool, alter_partition_verbose_result, false, "Output information about affected parts. Currently works only for FREEZE and ATTACH commands.", 0) \ M(Bool, allow_experimental_database_materialize_mysql, false, "Allow to create database with Engine=MaterializeMySQL(...).", 0) \ - M(Bool, allow_experimental_database_materialize_postgresql, false, "Allow to create database with Engine=MaterializePostgreSQL(...).", 0) \ + M(Bool, allow_experimental_database_materialized_postgresql, false, "Allow to create database with Engine=MaterializedPostgreSQL(...).", 0) \ M(Bool, system_events_show_zero_values, false, "Include all metrics, even with zero values", 0) \ M(MySQLDataTypesSupport, mysql_datatypes_support_level, 0, "Which MySQL types should be converted to corresponding ClickHouse types (rather than being represented as String). Can be empty or any combination of 'decimal' or 'datetime64'. When empty MySQL's DECIMAL and DATETIME/TIMESTAMP with non-zero precision are seen as String on ClickHouse's side.", 0) \ M(Bool, optimize_trivial_insert_select, true, "Optimize trivial 'INSERT INTO table SELECT ...
FROM TABLES' query", 0) \ diff --git a/src/Databases/DatabaseFactory.cpp b/src/Databases/DatabaseFactory.cpp index adfefcf7820..802d50d11c2 100644 --- a/src/Databases/DatabaseFactory.cpp +++ b/src/Databases/DatabaseFactory.cpp @@ -36,8 +36,8 @@ #if USE_LIBPQXX #include // Y_IGNORE -#include -#include +#include +#include #endif namespace fs = std::filesystem; @@ -100,14 +100,14 @@ DatabasePtr DatabaseFactory::getImpl(const ASTCreateQuery & create, const String const UUID & uuid = create.uuid; bool engine_may_have_arguments = engine_name == "MySQL" || engine_name == "MaterializeMySQL" || engine_name == "Lazy" || - engine_name == "Replicated" || engine_name == "PostgreSQL" || engine_name == "MaterializePostgreSQL"; + engine_name == "Replicated" || engine_name == "PostgreSQL" || engine_name == "MaterializedPostgreSQL"; if (engine_define->engine->arguments && !engine_may_have_arguments) throw Exception("Database engine " + engine_name + " cannot have arguments", ErrorCodes::BAD_ARGUMENTS); bool has_unexpected_element = engine_define->engine->parameters || engine_define->partition_by || engine_define->primary_key || engine_define->order_by || engine_define->sample_by; - bool may_have_settings = endsWith(engine_name, "MySQL") || engine_name == "Replicated" || engine_name == "MaterializePostgreSQL"; + bool may_have_settings = endsWith(engine_name, "MySQL") || engine_name == "Replicated" || engine_name == "MaterializedPostgreSQL"; if (has_unexpected_element || (!may_have_settings && engine_define->settings)) throw Exception("Database engine " + engine_name + " cannot have parameters, primary_key, order_by, sample_by, settings", ErrorCodes::UNKNOWN_ELEMENT_IN_AST); @@ -263,7 +263,7 @@ DatabasePtr DatabaseFactory::getImpl(const ASTCreateQuery & create, const String return std::make_shared( context, metadata_path, engine_define, database_name, postgres_database_name, connection_pool, use_table_cache); } - else if (engine_name == "MaterializePostgreSQL") + else if (engine_name == "MaterializedPostgreSQL") { const ASTFunction * engine = engine_define->engine; @@ -287,12 +287,12 @@ DatabasePtr DatabaseFactory::getImpl(const ASTCreateQuery & create, const String auto parsed_host_port = parseAddress(host_port, 5432); auto connection_info = postgres::formatConnectionString(postgres_database_name, parsed_host_port.first, parsed_host_port.second, username, password); - auto postgresql_replica_settings = std::make_unique(); + auto postgresql_replica_settings = std::make_unique(); if (engine_define->settings) postgresql_replica_settings->loadFromQuery(*engine_define); - return std::make_shared( + return std::make_shared( context, metadata_path, uuid, engine_define, database_name, postgres_database_name, connection_info, std::move(postgresql_replica_settings)); diff --git a/src/Databases/PostgreSQL/DatabaseMaterializePostgreSQL.cpp b/src/Databases/PostgreSQL/DatabaseMaterializedPostgreSQL.cpp similarity index 66% rename from src/Databases/PostgreSQL/DatabaseMaterializePostgreSQL.cpp rename to src/Databases/PostgreSQL/DatabaseMaterializedPostgreSQL.cpp index f3d71d6bf67..37a464c6cda 100644 --- a/src/Databases/PostgreSQL/DatabaseMaterializePostgreSQL.cpp +++ b/src/Databases/PostgreSQL/DatabaseMaterializedPostgreSQL.cpp @@ -1,8 +1,8 @@ -#include +#include #if USE_LIBPQXX -#include +#include #include #include @@ -31,7 +31,7 @@ namespace ErrorCodes extern const int NOT_IMPLEMENTED; } -DatabaseMaterializePostgreSQL::DatabaseMaterializePostgreSQL( +DatabaseMaterializedPostgreSQL::DatabaseMaterializedPostgreSQL( 
ContextPtr context_, const String & metadata_path_, UUID uuid_, @@ -39,8 +39,8 @@ DatabaseMaterializePostgreSQL::DatabaseMaterializePostgreSQL( const String & database_name_, const String & postgres_database_name, const postgres::ConnectionInfo & connection_info_, - std::unique_ptr settings_) - : DatabaseAtomic(database_name_, metadata_path_, uuid_, "DatabaseMaterializePostgreSQL (" + database_name_ + ")", context_) + std::unique_ptr settings_) + : DatabaseAtomic(database_name_, metadata_path_, uuid_, "DatabaseMaterializedPostgreSQL (" + database_name_ + ")", context_) , database_engine_define(database_engine_define_->clone()) , remote_database_name(postgres_database_name) , connection_info(connection_info_) @@ -49,7 +49,7 @@ DatabaseMaterializePostgreSQL::DatabaseMaterializePostgreSQL( } -void DatabaseMaterializePostgreSQL::startSynchronization() +void DatabaseMaterializedPostgreSQL::startSynchronization() { replication_handler = std::make_unique( /* replication_identifier */database_name, @@ -57,10 +57,10 @@ void DatabaseMaterializePostgreSQL::startSynchronization() database_name, connection_info, getContext(), - settings->materialize_postgresql_max_block_size.value, - settings->materialize_postgresql_allow_automatic_update, - /* is_materialize_postgresql_database = */ true, - settings->materialize_postgresql_tables_list.value); + settings->materialized_postgresql_max_block_size.value, + settings->materialized_postgresql_allow_automatic_update, + /* is_materialized_postgresql_database = */ true, + settings->materialized_postgresql_tables_list.value); postgres::Connection connection(connection_info); std::unordered_set tables_to_replicate = replication_handler->fetchRequiredTables(connection.getRef()); @@ -73,19 +73,19 @@ void DatabaseMaterializePostgreSQL::startSynchronization() if (storage) { /// Nested table was already created and synchronized. - storage = StorageMaterializePostgreSQL::create(storage, getContext()); + storage = StorageMaterializedPostgreSQL::create(storage, getContext()); } else { /// Nested table does not exist and will be created by replication thread. - storage = StorageMaterializePostgreSQL::create(StorageID(database_name, table_name), getContext()); + storage = StorageMaterializedPostgreSQL::create(StorageID(database_name, table_name), getContext()); } - /// Cache MaterializePostgreSQL wrapper over nested table. + /// Cache MaterializedPostgreSQL wrapper over nested table. materialized_tables[table_name] = storage; - /// Let replication thread now, which tables it needs to keep in sync. - replication_handler->addStorage(table_name, storage->as()); + /// Let replication thread know, which tables it needs to keep in sync. + replication_handler->addStorage(table_name, storage->as()); } LOG_TRACE(log, "Loaded {} tables. 
Starting synchronization", materialized_tables.size()); @@ -93,7 +93,7 @@ void DatabaseMaterializePostgreSQL::startSynchronization() } -void DatabaseMaterializePostgreSQL::loadStoredObjects(ContextMutablePtr local_context, bool has_force_restore_data_flag, bool force_attach) +void DatabaseMaterializedPostgreSQL::loadStoredObjects(ContextMutablePtr local_context, bool has_force_restore_data_flag, bool force_attach) { DatabaseAtomic::loadStoredObjects(local_context, has_force_restore_data_flag, force_attach); @@ -112,9 +112,9 @@ void DatabaseMaterializePostgreSQL::loadStoredObjects(ContextMutablePtr local_co } -StoragePtr DatabaseMaterializePostgreSQL::tryGetTable(const String & name, ContextPtr local_context) const +StoragePtr DatabaseMaterializedPostgreSQL::tryGetTable(const String & name, ContextPtr local_context) const { - /// In otder to define which table access is needed - to MaterializePostgreSQL table (only in case of SELECT queries) or + /// In otder to define which table access is needed - to MaterializedPostgreSQL table (only in case of SELECT queries) or /// to its nested ReplacingMergeTree table (in all other cases), the context of a query os modified. /// Also if materialzied_tables set is empty - it means all access is done to ReplacingMergeTree tables - it is a case after /// replication_handler was shutdown. @@ -123,14 +123,14 @@ StoragePtr DatabaseMaterializePostgreSQL::tryGetTable(const String & name, Conte return DatabaseAtomic::tryGetTable(name, local_context); } - /// Note: In select query we call MaterializePostgreSQL table and it calls tryGetTable from its nested. - /// So the only point, where synchronization is needed - access to MaterializePostgreSQL table wrapper over nested table. + /// Note: In select query we call MaterializedPostgreSQL table and it calls tryGetTable from its nested. + /// So the only point, where synchronization is needed - access to MaterializedPostgreSQL table wrapper over nested table. std::lock_guard lock(tables_mutex); auto table = materialized_tables.find(name); /// Return wrapper over ReplacingMergeTree table. If table synchronization just started, table will not /// be accessible immediately. Table is considered to exist once its nested table was created. - if (table != materialized_tables.end() && table->second->as ()->hasNested()) + if (table != materialized_tables.end() && table->second->as ()->hasNested()) { return table->second; } @@ -139,7 +139,7 @@ StoragePtr DatabaseMaterializePostgreSQL::tryGetTable(const String & name, Conte } -void DatabaseMaterializePostgreSQL::createTable(ContextPtr local_context, const String & table_name, const StoragePtr & table, const ASTPtr & query) +void DatabaseMaterializedPostgreSQL::createTable(ContextPtr local_context, const String & table_name, const StoragePtr & table, const ASTPtr & query) { /// Create table query can only be called from replication thread. 
if (local_context->isInternalQuery()) @@ -153,7 +153,7 @@ void DatabaseMaterializePostgreSQL::createTable(ContextPtr local_context, const } -void DatabaseMaterializePostgreSQL::stopReplication() +void DatabaseMaterializedPostgreSQL::stopReplication() { if (replication_handler) replication_handler->shutdown(); @@ -163,27 +163,27 @@ void DatabaseMaterializePostgreSQL::stopReplication() } -void DatabaseMaterializePostgreSQL::dropTable(ContextPtr local_context, const String & table_name, bool no_delay) +void DatabaseMaterializedPostgreSQL::dropTable(ContextPtr local_context, const String & table_name, bool no_delay) { /// Modify context into nested_context and pass query to Atomic database. - DatabaseAtomic::dropTable(StorageMaterializePostgreSQL::makeNestedTableContext(local_context), table_name, no_delay); + DatabaseAtomic::dropTable(StorageMaterializedPostgreSQL::makeNestedTableContext(local_context), table_name, no_delay); } -void DatabaseMaterializePostgreSQL::drop(ContextPtr local_context) +void DatabaseMaterializedPostgreSQL::drop(ContextPtr local_context) { if (replication_handler) replication_handler->shutdownFinal(); - DatabaseAtomic::drop(StorageMaterializePostgreSQL::makeNestedTableContext(local_context)); + DatabaseAtomic::drop(StorageMaterializedPostgreSQL::makeNestedTableContext(local_context)); } -DatabaseTablesIteratorPtr DatabaseMaterializePostgreSQL::getTablesIterator( +DatabaseTablesIteratorPtr DatabaseMaterializedPostgreSQL::getTablesIterator( ContextPtr local_context, const DatabaseOnDisk::FilterByNameFunction & filter_by_table_name) { /// Modify context into nested_context and pass query to Atomic database. - return DatabaseAtomic::getTablesIterator(StorageMaterializePostgreSQL::makeNestedTableContext(local_context), filter_by_table_name); + return DatabaseAtomic::getTablesIterator(StorageMaterializedPostgreSQL::makeNestedTableContext(local_context), filter_by_table_name); } diff --git a/src/Databases/PostgreSQL/DatabaseMaterializePostgreSQL.h b/src/Databases/PostgreSQL/DatabaseMaterializedPostgreSQL.h similarity index 83% rename from src/Databases/PostgreSQL/DatabaseMaterializePostgreSQL.h rename to src/Databases/PostgreSQL/DatabaseMaterializedPostgreSQL.h index aff2db4499a..0a60f47cbe4 100644 --- a/src/Databases/PostgreSQL/DatabaseMaterializePostgreSQL.h +++ b/src/Databases/PostgreSQL/DatabaseMaterializedPostgreSQL.h @@ -7,7 +7,7 @@ #if USE_LIBPQXX #include -#include +#include #include #include @@ -24,11 +24,11 @@ class PostgreSQLConnection; using PostgreSQLConnectionPtr = std::shared_ptr; -class DatabaseMaterializePostgreSQL : public DatabaseAtomic +class DatabaseMaterializedPostgreSQL : public DatabaseAtomic { public: - DatabaseMaterializePostgreSQL( + DatabaseMaterializedPostgreSQL( ContextPtr context_, const String & metadata_path_, UUID uuid_, @@ -36,9 +36,9 @@ public: const String & database_name_, const String & postgres_database_name, const postgres::ConnectionInfo & connection_info, - std::unique_ptr settings_); + std::unique_ptr settings_); - String getEngineName() const override { return "MaterializePostgreSQL"; } + String getEngineName() const override { return "MaterializedPostgreSQL"; } String getMetadataPath() const override { return metadata_path; } @@ -63,7 +63,7 @@ private: ASTPtr database_engine_define; String remote_database_name; postgres::ConnectionInfo connection_info; - std::unique_ptr settings; + std::unique_ptr settings; std::shared_ptr replication_handler; std::map materialized_tables; diff --git a/src/Interpreters/InterpreterCreateQuery.cpp 
b/src/Interpreters/InterpreterCreateQuery.cpp index 33c80ccee38..7ebc7b95ea9 100644 --- a/src/Interpreters/InterpreterCreateQuery.cpp +++ b/src/Interpreters/InterpreterCreateQuery.cpp @@ -151,7 +151,7 @@ BlockIO InterpreterCreateQuery::createDatabase(ASTCreateQuery & create) throw Exception(ErrorCodes::UNKNOWN_DATABASE_ENGINE, "Unknown database engine: {}", serializeAST(*create.storage)); } - if (create.storage->engine->name == "Atomic" || create.storage->engine->name == "Replicated" || create.storage->engine->name == "MaterializePostgreSQL") + if (create.storage->engine->name == "Atomic" || create.storage->engine->name == "Replicated" || create.storage->engine->name == "MaterializedPostgreSQL") { if (create.attach && create.uuid == UUIDHelpers::Nil) throw Exception(ErrorCodes::INCORRECT_QUERY, "UUID must be specified for ATTACH. " @@ -217,9 +217,9 @@ BlockIO InterpreterCreateQuery::createDatabase(ASTCreateQuery & create) "Enable allow_experimental_database_replicated to use it.", ErrorCodes::UNKNOWN_DATABASE_ENGINE); } - if (create.storage->engine->name == "MaterializePostgreSQL" && !getContext()->getSettingsRef().allow_experimental_database_materialize_postgresql && !internal) + if (create.storage->engine->name == "MaterializedPostgreSQL" && !getContext()->getSettingsRef().allow_experimental_database_materialized_postgresql && !internal) { - throw Exception("MaterializePostgreSQL is an experimental database engine. " + throw Exception("MaterializedPostgreSQL is an experimental database engine. " "Enable allow_experimental_database_materialized_postgresql to use it.", ErrorCodes::UNKNOWN_DATABASE_ENGINE); } diff --git a/src/Interpreters/InterpreterDropQuery.cpp b/src/Interpreters/InterpreterDropQuery.cpp index 9e4fb44f9a2..94d5fbf3ea7 100644 --- a/src/Interpreters/InterpreterDropQuery.cpp +++ b/src/Interpreters/InterpreterDropQuery.cpp @@ -21,7 +21,7 @@ #endif #if USE_LIBPQXX -# include +# include #endif namespace DB @@ -321,7 +321,7 @@ BlockIO InterpreterDropQuery::executeToDatabaseImpl(const ASTDropQuery & query, if (auto * replicated = typeid_cast(database.get())) replicated->stopReplication(); #if USE_LIBPQXX - if (auto * materialize_postgresql = typeid_cast(database.get())) + if (auto * materialize_postgresql = typeid_cast(database.get())) materialize_postgresql->stopReplication(); #endif diff --git a/src/Storages/PostgreSQL/MaterializePostgreSQLSettings.h b/src/Storages/PostgreSQL/MaterializePostgreSQLSettings.h deleted file mode 100644 index 8875c45f9fa..00000000000 --- a/src/Storages/PostgreSQL/MaterializePostgreSQLSettings.h +++ /dev/null @@ -1,30 +0,0 @@ -#pragma once - -#if !defined(ARCADIA_BUILD) -#include "config_core.h" -#endif - -#if USE_LIBPQXX -#include - - -namespace DB -{ - class ASTStorage; - - -#define LIST_OF_MATERIALIZE_POSTGRESQL_SETTINGS(M) \ - M(UInt64, materialize_postgresql_max_block_size, 65536, "Number of row collected before flushing data into table.", 0) \ - M(String, materialize_postgresql_tables_list, "", "List of tables for MaterializePostgreSQL database engine", 0) \ - M(Bool, materialize_postgresql_allow_automatic_update, 0, "Allow to reload table in the background, when schema changes are detected", 0) \ - -DECLARE_SETTINGS_TRAITS(MaterializePostgreSQLSettingsTraits, LIST_OF_MATERIALIZE_POSTGRESQL_SETTINGS) - -struct MaterializePostgreSQLSettings : public BaseSettings -{ - void loadFromQuery(ASTStorage & storage_def); -}; - -} - -#endif diff --git a/src/Storages/PostgreSQL/MaterializePostgreSQLConsumer.cpp
b/src/Storages/PostgreSQL/MaterializedPostgreSQLConsumer.cpp similarity index 91% rename from src/Storages/PostgreSQL/MaterializePostgreSQLConsumer.cpp rename to src/Storages/PostgreSQL/MaterializedPostgreSQLConsumer.cpp index b1325d9ca57..390ad996e4e 100644 --- a/src/Storages/PostgreSQL/MaterializePostgreSQLConsumer.cpp +++ b/src/Storages/PostgreSQL/MaterializedPostgreSQLConsumer.cpp @@ -1,6 +1,6 @@ -#include "MaterializePostgreSQLConsumer.h" +#include "MaterializedPostgreSQLConsumer.h" -#include "StorageMaterializePostgreSQL.h" +#include "StorageMaterializedPostgreSQL.h" #include #include #include @@ -16,10 +16,9 @@ namespace DB namespace ErrorCodes { extern const int LOGICAL_ERROR; - extern const int UNKNOWN_TABLE; } -MaterializePostgreSQLConsumer::MaterializePostgreSQLConsumer( +MaterializedPostgreSQLConsumer::MaterializedPostgreSQLConsumer( ContextPtr context_, std::shared_ptr connection_, const std::string & replication_slot_name_, @@ -46,7 +45,7 @@ MaterializePostgreSQLConsumer::MaterializePostgreSQLConsumer( } -void MaterializePostgreSQLConsumer::Buffer::createEmptyBuffer(StoragePtr storage) +void MaterializedPostgreSQLConsumer::Buffer::createEmptyBuffer(StoragePtr storage) { const auto storage_metadata = storage->getInMemoryMetadataPtr(); const Block sample_block = storage_metadata->getSampleBlock(); @@ -60,7 +59,7 @@ void MaterializePostgreSQLConsumer::Buffer::createEmptyBuffer(StoragePtr storage auto insert_columns = std::make_shared(); auto table_id = storage->getStorageID(); - LOG_TRACE(&Poco::Logger::get("MaterializePostgreSQLBuffer"), "New buffer for table {}.{} ({}), structure: {}", + LOG_TRACE(&Poco::Logger::get("MaterializedPostgreSQLBuffer"), "New buffer for table {}.{} ({}), structure: {}", table_id.database_name, table_id.table_name, toString(table_id.uuid), sample_block.dumpStructure()); assert(description.sample_block.columns() == storage_columns.size()); @@ -79,7 +78,7 @@ void MaterializePostgreSQLConsumer::Buffer::createEmptyBuffer(StoragePtr storage } -void MaterializePostgreSQLConsumer::insertValue(Buffer & buffer, const std::string & value, size_t column_idx) +void MaterializedPostgreSQLConsumer::insertValue(Buffer & buffer, const std::string & value, size_t column_idx) { const auto & sample = buffer.description.sample_block.getByPosition(column_idx); bool is_nullable = buffer.description.types[column_idx].second; @@ -105,14 +104,14 @@ void MaterializePostgreSQLConsumer::insertValue(Buffer & buffer, const std::stri } -void MaterializePostgreSQLConsumer::insertDefaultValue(Buffer & buffer, size_t column_idx) +void MaterializedPostgreSQLConsumer::insertDefaultValue(Buffer & buffer, size_t column_idx) { const auto & sample = buffer.description.sample_block.getByPosition(column_idx); insertDefaultPostgreSQLValue(*buffer.columns[column_idx], *sample.column); } -void MaterializePostgreSQLConsumer::readString(const char * message, size_t & pos, size_t size, String & result) +void MaterializedPostgreSQLConsumer::readString(const char * message, size_t & pos, size_t size, String & result) { assert(size > pos + 2); char current = unhex2(message + pos); @@ -127,7 +126,7 @@ void MaterializePostgreSQLConsumer::readString(const char * message, size_t & po template -T MaterializePostgreSQLConsumer::unhexN(const char * message, size_t pos, size_t n) +T MaterializedPostgreSQLConsumer::unhexN(const char * message, size_t pos, size_t n) { T result = 0; for (size_t i = 0; i < n; ++i) @@ -139,7 +138,7 @@ T MaterializePostgreSQLConsumer::unhexN(const char * message, size_t pos, 
size_t } -Int64 MaterializePostgreSQLConsumer::readInt64(const char * message, size_t & pos, [[maybe_unused]] size_t size) +Int64 MaterializedPostgreSQLConsumer::readInt64(const char * message, size_t & pos, [[maybe_unused]] size_t size) { assert(size >= pos + 16); Int64 result = unhexN(message, pos, 8); @@ -148,7 +147,7 @@ Int64 MaterializePostgreSQLConsumer::readInt64(const char * message, size_t & po } -Int32 MaterializePostgreSQLConsumer::readInt32(const char * message, size_t & pos, [[maybe_unused]] size_t size) +Int32 MaterializedPostgreSQLConsumer::readInt32(const char * message, size_t & pos, [[maybe_unused]] size_t size) { assert(size >= pos + 8); Int32 result = unhexN(message, pos, 4); @@ -157,7 +156,7 @@ Int32 MaterializePostgreSQLConsumer::readInt32(const char * message, size_t & po } -Int16 MaterializePostgreSQLConsumer::readInt16(const char * message, size_t & pos, [[maybe_unused]] size_t size) +Int16 MaterializedPostgreSQLConsumer::readInt16(const char * message, size_t & pos, [[maybe_unused]] size_t size) { assert(size >= pos + 4); Int16 result = unhexN(message, pos, 2); @@ -166,7 +165,7 @@ Int16 MaterializePostgreSQLConsumer::readInt16(const char * message, size_t & po } -Int8 MaterializePostgreSQLConsumer::readInt8(const char * message, size_t & pos, [[maybe_unused]] size_t size) +Int8 MaterializedPostgreSQLConsumer::readInt8(const char * message, size_t & pos, [[maybe_unused]] size_t size) { assert(size >= pos + 2); Int8 result = unhex2(message + pos); @@ -175,7 +174,7 @@ Int8 MaterializePostgreSQLConsumer::readInt8(const char * message, size_t & pos, } -void MaterializePostgreSQLConsumer::readTupleData( +void MaterializedPostgreSQLConsumer::readTupleData( Buffer & buffer, const char * message, size_t & pos, [[maybe_unused]] size_t size, PostgreSQLQuery type, bool old_value) { Int16 num_columns = readInt16(message, pos, size); @@ -247,7 +246,7 @@ void MaterializePostgreSQLConsumer::readTupleData( /// https://www.postgresql.org/docs/13/protocol-logicalrep-message-formats.html -void MaterializePostgreSQLConsumer::processReplicationMessage(const char * replication_message, size_t size) +void MaterializedPostgreSQLConsumer::processReplicationMessage(const char * replication_message, size_t size) { /// Skip '\x' size_t pos = 2; @@ -456,7 +455,7 @@ void MaterializePostgreSQLConsumer::processReplicationMessage(const char * repli } -void MaterializePostgreSQLConsumer::syncTables(std::shared_ptr tx) +void MaterializedPostgreSQLConsumer::syncTables(std::shared_ptr tx) { try { @@ -500,7 +499,7 @@ void MaterializePostgreSQLConsumer::syncTables(std::shared_ptr tx) +String MaterializedPostgreSQLConsumer::advanceLSN(std::shared_ptr tx) { std::string query_str = fmt::format("SELECT end_lsn FROM pg_replication_slot_advance('{}', '{}')", replication_slot_name, final_lsn); pqxx::result result{tx->exec(query_str)}; @@ -516,7 +515,7 @@ String MaterializePostgreSQLConsumer::advanceLSN(std::shared_ptr tx; bool slot_empty = true; @@ -626,7 +625,7 @@ bool MaterializePostgreSQLConsumer::readFromReplicationSlot() } catch (const pqxx::conversion_error & e) { - LOG_ERROR(log, "Convertion error: {}", e.what()); + LOG_ERROR(log, "Conversion error: {}", e.what()); return false; } catch (const pqxx::broken_connection & e) @@ -662,7 +661,7 @@ bool MaterializePostgreSQLConsumer::readFromReplicationSlot() } -bool MaterializePostgreSQLConsumer::consume(std::vector> & skipped_tables) +bool MaterializedPostgreSQLConsumer::consume(std::vector> & skipped_tables) { /// Check if there are tables, which are 
skipped from being updated by changes from replication stream, /// because schema changes were detected. Update them, if it is allowed. @@ -687,7 +686,7 @@ bool MaterializePostgreSQLConsumer::consume(std::vector } -void MaterializePostgreSQLConsumer::updateNested(const String & table_name, StoragePtr nested_storage, Int32 table_id, const String & table_start_lsn) +void MaterializedPostgreSQLConsumer::updateNested(const String & table_name, StoragePtr nested_storage, Int32 table_id, const String & table_start_lsn) { /// Cache new pointer to replacingMergeTree table. storages[table_name] = nested_storage; diff --git a/src/Storages/PostgreSQL/MaterializePostgreSQLConsumer.h b/src/Storages/PostgreSQL/MaterializedPostgreSQLConsumer.h similarity index 98% rename from src/Storages/PostgreSQL/MaterializePostgreSQLConsumer.h rename to src/Storages/PostgreSQL/MaterializedPostgreSQLConsumer.h index 00523ff0ea9..59feb60a21a 100644 --- a/src/Storages/PostgreSQL/MaterializePostgreSQLConsumer.h +++ b/src/Storages/PostgreSQL/MaterializedPostgreSQLConsumer.h @@ -14,12 +14,12 @@ namespace DB { -class MaterializePostgreSQLConsumer +class MaterializedPostgreSQLConsumer { public: using Storages = std::unordered_map; - MaterializePostgreSQLConsumer( + MaterializedPostgreSQLConsumer( ContextPtr context_, std::shared_ptr connection_, const String & replication_slot_name_, diff --git a/src/Storages/PostgreSQL/MaterializePostgreSQLSettings.cpp b/src/Storages/PostgreSQL/MaterializedPostgreSQLSettings.cpp similarity index 77% rename from src/Storages/PostgreSQL/MaterializePostgreSQLSettings.cpp rename to src/Storages/PostgreSQL/MaterializedPostgreSQLSettings.cpp index 2682bd6194f..ef9ca78d984 100644 --- a/src/Storages/PostgreSQL/MaterializePostgreSQLSettings.cpp +++ b/src/Storages/PostgreSQL/MaterializedPostgreSQLSettings.cpp @@ -1,4 +1,4 @@ -#include "MaterializePostgreSQLSettings.h" +#include "MaterializedPostgreSQLSettings.h" #if USE_LIBPQXX #include @@ -15,9 +15,9 @@ namespace ErrorCodes extern const int UNKNOWN_SETTING; } -IMPLEMENT_SETTINGS_TRAITS(MaterializePostgreSQLSettingsTraits, LIST_OF_MATERIALIZE_POSTGRESQL_SETTINGS) +IMPLEMENT_SETTINGS_TRAITS(MaterializedPostgreSQLSettingsTraits, LIST_OF_MATERIALIZED_POSTGRESQL_SETTINGS) -void MaterializePostgreSQLSettings::loadFromQuery(ASTStorage & storage_def) +void MaterializedPostgreSQLSettings::loadFromQuery(ASTStorage & storage_def) { if (storage_def.settings) { diff --git a/src/Storages/PostgreSQL/MaterializedPostgreSQLSettings.h b/src/Storages/PostgreSQL/MaterializedPostgreSQLSettings.h new file mode 100644 index 00000000000..3bc32a21876 --- /dev/null +++ b/src/Storages/PostgreSQL/MaterializedPostgreSQLSettings.h @@ -0,0 +1,30 @@ +#pragma once + +#if !defined(ARCADIA_BUILD) +#include "config_core.h" +#endif + +#if USE_LIBPQXX +#include + + +namespace DB +{ + class ASTStorage; + + +#define LIST_OF_MATERIALIZED_POSTGRESQL_SETTINGS(M) \ + M(UInt64, materialized_postgresql_max_block_size, 65536, "Number of rows collected before flushing data into table.", 0) \ + M(String, materialized_postgresql_tables_list, "", "List of tables for MaterializedPostgreSQL database engine", 0) \ + M(Bool, materialized_postgresql_allow_automatic_update, 0, "Allow to reload table in the background, when schema changes are detected", 0) \ + +DECLARE_SETTINGS_TRAITS(MaterializedPostgreSQLSettingsTraits, LIST_OF_MATERIALIZED_POSTGRESQL_SETTINGS) + +struct MaterializedPostgreSQLSettings : public BaseSettings +{ + void loadFromQuery(ASTStorage & storage_def); +}; + +} + +#endif diff --git
a/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp b/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp index 46fedb99b62..74e3a2fb965 100644 --- a/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp +++ b/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp @@ -2,7 +2,7 @@ #include #include -#include +#include #include #include #include @@ -15,7 +15,7 @@ namespace DB { static const auto RESCHEDULE_MS = 500; -static const auto BACKOFF_TRESHOLD = 5000; +static const auto BACKOFF_THRESHOLD_MS = 10000; namespace ErrorCodes { @@ -30,7 +30,7 @@ PostgreSQLReplicationHandler::PostgreSQLReplicationHandler( ContextPtr context_, const size_t max_block_size_, bool allow_automatic_update_, - bool is_materialize_postgresql_database_, + bool is_materialized_postgresql_database_, const String tables_list_) : log(&Poco::Logger::get("PostgreSQLReplicationHandler")) , context(context_) @@ -39,7 +39,7 @@ PostgreSQLReplicationHandler::PostgreSQLReplicationHandler( , connection_info(connection_info_) , max_block_size(max_block_size_) , allow_automatic_update(allow_automatic_update_) - , is_materialize_postgresql_database(is_materialize_postgresql_database_) + , is_materialized_postgresql_database(is_materialized_postgresql_database_) , tables_list(tables_list_) , connection(std::make_shared(connection_info_)) , milliseconds_to_wait(RESCHEDULE_MS) @@ -52,7 +52,7 @@ PostgreSQLReplicationHandler::PostgreSQLReplicationHandler( } -void PostgreSQLReplicationHandler::addStorage(const std::string & table_name, StorageMaterializePostgreSQL * storage) +void PostgreSQLReplicationHandler::addStorage(const std::string & table_name, StorageMaterializedPostgreSQL * storage) { materialized_storages[table_name] = storage; } @@ -122,14 +122,14 @@ void PostgreSQLReplicationHandler::startSynchronization(bool throw_on_error) { try { - nested_storages[table_name] = loadFromSnapshot(snapshot_name, table_name, storage->as ()); + nested_storages[table_name] = loadFromSnapshot(snapshot_name, table_name, storage->as ()); } catch (Exception & e) { e.addMessage("while loading table {}.{}", remote_database_name, table_name); tryLogCurrentException(__PRETTY_FUNCTION__); - /// Throw in case of single MaterializePostgreSQL storage, because initial setup is done immediately + /// Throw in case of single MaterializedPostgreSQL storage, because initial setup is done immediately /// (unlike database engine where it is done in a separate thread). if (throw_on_error) throw; @@ -137,7 +137,7 @@ void PostgreSQLReplicationHandler::startSynchronization(bool throw_on_error) } }; - /// There is one replication slot for each replication handler. In case of MaterializePostgreSQL database engine, + /// There is one replication slot for each replication handler. In case of MaterializedPostgreSQL database engine, /// there is one replication slot per database. Its lifetime must be equal to the lifetime of replication handler. /// Recreation of a replication slot imposes reloading of all tables. if (!isReplicationSlotExist(tx, start_lsn, /* temporary */false)) @@ -159,7 +159,7 @@ void PostgreSQLReplicationHandler::startSynchronization(bool throw_on_error) LOG_TRACE(log, "Loading {} tables...", materialized_storages.size()); for (const auto & [table_name, storage] : materialized_storages) { - auto * materialized_storage = storage->as (); + auto * materialized_storage = storage->as (); try { /// Try load nested table, set materialized table metadata.
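The slot bookkeeping above (one permanent slot per handler, recreation forcing a full reload) can be observed from the PostgreSQL side with standard catalog queries; the slot name below is only an assumption, since the handler derives it from the replication identifier:

``` sql
-- Run in psql against the source server.
SELECT slot_name, plugin, slot_type, active, restart_lsn
FROM pg_replication_slots;

-- Dropping the slot by hand makes isReplicationSlotExist() return false on the next
-- startup, which, as the comment above notes, forces reloading of all tables.
SELECT pg_drop_replication_slot('test_database');
```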
@@ -181,7 +181,7 @@ void PostgreSQLReplicationHandler::startSynchronization(bool throw_on_error) /// Pass current connection to consumer. It is not std::moved implicitly, but a shared_ptr is passed. /// Consumer and replication handler are always executed one after another (not concurrently) and share the same connection. /// Handler uses it only for loadFromSnapshot and shutdown methods. - consumer = std::make_shared( + consumer = std::make_shared( context, connection, replication_slot, @@ -199,7 +199,7 @@ void PostgreSQLReplicationHandler::startSynchronization(bool throw_on_error) StoragePtr PostgreSQLReplicationHandler::loadFromSnapshot(String & snapshot_name, const String & table_name, - StorageMaterializePostgreSQL * materialized_storage) + StorageMaterializedPostgreSQL * materialized_storage) { auto tx = std::make_shared(connection->getRef()); @@ -270,7 +270,7 @@ void PostgreSQLReplicationHandler::consumerFunc() else { consumer_task->scheduleAfter(milliseconds_to_wait); - if (milliseconds_to_wait < BACKOFF_TRESHOLD) + if (milliseconds_to_wait < BACKOFF_THRESHOLD_MS) milliseconds_to_wait *= 2; LOG_TRACE(log, "Scheduling replication thread: after {} ms", milliseconds_to_wait); @@ -432,7 +432,7 @@ void PostgreSQLReplicationHandler::shutdownFinal() } -/// Used by MaterializePostgreSQL database engine. +/// Used by MaterializedPostgreSQL database engine. NameSet PostgreSQLReplicationHandler::fetchRequiredTables(pqxx::connection & connection_) { pqxx::work tx(connection_); @@ -474,7 +474,7 @@ NameSet PostgreSQLReplicationHandler::fetchTablesFromPublication(pqxx::work & tx PostgreSQLTableStructurePtr PostgreSQLReplicationHandler::fetchTableStructure( pqxx::ReplicationTransaction & tx, const std::string & table_name) const { - if (!is_materialize_postgresql_database) + if (!is_materialized_postgresql_database) return nullptr; return std::make_unique(fetchPostgreSQLTableStructure(tx, table_name, true, true, true)); @@ -486,81 +486,92 @@ void PostgreSQLReplicationHandler::reloadFromSnapshot(const std::vectoras (); + postgres::Connection replication_connection(connection_info, /* replication */true); + pqxx::nontransaction tx(replication_connection.getRef()); - auto temp_materialized_storage = materialized_storage->createTemporary(); + String snapshot_name, start_lsn; - /// This snapshot is valid up to the end of the transaction, which exported it.
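`loadFromSnapshot()` relies on PostgreSQL's exported-snapshot mechanism; the equivalent psql session looks roughly like this (the snapshot name is illustrative, obtained from `CREATE_REPLICATION_SLOT ... EXPORT_SNAPSHOT`):

``` sql
-- The exported snapshot is only usable while the transaction that exported it is open,
-- which is why the comment above stresses the snapshot's limited validity.
BEGIN ISOLATION LEVEL REPEATABLE READ;
SET TRANSACTION SNAPSHOT '00000003-0000001B-1';  -- illustrative name
SELECT * FROM postgres_table;                    -- consistent state as of slot creation
COMMIT;
```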
- StoragePtr temp_nested_storage = loadFromSnapshot(snapshot_name, table_name, temp_materialized_storage->as ()); + if (isReplicationSlotExist(tx, start_lsn, /* temporary */true)) + dropReplicationSlot(tx, /* temporary */true); - auto table_id = materialized_storage->getNestedStorageID(); - auto temp_table_id = temp_nested_storage->getStorageID(); + createReplicationSlot(tx, start_lsn, snapshot_name, /* temporary */true); - LOG_TRACE(log, "Starting background update of table {}.{} ({}) with table {}.{} ({})", - table_id.database_name, table_id.table_name, toString(table_id.uuid), - temp_table_id.database_name, temp_table_id.table_name, toString(temp_table_id.uuid)); - - auto ast_rename = std::make_shared(); - ASTRenameQuery::Element elem + for (const auto & [relation_id, table_name] : relation_data) { - ASTRenameQuery::Table{table_id.database_name, table_id.table_name}, - ASTRenameQuery::Table{temp_table_id.database_name, temp_table_id.table_name} - }; - ast_rename->elements.push_back(std::move(elem)); - ast_rename->exchange = true; + auto storage = DatabaseCatalog::instance().getTable(StorageID(current_database_name, table_name), context); + auto * materialized_storage = storage->as (); - auto nested_context = materialized_storage->getNestedTableContext(); + /// If for some reason this temporary table already exists - also drop it. + auto temp_materialized_storage = materialized_storage->createTemporary(); - try - { - auto materialized_table_lock = materialized_storage->lockForShare(String(), context->getSettingsRef().lock_acquire_timeout); - InterpreterRenameQuery(ast_rename, nested_context).execute(); + /// This snapshot is valid up to the end of the transaction, which exported it. + StoragePtr temp_nested_storage = loadFromSnapshot(snapshot_name, table_name, + temp_materialized_storage->as ()); + auto table_id = materialized_storage->getNestedStorageID(); + auto temp_table_id = temp_nested_storage->getStorageID(); + + LOG_TRACE(log, "Starting background update of table {} with table {}", + table_id.getNameForLogs(), temp_table_id.getNameForLogs()); + + auto ast_rename = std::make_shared(); + ASTRenameQuery::Element elem { - auto nested_storage = DatabaseCatalog::instance().getTable(StorageID(table_id.database_name, table_id.table_name), nested_context); - auto nested_table_lock = nested_storage->lockForShare(String(), context->getSettingsRef().lock_acquire_timeout); - auto nested_table_id = nested_storage->getStorageID(); + ASTRenameQuery::Table{table_id.database_name, table_id.table_name}, + ASTRenameQuery::Table{temp_table_id.database_name, temp_table_id.table_name} + }; + ast_rename->elements.push_back(std::move(elem)); + ast_rename->exchange = true; - materialized_storage->setNestedStorageID(nested_table_id); - nested_storage = materialized_storage->prepare(); + auto nested_context = materialized_storage->getNestedTableContext(); - auto nested_storage_metadata = nested_storage->getInMemoryMetadataPtr(); - auto nested_sample_block = nested_storage_metadata->getSampleBlock(); - LOG_TRACE(log, "Updated table {}.{} ({}). 
New structure: {}", - nested_table_id.database_name, nested_table_id.table_name, toString(nested_table_id.uuid), nested_sample_block.dumpStructure()); + try + { + auto materialized_table_lock = materialized_storage->lockForShare(String(), context->getSettingsRef().lock_acquire_timeout); + InterpreterRenameQuery(ast_rename, nested_context).execute(); - auto materialized_storage_metadata = nested_storage->getInMemoryMetadataPtr(); - auto materialized_sample_block = materialized_storage_metadata->getSampleBlock(); + { + auto nested_storage = DatabaseCatalog::instance().getTable(StorageID(table_id.database_name, table_id.table_name), + nested_context); + auto nested_table_lock = nested_storage->lockForShare(String(), context->getSettingsRef().lock_acquire_timeout); + auto nested_table_id = nested_storage->getStorageID(); - assertBlocksHaveEqualStructure(nested_sample_block, materialized_sample_block, "while reloading table in the background"); + materialized_storage->setNestedStorageID(nested_table_id); + nested_storage = materialized_storage->prepare(); - /// Pass pointer to new nested table into replication consumer, remove current table from skip list and set start lsn position. - consumer->updateNested(table_name, nested_storage, relation_id, start_lsn); + auto nested_storage_metadata = nested_storage->getInMemoryMetadataPtr(); + auto nested_sample_block = nested_storage_metadata->getSampleBlock(); + LOG_TRACE(log, "Updated table {}. New structure: {}", + nested_table_id.getNameForLogs(), nested_sample_block.dumpStructure()); + + auto materialized_storage_metadata = nested_storage->getInMemoryMetadataPtr(); + auto materialized_sample_block = materialized_storage_metadata->getSampleBlock(); + + assertBlocksHaveEqualStructure(nested_sample_block, materialized_sample_block, "while reloading table in the background"); + + /// Pass pointer to new nested table into replication consumer, remove current table from skip list and set start lsn position. + consumer->updateNested(table_name, nested_storage, relation_id, start_lsn); + } + + LOG_DEBUG(log, "Dropping table {}", temp_table_id.getNameForLogs()); + InterpreterDropQuery::executeDropQuery(ASTDropQuery::Kind::Drop, nested_context, nested_context, temp_table_id, true); } + catch (...) + { + tryLogCurrentException(__PRETTY_FUNCTION__); + } + } - LOG_DEBUG(log, "Dropping table {}.{} ({})", temp_table_id.database_name, temp_table_id.table_name, toString(temp_table_id.uuid)); - InterpreterDropQuery::executeDropQuery(ASTDropQuery::Kind::Drop, nested_context, nested_context, temp_table_id, true); - } - catch (...) - { - tryLogCurrentException(__PRETTY_FUNCTION__); - } + dropReplicationSlot(tx, /* temporary */true); + tx.commit(); + } + catch (...) + { + tryLogCurrentException(__PRETTY_FUNCTION__); } - - dropReplicationSlot(tx, /* temporary */true); - tx.commit(); } } diff --git a/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.h b/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.h index 6c919389392..6ae9ec31626 100644 --- a/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.h +++ b/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.h @@ -1,6 +1,6 @@ #pragma once -#include "MaterializePostgreSQLConsumer.h" +#include "MaterializedPostgreSQLConsumer.h" #include #include @@ -13,7 +13,7 @@ namespace DB /// exist in CH, it can be loaded via snapshot while stream is stopped and then comparing wal positions with /// current lsn and table start lsn. 
-class StorageMaterializePostgreSQL; +class StorageMaterializedPostgreSQL; class PostgreSQLReplicationHandler { @@ -26,7 +26,7 @@ public: ContextPtr context_, const size_t max_block_size_, bool allow_automatic_update_, - bool is_materialize_postgresql_database_, + bool is_materialized_postgresql_database_, const String tables_list = ""); /// Activate task to be run from a separate thread: wait until connection is available and call startReplication(). @@ -39,7 +39,7 @@ public: void shutdownFinal(); /// Add storage pointer to let handler know which tables it needs to keep in sync. - void addStorage(const std::string & table_name, StorageMaterializePostgreSQL * storage); + void addStorage(const std::string & table_name, StorageMaterializedPostgreSQL * storage); /// Fetch list of tables which are going to be replicated. Used for database engine. NameSet fetchRequiredTables(pqxx::connection & connection_); @@ -48,7 +48,7 @@ public: void startSynchronization(bool throw_on_error); private: - using MaterializedStorages = std::unordered_map; + using MaterializedStorages = std::unordered_map; /// Methods to manage Publication. @@ -74,7 +74,7 @@ private: void consumerFunc(); - StoragePtr loadFromSnapshot(std::string & snapshot_name, const String & table_name, StorageMaterializePostgreSQL * materialized_storage); + StoragePtr loadFromSnapshot(std::string & snapshot_name, const String & table_name, StorageMaterializedPostgreSQL * materialized_storage); void reloadFromSnapshot(const std::vector> & relation_data); @@ -95,8 +95,8 @@ private: /// This setting allows to reload table in the background. bool allow_automatic_update = false; - /// To distinguish whether current replication handler belongs to a MaterializePostgreSQL database engine or single storage. - bool is_materialize_postgresql_database; + /// To distinguish whether current replication handler belongs to a MaterializedPostgreSQL database engine or single storage. + bool is_materialized_postgresql_database; /// A comma-separated list of tables, which are going to be replicated for database engine. By default, a whole database is replicated. String tables_list; @@ -107,7 +107,7 @@ private: std::shared_ptr connection; /// Replication consumer. Manages decoding of replication stream and syncing into tables. - std::shared_ptr consumer; + std::shared_ptr consumer; BackgroundSchedulePool::TaskHolder startup_task, consumer_task; @@ -118,7 +118,7 @@ private: /// 2. at replication startup bool new_publication_created = false; - /// MaterializePostgreSQL tables. Used for managing all operations with its internal nested tables. + /// MaterializedPostgreSQL tables. Used for managing all operations with its internal nested tables. MaterializedStorages materialized_storages; UInt64 milliseconds_to_wait; diff --git a/src/Storages/PostgreSQL/StorageMaterializePostgreSQL.cpp b/src/Storages/PostgreSQL/StorageMaterializedPostgreSQL.cpp similarity index 78% rename from src/Storages/PostgreSQL/StorageMaterializePostgreSQL.cpp rename to src/Storages/PostgreSQL/StorageMaterializedPostgreSQL.cpp index 07d13ace7c2..52fe7be35a7 100644 --- a/src/Storages/PostgreSQL/StorageMaterializePostgreSQL.cpp +++ b/src/Storages/PostgreSQL/StorageMaterializedPostgreSQL.cpp @@ -1,4 +1,4 @@ -#include "StorageMaterializePostgreSQL.h" +#include "StorageMaterializedPostgreSQL.h" #if USE_LIBPQXX #include @@ -40,7 +40,7 @@ static const auto TMP_SUFFIX = "_tmp"; /// For the case of single storage.
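For orientation, the single-storage case this comment introduces corresponds to table-level DDL like the following (host, credentials and the per-table setting value are illustrative):

``` sql
CREATE TABLE test.postgresql_replica (key UInt64, value UInt64)
ENGINE = MaterializedPostgreSQL('postgres1:5432', 'postgres_database', 'postgresql_replica', 'postgres_user', 'postgres_password')
PRIMARY KEY key
SETTINGS materialized_postgresql_max_block_size = 8192;
```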
-StorageMaterializePostgreSQL::StorageMaterializePostgreSQL( +StorageMaterializedPostgreSQL::StorageMaterializedPostgreSQL( const StorageID & table_id_, bool is_attach_, const String & remote_database_name, @@ -48,10 +48,10 @@ StorageMaterializePostgreSQL::StorageMaterializePostgreSQL( const postgres::ConnectionInfo & connection_info, const StorageInMemoryMetadata & storage_metadata, ContextPtr context_, - std::unique_ptr replication_settings) + std::unique_ptr replication_settings) : IStorage(table_id_) , WithContext(context_->getGlobalContext()) - , is_materialize_postgresql_database(false) + , is_materialized_postgresql_database(false) , has_nested(false) , nested_context(makeNestedTableContext(context_->getGlobalContext())) , nested_table_id(StorageID(table_id_.database_name, getNestedTableName())) @@ -59,7 +59,7 @@ StorageMaterializePostgreSQL::StorageMaterializePostgreSQL( , is_attach(is_attach_) { if (table_id_.uuid == UUIDHelpers::Nil) - throw Exception(ErrorCodes::BAD_ARGUMENTS, "Storage MaterializePostgreSQL is allowed only for Atomic database"); + throw Exception(ErrorCodes::BAD_ARGUMENTS, "Storage MaterializedPostgreSQL is allowed only for Atomic database"); setInMemoryMetadata(storage_metadata); @@ -70,31 +70,31 @@ StorageMaterializePostgreSQL::StorageMaterializePostgreSQL( table_id_.database_name, connection_info, getContext(), - replication_settings->materialize_postgresql_max_block_size.value, - /* allow_automatic_update */ false, /* is_materialize_postgresql_database */false); + replication_settings->materialized_postgresql_max_block_size.value, + /* allow_automatic_update */ false, /* is_materialized_postgresql_database */false); } /// For the case of MaterializedPostgreSQL database engine. /// It is used when nested ReplacingMergeTree table has not yet been created by replication thread. /// In this case this storage can't be used for read queries. -StorageMaterializePostgreSQL::StorageMaterializePostgreSQL(const StorageID & table_id_, ContextPtr context_) +StorageMaterializedPostgreSQL::StorageMaterializedPostgreSQL(const StorageID & table_id_, ContextPtr context_) : IStorage(table_id_) , WithContext(context_->getGlobalContext()) - , is_materialize_postgresql_database(true) + , is_materialized_postgresql_database(true) , has_nested(false) , nested_context(makeNestedTableContext(context_->getGlobalContext())) { } -/// Constructor for MaterializePostgreSQL table engine - for the case of MaterializePosgreSQL database engine. +/// Constructor for MaterializedPostgreSQL table engine - for the case of MaterializedPostgreSQL database engine. /// It is used when nested ReplacingMergeTree table has already been created by replication thread. /// This storage is ready to handle read queries.
-StorageMaterializePostgreSQL::StorageMaterializePostgreSQL(StoragePtr nested_storage_, ContextPtr context_) +StorageMaterializedPostgreSQL::StorageMaterializedPostgreSQL(StoragePtr nested_storage_, ContextPtr context_) : IStorage(nested_storage_->getStorageID()) , WithContext(context_->getGlobalContext()) - , is_materialize_postgresql_database(true) + , is_materialized_postgresql_database(true) , has_nested(true) , nested_context(makeNestedTableContext(context_->getGlobalContext())) , nested_table_id(nested_storage_->getStorageID()) @@ -105,72 +105,82 @@ StorageMaterializePostgreSQL::StorageMaterializePostgreSQL(StoragePtr nested_sto /// A temporary clone table might be created for current table in order to update its schema and reload /// all data in the background while current table will still handle read requests. -StoragePtr StorageMaterializePostgreSQL::createTemporary() const +StoragePtr StorageMaterializedPostgreSQL::createTemporary() const { auto table_id = getStorageID(); - auto new_context = Context::createCopy(context); + auto tmp_table_id = StorageID(table_id.database_name, table_id.table_name + TMP_SUFFIX); - return StorageMaterializePostgreSQL::create(StorageID(table_id.database_name, table_id.table_name + TMP_SUFFIX), new_context); + /// If for some reason it already exists - drop it. + auto tmp_storage = DatabaseCatalog::instance().tryGetTable(tmp_table_id, nested_context); + if (tmp_storage) + { + LOG_TRACE(&Poco::Logger::get("MaterializedPostgreSQLStorage"), "Temporary table {} already exists, dropping", tmp_table_id.getNameForLogs()); + InterpreterDropQuery::executeDropQuery(ASTDropQuery::Kind::Drop, getContext(), getContext(), tmp_table_id, /* no delay */true); + } + + auto new_context = Context::createCopy(context); + return StorageMaterializedPostgreSQL::create(tmp_table_id, new_context); } -StoragePtr StorageMaterializePostgreSQL::getNested() const +StoragePtr StorageMaterializedPostgreSQL::getNested() const { return DatabaseCatalog::instance().getTable(getNestedStorageID(), nested_context); } -StoragePtr StorageMaterializePostgreSQL::tryGetNested() const +StoragePtr StorageMaterializedPostgreSQL::tryGetNested() const { return DatabaseCatalog::instance().tryGetTable(getNestedStorageID(), nested_context); } -String StorageMaterializePostgreSQL::getNestedTableName() const +String StorageMaterializedPostgreSQL::getNestedTableName() const { auto table_id = getStorageID(); - if (is_materialize_postgresql_database) + if (is_materialized_postgresql_database) return table_id.table_name; return toString(table_id.uuid) + NESTED_TABLE_SUFFIX; } -StorageID StorageMaterializePostgreSQL::getNestedStorageID() const +StorageID StorageMaterializedPostgreSQL::getNestedStorageID() const { if (nested_table_id.has_value()) return nested_table_id.value(); auto table_id = getStorageID(); throw Exception(ErrorCodes::LOGICAL_ERROR, - "No storageID found for inner table. ({}.{}, {})", table_id.database_name, table_id.table_name, toString(table_id.uuid)); + "No storageID found for inner table. 
({})", table_id.getNameForLogs()); } -void StorageMaterializePostgreSQL::createNestedIfNeeded(PostgreSQLTableStructurePtr table_structure) +void StorageMaterializedPostgreSQL::createNestedIfNeeded(PostgreSQLTableStructurePtr table_structure) { const auto ast_create = getCreateNestedTableQuery(std::move(table_structure)); + auto table_id = getStorageID(); + auto tmp_nested_table_id = StorageID(table_id.database_name, getNestedTableName()); try { InterpreterCreateQuery interpreter(ast_create, nested_context); interpreter.execute(); - auto table_id = getStorageID(); - auto nested_storage = DatabaseCatalog::instance().getTable(StorageID(table_id.database_name, getNestedTableName()), nested_context); - + auto nested_storage = DatabaseCatalog::instance().getTable(tmp_nested_table_id, nested_context); /// Save storage_id with correct uuid. nested_table_id = nested_storage->getStorageID(); } - catch (...) + catch (Exception & e) { + e.addMessage("while creating nested table: {}", tmp_nested_table_id.getNameForLogs()); tryLogCurrentException(__PRETTY_FUNCTION__); } } -std::shared_ptr StorageMaterializePostgreSQL::makeNestedTableContext(ContextPtr from_context) +std::shared_ptr StorageMaterializedPostgreSQL::makeNestedTableContext(ContextPtr from_context) { auto new_context = Context::createCopy(from_context); new_context->setInternalQuery(true); @@ -178,7 +188,7 @@ std::shared_ptr StorageMaterializePostgreSQL::makeNestedTableContext(Co } -StoragePtr StorageMaterializePostgreSQL::prepare() +StoragePtr StorageMaterializedPostgreSQL::prepare() { auto nested_table = getNested(); setInMemoryMetadata(nested_table->getInMemoryMetadata()); @@ -187,9 +197,10 @@ StoragePtr StorageMaterializePostgreSQL::prepare() } -void StorageMaterializePostgreSQL::startup() +void StorageMaterializedPostgreSQL::startup() { - if (!is_materialize_postgresql_database) + /// replication_handler != nullptr only in case of single table engine MaterializedPostgreSQL. + if (replication_handler) { replication_handler->addStorage(remote_table_name, this); @@ -202,7 +213,7 @@ void StorageMaterializePostgreSQL::startup() else { /// Start synchronization preliminary setup immediately and throw in case of failure. - /// It should be guaranteed that if MaterializePostgreSQL table was created successfully, then + /// It should be guaranteed that if MaterializedPostgreSQL table was created successfully, then /// its nested table was also created. replication_handler->startSynchronization(/* throw_on_error */ true); } @@ -210,25 +221,29 @@ void StorageMaterializePostgreSQL::startup() } -void StorageMaterializePostgreSQL::shutdown() +void StorageMaterializedPostgreSQL::shutdown() { if (replication_handler) replication_handler->shutdown(); } -void StorageMaterializePostgreSQL::dropInnerTableIfAny(bool no_delay, ContextPtr local_context) +void StorageMaterializedPostgreSQL::dropInnerTableIfAny(bool no_delay, ContextPtr local_context) { - if (replication_handler) - replication_handler->shutdownFinal(); + /// If it is a table with database engine MaterializedPostgreSQL - return, becuase delition of + /// internal tables is managed there. 
+ if (is_materialized_postgresql_database) + return; + + replication_handler->shutdownFinal(); auto nested_table = getNested(); - if (nested_table && !is_materialize_postgresql_database) + if (nested_table) InterpreterDropQuery::executeDropQuery(ASTDropQuery::Kind::Drop, getContext(), local_context, getNestedStorageID(), no_delay); } -NamesAndTypesList StorageMaterializePostgreSQL::getVirtuals() const +NamesAndTypesList StorageMaterializedPostgreSQL::getVirtuals() const { return NamesAndTypesList{ {"_sign", std::make_shared()}, @@ -237,7 +252,7 @@ NamesAndTypesList StorageMaterializePostgreSQL::getVirtuals() const } -Pipe StorageMaterializePostgreSQL::read( +Pipe StorageMaterializedPostgreSQL::read( const Names & column_names, const StorageMetadataPtr & metadata_snapshot, SelectQueryInfo & query_info, @@ -253,7 +268,7 @@ Pipe StorageMaterializePostgreSQL::read( } -std::shared_ptr StorageMaterializePostgreSQL::getMaterializedColumnsDeclaration( +std::shared_ptr StorageMaterializedPostgreSQL::getMaterializedColumnsDeclaration( const String name, const String type, UInt64 default_value) { auto column_declaration = std::make_shared(); @@ -271,7 +286,7 @@ std::shared_ptr StorageMaterializePostgreSQL::getMateriali } -ASTPtr StorageMaterializePostgreSQL::getColumnDeclaration(const DataTypePtr & data_type) const +ASTPtr StorageMaterializedPostgreSQL::getColumnDeclaration(const DataTypePtr & data_type) const { WhichDataType which(data_type); @@ -312,17 +327,17 @@ ASTPtr StorageMaterializePostgreSQL::getColumnDeclaration(const DataTypePtr & da } -/// For single storage MaterializePostgreSQL get columns and primary key columns from storage definition. -/// For database engine MaterializePostgreSQL get columns and primary key columns by fetching from PostgreSQL, also using the same +/// For single storage MaterializedPostgreSQL get columns and primary key columns from storage definition. +/// For database engine MaterializedPostgreSQL get columns and primary key columns by fetching from PostgreSQL, also using the same /// transaction with snapshot, which is used for initial tables dump. 
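Reading `getCreateNestedTableQuery()` below is easier with the target DDL in mind. Roughly, and only as an assumption inferred from the materialized columns and the ReplacingMergeTree usage in this file (not literal engine output):

``` sql
CREATE TABLE test.`<table_uuid>_nested`
(
    key UInt64,
    value UInt64,
    `_sign` Int8 MATERIALIZED 1,
    `_version` UInt64 MATERIALIZED 1
)
ENGINE = ReplacingMergeTree(_version)
ORDER BY key;
```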
-ASTPtr StorageMaterializePostgreSQL::getCreateNestedTableQuery(PostgreSQLTableStructurePtr table_structure) +ASTPtr StorageMaterializedPostgreSQL::getCreateNestedTableQuery(PostgreSQLTableStructurePtr table_structure) { auto create_table_query = std::make_shared(); auto table_id = getStorageID(); create_table_query->table = getNestedTableName(); create_table_query->database = table_id.database_name; - if (is_materialize_postgresql_database) + if (is_materialized_postgresql_database) create_table_query->uuid = table_id.uuid; auto columns_declare_list = std::make_shared(); @@ -333,7 +348,7 @@ ASTPtr StorageMaterializePostgreSQL::getCreateNestedTableQuery(PostgreSQLTableSt const auto & columns = metadata_snapshot->getColumns(); NamesAndTypesList ordinary_columns_and_types; - if (!is_materialize_postgresql_database) + if (!is_materialized_postgresql_database) { ordinary_columns_and_types = columns.getOrdinary(); } @@ -416,19 +431,19 @@ ASTPtr StorageMaterializePostgreSQL::getCreateNestedTableQuery(PostgreSQLTableSt } -void registerStorageMaterializePostgreSQL(StorageFactory & factory) +void registerStorageMaterializedPostgreSQL(StorageFactory & factory) { auto creator_fn = [](const StorageFactory::Arguments & args) { ASTs & engine_args = args.engine_args; bool has_settings = args.storage_def->settings; - auto postgresql_replication_settings = std::make_unique(); + auto postgresql_replication_settings = std::make_unique(); if (has_settings) postgresql_replication_settings->loadFromQuery(*args.storage_def); if (engine_args.size() != 5) - throw Exception("Storage MaterializePostgreSQL requires 5 parameters: " + throw Exception("Storage MaterializedPostgreSQL requires 5 parameters: " "PostgreSQL('host:port', 'database', 'table', 'username', 'password')", ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH); @@ -443,7 +458,7 @@ void registerStorageMaterializePostgreSQL(StorageFactory & factory) args.storage_def->set(args.storage_def->order_by, args.storage_def->primary_key->clone()); if (!args.storage_def->order_by) - throw Exception("Storage MaterializePostgreSQL needs order by key or primary key", ErrorCodes::BAD_ARGUMENTS); + throw Exception("Storage MaterializedPostgreSQL needs order by key or primary key", ErrorCodes::BAD_ARGUMENTS); if (args.storage_def->primary_key) metadata.primary_key = KeyDescription::getKeyFromAST(args.storage_def->primary_key->ptr(), metadata.columns, args.getContext()); @@ -462,14 +477,14 @@ void registerStorageMaterializePostgreSQL(StorageFactory & factory) engine_args[3]->as().value.safeGet(), engine_args[4]->as().value.safeGet()); - return StorageMaterializePostgreSQL::create( + return StorageMaterializedPostgreSQL::create( args.table_id, args.attach, remote_database, remote_table, connection_info, metadata, args.getContext(), std::move(postgresql_replication_settings)); }; factory.registerStorage( - "MaterializePostgreSQL", + "MaterializedPostgreSQL", creator_fn, StorageFactory::StorageFeatures{ .supports_settings = true, diff --git a/src/Storages/PostgreSQL/StorageMaterializePostgreSQL.h b/src/Storages/PostgreSQL/StorageMaterializedPostgreSQL.h similarity index 73% rename from src/Storages/PostgreSQL/StorageMaterializePostgreSQL.h rename to src/Storages/PostgreSQL/StorageMaterializedPostgreSQL.h index 622f40b14ad..5d18a0b16b7 100644 --- a/src/Storages/PostgreSQL/StorageMaterializePostgreSQL.h +++ b/src/Storages/PostgreSQL/StorageMaterializedPostgreSQL.h @@ -6,7 +6,7 @@ #if USE_LIBPQXX #include "PostgreSQLReplicationHandler.h" -#include "MaterializePostgreSQLSettings.h"
+#include "MaterializedPostgreSQLSettings.h" #include #include @@ -24,9 +24,9 @@ namespace DB { -/** Case of single MaterializePostgreSQL table engine. +/** Case of single MaterializedPostgreSQL table engine. * - * A user creates a table with engine MaterializePostgreSQL. Order by expression must be specified (needed for + * A user creates a table with engine MaterializedPostgreSQL. Order by expression must be specified (needed for * nested ReplacingMergeTree table). This storage owns its own replication handler, which loads table data * from PostgreSQL into nested ReplacingMergeTree table. If table is not created, but attached, replication handler * will not start loading-from-snapshot procedure, instead it will continue from last committed lsn. @@ -37,47 +37,47 @@ namespace DB **/ -/** Case of MaterializePostgreSQL database engine. +/** Case of MaterializedPostgreSQL database engine. * - * MaterializePostgreSQL table exists only in memory and acts as a wrapper for nested table, i.e. only provides an + * MaterializedPostgreSQL table exists only in memory and acts as a wrapper for nested table, i.e. only provides an * interface to work with nested table. Both tables share the same StorageID. * * Main table is never created or dropped via database method. The only way database engine interacts with - * MaterializePostgreSQL table - in tryGetTable() method, a MaterializePostgreSQL table is returned in order to wrap + * MaterializedPostgreSQL table - in tryGetTable() method, a MaterializedPostgreSQL table is returned in order to wrap * and redirect read requests. Set of such wrapper-tables is cached inside database engine. All other methods in * regard to materializePostgreSQL table are handled by replication handler. * * All database methods, apart from tryGetTable(), are devoted only to nested table. - * NOTE: It makes sense to allow rename method for MaterializePostgreSQL table via database method. + * NOTE: It makes sense to allow rename method for MaterializedPostgreSQL table via database method. * TODO: Make sure replication-to-table data channel is done only by relation_id. * * Also main table has the same InMemoryMetadata as its nested table, so if metadata of nested table changes - main table also has - * to update its metadata, because all read requests are passed to MaterializePostgreSQL table and then it redirects read + * to update its metadata, because all read requests are passed to MaterializedPostgreSQL table and then it redirects read * into nested table. * - * When there is a need to update table structure, there will be created a new MaterializePostgreSQL table with its own nested table, + * When there is a need to update table structure, there will be created a new MaterializedPostgreSQL table with its own nested table, * it will have updated table schema and all data will be loaded from scratch in the background, while previous table with outadted table * structure will still serve read requests. When data is loaded, nested tables will be swapped, metadata of metarialzied table will be * updated according to nested table. 
 *
 **/

-class StorageMaterializePostgreSQL final : public shared_ptr_helper, public IStorage, WithContext
+class StorageMaterializedPostgreSQL final : public shared_ptr_helper, public IStorage, WithContext
 {
-    friend struct shared_ptr_helper;
+    friend struct shared_ptr_helper;

 public:
-    StorageMaterializePostgreSQL(const StorageID & table_id_, ContextPtr context_);
+    StorageMaterializedPostgreSQL(const StorageID & table_id_, ContextPtr context_);

-    StorageMaterializePostgreSQL(StoragePtr nested_storage_, ContextPtr context_);
+    StorageMaterializedPostgreSQL(StoragePtr nested_storage_, ContextPtr context_);

-    String getName() const override { return "MaterializePostgreSQL"; }
+    String getName() const override { return "MaterializedPostgreSQL"; }

     void startup() override;
     void shutdown() override;

-    /// Used only for single MaterializePostgreSQL storage.
+    /// Used only for single MaterializedPostgreSQL storage.
     void dropInnerTableIfAny(bool no_delay, ContextPtr local_context) override;

     NamesAndTypesList getVirtuals() const override;

@@ -102,7 +102,7 @@ public:

     StoragePtr tryGetNested() const;

-    /// Create a temporary MaterializePostgreSQL table with current_table_name + TMP_SUFFIX.
+    /// Create a temporary MaterializedPostgreSQL table with current_table_name + TMP_SUFFIX.
     /// An empty wrapper is returned - it does not have inMemory metadata, just acts as an empty wrapper over
     /// temporary nested, which will be created shortly after.
     StoragePtr createTemporary() const;

@@ -120,7 +120,7 @@ public:
     StoragePtr prepare();

 protected:
-    StorageMaterializePostgreSQL(
+    StorageMaterializedPostgreSQL(
         const StorageID & table_id_,
         bool is_attach_,
         const String & remote_database_name,
@@ -128,7 +128,7 @@ protected:
         const postgres::ConnectionInfo & connection_info,
         const StorageInMemoryMetadata & storage_metadata,
         ContextPtr context_,
-        std::unique_ptr replication_settings);
+        std::unique_ptr replication_settings);

 private:
     static std::shared_ptr getMaterializedColumnsDeclaration(
@@ -140,37 +140,36 @@ private:

     String getNestedTableName() const;

-    /// Not nullptr only for single MaterializePostgreSQL storage, because for MaterializePostgreSQL
+    /// Not nullptr only for single MaterializedPostgreSQL storage, because for MaterializedPostgreSQL
     /// database engine there is one replication handler for all tables.
     std::unique_ptr replication_handler;

-    /// Distinguish between single MaterilizePostgreSQL table engine and MaterializePostgreSQL database engine,
+    /// Distinguish between single MaterializedPostgreSQL table engine and MaterializedPostgreSQL database engine,
     /// because table with engine MaterializedPostgreSQL acts differently in each case.
-    bool is_materialize_postgresql_database = false;
+    bool is_materialized_postgresql_database = false;

     /// Will be set to `true` only once - when nested table was loaded by replication thread.
-    /// After that, it will never be changed. Needed for MaterializePostgreSQL database engine
+    /// After that, it will never be changed. Needed for MaterializedPostgreSQL database engine
     /// because there is an invariant - table exists only if its nested table exists, but nested
     /// table is not loaded immediately. It is made atomic, because it is accessed only by database engine,
     /// and updated by replication handler (only once).
     std::atomic has_nested = false;

-    /// Nested table context is a copy of global context, but contains query context with defined
-    /// ReplacingMergeTree storage in factoriesLog.
This is needed to let database engine know - /// whether to access nested table or a wrapper over nested (materialized table). + /// Nested table context is a copy of global context, but modified to answer isInternalQuery() == true. + /// This is needed to let database engine know whether to access nested table or a wrapper over nested (materialized table). ContextMutablePtr nested_context; /// Save nested storageID to be able to fetch it. It is set once nested is created and will be /// updated only when nested is reloaded or renamed. std::optional nested_table_id; - /// Needed only for the case of single MaterializePostgreSQL storage - in order to make + /// Needed only for the case of single MaterializedPostgreSQL storage - in order to make /// delayed storage forwarding into replication handler. String remote_table_name; - /// Needed only for the case of single MaterializePostgreSQL storage, because in case of create + /// Needed only for the case of single MaterializedPostgreSQL storage, because in case of create /// query (not attach) initial setup will be done immediately and error message is thrown at once. - /// It results in the fact: single MaterializePostgreSQL storage is created only if its nested table is created. + /// It results in the fact: single MaterializedPostgreSQL storage is created only if its nested table is created. /// In case of attach - this setup will be done in a separate thread in the background. It will also /// be checked for nested table and attempted to load it if it does not exist for some reason. bool is_attach = true; diff --git a/src/Storages/registerStorages.cpp b/src/Storages/registerStorages.cpp index b1c8f17e2b8..0b302ee437a 100644 --- a/src/Storages/registerStorages.cpp +++ b/src/Storages/registerStorages.cpp @@ -60,7 +60,7 @@ void registerStorageEmbeddedRocksDB(StorageFactory & factory); #if USE_LIBPQXX void registerStoragePostgreSQL(StorageFactory & factory); -void registerStorageMaterializePostgreSQL(StorageFactory & factory); +void registerStorageMaterializedPostgreSQL(StorageFactory & factory); #endif #if USE_MYSQL || USE_LIBPQXX @@ -122,7 +122,7 @@ void registerStorages() #if USE_LIBPQXX registerStoragePostgreSQL(factory); - registerStorageMaterializePostgreSQL(factory); + registerStorageMaterializedPostgreSQL(factory); #endif #if USE_MYSQL || USE_LIBPQXX diff --git a/tests/integration/test_postgresql_replica_database_engine/configs/users.xml b/tests/integration/test_postgresql_replica_database_engine/configs/users.xml index 74d2737c821..1cdece49459 100644 --- a/tests/integration/test_postgresql_replica_database_engine/configs/users.xml +++ b/tests/integration/test_postgresql_replica_database_engine/configs/users.xml @@ -2,7 +2,7 @@ - 1 + 1 diff --git a/tests/integration/test_postgresql_replica_database_engine/test.py b/tests/integration/test_postgresql_replica_database_engine/test.py index d1e590704fd..f0ed06c7ad3 100644 --- a/tests/integration/test_postgresql_replica_database_engine/test.py +++ b/tests/integration/test_postgresql_replica_database_engine/test.py @@ -62,7 +62,7 @@ def create_materialized_db(ip, port, materialized_database='test_database', postgres_database='postgres_database', settings=[]): - create_query = "CREATE DATABASE {} ENGINE = MaterializePostgreSQL('{}:{}', '{}', 'postgres', 'mysecretpassword')".format(materialized_database, ip, port, postgres_database) + create_query = "CREATE DATABASE {} ENGINE = MaterializedPostgreSQL('{}:{}', '{}', 'postgres', 'mysecretpassword')".format(materialized_database, ip, port, 
postgres_database) if len(settings) > 0: create_query += " SETTINGS " for i in range(len(settings)): @@ -115,16 +115,16 @@ def assert_nested_table_is_created(table_name, materialized_database='test_datab @pytest.mark.timeout(30) def check_tables_are_synchronized(table_name, order_by='key', postgres_database='postgres_database', materialized_database='test_database'): - assert_nested_table_is_created(table_name, materialized_database) + assert_nested_table_is_created(table_name, materialized_database) - expected = instance.query('select * from {}.{} order by {};'.format(postgres_database, table_name, order_by)) + expected = instance.query('select * from {}.{} order by {};'.format(postgres_database, table_name, order_by)) + result = instance.query('select * from {}.{} order by {};'.format(materialized_database, table_name, order_by)) + + while result != expected: + time.sleep(0.5) result = instance.query('select * from {}.{} order by {};'.format(materialized_database, table_name, order_by)) - while result != expected: - time.sleep(0.5) - result = instance.query('select * from {}.{} order by {};'.format(materialized_database, table_name, order_by)) - - assert(result == expected) + assert(result == expected) @pytest.fixture(scope="module") @@ -328,7 +328,7 @@ def test_load_and_sync_subset_of_database_tables(started_cluster): create_materialized_db(ip=started_cluster.postgres_ip, port=started_cluster.postgres_port, - settings=["materialize_postgresql_tables_list = '{}'".format(publication_tables)]) + settings=["materialized_postgresql_tables_list = '{}'".format(publication_tables)]) assert 'test_database' in instance.query('SHOW DATABASES') time.sleep(1) @@ -391,7 +391,7 @@ def test_clickhouse_restart(started_cluster): create_postgres_table(cursor, 'postgresql_replica_{}'.format(i)); instance.query("INSERT INTO postgres_database.postgresql_replica_{} SELECT number, {} from numbers(50)".format(i, i)) - instance.query("CREATE DATABASE test_database ENGINE = MaterializePostgreSQL('postgres1:5432', 'postgres_database', 'postgres', 'mysecretpassword')") + instance.query("CREATE DATABASE test_database ENGINE = MaterializedPostgreSQL('postgres1:5432', 'postgres_database', 'postgres', 'mysecretpassword')") for i in range(NUM_TABLES): table_name = 'postgresql_replica_{}'.format(i) @@ -449,7 +449,7 @@ def test_table_schema_changes(started_cluster): create_materialized_db(ip=started_cluster.postgres_ip, port=started_cluster.postgres_port, - settings=["materialize_postgresql_allow_automatic_update = 1"]) + settings=["materialized_postgresql_allow_automatic_update = 1"]) for i in range(NUM_TABLES): instance.query("INSERT INTO postgres_database.postgresql_replica_{} SELECT 25 + number, {}, {}, {} from numbers(25)".format(i, i, i, i)) @@ -608,7 +608,7 @@ def test_virtual_columns(started_cluster): create_materialized_db(ip=started_cluster.postgres_ip, port=started_cluster.postgres_port, - settings=["materialize_postgresql_allow_automatic_update = 1"]) + settings=["materialized_postgresql_allow_automatic_update = 1"]) assert_nested_table_is_created('postgresql_replica_0') instance.query("INSERT INTO postgres_database.postgresql_replica_0 SELECT number, number from numbers(10)") check_tables_are_synchronized('postgresql_replica_0'); diff --git a/tests/integration/test_storage_postgresql_replica/test.py b/tests/integration/test_storage_postgresql_replica/test.py index e448cfc8e99..d5bb4a85733 100644 --- a/tests/integration/test_storage_postgresql_replica/test.py +++ 
b/tests/integration/test_storage_postgresql_replica/test.py @@ -40,7 +40,7 @@ def create_clickhouse_postgres_db(ip, port, name='postgres_database'): def create_materialized_table(ip, port): instance.query(''' CREATE TABLE test.postgresql_replica (key UInt64, value UInt64) - ENGINE = MaterializePostgreSQL( + ENGINE = MaterializedPostgreSQL( '{}:{}', 'postgres_database', 'postgresql_replica', 'postgres', 'mysecretpassword') PRIMARY KEY key; '''.format(ip, port)) From c115c25e9ce4f2c501e348728b5b25773c8cfad9 Mon Sep 17 00:00:00 2001 From: kssenii Date: Sun, 27 Jun 2021 22:05:20 +0300 Subject: [PATCH 475/931] Some change --- .../integration/test_storage_rabbitmq/test.py | 35 ++++++++++--------- 1 file changed, 18 insertions(+), 17 deletions(-) diff --git a/tests/integration/test_storage_rabbitmq/test.py b/tests/integration/test_storage_rabbitmq/test.py index 1f14886e50f..b5b60b2c8cc 100644 --- a/tests/integration/test_storage_rabbitmq/test.py +++ b/tests/integration/test_storage_rabbitmq/test.py @@ -808,22 +808,15 @@ def test_rabbitmq_many_inserts(rabbitmq_cluster): rabbitmq_routing_key_list = 'insert2', rabbitmq_format = 'TSV', rabbitmq_row_delimiter = '\\n'; - CREATE TABLE test.view_many (key UInt64, value UInt64) - ENGINE = MergeTree - ORDER BY key - SETTINGS old_parts_lifetime=5, cleanup_delay_period=2, cleanup_delay_period_random_add=3; - CREATE MATERIALIZED VIEW test.consumer_many TO test.view_many AS - SELECT * FROM test.rabbitmq_consume; ''') - messages_num = 1000 + messages_num = 10000 + values = [] + for i in range(messages_num): + values.append("({i}, {i})".format(i=i)) + values = ','.join(values) def insert(): - values = [] - for i in range(messages_num): - values.append("({i}, {i})".format(i=i)) - values = ','.join(values) - while True: try: instance.query("INSERT INTO test.rabbitmq_many VALUES {}".format(values)) @@ -835,18 +828,29 @@ def test_rabbitmq_many_inserts(rabbitmq_cluster): raise threads = [] - threads_num = 20 + threads_num = 10 for _ in range(threads_num): threads.append(threading.Thread(target=insert)) for thread in threads: time.sleep(random.uniform(0, 1)) thread.start() + instance.query(''' + CREATE TABLE test.view_many (key UInt64, value UInt64) + ENGINE = MergeTree + ORDER BY key; + CREATE MATERIALIZED VIEW test.consumer_many TO test.view_many AS + SELECT * FROM test.rabbitmq_consume; + ''') + + for thread in threads: + thread.join() + while True: result = instance.query('SELECT count() FROM test.view_many') - time.sleep(1) if int(result) == messages_num * threads_num: break + time.sleep(1) instance.query(''' DROP TABLE test.rabbitmq_consume; @@ -855,9 +859,6 @@ def test_rabbitmq_many_inserts(rabbitmq_cluster): DROP TABLE test.view_many; ''') - for thread in threads: - thread.join() - assert int(result) == messages_num * threads_num, 'ClickHouse lost some messages: {}'.format(result) From 0772e6f3ee996ea883fcd8c4da3b30cdef507128 Mon Sep 17 00:00:00 2001 From: Kseniia Sumarokova <54203879+kssenii@users.noreply.github.com> Date: Sun, 27 Jun 2021 23:20:44 +0300 Subject: [PATCH 476/931] Update postgresql.md --- docs/ru/sql-reference/table-functions/postgresql.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/ru/sql-reference/table-functions/postgresql.md b/docs/ru/sql-reference/table-functions/postgresql.md index 7811033b9c9..50f651527c5 100644 --- a/docs/ru/sql-reference/table-functions/postgresql.md +++ b/docs/ru/sql-reference/table-functions/postgresql.md @@ -47,13 +47,13 @@ PostgreSQL массивы конвертируются в массивы ClickHo 
Поддерживает несколько реплик, которые должны быть перечислены через `|`. Например: ```sql -SELECT DISTINCT(name) FROM postgresql(`postgres{1|2|3}:5432`, 'clickhouse', 'test_replicas', 'postgres', 'mysecretpassword'); +SELECT name FROM postgresql(`postgres{1|2|3}:5432`, 'postgres_database', 'postgres_table', 'user', 'password'); ``` или ```sql -SELECT DISTINCT(name) FROM postgresql(`postgres2:5431|postgres3:5432`, 'clickhouse', 'test_replicas', 'postgres', 'mysecretpassword'); +SELECT name FROM postgresql(`postgres1:5431|postgres2:5432`, 'postgres_database', 'postgres_table', 'user', 'password'); ``` При использовании словаря PostgreSQL поддерживается приоритет реплик. Чем больше номер реплики, тем ниже ее приоритет. Наивысший приоритет у реплики с номером `0`. From fefdd52b6d5923686138bab7ed095f498746a557 Mon Sep 17 00:00:00 2001 From: Kseniia Sumarokova <54203879+kssenii@users.noreply.github.com> Date: Sun, 27 Jun 2021 23:22:35 +0300 Subject: [PATCH 477/931] Update mysql.md --- docs/ru/sql-reference/table-functions/mysql.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/ru/sql-reference/table-functions/mysql.md b/docs/ru/sql-reference/table-functions/mysql.md index 4c5a913971d..e21d1a7fa06 100644 --- a/docs/ru/sql-reference/table-functions/mysql.md +++ b/docs/ru/sql-reference/table-functions/mysql.md @@ -41,13 +41,13 @@ mysql('host:port', 'database', 'table', 'user', 'password'[, replace_query, 'on_ Поддерживает несколько реплик, которые должны быть перечислены через `|`. Например: ```sql -SELECT DISTINCT(name) FROM mysql(`mysql{1|2|3}:3306`, 'clickhouse', 'test_replicas', 'root', 'clickhouse'); +SELECT name FROM mysql(`mysql{1|2|3}:3306`, 'mysql_database', 'mysql_table', 'user', 'password'); ``` или ```sql -SELECT DISTINCT(name) FROM mysql(`mysql1:3306|mysql2:3306|mysql3:3306`, 'clickhouse', 'test_replicas', 'root', 'clickhouse'); +SELECT name FROM mysql(`mysql1:3306|mysql2:3306|mysql3:3306`, 'mysql_database', 'mysql_table', 'user', 'password'); ``` **Возвращаемое значение** From 35cd86c350bc74f15fa619cb5987b5673a103a76 Mon Sep 17 00:00:00 2001 From: Kseniia Sumarokova <54203879+kssenii@users.noreply.github.com> Date: Sun, 27 Jun 2021 23:25:36 +0300 Subject: [PATCH 478/931] Update postgresql.md --- docs/en/sql-reference/table-functions/postgresql.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/en/sql-reference/table-functions/postgresql.md b/docs/en/sql-reference/table-functions/postgresql.md index 7ffe97d98af..7ef664de269 100644 --- a/docs/en/sql-reference/table-functions/postgresql.md +++ b/docs/en/sql-reference/table-functions/postgresql.md @@ -47,13 +47,13 @@ PostgreSQL Array types converts into ClickHouse arrays. Supports multiple replicas that must be listed by `|`. For example: ```sql -SELECT DISTINCT(name) FROM postgresql(`postgres{1|2|3}:5432`, 'clickhouse', 'test_replicas', 'postgres', 'mysecretpassword'); +SELECT name FROM postgresql(`postgres{1|2|3}:5432`, 'postgres_database', 'postgres_table', 'user', 'password'); ``` or ```sql -SELECT DISTINCT(name) FROM postgresql(`postgres2:5431|postgres3:5432`, 'clickhouse', 'test_replicas', 'postgres', 'mysecretpassword'); +SELECT name FROM postgresql(`postgres1:5431|postgres2:5432`, 'postgres_database', 'postgres_table', 'user', 'password'); ``` Supports replicas priority for PostgreSQL dictionary source. The bigger the number in map, the less the priority. The highest priority is `0`. 
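For illustration, a short sketch of the replica-list syntax documented in the patches above. The host names, database, table, and credentials are placeholders rather than values from this patch set, and the comment about filtering assumes the documented push-down of simple WHERE conditions to the PostgreSQL server:

```sql
-- The backtick-quoted address expands the `|` pattern into a list of candidate
-- replicas; `postgres{1|2|3}:5432` is shorthand for listing the three hosts explicitly.
SELECT count()
FROM postgresql(`postgres{1|2|3}:5432`, 'postgres_database', 'postgres_table', 'user', 'password');

SELECT count()
FROM postgresql(`postgres1:5432|postgres2:5432`, 'postgres_database', 'postgres_table', 'user', 'password')
WHERE id > 100; -- simple comparisons like this are sent to the PostgreSQL server
```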
From 140d21fae8c211651cf8e5c8422ac285812c8c27 Mon Sep 17 00:00:00 2001
From: Kseniia Sumarokova <54203879+kssenii@users.noreply.github.com>
Date: Sun, 27 Jun 2021 23:26:10 +0300
Subject: [PATCH 479/931] Update mysql.md

---
 docs/en/sql-reference/table-functions/mysql.md | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/docs/en/sql-reference/table-functions/mysql.md b/docs/en/sql-reference/table-functions/mysql.md
index 22e87e78cb1..a174786d4b7 100644
--- a/docs/en/sql-reference/table-functions/mysql.md
+++ b/docs/en/sql-reference/table-functions/mysql.md
@@ -42,13 +42,13 @@ The rest of the conditions and the `LIMIT` sampling constraint are executed in C
 Supports multiple replicas that must be listed by `|`. For example:

 ```sql
-SELECT DISTINCT(name) FROM mysql(`mysql{1|2|3}:3306`, 'clickhouse', 'test_replicas', 'root', 'clickhouse');
+SELECT name FROM mysql(`mysql{1|2|3}:3306`, 'mysql_database', 'mysql_table', 'user', 'password');
 ```

 or

 ```sql
-SELECT DISTINCT(name) FROM mysql(`mysql1:3306|mysql2:3306|mysql3:3306`, 'clickhouse', 'test_replicas', 'root', 'clickhouse');
+SELECT name FROM mysql(`mysql1:3306|mysql2:3306|mysql3:3306`, 'mysql_database', 'mysql_table', 'user', 'password');
 ```

 **Returned Value**

From 375965fb7a0a972ee623cd25c26f64743ada32e5 Mon Sep 17 00:00:00 2001
From: Dmitriy
Date: Sun, 27 Jun 2021 23:45:05 +0300
Subject: [PATCH 480/931] Update ExternalDistributed.md
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Removed support for INSERT queries.

---
 .../engines/table-engines/integrations/ExternalDistributed.md | 2 +-
 .../engines/table-engines/integrations/ExternalDistributed.md | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/docs/en/engines/table-engines/integrations/ExternalDistributed.md b/docs/en/engines/table-engines/integrations/ExternalDistributed.md
index 39fea30f98f..819abdbf9d7 100644
--- a/docs/en/engines/table-engines/integrations/ExternalDistributed.md
+++ b/docs/en/engines/table-engines/integrations/ExternalDistributed.md
@@ -5,7 +5,7 @@ toc_title: ExternalDistributed

 # ExternalDistributed {#externaldistributed}

-The `ExternalDistributed` engine allows to perform `SELECT` and `INSERT` queries on data that is stored on a remote servers MySQL or PostgreSQL. Accepts [MySQL](../../../engines/table-engines/integrations/mysql.md) or [PostgreSQL](../../../engines/table-engines/integrations/postgresql.md) engines as an argument so sharding is possible.
+The `ExternalDistributed` engine allows you to perform `SELECT` queries on data that is stored on remote MySQL or PostgreSQL servers. Accepts [MySQL](../../../engines/table-engines/integrations/mysql.md) or [PostgreSQL](../../../engines/table-engines/integrations/postgresql.md) engines as an argument, so sharding is possible.

 ## Creating a Table {#creating-a-table}

diff --git a/docs/ru/engines/table-engines/integrations/ExternalDistributed.md b/docs/ru/engines/table-engines/integrations/ExternalDistributed.md
index 4c81f242f65..5b4386ff8b9 100644
--- a/docs/ru/engines/table-engines/integrations/ExternalDistributed.md
+++ b/docs/ru/engines/table-engines/integrations/ExternalDistributed.md
@@ -5,7 +5,7 @@ toc_title: ExternalDistributed

 # ExternalDistributed {#externaldistributed}

-Движок `ExternalDistributed` позволяет выполнять запросы `SELECT` и `INSERT` для таблиц на удаленном сервере MySQL или PostgreSQL.
Принимает в качестве аргумента табличные движки [MySQL](../../../engines/table-engines/integrations/mysql.md) или [PostgreSQL](../../../engines/table-engines/integrations/postgresql.md), поэтому возможно шардирование. +Движок `ExternalDistributed` позволяет выполнять запросы `SELECT` для таблиц на удаленном сервере MySQL или PostgreSQL. Принимает в качестве аргумента табличные движки [MySQL](../../../engines/table-engines/integrations/mysql.md) или [PostgreSQL](../../../engines/table-engines/integrations/postgresql.md), поэтому возможно шардирование. ## Создание таблицы {#creating-a-table} From f7663fe6ebbd966a47102a3307aa360794076a11 Mon Sep 17 00:00:00 2001 From: alexey-milovidov Date: Mon, 28 Jun 2021 00:05:57 +0300 Subject: [PATCH 481/931] Update run.sh --- docker/test/stress/run.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docker/test/stress/run.sh b/docker/test/stress/run.sh index a2bcc7cde09..a7c8c9f50f2 100755 --- a/docker/test/stress/run.sh +++ b/docker/test/stress/run.sh @@ -153,7 +153,7 @@ zgrep -Fa "########################################" /test_output/* > /dev/null # Put logs into /test_output/ for log_file in /var/log/clickhouse-server/clickhouse-server.log* do - pigz < $log_file > /test_output/$(basename $log_file).gz + pigz < "${log_file}" > /test_output/$(basename "${log_file}").gz done tar -chf /test_output/coordination.tar /var/lib/clickhouse/coordination ||: From b3c1f70fc028eddd7b3f13710c9c255fcc815ae7 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Mon, 28 Jun 2021 00:30:26 +0300 Subject: [PATCH 482/931] Add a test for #13993 --- .../01925_merge_prewhere_table.reference | 2 ++ .../0_stateless/01925_merge_prewhere_table.sql | 13 +++++++++++++ 2 files changed, 15 insertions(+) create mode 100644 tests/queries/0_stateless/01925_merge_prewhere_table.reference create mode 100644 tests/queries/0_stateless/01925_merge_prewhere_table.sql diff --git a/tests/queries/0_stateless/01925_merge_prewhere_table.reference b/tests/queries/0_stateless/01925_merge_prewhere_table.reference new file mode 100644 index 00000000000..368be4d48e2 --- /dev/null +++ b/tests/queries/0_stateless/01925_merge_prewhere_table.reference @@ -0,0 +1,2 @@ +x_1 10 +x_2 10 diff --git a/tests/queries/0_stateless/01925_merge_prewhere_table.sql b/tests/queries/0_stateless/01925_merge_prewhere_table.sql new file mode 100644 index 00000000000..cdac515c212 --- /dev/null +++ b/tests/queries/0_stateless/01925_merge_prewhere_table.sql @@ -0,0 +1,13 @@ +DROP TABLE IF EXISTS x_1; +DROP TABLE IF EXISTS x_2; +DROP TABLE IF EXISTS x; + +create table x_1 engine=Log as select * from numbers(10); +create table x_2 engine=Log as select * from numbers(10); +create table x engine=Merge(default, '^x_(1|2)$') as x_1; + +select _table, count() from x group by _table order by _table; + +DROP TABLE x_1; +DROP TABLE x_2; +DROP TABLE x; From a2c23c91a1bcf30e6bf3c6451bed0040a67456e5 Mon Sep 17 00:00:00 2001 From: alexey-milovidov Date: Mon, 28 Jun 2021 00:31:18 +0300 Subject: [PATCH 483/931] Update 01925_merge_prewhere_table.sql --- tests/queries/0_stateless/01925_merge_prewhere_table.sql | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/queries/0_stateless/01925_merge_prewhere_table.sql b/tests/queries/0_stateless/01925_merge_prewhere_table.sql index cdac515c212..4862a7bb426 100644 --- a/tests/queries/0_stateless/01925_merge_prewhere_table.sql +++ b/tests/queries/0_stateless/01925_merge_prewhere_table.sql @@ -4,7 +4,7 @@ DROP TABLE IF EXISTS x; create table x_1 engine=Log as select * 
from numbers(10); create table x_2 engine=Log as select * from numbers(10); -create table x engine=Merge(default, '^x_(1|2)$') as x_1; +create table x engine=Merge(currentDatabase(), '^x_(1|2)$') as x_1; select _table, count() from x group by _table order by _table; From 88a44a6adfd85a751c374afdf00756122abbdc36 Mon Sep 17 00:00:00 2001 From: robot-clickhouse Date: Mon, 28 Jun 2021 00:47:31 +0300 Subject: [PATCH 484/931] Auto version update to [21.7.1.7283] [54452] --- cmake/autogenerated_versions.txt | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/cmake/autogenerated_versions.txt b/cmake/autogenerated_versions.txt index 34de50e9f8a..c2dfae2964b 100644 --- a/cmake/autogenerated_versions.txt +++ b/cmake/autogenerated_versions.txt @@ -3,7 +3,7 @@ SET(VERSION_REVISION 54452) SET(VERSION_MAJOR 21) SET(VERSION_MINOR 7) SET(VERSION_PATCH 1) -SET(VERSION_GITHASH 976ccc2e908ac3bc28f763bfea8134ea0a121b40) -SET(VERSION_DESCRIBE v21.7.1.1-prestable) -SET(VERSION_STRING 21.7.1.1) +SET(VERSION_GITHASH fb895056568e26200629c7d19626e92d2dedc70d) +SET(VERSION_DESCRIBE v21.7.1.7283-prestable) +SET(VERSION_STRING 21.7.1.7283) # end of autochange From 648801760efa1ec048dcd743bb818a94ab5c5bc5 Mon Sep 17 00:00:00 2001 From: robot-clickhouse Date: Mon, 28 Jun 2021 00:50:20 +0300 Subject: [PATCH 485/931] Auto version update to [21.8.1.1] [54453] --- cmake/autogenerated_versions.txt | 8 ++--- debian/changelog | 4 +-- docker/client/Dockerfile | 2 +- docker/server/Dockerfile | 2 +- docker/test/Dockerfile | 2 +- .../StorageSystemContributors.generated.cpp | 30 +++++++++++++++++++ 6 files changed, 39 insertions(+), 9 deletions(-) diff --git a/cmake/autogenerated_versions.txt b/cmake/autogenerated_versions.txt index c2dfae2964b..6214a229da6 100644 --- a/cmake/autogenerated_versions.txt +++ b/cmake/autogenerated_versions.txt @@ -1,9 +1,9 @@ # This strings autochanged from release_lib.sh: -SET(VERSION_REVISION 54452) +SET(VERSION_REVISION 54453) SET(VERSION_MAJOR 21) -SET(VERSION_MINOR 7) +SET(VERSION_MINOR 8) SET(VERSION_PATCH 1) SET(VERSION_GITHASH fb895056568e26200629c7d19626e92d2dedc70d) -SET(VERSION_DESCRIBE v21.7.1.7283-prestable) -SET(VERSION_STRING 21.7.1.7283) +SET(VERSION_DESCRIBE v21.8.1.1-prestable) +SET(VERSION_STRING 21.8.1.1) # end of autochange diff --git a/debian/changelog b/debian/changelog index e1c46dae3a8..36c29fce1d0 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,5 +1,5 @@ -clickhouse (21.7.1.1) unstable; urgency=low +clickhouse (21.8.1.1) unstable; urgency=low * Modified source code - -- clickhouse-release Thu, 20 May 2021 22:23:29 +0300 + -- clickhouse-release Mon, 28 Jun 2021 00:50:15 +0300 diff --git a/docker/client/Dockerfile b/docker/client/Dockerfile index 79ac92f2277..19cadccb926 100644 --- a/docker/client/Dockerfile +++ b/docker/client/Dockerfile @@ -1,7 +1,7 @@ FROM ubuntu:18.04 ARG repository="deb https://repo.clickhouse.tech/deb/stable/ main/" -ARG version=21.7.1.* +ARG version=21.8.1.* RUN apt-get update \ && apt-get install --yes --no-install-recommends \ diff --git a/docker/server/Dockerfile b/docker/server/Dockerfile index 52dcb6caae5..65d90bf52ce 100644 --- a/docker/server/Dockerfile +++ b/docker/server/Dockerfile @@ -1,7 +1,7 @@ FROM ubuntu:20.04 ARG repository="deb https://repo.clickhouse.tech/deb/stable/ main/" -ARG version=21.7.1.* +ARG version=21.8.1.* ARG gosu_ver=1.10 # set non-empty deb_location_url url to create a docker image diff --git a/docker/test/Dockerfile b/docker/test/Dockerfile index 9809a36395d..687393025f0 100644 --- 
a/docker/test/Dockerfile +++ b/docker/test/Dockerfile @@ -1,7 +1,7 @@ FROM ubuntu:18.04 ARG repository="deb https://repo.clickhouse.tech/deb/stable/ main/" -ARG version=21.7.1.* +ARG version=21.8.1.* RUN apt-get update && \ apt-get install -y apt-transport-https dirmngr && \ diff --git a/src/Storages/System/StorageSystemContributors.generated.cpp b/src/Storages/System/StorageSystemContributors.generated.cpp index a5524aee6ae..f45acb0efd9 100644 --- a/src/Storages/System/StorageSystemContributors.generated.cpp +++ b/src/Storages/System/StorageSystemContributors.generated.cpp @@ -14,6 +14,7 @@ const char * auto_contributors[] { "achulkov2", "adevyatova", "ageraab", + "Ahmed Dardery", "akazz", "Akazz", "akonyaev", @@ -81,8 +82,10 @@ const char * auto_contributors[] { "Aliaksandr Pliutau", "Aliaksandr Shylau", "Ali Demirci", + "Alina Terekhova", "amesaru", "Amesaru", + "Amir Vaza", "Amos Bird", "amoschen", "amudong", @@ -153,6 +156,7 @@ const char * auto_contributors[] { "Artur Beglaryan", "AsiaKorushkina", "asiana21", + "atereh", "Atri Sharma", "avasiliev", "avogar", @@ -182,6 +186,7 @@ const char * auto_contributors[] { "Bogdan Voronin", "BohuTANG", "Bolinov", + "BoloniniD", "booknouse", "Boris Granveaud", "Bowen Masco", @@ -208,9 +213,11 @@ const char * auto_contributors[] { "Chienlung Cheung", "chou.fan", "Christian", + "christophe.kalenzaga", "Ciprian Hacman", "Clement Rodriguez", "Clément Rodriguez", + "cn-ds", "Colum", "comunodi", "Constantin S. Pan", @@ -264,9 +271,11 @@ const char * auto_contributors[] { "Dmitry Luhtionov", "Dmitry Moskowski", "Dmitry Muzyka", + "Dmitry Novik", "Dmitry Petukhov", "Dmitry Rubashkin", "Dmitry S..ky / skype: dvska-at-skype", + "Dmitry Ukolov", "Doge", "Dongdong Yang", "DoomzD", @@ -322,6 +331,7 @@ const char * auto_contributors[] { "fenglv", "fessmage", "FgoDt", + "fibersel", "filimonov", "filipe", "Filipe Caixeta", @@ -362,6 +372,7 @@ const char * auto_contributors[] { "Grigory Pervakov", "Guillaume Tassery", "guoleiyi", + "Guo Wei (William)", "gyuton", "Haavard Kvaalen", "Habibullah Oladepo", @@ -465,8 +476,10 @@ const char * auto_contributors[] { "Konstantin Lebedev", "Konstantin Malanchev", "Konstantin Podshumok", + "Korenevskiy Denis", "Korviakov Andrey", "koshachy", + "Kostiantyn Storozhuk", "Kozlov Ivan", "kreuzerkrieg", "Kruglov Pavel", @@ -537,6 +550,7 @@ const char * auto_contributors[] { "Matwey V. 
Kornilov", "Max", "Max Akhmedov", + "Max Bruce", "maxim", "Maxim Akhmedov", "MaximAL", @@ -560,6 +574,7 @@ const char * auto_contributors[] { "melin", "memo", "meo", + "meoww-bot", "mergify[bot]", "Metehan Çetinkaya", "Metikov Vadim", @@ -581,6 +596,7 @@ const char * auto_contributors[] { "Mike Kot", "mikepop7", "Mikhail", + "Mikhail Andreev", "Mikhail Cheshkov", "Mikhail Fandyushin", "Mikhail Filimonov", @@ -601,6 +617,7 @@ const char * auto_contributors[] { "MovElb", "Mr.General", "Murat Kabilov", + "muzzlerator", "m-ves", "mwish", "MyroTk", @@ -614,6 +631,7 @@ const char * auto_contributors[] { "never lee", "NeZeD [Mac Pro]", "nicelulu", + "Nickita", "Nickolay Yastrebov", "Nicolae Vartolomei", "Nico Mandery", @@ -635,6 +653,7 @@ const char * auto_contributors[] { "Nikolay Degterinsky", "Nikolay Kirsh", "Nikolay Semyachkin", + "Nikolay Shcheglov", "Nikolay Vasiliev", "Nikolay Volosatov", "Niu Zhaojie", @@ -647,6 +666,7 @@ const char * auto_contributors[] { "Odin Hultgren Van Der Horst", "ogorbacheva", "Okada Haruki", + "Oleg Ershov", "Oleg Favstov", "Oleg Komarov", "olegkv", @@ -685,6 +705,7 @@ const char * auto_contributors[] { "potya", "Potya", "Pradeep Chhetri", + "presto53", "proller", "pufit", "pyos", @@ -699,6 +720,8 @@ const char * auto_contributors[] { "Ramazan Polat", "Raúl Marín", "Ravengg", + "redclusive", + "RedClusive", "RegulusZ", "Reilee", "Reto Kromer", @@ -709,6 +732,7 @@ const char * auto_contributors[] { "robot-clickhouse", "robot-metrika-test", "rodrigargar", + "Romain Neutron", "roman", "Roman Bug", "Roman Lipovsky", @@ -776,6 +800,7 @@ const char * auto_contributors[] { "spongedc", "spyros87", "Stanislav Pavlovichev", + "Stas Kelvich", "Stas Pavlovichev", "stavrolia", "Stefan Thies", @@ -784,6 +809,7 @@ const char * auto_contributors[] { "stepenhu", "Steve-金勇", "Stig Bakken", + "Storozhuk Kostiantyn", "Stupnikov Andrey", "su-houzhen", "sundy", @@ -806,6 +832,7 @@ const char * auto_contributors[] { "Tema Novikov", "templarzq", "The-Alchemist", + "Tiaonmmn", "tiger.yan", "tison", "TiunovNN", @@ -891,6 +918,7 @@ const char * auto_contributors[] { "Xiang Zhou", "xPoSx", "Yağızcan Değirmenci", + "yang", "Yangkuan Liu", "yangshuai", "Yatsishin Ilya", @@ -906,6 +934,7 @@ const char * auto_contributors[] { "Y Lu", "Yohann Jardin", "yonesko", + "yuchuansun", "yuefoo", "yulu86", "yuluxu", @@ -935,6 +964,7 @@ const char * auto_contributors[] { "zvvr", "zzsmdfj", "Артем Стрельцов", + "Владислав Тихонов", "Георгий Кондратьев", "Дмитрий Канатников", "Иванов Евгений", From 31ecef90c38bcaf69c187ccce8e9dcffb6a5d95f Mon Sep 17 00:00:00 2001 From: Maksim Kita Date: Mon, 28 Jun 2021 01:41:55 +0300 Subject: [PATCH 486/931] Fixed tests --- .../01914_exchange_dictionaries.reference | 2 +- .../01914_exchange_dictionaries.sql | 48 +++++++++---------- tests/queries/skip_list.json | 5 +- 3 files changed, 29 insertions(+), 26 deletions(-) diff --git a/tests/queries/0_stateless/01914_exchange_dictionaries.reference b/tests/queries/0_stateless/01914_exchange_dictionaries.reference index d176a0d7396..9278d0abeed 100644 --- a/tests/queries/0_stateless/01914_exchange_dictionaries.reference +++ b/tests/queries/0_stateless/01914_exchange_dictionaries.reference @@ -1,4 +1,4 @@ 1 Table1 2 Table2 -1 Table1 2 Table2 +1 Table1 diff --git a/tests/queries/0_stateless/01914_exchange_dictionaries.sql b/tests/queries/0_stateless/01914_exchange_dictionaries.sql index 840fb43b8b2..77bcf53ab5e 100644 --- a/tests/queries/0_stateless/01914_exchange_dictionaries.sql +++ 
b/tests/queries/0_stateless/01914_exchange_dictionaries.sql
@@ -1,39 +1,39 @@
-DROP DATABASE IF EXISTS 01915_db;
-CREATE DATABASE 01915_db ENGINE=Atomic;
+DROP DATABASE IF EXISTS 01914_db;
+CREATE DATABASE 01914_db ENGINE=Atomic;

-DROP TABLE IF EXISTS 01915_db.table_1;
-CREATE TABLE 01915_db.table_1 (id UInt64, value String) ENGINE=TinyLog;
+DROP TABLE IF EXISTS 01914_db.table_1;
+CREATE TABLE 01914_db.table_1 (id UInt64, value String) ENGINE=TinyLog;

-DROP TABLE IF EXISTS 01915_db.table_2;
-CREATE TABLE 01915_db.table_2 (id UInt64, value String) ENGINE=TinyLog;
+DROP TABLE IF EXISTS 01914_db.table_2;
+CREATE TABLE 01914_db.table_2 (id UInt64, value String) ENGINE=TinyLog;

-INSERT INTO 01915_db.table_1 VALUES (1, 'Table1');
-INSERT INTO 01915_db.table_2 VALUES (2, 'Table2');
+INSERT INTO 01914_db.table_1 VALUES (1, 'Table1');
+INSERT INTO 01914_db.table_2 VALUES (2, 'Table2');

-DROP DICTIONARY IF EXISTS 01915_db.dictionary_1;
-CREATE DICTIONARY 01915_db.dictionary_1 (id UInt64, value String)
+DROP DICTIONARY IF EXISTS 01914_db.dictionary_1;
+CREATE DICTIONARY 01914_db.dictionary_1 (id UInt64, value String)
 PRIMARY KEY id
 LAYOUT(DIRECT())
-SOURCE(CLICKHOUSE(DB '01915_db' TABLE 'table_1'));
+SOURCE(CLICKHOUSE(DB '01914_db' TABLE 'table_1'));

-DROP DICTIONARY IF EXISTS 01915_db.dictionary_2;
-CREATE DICTIONARY 01915_db.dictionary_2 (id UInt64, value String)
+DROP DICTIONARY IF EXISTS 01914_db.dictionary_2;
+CREATE DICTIONARY 01914_db.dictionary_2 (id UInt64, value String)
 PRIMARY KEY id
 LAYOUT(DIRECT())
-SOURCE(CLICKHOUSE(DB '01915_db' TABLE 'table_2'));
+SOURCE(CLICKHOUSE(DB '01914_db' TABLE 'table_2'));

-SELECT * FROM 01915_db.dictionary_1;
-SELECT * FROM 01915_db.dictionary_2;
+SELECT * FROM 01914_db.dictionary_1;
+SELECT * FROM 01914_db.dictionary_2;

-EXCHANGE DICTIONARIES 01915_db.dictionary_1 AND 01915_db.dictionary_2;
+EXCHANGE DICTIONARIES 01914_db.dictionary_1 AND 01914_db.dictionary_2;

-SELECT * FROM 01915_db.dictionary_1;
-SELECT * FROM 01915_db.dictionary_2;
+SELECT * FROM 01914_db.dictionary_1;
+SELECT * FROM 01914_db.dictionary_2;

-DROP DICTIONARY 01915_db.dictionary_1;
-DROP DICTIONARY 01915_db.dictionary_2;
+DROP DICTIONARY 01914_db.dictionary_1;
+DROP DICTIONARY 01914_db.dictionary_2;

-DROP TABLE 01915_db.table_1;
-DROP TABLE 01915_db.table_2;
+DROP TABLE 01914_db.table_1;
+DROP TABLE 01914_db.table_2;

-DROP DATABASE 01915_db;
+DROP DATABASE 01914_db;
diff --git a/tests/queries/skip_list.json b/tests/queries/skip_list.json
index f010efcf916..ed952d0cf63 100644
--- a/tests/queries/skip_list.json
+++ b/tests/queries/skip_list.json
@@ -848,6 +848,9 @@
         "01870_buffer_flush", // creates database
         "01889_postgresql_protocol_null_fields",
         "01889_check_row_policy_defined_using_user_function",
-        "01921_concurrent_ttl_and_normal_merges_zookeeper_long" // heavy test, better to run sequentially
+        "01921_concurrent_ttl_and_normal_merges_zookeeper_long", // heavy test, better to run sequentially
+        "01913_replace_dictionary",
+        "01914_exchange_dictionaries",
+        "01915_create_or_replace_dictionary"
     ]
 }

From f00a4668a4e7f399655b91a08f8a6c2aa393b2be Mon Sep 17 00:00:00 2001
From: Azat Khuzhin
Date: Mon, 28 Jun 2021 10:03:38 +0300
Subject: [PATCH 487/931] Set follow-fork-mode child for gdb in stress/fasttest/fuzzer

Sometimes gdb does not catch SIGSEGV [1], so let's try to set this setting,
since some code from contrib may fork.
[1]: https://clickhouse-test-reports.s3.yandex.net/25605/cd5a3c8d7eb417f6df211b4507dc970933f8549a/stress_test_(thread).html#fail1 --- docker/test/fasttest/run.sh | 1 + docker/test/fuzzer/run-fuzzer.sh | 1 + docker/test/stress/run.sh | 1 + 3 files changed, 3 insertions(+) diff --git a/docker/test/fasttest/run.sh b/docker/test/fasttest/run.sh index cc6aeff357f..9fd2212e2dc 100755 --- a/docker/test/fasttest/run.sh +++ b/docker/test/fasttest/run.sh @@ -113,6 +113,7 @@ function start_server echo "ClickHouse server pid '$server_pid' started and responded" echo " +set follow-fork-mode child handle all noprint handle SIGSEGV stop print handle SIGBUS stop print diff --git a/docker/test/fuzzer/run-fuzzer.sh b/docker/test/fuzzer/run-fuzzer.sh index 45709e5c501..3ca67a58278 100755 --- a/docker/test/fuzzer/run-fuzzer.sh +++ b/docker/test/fuzzer/run-fuzzer.sh @@ -103,6 +103,7 @@ function fuzz kill -0 $server_pid echo " +set follow-fork-mode child handle all noprint handle SIGSEGV stop print handle SIGBUS stop print diff --git a/docker/test/stress/run.sh b/docker/test/stress/run.sh index 8016b2c59f3..5ee45427225 100755 --- a/docker/test/stress/run.sh +++ b/docker/test/stress/run.sh @@ -55,6 +55,7 @@ function start() done echo " +set follow-fork-mode child handle all noprint handle SIGSEGV stop print handle SIGBUS stop print From b6b79244136b8f3a9a3372bd67d2168616c67a60 Mon Sep 17 00:00:00 2001 From: Kseniia Sumarokova <54203879+kssenii@users.noreply.github.com> Date: Mon, 28 Jun 2021 10:20:33 +0300 Subject: [PATCH 488/931] Update DatabaseAtomic.cpp --- src/Databases/DatabaseAtomic.cpp | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/Databases/DatabaseAtomic.cpp b/src/Databases/DatabaseAtomic.cpp index d7d423cfea6..48777d92a05 100644 --- a/src/Databases/DatabaseAtomic.cpp +++ b/src/Databases/DatabaseAtomic.cpp @@ -112,7 +112,8 @@ void DatabaseAtomic::dropTable(ContextPtr local_context, const String & table_na auto * storage = tryGetTable(table_name, local_context).get(); /// Remove the inner table (if any) to avoid deadlock /// (due to attempt to execute DROP from the worker thread) - storage->dropInnerTableIfAny(no_delay, local_context); + if (storage) + storage->dropInnerTableIfAny(no_delay, local_context); String table_metadata_path = getObjectMetadataPath(table_name); String table_metadata_path_drop; From ca4783d8546981d6f1bd44fc7c10353f8b5e9df2 Mon Sep 17 00:00:00 2001 From: Kostiantyn Storozhuk Date: Mon, 28 Jun 2021 15:22:13 +0800 Subject: [PATCH 489/931] Fixed typo and casting --- src/Formats/MySQLBlockInputStream.cpp | 4 ++-- src/Interpreters/MySQL/InterpretersMySQLDDLQuery.cpp | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/src/Formats/MySQLBlockInputStream.cpp b/src/Formats/MySQLBlockInputStream.cpp index 3f51cb8d311..ea662076d4e 100644 --- a/src/Formats/MySQLBlockInputStream.cpp +++ b/src/Formats/MySQLBlockInputStream.cpp @@ -159,11 +159,11 @@ namespace read_bytes_size += 8; break; case ValueType::vtEnum8: - assert_cast(column).insertValue(static_cast &>(data_type).castToValue(value.data()).get()); + assert_cast(column).insertValue(assert_cast &>(data_type).castToValue(value.data()).get()); read_bytes_size += assert_cast(column).byteSize(); break; case ValueType::vtEnum16: - assert_cast(column).insertValue(static_cast &>(data_type).castToValue(value.data()).get()); + assert_cast(column).insertValue(assert_cast &>(data_type).castToValue(value.data()).get()); read_bytes_size += assert_cast(column).byteSize(); break; case ValueType::vtString: diff --git 
a/src/Interpreters/MySQL/InterpretersMySQLDDLQuery.cpp b/src/Interpreters/MySQL/InterpretersMySQLDDLQuery.cpp
index 7988a9ab48b..a7f5e84fad1 100644
--- a/src/Interpreters/MySQL/InterpretersMySQLDDLQuery.cpp
+++ b/src/Interpreters/MySQL/InterpretersMySQLDDLQuery.cpp
@@ -109,7 +109,7 @@ static NamesAndTypesList getColumnsList(const ASTExpressionList * columns_defini
     /// Transforms MySQL ENUM's list of strings to ClickHouse string-integer pairs
     /// For example ENUM('a', 'b', 'c') -> ENUM('a'=1, 'b'=2, 'c'=3)
     /// Elements on a position further than 32767 are assigned negative values, starting with -32768.
-    /// Note: Enum would be transformed to Enum8 if number of ellements is less then 128, otherwise it would be transformed to Enum16.
+    /// Note: Enum would be transformed to Enum8 if number of elements is less than 128, otherwise it would be transformed to Enum16.
     if (type_name_upper.find("ENUM") != String::npos)
     {
         UInt16 i = 0;

From 170301c53102d114cd2c26b083040e86ecf339a8 Mon Sep 17 00:00:00 2001
From: Azat Khuzhin
Date: Mon, 28 Jun 2021 10:31:34 +0300
Subject: [PATCH 490/931] Ignore TOO_DEEP_RECURSION server exception during fuzzing

---
 programs/client/Client.cpp | 12 +++++++++++-
 1 file changed, 11 insertions(+), 1 deletion(-)

diff --git a/programs/client/Client.cpp b/programs/client/Client.cpp
index 12d94943dea..c4aef014971 100644
--- a/programs/client/Client.cpp
+++ b/programs/client/Client.cpp
@@ -1380,9 +1380,19 @@ private:
                 have_error = true;
             }

+            const auto * exception = server_exception ? server_exception.get() : client_exception.get();
+            // Sometimes you may get TOO_DEEP_RECURSION from the server,
+            // and TOO_DEEP_RECURSION should not fail the fuzzer check.
+            if (have_error && exception->code() == ErrorCodes::TOO_DEEP_RECURSION)
+            {
+                have_error = false;
+                server_exception.reset();
+                client_exception.reset();
+                return true;
+            }
+
             if (have_error)
             {
-                const auto * exception = server_exception ? server_exception.get() : client_exception.get();
                 fmt::print(stderr, "Error on processing query '{}': {}\n", ast_to_process->formatForErrorMessage(), exception->message());

                 // Try to reconnect after errors, for two reasons:

From 11900c323801aacd623b6ccfa4f1ea36539ba4dd Mon Sep 17 00:00:00 2001
From: Azat Khuzhin
Date: Fri, 25 Jun 2021 22:38:21 +0300
Subject: [PATCH 491/931] Fix 01641_memory_tracking_insert_optimize

- Reverts #23617
- Apply changes from #23617
- Change repeat(10) to repeat(40) (memory tracking is different w/o jemalloc (w/o nallocx()), i.e.
sanitizers, so increase memory usage a little to make it fail under sanitizers) - Unmark 01641_memory_tracking_insert_optimize_long as long - Update skip list for 01641_memory_tracking_insert_optimize --- .../01641_memory_tracking_insert_optimize.reference | 0 .../01641_memory_tracking_insert_optimize.sql | 12 ++++++++++++ tests/queries/skip_list.json | 4 +--- 3 files changed, 13 insertions(+), 3 deletions(-) create mode 100644 tests/queries/0_stateless/01641_memory_tracking_insert_optimize.reference create mode 100644 tests/queries/0_stateless/01641_memory_tracking_insert_optimize.sql diff --git a/tests/queries/0_stateless/01641_memory_tracking_insert_optimize.reference b/tests/queries/0_stateless/01641_memory_tracking_insert_optimize.reference new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/queries/0_stateless/01641_memory_tracking_insert_optimize.sql b/tests/queries/0_stateless/01641_memory_tracking_insert_optimize.sql new file mode 100644 index 00000000000..8bbc9ec0a14 --- /dev/null +++ b/tests/queries/0_stateless/01641_memory_tracking_insert_optimize.sql @@ -0,0 +1,12 @@ +drop table if exists data_01641; + +create table data_01641 (key Int, value String) engine=MergeTree order by (key, repeat(value, 40)) settings old_parts_lifetime=0, min_bytes_for_wide_part=0; + +SET max_block_size = 1000, min_insert_block_size_rows = 0, min_insert_block_size_bytes = 0; +insert into data_01641 select number, toString(number) from numbers(120000); + +-- Definitely should fail and it proves that memory is tracked in OPTIMIZE query. +set max_memory_usage='10Mi', max_untracked_memory=0; +optimize table data_01641 final; -- { serverError 241 } + +drop table data_01641; diff --git a/tests/queries/skip_list.json b/tests/queries/skip_list.json index f010efcf916..8b680b56247 100644 --- a/tests/queries/skip_list.json +++ b/tests/queries/skip_list.json @@ -18,7 +18,6 @@ "functions_bad_arguments", /// Too long for TSan "01603_read_with_backoff_bug", /// Too long for TSan "01646_system_restart_replicas_smoke", /// RESTART REPLICAS can acquire too much locks, while only 64 is possible from one thread under TSan - "01641_memory_tracking_insert_optimize", /// INSERT lots of rows is too heavy for TSan "01017_uniqCombined_memory_usage" /// Fine thresholds on memory usage ], "address-sanitizer": [ @@ -71,8 +70,7 @@ "hyperscan", "01193_metadata_loading", "01473_event_time_microseconds", - "01396_inactive_replica_cleanup_nodes", - "01641_memory_tracking_insert_optimize" /// INSERT lots of rows is too heavy in debug build + "01396_inactive_replica_cleanup_nodes" ], "unbundled-build": [ "00429", From 1b56b0a02058054f8193002307af5835ed95320a Mon Sep 17 00:00:00 2001 From: alesapin Date: Mon, 28 Jun 2021 11:10:38 +0300 Subject: [PATCH 492/931] Fix flaky test --- src/DataStreams/TTLAggregationAlgorithm.cpp | 7 ++++++- .../0_stateless/01280_ttl_where_group_by.reference | 8 ++++---- tests/queries/0_stateless/01280_ttl_where_group_by.sh | 4 ++-- 3 files changed, 12 insertions(+), 7 deletions(-) diff --git a/src/DataStreams/TTLAggregationAlgorithm.cpp b/src/DataStreams/TTLAggregationAlgorithm.cpp index 6d5c234a074..12d28ff4aea 100644 --- a/src/DataStreams/TTLAggregationAlgorithm.cpp +++ b/src/DataStreams/TTLAggregationAlgorithm.cpp @@ -137,8 +137,13 @@ void TTLAggregationAlgorithm::execute(Block & block) if (some_rows_were_aggregated) { auto ttl_column_after_aggregation = executeExpressionAndGetColumn(description.expression, block, description.result_column); + auto where_column_after_aggregation = 
executeExpressionAndGetColumn(description.where_expression, block, description.where_result_column); for (size_t i = 0; i < block.rows(); ++i) - new_ttl_info.update(getTimestampByIndex(ttl_column_after_aggregation.get(), i)); + { + bool where_filter_passed = !where_column_after_aggregation || where_column_after_aggregation->getBool(i); + if (where_filter_passed) + new_ttl_info.update(getTimestampByIndex(ttl_column_after_aggregation.get(), i)); + } } } diff --git a/tests/queries/0_stateless/01280_ttl_where_group_by.reference b/tests/queries/0_stateless/01280_ttl_where_group_by.reference index 7fe00709dee..65e7e5b158f 100644 --- a/tests/queries/0_stateless/01280_ttl_where_group_by.reference +++ b/tests/queries/0_stateless/01280_ttl_where_group_by.reference @@ -16,11 +16,11 @@ ttl_01280_3 2 1 0 3 3 1 8 2 ttl_01280_4 -1 1 0 4 -10 2 13 9 +0 4 +13 9 ttl_01280_5 1 2 7 5 2 3 6 5 ttl_01280_6 -1 5 3 5 -2 10 3 5 +1 3 5 +2 3 5 diff --git a/tests/queries/0_stateless/01280_ttl_where_group_by.sh b/tests/queries/0_stateless/01280_ttl_where_group_by.sh index 9f30c7c5872..c9936ce7afd 100755 --- a/tests/queries/0_stateless/01280_ttl_where_group_by.sh +++ b/tests/queries/0_stateless/01280_ttl_where_group_by.sh @@ -80,7 +80,7 @@ insert into ttl_01280_4 values (1, 5, 4, 9, now())" sleep 2 optimize "ttl_01280_4" -$CLICKHOUSE_CLIENT --query "select a, b, x, y from ttl_01280_4 ORDER BY a, b, x, y" +$CLICKHOUSE_CLIENT --query "select x, y from ttl_01280_4 ORDER BY a, b, x, y" $CLICKHOUSE_CLIENT --query "drop table if exists ttl_01280_5" @@ -107,7 +107,7 @@ insert into ttl_01280_6 values (1, 5, 3, 5, now())" sleep 2 optimize "ttl_01280_6" -$CLICKHOUSE_CLIENT --query "select a, b, x, y from ttl_01280_6 ORDER BY a, b, x, y" +$CLICKHOUSE_CLIENT --query "select a, x, y from ttl_01280_6 ORDER BY a, b, x, y" $CLICKHOUSE_CLIENT -q "DROP TABLE ttl_01280_1" $CLICKHOUSE_CLIENT -q "DROP TABLE ttl_01280_2" From 7e73762b48dc3085b834d24ad680e17229fac7d5 Mon Sep 17 00:00:00 2001 From: alesapin Date: Mon, 28 Jun 2021 11:28:45 +0300 Subject: [PATCH 493/931] Fix flaky test and wrong message --- src/Storages/StorageReplicatedMergeTree.cpp | 12 ++++++++---- .../test_version_update_after_mutation/test.py | 4 +++- 2 files changed, 11 insertions(+), 5 deletions(-) diff --git a/src/Storages/StorageReplicatedMergeTree.cpp b/src/Storages/StorageReplicatedMergeTree.cpp index e91f3d9554e..1b57ac6d995 100644 --- a/src/Storages/StorageReplicatedMergeTree.cpp +++ b/src/Storages/StorageReplicatedMergeTree.cpp @@ -558,6 +558,14 @@ void StorageReplicatedMergeTree::waitMutationToFinishOnReplicas( throw Exception(ErrorCodes::UNFINISHED, "Mutation {} was killed, manually removed or table was dropped", mutation_id); } + if (partial_shutdown_called) + throw Exception("Mutation is not finished because table shutdown was called. It will be done after table restart.", + ErrorCodes::UNFINISHED); + + /// Replica inactive, don't check mutation status + if (!inactive_replicas.empty() && inactive_replicas.count(replica)) + continue; + /// At least we have our current mutation std::set mutation_ids; mutation_ids.insert(mutation_id); @@ -566,10 +574,6 @@ void StorageReplicatedMergeTree::waitMutationToFinishOnReplicas( /// they will happen on each replica, so we can check only in-memory info. auto mutation_status = queue.getIncompleteMutationsStatus(mutation_id, &mutation_ids); checkMutationStatus(mutation_status, mutation_ids); - - if (partial_shutdown_called) - throw Exception("Mutation is not finished because table shutdown was called. 
It will be done after table restart.", - ErrorCodes::UNFINISHED); } if (!inactive_replicas.empty()) diff --git a/tests/integration/test_version_update_after_mutation/test.py b/tests/integration/test_version_update_after_mutation/test.py index dd8e1bc7a9e..a1ae17b8451 100644 --- a/tests/integration/test_version_update_after_mutation/test.py +++ b/tests/integration/test_version_update_after_mutation/test.py @@ -79,7 +79,9 @@ def test_upgrade_while_mutation(start_cluster): node3.restart_with_latest_version(signal=9) - exec_query_with_retry(node3, "ALTER TABLE mt1 DELETE WHERE id > 100000", settings={"mutations_sync": "2"}) + exec_query_with_retry(node3, "SYSTEM RESTART REPLICA mt1") + + node3.query("ALTER TABLE mt1 DELETE WHERE id > 100000", settings={"mutations_sync": "2"}) # will delete nothing, but previous async mutation will finish with this query assert_eq_with_retry(node3, "SELECT COUNT() from mt1", "50000\n") From 55dd0fbfc2aa0663d005f1675178c63bdff27a98 Mon Sep 17 00:00:00 2001 From: Evgenia Sudarikova <56156889+otrazhenia@users.noreply.github.com> Date: Mon, 28 Jun 2021 13:16:41 +0300 Subject: [PATCH 494/931] Update docs/ru/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-lifetime.md Co-authored-by: tavplubix --- .../external-dictionaries/external-dicts-dict-lifetime.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/ru/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-lifetime.md b/docs/ru/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-lifetime.md index 9d4205ab1d1..ea1b62c6cef 100644 --- a/docs/ru/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-lifetime.md +++ b/docs/ru/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-lifetime.md @@ -51,7 +51,7 @@ LIFETIME(300) LIFETIME(MIN 300 MAX 360) ``` -Если `0` и `0`, ClickHouse не перегружает словарь по истечении времени. +Если `0` и `0`, ClickHouse не перезагружает словарь по истечении времени. В этом случае ClickHouse может перезагрузить данные словаря, если изменился XML файл с конфигурацией словаря или если была выполнена команда `SYSTEM RELOAD DICTIONARY`. 
При обновлении словарей сервер ClickHouse применяет различную логику в зависимости от типа [источника](external-dicts-dict-sources.md):

From ee935a3733c26c878480a2d75087d5bbc06188df Mon Sep 17 00:00:00 2001
From: alexey-milovidov
Date: Mon, 28 Jun 2021 14:28:49 +0300
Subject: [PATCH 495/931] Update run.sh

---
 docker/test/stress/run.sh | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/docker/test/stress/run.sh b/docker/test/stress/run.sh
index a7c8c9f50f2..b198a3f2397 100755
--- a/docker/test/stress/run.sh
+++ b/docker/test/stress/run.sh
@@ -1,4 +1,5 @@
 #!/bin/bash
+# shellcheck disable=SC2094

 set -x

@@ -153,7 +154,7 @@ zgrep -Fa "########################################" /test_output/* > /dev/null
 # Put logs into /test_output/
 for log_file in /var/log/clickhouse-server/clickhouse-server.log*
 do
-    pigz < "${log_file}" > /test_output/$(basename "${log_file}").gz
+    pigz < "${log_file}" > /test_output/"$(basename \"${log_file}\")".gz
 done

 tar -chf /test_output/coordination.tar /var/lib/clickhouse/coordination ||:

From 352e1f27ffd433bc550d1eda37e2ae49da2c3128 Mon Sep 17 00:00:00 2001
From: Maksim Kita
Date: Mon, 28 Jun 2021 14:42:21 +0300
Subject: [PATCH 496/931] Update using Map data type in system log tables before merge

---
 docker/test/performance-comparison/compare.sh  |  18 +--
 docs/zh/operations/system-tables/query_log.md  |   4 +-
 src/Core/NamesAndAliases.h                     |  53 +++++++
 src/Core/Settings.cpp                          |   1 +
 src/Core/Settings.h                            |   4 +-
 src/Interpreters/AsynchronousMetricLog.cpp     |  19 ++-
 src/Interpreters/AsynchronousMetricLog.h       |   7 +-
 src/Interpreters/CrashLog.cpp                  |  25 ++-
 src/Interpreters/CrashLog.h                    |   3 +-
 src/Interpreters/InterpreterCreateQuery.cpp    |  47 +++---
 src/Interpreters/InterpreterCreateQuery.h      |   4 +-
 src/Interpreters/MetricLog.cpp                 |  18 +--
 src/Interpreters/MetricLog.h                   |   3 +-
 src/Interpreters/OpenTelemetrySpanLog.cpp      |  27 ++--
 src/Interpreters/OpenTelemetrySpanLog.h        |   3 +-
 src/Interpreters/PartLog.cpp                   |  43 +++---
 src/Interpreters/PartLog.h                     |   3 +-
 src/Interpreters/ProfileEventsExt.cpp          |   1 +
 src/Interpreters/QueryLog.cpp                  | 146 ++++++++++--------
 src/Interpreters/QueryLog.h                    |   5 +-
 src/Interpreters/QueryThreadLog.cpp            | 100 ++++++------
 src/Interpreters/QueryThreadLog.h              |   3 +-
 src/Interpreters/SystemLog.h                   |  30 ++--
 src/Interpreters/TextLog.cpp                   |  28 ++--
 src/Interpreters/TextLog.h                     |   3 +-
 src/Interpreters/TraceLog.cpp                  |  22 +--
 src/Interpreters/TraceLog.h                    |   3 +-
 src/Storages/ColumnsDescription.cpp            |  19 +++
 src/Storages/ColumnsDescription.h              |   5 +-
 src/Storages/System/IStorageSystemOneBlock.h   |   7 +-
 .../System/StorageSystemProcesses.cpp          |   8 +
 src/Storages/System/StorageSystemProcesses.h   |   2 +
 .../01343_min_bytes_to_use_mmap_io.sql         |   2 +-
 .../01344_min_bytes_to_use_mmap_io_index.sql   |   4 +-
 ...terialized_view_with_join_on_query_log.sql  |   2 -
 35 files changed, 409 insertions(+), 263 deletions(-)
 create mode 100644 src/Core/NamesAndAliases.h

diff --git a/docker/test/performance-comparison/compare.sh b/docker/test/performance-comparison/compare.sh
index 2ec4a2f60db..dad4362b3d1 100755
--- a/docker/test/performance-comparison/compare.sh
+++ b/docker/test/performance-comparison/compare.sh
@@ -380,7 +380,7 @@ do
 done

 # for each query run, prepare array of metrics from query log
-clickhouse-local --allow_experimental_map_type 1 --query "
+clickhouse-local --query "
 create view query_runs as select * from file('analyze/query-runs.tsv', TSV,
     'test text, query_index int, query_id text, version UInt8, time float');

@@ -507,7 +507,7 @@ do
     file="analyze/tmp/${prefix// /_}.tsv"
     grep "^$prefix "
"analyze/query-run-metrics-for-stats.tsv" > "$file" & printf "%s\0\n" \ - "clickhouse-local --allow_experimental_map_type 1 \ + "clickhouse-local \ --file \"$file\" \ --structure 'test text, query text, run int, version UInt8, metrics Array(float)' \ --query \"$(cat "$script_dir/eqmed.sql")\" \ @@ -526,7 +526,7 @@ numactl --show numactl --cpunodebind=all --membind=all numactl --show numactl --cpunodebind=all --membind=all parallel --joblog analyze/parallel-log.txt --null < analyze/commands.txt 2>> analyze/errors.log -clickhouse-local --allow_experimental_map_type 1 --query " +clickhouse-local --query " -- Join the metric names back to the metric statistics we've calculated, and make -- a denormalized table of them -- statistics for all metrics for all queries. -- The WITH, ARRAY JOIN and CROSS JOIN do not like each other: @@ -627,7 +627,7 @@ build_log_column_definitions cat analyze/errors.log >> report/errors.log ||: cat profile-errors.log >> report/errors.log ||: -clickhouse-local --allow_experimental_map_type 1 --query " +clickhouse-local --query " create view query_display_names as select * from file('analyze/query-display-names.tsv', TSV, 'test text, query_index int, query_display_name text') @@ -974,7 +974,7 @@ create table all_query_metrics_tsv engine File(TSV, 'report/all-query-metrics.ts for version in {right,left} do rm -rf data - clickhouse-local --allow_experimental_map_type 1 --query " + clickhouse-local --query " create view query_profiles as with 0 as left, 1 as right select * from file('analyze/query-profiles.tsv', TSV, @@ -1168,7 +1168,7 @@ build_log_column_definitions rm -rf metrics ||: mkdir metrics -clickhouse-local --allow_experimental_map_type 1 --query " +clickhouse-local --query " create view right_async_metric_log as select * from file('right-async-metric-log.tsv', TSVWithNamesAndTypes, '$(cat right-async-metric-log.tsv.columns)') @@ -1285,10 +1285,10 @@ create table ci_checks engine File(TSVWithNamesAndTypes, 'ci-checks.tsv') set +x # Don't show password in the log client=(clickhouse-client # Surprisingly, clickhouse-client doesn't understand --host 127.0.0.1:9000 - # so I have to extract host and port with clickhouse-local --allow_experimental_map_type 1. I tried to use + # so I have to extract host and port with clickhouse-local. I tried to use # Poco URI parser to support this in the client, but it's broken and can't # parse host:port. 
- $(clickhouse-local --allow_experimental_map_type 1 --query "with '${CHPC_DATABASE_URL}' as url select '--host ' || domain(url) || ' --port ' || toString(port(url)) format TSV") + $(clickhouse-local --query "with '${CHPC_DATABASE_URL}' as url select '--host ' || domain(url) || ' --port ' || toString(port(url)) format TSV") --secure --user "${CHPC_DATABASE_USER}" --password "${CHPC_DATABASE_PASSWORD}" @@ -1352,7 +1352,7 @@ EOF } # Check that local and client are in PATH -clickhouse-local --allow_experimental_map_type 1 --version > /dev/null +clickhouse-local --version > /dev/null clickhouse-client --version > /dev/null case "$stage" in diff --git a/docs/zh/operations/system-tables/query_log.md b/docs/zh/operations/system-tables/query_log.md index 5c25d9725a7..b7661b73a50 100644 --- a/docs/zh/operations/system-tables/query_log.md +++ b/docs/zh/operations/system-tables/query_log.md @@ -76,14 +76,14 @@ ClickHouse不会自动从表中删除数据。更多详情请看 [introduction]( - `client_version_patch` ([UInt32](../../sql-reference/data-types/int-uint.md)) — [clickhouse-client](../../interfaces/cli.md) 或其他TCP客户端的Patch component。 - `http_method` (UInt8) — 发起查询的HTTP方法. 可能值: - 0 — TCP接口的查询. - - 1 — `GET` + - 1 — `GET` - 2 — `POST` - `http_user_agent` ([String](../../sql-reference/data-types/string.md)) — The `UserAgent` The UserAgent header passed in the HTTP request。 - `quota_key` ([String](../../sql-reference/data-types/string.md)) — 在[quotas](../../operations/quotas.md) 配置里设置的“quota key” (见 `keyed`). - `revision` ([UInt32](../../sql-reference/data-types/int-uint.md)) — ClickHouse revision. - `ProfileEvents` ([Map(String, UInt64))](../../sql-reference/data-types/array.md)) — Counters that measure different metrics. The description of them could be found in the table [系统。活动](../../operations/system-tables/events.md#system_tables-events) - `Settings` ([Map(String, String)](../../sql-reference/data-types/array.md)) — Names of settings that were changed when the client ran the query. To enable logging changes to settings, set the `log_query_settings` 参数为1。 -- `thread_ids` ([Array(UInt32)](../../sql-reference/data-types/array.md)) — 参与查询的线程数. +- `thread_ids` ([Array(UInt64)](../../sql-reference/data-types/array.md)) — 参与查询的线程数. 
- `Settings.Names` ([Array(String)](../../sql-reference/data-types/array.md)) — 客户端运行查询时更改的设置的名称。 要启用对设置的日志记录更改,请将log_query_settings参数设置为1。 - `Settings.Values` ([Array(String)](../../sql-reference/data-types/array.md)) — `Settings.Names` 列中列出的设置的值。 **示例** diff --git a/src/Core/NamesAndAliases.h b/src/Core/NamesAndAliases.h new file mode 100644 index 00000000000..694d4095ace --- /dev/null +++ b/src/Core/NamesAndAliases.h @@ -0,0 +1,53 @@ +#pragma once + +#include +#include +#include +#include +#include +#include + +#include +#include + +namespace DB +{ + +class NameAndAliasPair +{ +public: + NameAndAliasPair(const String & name_, const DataTypePtr & type_, const String & expression_) + : name(name_) + , type(type_) + , expression(expression_) + {} + + String name; + DataTypePtr type; + String expression; +}; + +/// This is needed to use structured bindings for NameAndAliasPair +/// const auto & [name, type, expression] = name_and_alias +template <size_t I> +decltype(auto) get(const NameAndAliasPair & name_and_alias) +{ + if constexpr (I == 0) + return name_and_alias.name; + else if constexpr (I == 1) + return name_and_alias.type; + else if constexpr (I == 2) + return name_and_alias.expression; +} + +using NamesAndAliases = std::vector<NameAndAliasPair>; + +} + +namespace std +{ + template <> struct tuple_size<DB::NameAndAliasPair> : std::integral_constant<size_t, 3> {}; + template <> struct tuple_element<0, DB::NameAndAliasPair> { using type = DB::String; }; + template <> struct tuple_element<1, DB::NameAndAliasPair> { using type = DB::DataTypePtr; }; + template <> struct tuple_element<2, DB::NameAndAliasPair> { using type = DB::String; }; +} diff --git a/src/Core/Settings.cpp b/src/Core/Settings.cpp index a72992d4af7..11c625007d9 100644 --- a/src/Core/Settings.cpp +++ b/src/Core/Settings.cpp @@ -77,6 +77,7 @@ void Settings::dumpToMapColumn(IColumn * column, bool changed_only) value_column.insert(setting.getValueString()); size++; } + offsets.push_back(offsets.back() + size); } diff --git a/src/Core/Settings.h b/src/Core/Settings.h index ad34bec3ae6..0197bfac7e4 100644 --- a/src/Core/Settings.h +++ b/src/Core/Settings.h @@ -441,7 +441,7 @@ class IColumn; M(Bool, asterisk_include_alias_columns, false, "Include ALIAS columns for wildcard query", 0) \ M(Bool, optimize_skip_merged_partitions, false, "Skip partitions with one part with level > 0 in optimize final", 0) \ M(Bool, optimize_on_insert, true, "Do the same transformation for inserted block of data as if merge was done on this block.", 0) \ - M(Bool, allow_experimental_map_type, false, "Allow data type Map", 0) \ + M(Bool, allow_experimental_map_type, true, "Obsolete setting, does nothing.", 0) \ M(Bool, allow_experimental_window_functions, false, "Allow experimental window functions", 0) \ M(Bool, allow_experimental_projection_optimization, false, "Enable projection optimization when processing SELECT queries", 0) \ M(Bool, force_optimize_projection, false, "If projection optimization is enabled, SELECT queries need to use projection", 0) \ @@ -596,7 +596,7 @@ struct Settings : public BaseSettings /// Load settings from configuration file, at "path" prefix in configuration. void loadSettingsFromConfig(const String & path, const Poco::Util::AbstractConfiguration & config); - /// Dumps profile events to two columns of type Array(String) + /// Dumps profile events to column of type Map(String, String) void dumpToMapColumn(IColumn * column, bool changed_only = true); /// Adds program options to set the settings from a command line. 
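The `std::tuple_size`/`tuple_element` specializations and the free `get<I>()` overload in the new `NamesAndAliases.h` header exist so that alias descriptions destructure the same way `NameAndTypePair` does. A minimal hedged sketch of a consumer — the function is illustrative, though the sample alias entry mirrors the `ProfileEvents.Names` aliases registered later in this patch:

```cpp
#include <iostream>

#include <Core/NamesAndAliases.h>
#include <DataTypes/DataTypeArray.h>
#include <DataTypes/DataTypeString.h>

using namespace DB;

int main()
{
    /// One alias column, in exactly the shape the system log tables register:
    NamesAndAliases aliases
    {
        {"ProfileEvents.Names",
         std::make_shared<DataTypeArray>(std::make_shared<DataTypeString>()),
         "mapKeys(ProfileEvents)"},
    };

    /// Structured bindings work because of the std::tuple_size/tuple_element
    /// specializations and the get<I>() overload defined in the header above.
    for (const auto & [name, type, expression] : aliases)
        std::cout << name << ' ' << type->getName() << " ALIAS " << expression << '\n';
}
```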
diff --git a/src/Interpreters/AsynchronousMetricLog.cpp b/src/Interpreters/AsynchronousMetricLog.cpp index 6c20a04d291..79e2d513d5c 100644 --- a/src/Interpreters/AsynchronousMetricLog.cpp +++ b/src/Interpreters/AsynchronousMetricLog.cpp @@ -11,17 +11,16 @@ namespace DB { -Block AsynchronousMetricLogElement::createBlock() +NamesAndTypesList AsynchronousMetricLogElement::getNamesAndTypes() { - ColumnsWithTypeAndName columns; - - columns.emplace_back(std::make_shared(), "event_date"); - columns.emplace_back(std::make_shared(), "event_time"); - columns.emplace_back(std::make_shared(6), "event_time_microseconds"); - columns.emplace_back(std::make_shared(std::make_shared()), "name"); - columns.emplace_back(std::make_shared(), "value"); - - return Block(columns); + return + { + {"event_date", std::make_shared()}, + {"event_time", std::make_shared()}, + {"event_time_microseconds", std::make_shared(6)}, + {"name", std::make_shared(std::make_shared())}, + {"value", std::make_shared(),} + }; } diff --git a/src/Interpreters/AsynchronousMetricLog.h b/src/Interpreters/AsynchronousMetricLog.h index 30bac3f5a99..6275572935c 100644 --- a/src/Interpreters/AsynchronousMetricLog.h +++ b/src/Interpreters/AsynchronousMetricLog.h @@ -12,8 +12,8 @@ namespace DB { -typedef double AsynchronousMetricValue; -typedef std::unordered_map AsynchronousMetricValues; +using AsynchronousMetricValue = double; +using AsynchronousMetricValues = std::unordered_map; /** AsynchronousMetricLog is a log of metric values measured at regular time interval. */ @@ -27,7 +27,8 @@ struct AsynchronousMetricLogElement double value; static std::string name() { return "AsynchronousMetricLog"; } - static Block createBlock(); + static NamesAndTypesList getNamesAndTypes(); + static NamesAndAliases getNamesAndAliases() { return {}; } void appendToBlock(MutableColumns & columns) const; }; diff --git a/src/Interpreters/CrashLog.cpp b/src/Interpreters/CrashLog.cpp index 5067acd4a5c..a9da804f1d2 100644 --- a/src/Interpreters/CrashLog.cpp +++ b/src/Interpreters/CrashLog.cpp @@ -18,21 +18,21 @@ namespace DB std::weak_ptr CrashLog::crash_log; -Block CrashLogElement::createBlock() +NamesAndTypesList CrashLogElement::getNamesAndTypes() { return { - {std::make_shared(), "event_date"}, - {std::make_shared(), "event_time"}, - {std::make_shared(), "timestamp_ns"}, - {std::make_shared(), "signal"}, - {std::make_shared(), "thread_id"}, - {std::make_shared(), "query_id"}, - {std::make_shared(std::make_shared()), "trace"}, - {std::make_shared(std::make_shared()), "trace_full"}, - {std::make_shared(), "version"}, - {std::make_shared(), "revision"}, - {std::make_shared(), "build_id"}, + {"event_date", std::make_shared()}, + {"event_time", std::make_shared()}, + {"timestamp_ns", std::make_shared()}, + {"signal", std::make_shared()}, + {"thread_id", std::make_shared()}, + {"query_id", std::make_shared()}, + {"trace", std::make_shared(std::make_shared())}, + {"trace_full", std::make_shared(std::make_shared())}, + {"version", std::make_shared()}, + {"revision", std::make_shared()}, + {"build_id", std::make_shared()}, }; } @@ -60,7 +60,6 @@ void CrashLogElement::appendToBlock(MutableColumns & columns) const } - void collectCrashLog(Int32 signal, UInt64 thread_id, const String & query_id, const StackTrace & stack_trace) { using namespace DB; diff --git a/src/Interpreters/CrashLog.h b/src/Interpreters/CrashLog.h index 9494bdc85c9..ba27c1f513e 100644 --- a/src/Interpreters/CrashLog.h +++ b/src/Interpreters/CrashLog.h @@ -24,7 +24,8 @@ struct CrashLogElement Array 
trace_full; static std::string name() { return "CrashLog"; } - static Block createBlock(); + static NamesAndTypesList getNamesAndTypes(); + static NamesAndAliases getNamesAndAliases() { return {}; } void appendToBlock(MutableColumns & columns) const; }; diff --git a/src/Interpreters/InterpreterCreateQuery.cpp b/src/Interpreters/InterpreterCreateQuery.cpp index 6c8bdfd765f..de858bdbdc5 100644 --- a/src/Interpreters/InterpreterCreateQuery.cpp +++ b/src/Interpreters/InterpreterCreateQuery.cpp @@ -302,6 +302,35 @@ ASTPtr InterpreterCreateQuery::formatColumns(const NamesAndTypesList & columns) return columns_list; } +ASTPtr InterpreterCreateQuery::formatColumns(const NamesAndTypesList & columns, const NamesAndAliases & alias_columns) +{ + std::shared_ptr columns_list = std::static_pointer_cast(formatColumns(columns)); + + for (const auto & alias_column : alias_columns) + { + const auto column_declaration = std::make_shared(); + column_declaration->name = alias_column.name; + + ParserDataType type_parser; + String type_name = alias_column.type->getName(); + const char * type_pos = type_name.data(); + const char * type_end = type_pos + type_name.size(); + column_declaration->type = parseQuery(type_parser, type_pos, type_end, "data type", 0, DBMS_DEFAULT_MAX_PARSER_DEPTH); + + column_declaration->default_specifier = "ALIAS"; + + const auto & alias = alias_column.expression; + const char * alias_pos = alias.data(); + const char * alias_end = alias_pos + alias.size(); + ParserExpression expression_parser; + column_declaration->default_expression = parseQuery(expression_parser, alias_pos, alias_end, "expression", 0, DBMS_DEFAULT_MAX_PARSER_DEPTH); + + columns_list->children.emplace_back(column_declaration); + } + + return columns_list; +} + ASTPtr InterpreterCreateQuery::formatColumns(const ColumnsDescription & columns) { auto columns_list = std::make_shared(); @@ -646,24 +675,6 @@ void InterpreterCreateQuery::validateTableStructure(const ASTCreateQuery & creat } } } - - // enable allow_experimental_map_type for system tables - if (create.database != "system" && !create.attach && !settings.allow_experimental_map_type) - { - for (const auto & name_and_type_pair : properties.columns.getAllPhysical()) - { - WhichDataType which(*name_and_type_pair.type); - if (which.isMap()) - { - const auto & type_name = name_and_type_pair.type->getName(); - String message = "Cannot create table with column '" + name_and_type_pair.name + "' which type is '" - + type_name + "' because experimental Map type is not allowed. " - + "Set 'allow_experimental_map_type = 1' setting to enable"; - throw Exception(message, ErrorCodes::ILLEGAL_COLUMN); - } - } - - } } void InterpreterCreateQuery::setEngine(ASTCreateQuery & create) const diff --git a/src/Interpreters/InterpreterCreateQuery.h b/src/Interpreters/InterpreterCreateQuery.h index 45f0bbd7cf8..7bd3ef25746 100644 --- a/src/Interpreters/InterpreterCreateQuery.h +++ b/src/Interpreters/InterpreterCreateQuery.h @@ -1,12 +1,12 @@ #pragma once +#include #include #include #include #include #include #include -#include namespace DB @@ -31,8 +31,8 @@ public: /// List of columns and their types in AST. 
static ASTPtr formatColumns(const NamesAndTypesList & columns); + static ASTPtr formatColumns(const NamesAndTypesList & columns, const NamesAndAliases & alias_columns); static ASTPtr formatColumns(const ColumnsDescription & columns); - static ASTPtr formatIndices(const IndicesDescription & indices); static ASTPtr formatConstraints(const ConstraintsDescription & constraints); static ASTPtr formatProjections(const ProjectionsDescription & projections); diff --git a/src/Interpreters/MetricLog.cpp b/src/Interpreters/MetricLog.cpp index fd1c120f18c..f8fab6d7a9d 100644 --- a/src/Interpreters/MetricLog.cpp +++ b/src/Interpreters/MetricLog.cpp @@ -8,21 +8,21 @@ namespace DB { -Block MetricLogElement::createBlock() +NamesAndTypesList MetricLogElement::getNamesAndTypes() { - ColumnsWithTypeAndName columns_with_type_and_name; + NamesAndTypesList columns_with_type_and_name; - columns_with_type_and_name.emplace_back(std::make_shared(), "event_date"); - columns_with_type_and_name.emplace_back(std::make_shared(), "event_time"); - columns_with_type_and_name.emplace_back(std::make_shared(6), "event_time_microseconds"); - columns_with_type_and_name.emplace_back(std::make_shared(), "milliseconds"); + columns_with_type_and_name.emplace_back("event_date", std::make_shared()); + columns_with_type_and_name.emplace_back("event_time", std::make_shared()); + columns_with_type_and_name.emplace_back("event_time_microseconds", std::make_shared(6)); + columns_with_type_and_name.emplace_back("milliseconds", std::make_shared()); for (size_t i = 0, end = ProfileEvents::end(); i < end; ++i) { std::string name; name += "ProfileEvent_"; name += ProfileEvents::getName(ProfileEvents::Event(i)); - columns_with_type_and_name.emplace_back(std::make_shared(), std::move(name)); + columns_with_type_and_name.emplace_back(std::move(name), std::make_shared()); } for (size_t i = 0, end = CurrentMetrics::end(); i < end; ++i) @@ -30,10 +30,10 @@ Block MetricLogElement::createBlock() std::string name; name += "CurrentMetric_"; name += CurrentMetrics::getName(CurrentMetrics::Metric(i)); - columns_with_type_and_name.emplace_back(std::make_shared(), std::move(name)); + columns_with_type_and_name.emplace_back(std::move(name), std::make_shared()); } - return Block(columns_with_type_and_name); + return columns_with_type_and_name; } diff --git a/src/Interpreters/MetricLog.h b/src/Interpreters/MetricLog.h index f03b682c60a..c43c2872788 100644 --- a/src/Interpreters/MetricLog.h +++ b/src/Interpreters/MetricLog.h @@ -25,7 +25,8 @@ struct MetricLogElement std::vector current_metrics; static std::string name() { return "MetricLog"; } - static Block createBlock(); + static NamesAndTypesList getNamesAndTypes(); + static NamesAndAliases getNamesAndAliases() { return {}; } void appendToBlock(MutableColumns & columns) const; }; diff --git a/src/Interpreters/OpenTelemetrySpanLog.cpp b/src/Interpreters/OpenTelemetrySpanLog.cpp index d54c6ba9afc..89cce890555 100644 --- a/src/Interpreters/OpenTelemetrySpanLog.cpp +++ b/src/Interpreters/OpenTelemetrySpanLog.cpp @@ -15,13 +15,13 @@ namespace DB { -Block OpenTelemetrySpanLogElement::createBlock() +NamesAndTypesList OpenTelemetrySpanLogElement::getNamesAndTypes() { return { - {std::make_shared(), "trace_id"}, - {std::make_shared(), "span_id"}, - {std::make_shared(), "parent_span_id"}, - {std::make_shared(), "operation_name"}, + {"trace_id", std::make_shared()}, + {"span_id", std::make_shared()}, + {"parent_span_id", std::make_shared()}, + {"operation_name", std::make_shared()}, // DateTime64 is really unwieldy -- 
there is no "normal" way to convert // it to an UInt64 count of microseconds, except: // 1) reinterpretAsUInt64(reinterpretAsFixedString(date)), which just @@ -32,14 +32,21 @@ Block OpenTelemetrySpanLogElement::createBlock() // Also subtraction of two DateTime64 points doesn't work, so you can't // get duration. // It is much less hassle to just use UInt64 of microseconds. - {std::make_shared(), "start_time_us"}, - {std::make_shared(), "finish_time_us"}, - {std::make_shared(), "finish_date"}, - {std::make_shared(std::make_shared(), std::make_shared()), - "attribute"}, + {"start_time_us", std::make_shared()}, + {"finish_time_us", std::make_shared()}, + {"finish_date", std::make_shared()}, + {"attribute", std::make_shared(std::make_shared(), std::make_shared())}, }; } +NamesAndAliases OpenTelemetrySpanLogElement::getNamesAndAliases() +{ + return + { + {"attribute.names", std::make_shared(std::make_shared()), "mapKeys(attribute)"}, + {"attribute.values", std::make_shared(std::make_shared()), "mapKeys(attribute)"} + }; +} void OpenTelemetrySpanLogElement::appendToBlock(MutableColumns & columns) const { diff --git a/src/Interpreters/OpenTelemetrySpanLog.h b/src/Interpreters/OpenTelemetrySpanLog.h index 861293b4f98..b287301325c 100644 --- a/src/Interpreters/OpenTelemetrySpanLog.h +++ b/src/Interpreters/OpenTelemetrySpanLog.h @@ -27,7 +27,8 @@ struct OpenTelemetrySpanLogElement : public OpenTelemetrySpan : OpenTelemetrySpan(span) {} static std::string name() { return "OpenTelemetrySpanLog"; } - static Block createBlock(); + static NamesAndTypesList getNamesAndTypes(); + static NamesAndAliases getNamesAndAliases(); void appendToBlock(MutableColumns & columns) const; }; diff --git a/src/Interpreters/PartLog.cpp b/src/Interpreters/PartLog.cpp index ad4fb60f00c..f89f836871a 100644 --- a/src/Interpreters/PartLog.cpp +++ b/src/Interpreters/PartLog.cpp @@ -16,7 +16,7 @@ namespace DB { -Block PartLogElement::createBlock() +NamesAndTypesList PartLogElement::getNamesAndTypes() { auto event_type_datatype = std::make_shared( DataTypeEnum8::Values @@ -33,35 +33,34 @@ Block PartLogElement::createBlock() ColumnsWithTypeAndName columns_with_type_and_name; return { + {"query_id", std::make_shared()}, + {"event_type", std::move(event_type_datatype)}, + {"event_date", std::make_shared()}, - columns_with_type_and_name.emplace_back(std::make_shared(), "query_id"), - columns_with_type_and_name.emplace_back(std::move(event_type_datatype), "event_type"), - columns_with_type_and_name.emplace_back(std::make_shared(), "event_date"), + {"event_time", std::make_shared()}, + {"event_time_microseconds", std::make_shared(6)}, - columns_with_type_and_name.emplace_back(std::make_shared(), "event_time"), - columns_with_type_and_name.emplace_back(std::make_shared(6), "event_time_microseconds"), + {"duration_ms", std::make_shared()}, - columns_with_type_and_name.emplace_back(std::make_shared(), "duration_ms"), + {"database", std::make_shared()}, + {"table", std::make_shared()}, + {"part_name", std::make_shared()}, + {"partition_id", std::make_shared()}, + {"path_on_disk", std::make_shared()}, - columns_with_type_and_name.emplace_back(std::make_shared(), "database"), - columns_with_type_and_name.emplace_back(std::make_shared(), "table"), - columns_with_type_and_name.emplace_back(std::make_shared(), "part_name"), - columns_with_type_and_name.emplace_back(std::make_shared(), "partition_id"), - columns_with_type_and_name.emplace_back(std::make_shared(), "path_on_disk"), - - columns_with_type_and_name.emplace_back(std::make_shared(), 
"rows"), - columns_with_type_and_name.emplace_back(std::make_shared(), "size_in_bytes"), // On disk + {"rows", std::make_shared()}, + {"size_in_bytes", std::make_shared()}, // On disk /// Merge-specific info - columns_with_type_and_name.emplace_back(std::make_shared(std::make_shared()), "merged_from"), - columns_with_type_and_name.emplace_back(std::make_shared(), "bytes_uncompressed"), // Result bytes - columns_with_type_and_name.emplace_back(std::make_shared(), "read_rows"), - columns_with_type_and_name.emplace_back(std::make_shared(), "read_bytes"), - columns_with_type_and_name.emplace_back(std::make_shared(), "peak_memory_usage"), + {"merged_from", std::make_shared(std::make_shared())}, + {"bytes_uncompressed", std::make_shared()}, // Result bytes + {"read_rows", std::make_shared()}, + {"read_bytes", std::make_shared()}, + {"peak_memory_usage", std::make_shared()}, /// Is there an error during the execution or commit - columns_with_type_and_name.emplace_back(std::make_shared(), "error"), - columns_with_type_and_name.emplace_back(std::make_shared(), "exception"), + {"error", std::make_shared()}, + {"exception", std::make_shared()}, }; } diff --git a/src/Interpreters/PartLog.h b/src/Interpreters/PartLog.h index edb6ab4a45f..1aec850e3dc 100644 --- a/src/Interpreters/PartLog.h +++ b/src/Interpreters/PartLog.h @@ -52,7 +52,8 @@ struct PartLogElement static std::string name() { return "PartLog"; } - static Block createBlock(); + static NamesAndTypesList getNamesAndTypes(); + static NamesAndAliases getNamesAndAliases() { return {}; } void appendToBlock(MutableColumns & columns) const; }; diff --git a/src/Interpreters/ProfileEventsExt.cpp b/src/Interpreters/ProfileEventsExt.cpp index ec3131d39a3..2e8f986ca6c 100644 --- a/src/Interpreters/ProfileEventsExt.cpp +++ b/src/Interpreters/ProfileEventsExt.cpp @@ -36,6 +36,7 @@ void dumpToMapColumn(const Counters & counters, DB::IColumn * column, bool nonze value_column.insert(value); size++; } + offsets.push_back(offsets.back() + size); } diff --git a/src/Interpreters/QueryLog.cpp b/src/Interpreters/QueryLog.cpp index e8647e3c192..3f668e5e0ab 100644 --- a/src/Interpreters/QueryLog.cpp +++ b/src/Interpreters/QueryLog.cpp @@ -26,7 +26,7 @@ namespace DB { -Block QueryLogElement::createBlock() +NamesAndTypesList QueryLogElement::getNamesAndTypes() { auto query_status_datatype = std::make_shared( DataTypeEnum8::Values @@ -39,84 +39,94 @@ Block QueryLogElement::createBlock() return { - {std::move(query_status_datatype), "type"}, - {std::make_shared(), "event_date"}, - {std::make_shared(), "event_time"}, - {std::make_shared(6), "event_time_microseconds"}, - {std::make_shared(), "query_start_time"}, - {std::make_shared(6), "query_start_time_microseconds"}, - {std::make_shared(), "query_duration_ms"}, + {"type", std::move(query_status_datatype)}, + {"event_date", std::make_shared()}, + {"event_time", std::make_shared()}, + {"event_time_microseconds", std::make_shared(6)}, + {"query_start_time", std::make_shared()}, + {"query_start_time_microseconds", std::make_shared(6)}, + {"query_duration_ms", std::make_shared()}, - {std::make_shared(), "read_rows"}, - {std::make_shared(), "read_bytes"}, - {std::make_shared(), "written_rows"}, - {std::make_shared(), "written_bytes"}, - {std::make_shared(), "result_rows"}, - {std::make_shared(), "result_bytes"}, - {std::make_shared(), "memory_usage"}, + {"read_rows", std::make_shared()}, + {"read_bytes", std::make_shared()}, + {"written_rows", std::make_shared()}, + {"written_bytes", std::make_shared()}, + {"result_rows", 
std::make_shared()}, + {"result_bytes", std::make_shared()}, + {"memory_usage", std::make_shared()}, - {std::make_shared(), "current_database"}, - {std::make_shared(), "query"}, - {std::make_shared(), "normalized_query_hash"}, - {std::make_shared(std::make_shared()), "query_kind"}, - {std::make_shared( - std::make_shared(std::make_shared())), "databases"}, - {std::make_shared( - std::make_shared(std::make_shared())), "tables"}, - {std::make_shared( - std::make_shared(std::make_shared())), "columns"}, - {std::make_shared( - std::make_shared(std::make_shared())), "projections"}, - {std::make_shared(), "exception_code"}, - {std::make_shared(), "exception"}, - {std::make_shared(), "stack_trace"}, + {"current_database", std::make_shared()}, + {"query", std::make_shared()}, + {"normalized_query_hash", std::make_shared()}, + {"query_kind", std::make_shared(std::make_shared())}, + {"databases", std::make_shared( + std::make_shared(std::make_shared()))}, + {"tables", std::make_shared( + std::make_shared(std::make_shared()))}, + {"columns", std::make_shared( + std::make_shared(std::make_shared()))}, + {"projections", std::make_shared( + std::make_shared(std::make_shared()))}, + {"exception_code", std::make_shared()}, + {"exception", std::make_shared()}, + {"stack_trace", std::make_shared()}, - {std::make_shared(), "is_initial_query"}, - {std::make_shared(), "user"}, - {std::make_shared(), "query_id"}, - {DataTypeFactory::instance().get("IPv6"), "address"}, - {std::make_shared(), "port"}, - {std::make_shared(), "initial_user"}, - {std::make_shared(), "initial_query_id"}, - {DataTypeFactory::instance().get("IPv6"), "initial_address"}, - {std::make_shared(), "initial_port"}, - {std::make_shared(), "initial_query_start_time"}, - {std::make_shared(6), "initial_query_start_time_microseconds"}, - {std::make_shared(), "interface"}, - {std::make_shared(), "os_user"}, - {std::make_shared(), "client_hostname"}, - {std::make_shared(), "client_name"}, - {std::make_shared(), "client_revision"}, - {std::make_shared(), "client_version_major"}, - {std::make_shared(), "client_version_minor"}, - {std::make_shared(), "client_version_patch"}, - {std::make_shared(), "http_method"}, - {std::make_shared(), "http_user_agent"}, - {std::make_shared(), "http_referer"}, - {std::make_shared(), "forwarded_for"}, - {std::make_shared(), "quota_key"}, + {"is_initial_query", std::make_shared()}, + {"user", std::make_shared()}, + {"query_id", std::make_shared()}, + {"address", DataTypeFactory::instance().get("IPv6")}, + {"port", std::make_shared()}, + {"initial_user", std::make_shared()}, + {"initial_query_id", std::make_shared()}, + {"initial_address", DataTypeFactory::instance().get("IPv6")}, + {"initial_port", std::make_shared()}, + {"initial_query_start_time", std::make_shared()}, + {"initial_query_start_time_microseconds", std::make_shared(6)}, + {"interface", std::make_shared()}, + {"os_user", std::make_shared()}, + {"client_hostname", std::make_shared()}, + {"client_name", std::make_shared()}, + {"client_revision", std::make_shared()}, + {"client_version_major", std::make_shared()}, + {"client_version_minor", std::make_shared()}, + {"client_version_patch", std::make_shared()}, + {"http_method", std::make_shared()}, + {"http_user_agent", std::make_shared()}, + {"http_referer", std::make_shared()}, + {"forwarded_for", std::make_shared()}, + {"quota_key", std::make_shared()}, - {std::make_shared(), "revision"}, + {"revision", std::make_shared()}, - {std::make_shared(), "log_comment"}, + {"log_comment", std::make_shared()}, - 
{std::make_shared(std::make_shared()), "thread_ids"}, - {std::make_shared(std::make_shared(), std::make_shared()), "ProfileEvents"}, - {std::make_shared(std::make_shared(), std::make_shared()), "Settings"}, + {"thread_ids", std::make_shared(std::make_shared())}, + {"ProfileEvents", std::make_shared(std::make_shared(), std::make_shared())}, + {"Settings", std::make_shared(std::make_shared(), std::make_shared())}, - {std::make_shared(std::make_shared()), "used_aggregate_functions"}, - {std::make_shared(std::make_shared()), "used_aggregate_function_combinators"}, - {std::make_shared(std::make_shared()), "used_database_engines"}, - {std::make_shared(std::make_shared()), "used_data_type_families"}, - {std::make_shared(std::make_shared()), "used_dictionaries"}, - {std::make_shared(std::make_shared()), "used_formats"}, - {std::make_shared(std::make_shared()), "used_functions"}, - {std::make_shared(std::make_shared()), "used_storages"}, - {std::make_shared(std::make_shared()), "used_table_functions"} + {"used_aggregate_functions", std::make_shared(std::make_shared())}, + {"used_aggregate_function_combinators", std::make_shared(std::make_shared())}, + {"used_database_engines", std::make_shared(std::make_shared())}, + {"used_data_type_families", std::make_shared(std::make_shared())}, + {"used_dictionaries", std::make_shared(std::make_shared())}, + {"used_formats", std::make_shared(std::make_shared())}, + {"used_functions", std::make_shared(std::make_shared())}, + {"used_storages", std::make_shared(std::make_shared())}, + {"used_table_functions", std::make_shared(std::make_shared())} }; } +NamesAndAliases QueryLogElement::getNamesAndAliases() +{ + return + { + {"ProfileEvents.Names", {std::make_shared(std::make_shared())}, "mapKeys(ProfileEvents)"}, + {"ProfileEvents.Values", {std::make_shared(std::make_shared())}, "mapValues(ProfileEvents)"}, + {"Settings.Names", {std::make_shared(std::make_shared())}, "mapKeys(Settings)" }, + {"Settings.Values", {std::make_shared(std::make_shared())}, "mapValues(Settings)"} + }; +} void QueryLogElement::appendToBlock(MutableColumns & columns) const { diff --git a/src/Interpreters/QueryLog.h b/src/Interpreters/QueryLog.h index 684a635a920..0aa02104306 100644 --- a/src/Interpreters/QueryLog.h +++ b/src/Interpreters/QueryLog.h @@ -1,9 +1,9 @@ #pragma once +#include #include #include - namespace ProfileEvents { class Counters; @@ -83,7 +83,8 @@ struct QueryLogElement static std::string name() { return "QueryLog"; } - static Block createBlock(); + static NamesAndTypesList getNamesAndTypes(); + static NamesAndAliases getNamesAndAliases(); void appendToBlock(MutableColumns & columns) const; static void appendClientInfo(const ClientInfo & client_info, MutableColumns & columns, size_t & i); diff --git a/src/Interpreters/QueryThreadLog.cpp b/src/Interpreters/QueryThreadLog.cpp index dfdb905f229..7ca3c10045e 100644 --- a/src/Interpreters/QueryThreadLog.cpp +++ b/src/Interpreters/QueryThreadLog.cpp @@ -19,58 +19,68 @@ namespace DB { -Block QueryThreadLogElement::createBlock() + +NamesAndTypesList QueryThreadLogElement::getNamesAndTypes() { return { - {std::make_shared(), "event_date"}, - {std::make_shared(), "event_time"}, - {std::make_shared(6), "event_time_microseconds"}, - {std::make_shared(), "query_start_time"}, - {std::make_shared(6), "query_start_time_microseconds"}, - {std::make_shared(), "query_duration_ms"}, + {"event_date", std::make_shared()}, + {"event_time", std::make_shared()}, + {"event_time_microseconds", std::make_shared(6)}, + {"query_start_time", 
std::make_shared()}, + {"query_start_time_microseconds", std::make_shared(6)}, + {"query_duration_ms", std::make_shared()}, - {std::make_shared(), "read_rows"}, - {std::make_shared(), "read_bytes"}, - {std::make_shared(), "written_rows"}, - {std::make_shared(), "written_bytes"}, - {std::make_shared(), "memory_usage"}, - {std::make_shared(), "peak_memory_usage"}, + {"read_rows", std::make_shared()}, + {"read_bytes", std::make_shared()}, + {"written_rows", std::make_shared()}, + {"written_bytes", std::make_shared()}, + {"memory_usage", std::make_shared()}, + {"peak_memory_usage", std::make_shared()}, - {std::make_shared(), "thread_name"}, - {std::make_shared(), "thread_id"}, - {std::make_shared(), "master_thread_id"}, - {std::make_shared(), "current_database"}, - {std::make_shared(), "query"}, - {std::make_shared(), "normalized_query_hash"}, + {"thread_name", std::make_shared()}, + {"thread_id", std::make_shared()}, + {"master_thread_id", std::make_shared()}, + {"current_database", std::make_shared()}, + {"query", std::make_shared()}, + {"normalized_query_hash", std::make_shared()}, - {std::make_shared(), "is_initial_query"}, - {std::make_shared(), "user"}, - {std::make_shared(), "query_id"}, - {DataTypeFactory::instance().get("IPv6"), "address"}, - {std::make_shared(), "port"}, - {std::make_shared(), "initial_user"}, - {std::make_shared(), "initial_query_id"}, - {DataTypeFactory::instance().get("IPv6"), "initial_address"}, - {std::make_shared(), "initial_port"}, - {std::make_shared(), "initial_query_start_time"}, - {std::make_shared(6), "initial_query_start_time_microseconds"}, - {std::make_shared(), "interface"}, - {std::make_shared(), "os_user"}, - {std::make_shared(), "client_hostname"}, - {std::make_shared(), "client_name"}, - {std::make_shared(), "client_revision"}, - {std::make_shared(), "client_version_major"}, - {std::make_shared(), "client_version_minor"}, - {std::make_shared(), "client_version_patch"}, - {std::make_shared(), "http_method"}, - {std::make_shared(), "http_user_agent"}, - {std::make_shared(), "http_referer"}, - {std::make_shared(), "forwarded_for"}, - {std::make_shared(), "quota_key"}, + {"is_initial_query", std::make_shared()}, + {"user", std::make_shared()}, + {"query_id", std::make_shared()}, + {"address", DataTypeFactory::instance().get("IPv6")}, + {"port", std::make_shared()}, + {"initial_user", std::make_shared()}, + {"initial_query_id", std::make_shared()}, + {"initial_address", DataTypeFactory::instance().get("IPv6")}, + {"initial_port", std::make_shared()}, + {"initial_query_start_time", std::make_shared()}, + {"initial_query_start_time_microseconds", std::make_shared(6)}, + {"interface", std::make_shared()}, + {"os_user", std::make_shared()}, + {"client_hostname", std::make_shared()}, + {"client_name", std::make_shared()}, + {"client_revision", std::make_shared()}, + {"client_version_major", std::make_shared()}, + {"client_version_minor", std::make_shared()}, + {"client_version_patch", std::make_shared()}, + {"http_method", std::make_shared()}, + {"http_user_agent", std::make_shared()}, + {"http_referer", std::make_shared()}, + {"forwarded_for", std::make_shared()}, + {"quota_key", std::make_shared()}, - {std::make_shared(), "revision"}, + {"revision", std::make_shared()}, - {std::make_shared(std::make_shared(), std::make_shared()), "ProfileEvents"}, + {"ProfileEvents", std::make_shared(std::make_shared(), std::make_shared())}, + }; +} + +NamesAndAliases QueryThreadLogElement::getNamesAndAliases() +{ + return + { + {"ProfileEvents.Names", 
{std::make_shared(std::make_shared())}, "mapKeys(ProfileEvents)"}, + {"ProfileEvents.Values", {std::make_shared(std::make_shared())}, "mapValues(ProfileEvents)"} }; } diff --git a/src/Interpreters/QueryThreadLog.h b/src/Interpreters/QueryThreadLog.h index 6771e8ba88c..57e93edbaf7 100644 --- a/src/Interpreters/QueryThreadLog.h +++ b/src/Interpreters/QueryThreadLog.h @@ -49,7 +49,8 @@ struct QueryThreadLogElement static std::string name() { return "QueryThreadLog"; } - static Block createBlock(); + static NamesAndTypesList getNamesAndTypes(); + static NamesAndAliases getNamesAndAliases(); void appendToBlock(MutableColumns & columns) const; }; diff --git a/src/Interpreters/SystemLog.h b/src/Interpreters/SystemLog.h index 9e57e308dbe..ee3116362e5 100644 --- a/src/Interpreters/SystemLog.h +++ b/src/Interpreters/SystemLog.h @@ -52,7 +52,8 @@ namespace DB /// fields static std::string name(); - static Block createBlock(); + static NamesAndTypesList getNamesAndTypes(); + static NamesAndAliases getNamesAndAliases(); void appendToBlock(MutableColumns & columns) const; }; */ @@ -451,10 +452,18 @@ void SystemLog::flushImpl(const std::vector & to_flush, /// is called from single thread. prepareTable(); - Block block = LogElement::createBlock(); + ColumnsWithTypeAndName log_element_columns; + auto log_element_names_and_types = LogElement::getNamesAndTypes(); + + for (auto name_and_type : log_element_names_and_types) + log_element_columns.emplace_back(name_and_type.type, name_and_type.name); + + Block block(std::move(log_element_columns)); + MutableColumns columns = block.mutateColumns(); for (const auto & elem : to_flush) elem.appendToBlock(columns); + block.setColumns(std::move(columns)); /// We write to table indirectly, using InterpreterInsertQuery. @@ -500,11 +509,14 @@ void SystemLog::prepareTable() if (table) { - auto metadata_snapshot = table->getInMemoryMetadataPtr(); - const Block expected = LogElement::createBlock(); - const Block actual = metadata_snapshot->getSampleBlockNonMaterialized(); + auto metadata_columns = table->getInMemoryMetadataPtr()->getColumns(); + auto old_query = InterpreterCreateQuery::formatColumns(metadata_columns); - if (!blocksHaveEqualStructure(actual, expected)) + auto ordinary_columns = LogElement::getNamesAndTypes(); + auto alias_columns = LogElement::getNamesAndAliases(); + auto current_query = InterpreterCreateQuery::formatColumns(ordinary_columns, alias_columns); + + if (old_query->getTreeHash() != current_query->getTreeHash()) { /// Rename the existing table. 
int suffix = 0; @@ -575,10 +587,10 @@ ASTPtr SystemLog::getCreateTableQuery() create->database = table_id.database_name; create->table = table_id.table_name; - Block sample = LogElement::createBlock(); - + auto ordinary_columns = LogElement::getNamesAndTypes(); + auto alias_columns = LogElement::getNamesAndAliases(); auto new_columns_list = std::make_shared(); - new_columns_list->set(new_columns_list->columns, InterpreterCreateQuery::formatColumns(sample.getNamesAndTypesList())); + new_columns_list->set(new_columns_list->columns, InterpreterCreateQuery::formatColumns(ordinary_columns, alias_columns)); create->set(create->columns_list, new_columns_list); ParserStorage storage_parser; diff --git a/src/Interpreters/TextLog.cpp b/src/Interpreters/TextLog.cpp index f5a0ce51d49..baf98b6771d 100644 --- a/src/Interpreters/TextLog.cpp +++ b/src/Interpreters/TextLog.cpp @@ -14,7 +14,7 @@ namespace DB { -Block TextLogElement::createBlock() +NamesAndTypesList TextLogElement::getNamesAndTypes() { auto priority_datatype = std::make_shared( DataTypeEnum8::Values @@ -31,23 +31,23 @@ Block TextLogElement::createBlock() return { - {std::make_shared(), "event_date"}, - {std::make_shared(), "event_time"}, - {std::make_shared(6), "event_time_microseconds"}, - {std::make_shared(), "microseconds"}, + {"event_date", std::make_shared()}, + {"event_time", std::make_shared()}, + {"event_time_microseconds", std::make_shared(6)}, + {"microseconds", std::make_shared()}, - {std::make_shared(std::make_shared()), "thread_name"}, - {std::make_shared(), "thread_id"}, + {"thread_name", std::make_shared(std::make_shared())}, + {"thread_id", std::make_shared()}, - {std::move(priority_datatype), "level"}, - {std::make_shared(), "query_id"}, - {std::make_shared(std::make_shared()), "logger_name"}, - {std::make_shared(), "message"}, + {"level", std::move(priority_datatype)}, + {"query_id", std::make_shared()}, + {"logger_name", std::make_shared(std::make_shared())}, + {"message", std::make_shared()}, - {std::make_shared(), "revision"}, + {"revision", std::make_shared()}, - {std::make_shared(std::make_shared()), "source_file"}, - {std::make_shared(), "source_line"} + {"source_file", std::make_shared(std::make_shared())}, + {"source_line", std::make_shared()} }; } diff --git a/src/Interpreters/TextLog.h b/src/Interpreters/TextLog.h index 0133d5e4eb6..d2ddd23d1e9 100644 --- a/src/Interpreters/TextLog.h +++ b/src/Interpreters/TextLog.h @@ -25,7 +25,8 @@ struct TextLogElement UInt64 source_line{}; static std::string name() { return "TextLog"; } - static Block createBlock(); + static NamesAndTypesList getNamesAndTypes(); + static NamesAndAliases getNamesAndAliases() { return {}; } void appendToBlock(MutableColumns & columns) const; }; diff --git a/src/Interpreters/TraceLog.cpp b/src/Interpreters/TraceLog.cpp index fe7512f2f00..dac27aebe58 100644 --- a/src/Interpreters/TraceLog.cpp +++ b/src/Interpreters/TraceLog.cpp @@ -21,20 +21,20 @@ const TraceDataType::Values TraceLogElement::trace_values = {"MemorySample", static_cast(TraceType::MemorySample)}, }; -Block TraceLogElement::createBlock() +NamesAndTypesList TraceLogElement::getNamesAndTypes() { return { - {std::make_shared(), "event_date"}, - {std::make_shared(), "event_time"}, - {std::make_shared(6), "event_time_microseconds"}, - {std::make_shared(), "timestamp_ns"}, - {std::make_shared(), "revision"}, - {std::make_shared(trace_values), "trace_type"}, - {std::make_shared(), "thread_id"}, - {std::make_shared(), "query_id"}, - {std::make_shared(std::make_shared()), "trace"}, - 
{std::make_shared(), "size"}, + {"event_date", std::make_shared()}, + {"event_time", std::make_shared()}, + {"event_time_microseconds", std::make_shared(6)}, + {"timestamp_ns", std::make_shared()}, + {"revision", std::make_shared()}, + {"trace_type", std::make_shared(trace_values)}, + {"thread_id", std::make_shared()}, + {"query_id", std::make_shared()}, + {"trace", std::make_shared(std::make_shared())}, + {"size", std::make_shared()}, }; } diff --git a/src/Interpreters/TraceLog.h b/src/Interpreters/TraceLog.h index 9ee43bf32cc..85400560a7b 100644 --- a/src/Interpreters/TraceLog.h +++ b/src/Interpreters/TraceLog.h @@ -27,7 +27,8 @@ struct TraceLogElement Int64 size{}; /// Allocation size in bytes for TraceType::Memory static std::string name() { return "TraceLog"; } - static Block createBlock(); + static NamesAndTypesList getNamesAndTypes(); + static NamesAndAliases getNamesAndAliases() { return {}; } void appendToBlock(MutableColumns & columns) const; }; diff --git a/src/Storages/ColumnsDescription.cpp b/src/Storages/ColumnsDescription.cpp index 4a904c96432..179204a1a0b 100644 --- a/src/Storages/ColumnsDescription.cpp +++ b/src/Storages/ColumnsDescription.cpp @@ -145,6 +145,25 @@ ColumnsDescription::ColumnsDescription(NamesAndTypesList ordinary) add(ColumnDescription(std::move(elem.name), std::move(elem.type))); } +ColumnsDescription::ColumnsDescription(NamesAndTypesList ordinary, NamesAndAliases aliases) +{ + for (auto & elem : ordinary) + add(ColumnDescription(std::move(elem.name), std::move(elem.type))); + + for (auto & alias : aliases) + { + ColumnDescription description(std::move(alias.name), std::move(alias.type)); + description.default_desc.kind = ColumnDefaultKind::Alias; + + const char * alias_expression_pos = alias.expression.data(); + const char * alias_expression_end = alias_expression_pos + alias.expression.size(); + ParserExpression expression_parser; + description.default_desc.expression = parseQuery(expression_parser, alias_expression_pos, alias_expression_end, "expression", 0, DBMS_DEFAULT_MAX_PARSER_DEPTH); + + add(std::move(description)); + } +} + /// We are trying to find first column from end with name `column_name` or with a name beginning with `column_name` and ".". 
/// For example "fruits.bananas" diff --git a/src/Storages/ColumnsDescription.h b/src/Storages/ColumnsDescription.h index 7fff22abf71..f1887d772ca 100644 --- a/src/Storages/ColumnsDescription.h +++ b/src/Storages/ColumnsDescription.h @@ -4,6 +4,7 @@ #include #include #include +#include #include #include #include @@ -54,7 +55,9 @@ class ColumnsDescription { public: ColumnsDescription() = default; - explicit ColumnsDescription(NamesAndTypesList ordinary_); + explicit ColumnsDescription(NamesAndTypesList ordinary); + + explicit ColumnsDescription(NamesAndTypesList ordinary, NamesAndAliases aliases); /// `after_column` can be a Nested column name; void add(ColumnDescription column, const String & after_column = String(), bool first = false); diff --git a/src/Storages/System/IStorageSystemOneBlock.h b/src/Storages/System/IStorageSystemOneBlock.h index 0fe9ca94972..37089ac8e3d 100644 --- a/src/Storages/System/IStorageSystemOneBlock.h +++ b/src/Storages/System/IStorageSystemOneBlock.h @@ -1,4 +1,6 @@ #pragma once + +#include #include #include #include @@ -29,6 +31,7 @@ class IStorageSystemOneBlock : public IStorage protected: virtual void fillData(MutableColumns & res_columns, ContextPtr context, const SelectQueryInfo & query_info) const = 0; + public: #if defined(ARCADIA_BUILD) IStorageSystemOneBlock(const String & name_) : IStorageSystemOneBlock(StorageID{"system", name_}) {} @@ -37,7 +40,7 @@ public: IStorageSystemOneBlock(const StorageID & table_id_) : IStorage(table_id_) { StorageInMemoryMetadata metadata_; - metadata_.setColumns(ColumnsDescription(Self::getNamesAndTypes())); + metadata_.setColumns(ColumnsDescription(Self::getNamesAndTypes(), Self::getNamesAndAliases())); setInMemoryMetadata(metadata_); } @@ -62,6 +65,8 @@ public: return Pipe(std::make_shared(sample_block, std::move(chunk))); } + + static NamesAndAliases getNamesAndAliases() { return {}; } }; } diff --git a/src/Storages/System/StorageSystemProcesses.cpp b/src/Storages/System/StorageSystemProcesses.cpp index e99cd9ddf3e..e2685af7718 100644 --- a/src/Storages/System/StorageSystemProcesses.cpp +++ b/src/Storages/System/StorageSystemProcesses.cpp @@ -68,6 +68,14 @@ NamesAndTypesList StorageSystemProcesses::getNamesAndTypes() }; } +NamesAndAliases StorageSystemProcesses::getNamesAndAliases() +{ + return + { + {"ProfileEvents.Names", {std::make_shared(std::make_shared())}, "mapKeys(ProfileEvents)"}, + {"ProfileEvents.Values", {std::make_shared(std::make_shared())}, "mapValues(ProfileEvents)"} + }; +} void StorageSystemProcesses::fillData(MutableColumns & res_columns, ContextPtr context, const SelectQueryInfo &) const { diff --git a/src/Storages/System/StorageSystemProcesses.h b/src/Storages/System/StorageSystemProcesses.h index e63e3cc3e9d..9daf079800f 100644 --- a/src/Storages/System/StorageSystemProcesses.h +++ b/src/Storages/System/StorageSystemProcesses.h @@ -20,6 +20,8 @@ public: static NamesAndTypesList getNamesAndTypes(); + static NamesAndAliases getNamesAndAliases(); + protected: using IStorageSystemOneBlock::IStorageSystemOneBlock; diff --git a/tests/queries/0_stateless/01343_min_bytes_to_use_mmap_io.sql b/tests/queries/0_stateless/01343_min_bytes_to_use_mmap_io.sql index 1eabf27692d..cbcb5c643fe 100644 --- a/tests/queries/0_stateless/01343_min_bytes_to_use_mmap_io.sql +++ b/tests/queries/0_stateless/01343_min_bytes_to_use_mmap_io.sql @@ -6,6 +6,6 @@ SET min_bytes_to_use_mmap_io = 1; SELECT * FROM test_01343; SYSTEM FLUSH LOGS; -SELECT ProfileEvents['CreatedReadBufferMMap'] as value FROM system.query_log WHERE 
current_database = currentDatabase() and event_date >= yesterday() AND event_time >= now() - 300 AND query LIKE 'SELECT * FROM test_01343%' AND type = 2 ORDER BY event_time DESC LIMIT 1; +SELECT ProfileEvents['CreatedReadBufferMMap'] AS value FROM system.query_log WHERE current_database = currentDatabase() AND event_date >= yesterday() AND event_time >= now() - 300 AND query LIKE 'SELECT * FROM test_01343%' AND type = 2 ORDER BY event_time DESC LIMIT 1; DROP TABLE test_01343; diff --git a/tests/queries/0_stateless/01344_min_bytes_to_use_mmap_io_index.sql b/tests/queries/0_stateless/01344_min_bytes_to_use_mmap_io_index.sql index 2c52c633c8a..3d148527270 100644 --- a/tests/queries/0_stateless/01344_min_bytes_to_use_mmap_io_index.sql +++ b/tests/queries/0_stateless/01344_min_bytes_to_use_mmap_io_index.sql @@ -6,7 +6,7 @@ SET min_bytes_to_use_mmap_io = 1; SELECT * FROM test_01344 WHERE x = 'Hello, world'; SYSTEM FLUSH LOGS; -SELECT ProfileEvents['CreatedReadBufferMMap'] as value FROM system.query_log - WHERE current_database = currentDatabase() and event_date >= yesterday() AND query LIKE 'SELECT * FROM test_01344 WHERE x = ''Hello, world''%' AND type = 2 ORDER BY event_time DESC LIMIT 1; +SELECT ProfileEvents['CreatedReadBufferMMap'] as value FROM system.query_log + WHERE current_database = currentDatabase() AND event_date >= yesterday() AND query LIKE 'SELECT * FROM test_01344 WHERE x = ''Hello, world''%' AND type = 2 ORDER BY event_time DESC LIMIT 1; DROP TABLE test_01344; diff --git a/tests/queries/0_stateless/01360_materialized_view_with_join_on_query_log.sql b/tests/queries/0_stateless/01360_materialized_view_with_join_on_query_log.sql index d01e5ef5a87..5f4855c6119 100644 --- a/tests/queries/0_stateless/01360_materialized_view_with_join_on_query_log.sql +++ b/tests/queries/0_stateless/01360_materialized_view_with_join_on_query_log.sql @@ -9,8 +9,6 @@ SELECT 1; SYSTEM FLUSH LOGS; -SET allow_experimental_map_type = 1; - -- NOTE: can be rewritten using log_queries_min_query_duration_ms CREATE MATERIALIZED VIEW slow_log Engine=Memory AS From 3f1bfb17aeac6ab763c7dedc0cbe6ec3c24f54e7 Mon Sep 17 00:00:00 2001 From: Maksim Kita Date: Mon, 28 Jun 2021 14:44:59 +0300 Subject: [PATCH 497/931] Fix ANTRL merge_prewhere_table test --- tests/queries/skip_list.json | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tests/queries/skip_list.json b/tests/queries/skip_list.json index ed952d0cf63..aef3f7f6d02 100644 --- a/tests/queries/skip_list.json +++ b/tests/queries/skip_list.json @@ -521,7 +521,8 @@ "01913_replace_dictionary", "01914_exchange_dictionaries", "01915_create_or_replace_dictionary", - "01913_names_of_tuple_literal" + "01913_names_of_tuple_literal", + "01925_merge_prewhere_table" ], "parallel": [ From d20feeb42c0feea879b4167d04daa92bc3fc6f7a Mon Sep 17 00:00:00 2001 From: alesapin Date: Mon, 28 Jun 2021 15:05:22 +0300 Subject: [PATCH 498/931] Relax check during DROP PART --- src/Storages/MergeTree/MergeTreeData.cpp | 29 +++++++++++++++++++++--- 1 file changed, 26 insertions(+), 3 deletions(-) diff --git a/src/Storages/MergeTree/MergeTreeData.cpp b/src/Storages/MergeTree/MergeTreeData.cpp index c115f7f593d..2f4c2efc7ba 100644 --- a/src/Storages/MergeTree/MergeTreeData.cpp +++ b/src/Storages/MergeTree/MergeTreeData.cpp @@ -2332,10 +2332,33 @@ MergeTreeData::DataPartsVector MergeTreeData::removePartsInRangeFromWorkingSet(c throw Exception("Unexpected partition_id of part " + part->name + ". 
This is a bug.", ErrorCodes::LOGICAL_ERROR); /// It's a DROP PART and it's already executed by fetching some covering part - if (part->info != drop_range && part->info.contains(drop_range)) + bool is_drop_part = !drop_range.isFakeDropRangePart(); + + if (is_drop_part && part->info != drop_range) { - LOG_INFO(log, "Skipping drop range for part {} because covering part {} already exists", drop_range.getPartName(), part->name); - return {}; + /// Why we check only min and max blocks here without checking merge + /// level? It's a tricky situation which can happen on a stale + /// replica. For example, we have parts all_1_1_0, all_2_2_0 and + /// all_3_3_0. Fast replica assign some merges (OPTIMIZE FINAL or + /// TTL) all_2_2_0 -> all_2_2_1 -> all_2_2_2. So it has set of parts + /// all_1_1_0, all_2_2_2 and all_3_3_0. After that it decides to + /// drop part all_2_2_2. Now set of parts is all_1_1_0 and + /// all_3_3_0. Now fast replica assign merge all_1_1_0 + all_3_3_0 + /// to all_1_3_1 and finishes it. Slow replica pulls the queue and + /// have two contradictory tasks -- drop all_2_2_2 and merge/fetch + /// all_1_3_1. If this replica will fetch all_1_3_1 first and then tries + /// to drop all_2_2_2 after that it will receive the LOGICAL ERROR. + /// So here we just check that all_1_3_1 covers blocks from drop + /// all_2_2_2. + /// + /// NOTE: this helps only to avoid logical error during drop part. + /// We still get intersecting "parts" in queue. + bool is_covered_by_min_max_block = part->info.min_block <= drop_range.min_block && part->info.max_block >= drop_range.max_block; + if (is_covered_by_min_max_block) + { + LOG_INFO(log, "Skipping drop range for part {} because covering part {} already exists", drop_range.getPartName(), part->name); + return {}; + } } if (part->info.min_block < drop_range.min_block) From 48d21bb03b3cd7c522735bdcfdc381f0185b9c13 Mon Sep 17 00:00:00 2001 From: kssenii Date: Mon, 28 Jun 2021 13:19:13 +0000 Subject: [PATCH 499/931] Fix inconsistency --- src/Interpreters/ActionsDAG.cpp | 3 +++ ...01925_test_const_column_group_by_consistency.reference | 6 ++++++ .../01925_test_const_column_group_by_consistency.sql | 8 ++++++++ 3 files changed, 17 insertions(+) create mode 100644 tests/queries/0_stateless/01925_test_const_column_group_by_consistency.reference create mode 100644 tests/queries/0_stateless/01925_test_const_column_group_by_consistency.sql diff --git a/src/Interpreters/ActionsDAG.cpp b/src/Interpreters/ActionsDAG.cpp index 9fa48f6ceab..55c863a6f8c 100644 --- a/src/Interpreters/ActionsDAG.cpp +++ b/src/Interpreters/ActionsDAG.cpp @@ -219,6 +219,9 @@ const ActionsDAG::Node & ActionsDAG::addFunction( column = node.function_base->getConstantResultForNonConstArguments(arguments, node.result_type); } + if (all_const && column && !isColumnConst(*column) && column->size() <= 1) + column = ColumnConst::create(std::move(column), column->size()); + /// If the result is not a constant, just in case, we will consider the result as unknown. 
if (column && isColumnConst(*column)) { diff --git a/tests/queries/0_stateless/01925_test_const_column_group_by_consistency.reference b/tests/queries/0_stateless/01925_test_const_column_group_by_consistency.reference new file mode 100644 index 00000000000..f9bcdca26da --- /dev/null +++ b/tests/queries/0_stateless/01925_test_const_column_group_by_consistency.reference @@ -0,0 +1,6 @@ +1 0 +1 0 +1 0 +1 0 +1 1 0 +0 0 0 diff --git a/tests/queries/0_stateless/01925_test_const_column_group_by_consistency.sql b/tests/queries/0_stateless/01925_test_const_column_group_by_consistency.sql new file mode 100644 index 00000000000..d288c7db023 --- /dev/null +++ b/tests/queries/0_stateless/01925_test_const_column_group_by_consistency.sql @@ -0,0 +1,8 @@ +SELECT 1 as a, count() FROM numbers(10) WHERE 0 GROUP BY a; + +SELECT materialize(1) as a, count() FROM numbers(10) WHERE 0 GROUP BY a; +SELECT materialize(1) as a, count() FROM numbers(10) WHERE 0 ORDER BY a; + +SELECT isConstant(1) as a, count() FROM numbers(10) WHERE 0 GROUP BY a; +SELECT 1 as b, isConstant(b) as a, count() FROM numbers(10) WHERE 0 GROUP BY a; +SELECT 0 as b, least(isConstant(materialize(1)), b) as a, count() FROM numbers(10) WHERE 0 GROUP BY a; From 1703bc91b0730f17f0b47b355c998f0b0d136731 Mon Sep 17 00:00:00 2001 From: alexey-milovidov Date: Mon, 28 Jun 2021 16:21:17 +0300 Subject: [PATCH 500/931] Update run.sh --- docker/test/stress/run.sh | 1 + 1 file changed, 1 insertion(+) diff --git a/docker/test/stress/run.sh b/docker/test/stress/run.sh index b198a3f2397..846cc4ad53d 100755 --- a/docker/test/stress/run.sh +++ b/docker/test/stress/run.sh @@ -1,5 +1,6 @@ #!/bin/bash # shellcheck disable=SC2094 +# shellcheck disable=SC2086 set -x From 3afb7aa97ee68ca3da5bd042bbb978b541cdab0a Mon Sep 17 00:00:00 2001 From: tavplubix Date: Mon, 28 Jun 2021 16:47:47 +0300 Subject: [PATCH 501/931] Update VirtualColumnUtils.cpp --- src/Storages/VirtualColumnUtils.cpp | 6 ------ 1 file changed, 6 deletions(-) diff --git a/src/Storages/VirtualColumnUtils.cpp b/src/Storages/VirtualColumnUtils.cpp index 05023ee2c32..d0840778c0f 100644 --- a/src/Storages/VirtualColumnUtils.cpp +++ b/src/Storages/VirtualColumnUtils.cpp @@ -150,12 +150,6 @@ bool prepareFilterBlockWithQuery(const ASTPtr & query, ContextPtr context, Block if (!select.where() && !select.prewhere()) return unmodified; - ASTPtr condition_ast; - if (select.prewhere() && select.where()) - condition_ast = makeASTFunction("and", select.prewhere()->clone(), select.where()->clone()); - else - condition_ast = select.prewhere() ? select.prewhere()->clone() : select.where()->clone(); - // Provide input columns as constant columns to check if an expression is constant. 
std::function is_constant = [&block, &context](const ASTPtr & node) { From 54a7e2158c2f976bcc660c4ed53debd008ec3220 Mon Sep 17 00:00:00 2001 From: Kseniia Sumarokova <54203879+kssenii@users.noreply.github.com> Date: Mon, 28 Jun 2021 17:09:26 +0300 Subject: [PATCH 502/931] Update ActionsDAG.cpp --- src/Interpreters/ActionsDAG.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/Interpreters/ActionsDAG.cpp b/src/Interpreters/ActionsDAG.cpp index 55c863a6f8c..f0c6e49b891 100644 --- a/src/Interpreters/ActionsDAG.cpp +++ b/src/Interpreters/ActionsDAG.cpp @@ -219,8 +219,8 @@ const ActionsDAG::Node & ActionsDAG::addFunction( column = node.function_base->getConstantResultForNonConstArguments(arguments, node.result_type); } - if (all_const && column && !isColumnConst(*column) && column->size() <= 1) - column = ColumnConst::create(std::move(column), column->size()); + if (all_const && column && !isColumnConst(*column) && column->size() == 1) + column = ColumnConst::create(std::move(column), 1); /// If the result is not a constant, just in case, we will consider the result as unknown. if (column && isColumnConst(*column)) From 72b281987e59028d215c0ee77ec0bef99072d30f Mon Sep 17 00:00:00 2001 From: alesapin Date: Mon, 28 Jun 2021 17:14:26 +0300 Subject: [PATCH 503/931] Add more debug --- src/Storages/MergeTree/MergeTreeDataMergerMutator.cpp | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/src/Storages/MergeTree/MergeTreeDataMergerMutator.cpp b/src/Storages/MergeTree/MergeTreeDataMergerMutator.cpp index 766d988500d..a348b07ba92 100644 --- a/src/Storages/MergeTree/MergeTreeDataMergerMutator.cpp +++ b/src/Storages/MergeTree/MergeTreeDataMergerMutator.cpp @@ -752,13 +752,16 @@ MergeTreeData::MutableDataPartPtr MergeTreeDataMergerMutator::mergePartsToTempor bool force_ttl = false; for (const auto & part : parts) { - new_data_part->ttl_infos.update(part->ttl_infos); if (metadata_snapshot->hasAnyTTL() && !part->checkAllTTLCalculated(metadata_snapshot)) { LOG_INFO(log, "Some TTL values were not calculated for part {}. 
Will calculate them forcefully during merge.", part->name);
             need_remove_expired_values = true;
             force_ttl = true;
         }
+        else
+        {
+            new_data_part->ttl_infos.update(part->ttl_infos);
+        }
     }
 
     const auto & part_min_ttl = new_data_part->ttl_infos.part_min_ttl;
@@ -939,7 +942,10 @@ MergeTreeData::MutableDataPartPtr MergeTreeDataMergerMutator::mergePartsToTempor
         merged_stream = std::make_shared(merged_stream, sort_description, SizeLimits(), 0 /*limit_hint*/, deduplicate_by_columns);
 
     if (need_remove_expired_values)
+    {
+        LOG_DEBUG(log, "Outdated rows found in source parts, TTL processing enabled for merge");
         merged_stream = std::make_shared(merged_stream, data, metadata_snapshot, new_data_part, time_of_merge, force_ttl);
+    }
 
     if (metadata_snapshot->hasSecondaryIndices())
     {

From b64eb0ff070c9de165868229d72b80052ea070f1 Mon Sep 17 00:00:00 2001
From: Slach
Date: Mon, 28 Jun 2021 19:21:53 +0500
Subject: [PATCH 504/931] add DELETE IN PARTITION and UPDATE IN PARTITION into
 ALTER syntax TOC

Signed-off-by: Slach

---
 docs/en/sql-reference/statements/alter/partition.md | 2 ++
 docs/ru/sql-reference/statements/alter/partition.md | 2 ++
 2 files changed, 4 insertions(+)

diff --git a/docs/en/sql-reference/statements/alter/partition.md b/docs/en/sql-reference/statements/alter/partition.md
index 86381d3c6a4..090cbe93c54 100644
--- a/docs/en/sql-reference/statements/alter/partition.md
+++ b/docs/en/sql-reference/statements/alter/partition.md
@@ -19,6 +19,8 @@ The following operations with [partitions](../../../engines/table-engines/merget
 - [UNFREEZE PARTITION](#alter_unfreeze-partition) — Removes a backup of a partition.
 - [FETCH PARTITION\|PART](#alter_fetch-partition) — Downloads a part or partition from another server.
 - [MOVE PARTITION\|PART](#alter_move-partition) — Move partition/data part to another disk or volume.
+- [UPDATE IN PARTITION](#update-in-partition) — Update data inside the partition by condition.
+- [DELETE IN PARTITION](#delete-in-partition) — Delete data inside the partition by condition.
 
diff --git a/docs/ru/sql-reference/statements/alter/partition.md b/docs/ru/sql-reference/statements/alter/partition.md
index 79242e7bbf3..0a485c7b591 100644
--- a/docs/ru/sql-reference/statements/alter/partition.md
+++ b/docs/ru/sql-reference/statements/alter/partition.md
@@ -19,6 +19,8 @@ toc_title: PARTITION
 - [UNFREEZE PARTITION](#alter_unfreeze-partition) — удалить резервную копию партиции;
 - [FETCH PARTITION](#alter_fetch-partition) — скачать партицию с другого сервера;
 - [MOVE PARTITION\|PART](#alter_move-partition) — переместить партицию/кусок на другой диск или том.
+- [UPDATE IN PARTITION](#update-in-partition) — обновить данные внутри партиции по условию.
+- [DELETE IN PARTITION](#delete-in-partition) — удалить данные внутри партиции по условию.
## DETACH PARTITION\|PART {#alter_detach-partition} From 5803fb9f04e9e3a53009a6f849d99a737ced4add Mon Sep 17 00:00:00 2001 From: alesapin Date: Mon, 28 Jun 2021 17:23:47 +0300 Subject: [PATCH 505/931] Update docs/zh/engines/table-engines/special/file.md Co-authored-by: Amos Bird --- docs/zh/engines/table-engines/special/file.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/zh/engines/table-engines/special/file.md b/docs/zh/engines/table-engines/special/file.md index 503d6d7e7f5..4464dcf198c 100644 --- a/docs/zh/engines/table-engines/special/file.md +++ b/docs/zh/engines/table-engines/special/file.md @@ -54,7 +54,7 @@ SELECT * FROM file_engine_table ## 在 Clickhouse-local 中的使用 {#zai-clickhouse-local-zhong-de-shi-yong} -使用 [clickhouse-local](../../../operations/utilities/clickhouse-local.md) 时,File 引擎除了 `Format` 之外,还可以接受文件路径参数。可以使用数字或人类可读的名称来指定标准输入/输出流,例如 `0` 或 `stdin`,`1` 或 `stdout`。 +使用 [clickhouse-local](../../../operations/utilities/clickhouse-local.md) 时,File 引擎除了 `Format` 之外,还可以接收文件路径参数。可以使用数字或名称来指定标准输入/输出流,例如 `0` 或 `stdin`,`1` 或 `stdout`。 **例如:** ``` bash From a96c7a3a70bad52281a6253f58253ba945670620 Mon Sep 17 00:00:00 2001 From: Kseniia Sumarokova <54203879+kssenii@users.noreply.github.com> Date: Mon, 28 Jun 2021 17:32:03 +0300 Subject: [PATCH 506/931] Update DatabaseAtomic.cpp --- src/Databases/DatabaseAtomic.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Databases/DatabaseAtomic.cpp b/src/Databases/DatabaseAtomic.cpp index 48777d92a05..9ab041ee36f 100644 --- a/src/Databases/DatabaseAtomic.cpp +++ b/src/Databases/DatabaseAtomic.cpp @@ -109,7 +109,7 @@ StoragePtr DatabaseAtomic::detachTable(const String & name) void DatabaseAtomic::dropTable(ContextPtr local_context, const String & table_name, bool no_delay) { - auto * storage = tryGetTable(table_name, local_context).get(); + auto storage = tryGetTable(table_name, local_context).get(); /// Remove the inner table (if any) to avoid deadlock /// (due to attempt to execute DROP from the worker thread) if (storage) From ff8c44179c98e48dc78fa9a1d0ff7d5d6dcc6728 Mon Sep 17 00:00:00 2001 From: Kseniia Sumarokova <54203879+kssenii@users.noreply.github.com> Date: Mon, 28 Jun 2021 17:37:22 +0300 Subject: [PATCH 507/931] Update DatabaseAtomic.cpp --- src/Databases/DatabaseAtomic.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Databases/DatabaseAtomic.cpp b/src/Databases/DatabaseAtomic.cpp index 9ab041ee36f..b69b74451c7 100644 --- a/src/Databases/DatabaseAtomic.cpp +++ b/src/Databases/DatabaseAtomic.cpp @@ -109,7 +109,7 @@ StoragePtr DatabaseAtomic::detachTable(const String & name) void DatabaseAtomic::dropTable(ContextPtr local_context, const String & table_name, bool no_delay) { - auto storage = tryGetTable(table_name, local_context).get(); + auto storage = tryGetTable(table_name, local_context); /// Remove the inner table (if any) to avoid deadlock /// (due to attempt to execute DROP from the worker thread) if (storage) From c2c78929cbe1c61baa3693a56babcd7414cd85cb Mon Sep 17 00:00:00 2001 From: Kostiantyn Storozhuk Date: Mon, 28 Jun 2021 21:50:43 +0800 Subject: [PATCH 508/931] Implemented MySQL column comments support --- src/Databases/MySQL/DatabaseMySQL.cpp | 4 +-- src/Databases/MySQL/DatabaseMySQL.h | 3 +- .../MySQL/FetchTablesColumnsList.cpp | 33 +++++++++++-------- src/Databases/MySQL/FetchTablesColumnsList.h | 4 +-- .../MySQL/InterpretersMySQLDDLQuery.cpp | 7 +--- src/Storages/ColumnsDescription.cpp | 4 +-- src/Storages/ColumnsDescription.h | 2 +- 
src/TableFunctions/TableFunctionMySQL.cpp | 2 +- .../test_mysql_database_engine/test.py | 19 +++++++++++ 9 files changed, 49 insertions(+), 29 deletions(-) diff --git a/src/Databases/MySQL/DatabaseMySQL.cpp b/src/Databases/MySQL/DatabaseMySQL.cpp index b3b1c95ef7c..5f356348829 100644 --- a/src/Databases/MySQL/DatabaseMySQL.cpp +++ b/src/Databases/MySQL/DatabaseMySQL.cpp @@ -232,7 +232,7 @@ void DatabaseMySQL::fetchLatestTablesStructureIntoCache( wait_update_tables_name.emplace_back(table_modification_time.first); } - std::map tables_and_columns = fetchTablesColumnsList(wait_update_tables_name, local_context); + std::map tables_and_columns = fetchTablesColumnsList(wait_update_tables_name, local_context); for (const auto & table_and_columns : tables_and_columns) { @@ -296,7 +296,7 @@ std::map DatabaseMySQL::fetchTablesWithModificationTime(ContextP return tables_with_modification_time; } -std::map +std::map DatabaseMySQL::fetchTablesColumnsList(const std::vector & tables_name, ContextPtr local_context) const { const auto & settings = local_context->getSettingsRef(); diff --git a/src/Databases/MySQL/DatabaseMySQL.h b/src/Databases/MySQL/DatabaseMySQL.h index 04246ddcbf5..0b364f0d8d3 100644 --- a/src/Databases/MySQL/DatabaseMySQL.h +++ b/src/Databases/MySQL/DatabaseMySQL.h @@ -8,6 +8,7 @@ #include #include #include +#include #include #include #include @@ -111,7 +112,7 @@ private: std::map fetchTablesWithModificationTime(ContextPtr local_context) const; - std::map fetchTablesColumnsList(const std::vector & tables_name, ContextPtr context) const; + std::map fetchTablesColumnsList(const std::vector & tables_name, ContextPtr context) const; void destroyLocalCacheExtraTables(const std::map & tables_with_modification_time) const; diff --git a/src/Databases/MySQL/FetchTablesColumnsList.cpp b/src/Databases/MySQL/FetchTablesColumnsList.cpp index cfd01d4ddc4..e792385d12f 100644 --- a/src/Databases/MySQL/FetchTablesColumnsList.cpp +++ b/src/Databases/MySQL/FetchTablesColumnsList.cpp @@ -40,14 +40,14 @@ String toQueryStringWithQuote(const std::vector & quote_list) namespace DB { -std::map fetchTablesColumnsList( +std::map fetchTablesColumnsList( mysqlxx::PoolWithFailover & pool, const String & database_name, const std::vector & tables_name, const Settings & settings, MultiEnum type_support) { - std::map tables_and_columns; + std::map tables_and_columns; if (tables_name.empty()) return tables_and_columns; @@ -62,6 +62,7 @@ std::map fetchTablesColumnsList( { std::make_shared(), "length" }, { std::make_shared(), "precision" }, { std::make_shared(), "scale" }, + { std::make_shared(), "column_comment" }, }; WriteBufferFromOwnString query; @@ -72,8 +73,9 @@ std::map fetchTablesColumnsList( " IS_NULLABLE = 'YES' AS is_nullable," " COLUMN_TYPE LIKE '%unsigned' AS is_unsigned," " CHARACTER_MAXIMUM_LENGTH AS length," - " NUMERIC_PRECISION as numeric_precision," - " IF(ISNULL(NUMERIC_SCALE), DATETIME_PRECISION, NUMERIC_SCALE) AS scale" // we know DATETIME_PRECISION as a scale in CH + " NUMERIC_PRECISION AS numeric_precision," + " IF(ISNULL(NUMERIC_SCALE), DATETIME_PRECISION, NUMERIC_SCALE) AS scale," // we know DATETIME_PRECISION as a scale in CH + " COLUMN_COMMENT AS column_comment" " FROM INFORMATION_SCHEMA.COLUMNS" " WHERE "; @@ -94,21 +96,24 @@ std::map fetchTablesColumnsList( const auto & char_max_length_col = *block.getByPosition(5).column; const auto & precision_col = *block.getByPosition(6).column; const auto & scale_col = *block.getByPosition(7).column; + const auto & column_comment_col = 
*block.getByPosition(8).column; size_t rows = block.rows(); for (size_t i = 0; i < rows; ++i) { String table_name = table_name_col[i].safeGet(); - tables_and_columns[table_name].emplace_back( - column_name_col[i].safeGet(), - convertMySQLDataType( - type_support, - column_type_col[i].safeGet(), - settings.external_table_functions_use_nulls && is_nullable_col[i].safeGet(), - is_unsigned_col[i].safeGet(), - char_max_length_col[i].safeGet(), - precision_col[i].safeGet(), - scale_col[i].safeGet())); + tables_and_columns[table_name].add( + ColumnDescription( + column_name_col[i].safeGet(), + convertMySQLDataType( + type_support, + column_type_col[i].safeGet(), + settings.external_table_functions_use_nulls && is_nullable_col[i].safeGet(), + is_unsigned_col[i].safeGet(), + char_max_length_col[i].safeGet(), + precision_col[i].safeGet(), + scale_col[i].safeGet()), + column_comment_col[i].safeGet())); } } return tables_and_columns; diff --git a/src/Databases/MySQL/FetchTablesColumnsList.h b/src/Databases/MySQL/FetchTablesColumnsList.h index 55f18e0115f..4b49fea864e 100644 --- a/src/Databases/MySQL/FetchTablesColumnsList.h +++ b/src/Databases/MySQL/FetchTablesColumnsList.h @@ -7,8 +7,8 @@ #include #include -#include #include +#include #include #include @@ -17,7 +17,7 @@ namespace DB { -std::map fetchTablesColumnsList( +std::map fetchTablesColumnsList( mysqlxx::PoolWithFailover & pool, const String & database_name, const std::vector & tables_name, diff --git a/src/Interpreters/MySQL/InterpretersMySQLDDLQuery.cpp b/src/Interpreters/MySQL/InterpretersMySQLDDLQuery.cpp index fbd537781de..7ebc4f1feb9 100644 --- a/src/Interpreters/MySQL/InterpretersMySQLDDLQuery.cpp +++ b/src/Interpreters/MySQL/InterpretersMySQLDDLQuery.cpp @@ -123,7 +123,6 @@ static ColumnsDescription createColumnsDescription(const NamesAndTypesList & col throw Exception("Columns of different size provided.", ErrorCodes::LOGICAL_ERROR); ColumnsDescription columns_description; - ColumnDescription column_description; for ( auto [column_name_and_type, declare_column_ast] = std::tuple{columns_name_and_type.begin(), columns_definition->children.begin()}; @@ -139,11 +138,7 @@ static ColumnsDescription createColumnsDescription(const NamesAndTypesList & col if (options->changes.count("comment")) comment = options->changes.at("comment")->as()->value.safeGet(); - column_description.name = column_name_and_type->name; - column_description.type = column_name_and_type->type; - if (!comment.empty()) - column_description.comment = std::move(comment); - columns_description.add(column_description); + columns_description.add(ColumnDescription(column_name_and_type->name, column_name_and_type->type, comment)); } return columns_description; diff --git a/src/Storages/ColumnsDescription.cpp b/src/Storages/ColumnsDescription.cpp index 4a904c96432..cd7afae8a78 100644 --- a/src/Storages/ColumnsDescription.cpp +++ b/src/Storages/ColumnsDescription.cpp @@ -43,8 +43,8 @@ namespace ErrorCodes extern const int LOGICAL_ERROR; } -ColumnDescription::ColumnDescription(String name_, DataTypePtr type_) - : name(std::move(name_)), type(std::move(type_)) +ColumnDescription::ColumnDescription(String name_, DataTypePtr type_, String comment_) + : name(std::move(name_)), type(std::move(type_)), comment(std::move(comment_)) { } diff --git a/src/Storages/ColumnsDescription.h b/src/Storages/ColumnsDescription.h index 7fff22abf71..338b519cee6 100644 --- a/src/Storages/ColumnsDescription.h +++ b/src/Storages/ColumnsDescription.h @@ -39,7 +39,7 @@ struct ColumnDescription 
ColumnDescription() = default; ColumnDescription(ColumnDescription &&) = default; ColumnDescription(const ColumnDescription &) = default; - ColumnDescription(String name_, DataTypePtr type_); + ColumnDescription(String name_, DataTypePtr type_, String comment_ = ""); bool operator==(const ColumnDescription & other) const; bool operator!=(const ColumnDescription & other) const { return !(*this == other); } diff --git a/src/TableFunctions/TableFunctionMySQL.cpp b/src/TableFunctions/TableFunctionMySQL.cpp index eb310ef3696..f8e0c41634b 100644 --- a/src/TableFunctions/TableFunctionMySQL.cpp +++ b/src/TableFunctions/TableFunctionMySQL.cpp @@ -87,7 +87,7 @@ ColumnsDescription TableFunctionMySQL::getActualTableStructure(ContextPtr contex throw Exception("MySQL table " + (remote_database_name.empty() ? "" : (backQuote(remote_database_name) + ".")) + backQuote(remote_table_name) + " doesn't exist.", ErrorCodes::UNKNOWN_TABLE); - return ColumnsDescription{columns->second}; + return columns->second; } StoragePtr TableFunctionMySQL::executeImpl( diff --git a/tests/integration/test_mysql_database_engine/test.py b/tests/integration/test_mysql_database_engine/test.py index 22f790e39c3..e1891aebf05 100644 --- a/tests/integration/test_mysql_database_engine/test.py +++ b/tests/integration/test_mysql_database_engine/test.py @@ -167,6 +167,25 @@ def test_bad_arguments_for_mysql_database_engine(started_cluster): assert 'Database engine MySQL requested literal argument.' in str(exception.value) mysql_node.query("DROP DATABASE test_bad_arguments") +def test_column_comments_for_mysql_database_engine(started_cluster): + with contextlib.closing(MySQLNodeInstance('root', 'clickhouse', started_cluster.mysql_ip, started_cluster.mysql_port)) as mysql_node: + mysql_node.query("DROP DATABASE IF EXISTS test_database") + mysql_node.query("CREATE DATABASE test_database DEFAULT CHARACTER SET 'utf8'") + + clickhouse_node.query( + "CREATE DATABASE test_database ENGINE = MySQL('mysql57:3306', 'test_database', 'root', 'clickhouse')") + assert 'test_database' in clickhouse_node.query('SHOW DATABASES') + + mysql_node.query( + "CREATE TABLE `test_database`.`test_table` ( `id` int(11) NOT NULL, PRIMARY KEY (`id`), `test` int COMMENT 'test comment') ENGINE=InnoDB;") + assert 'test comment' in clickhouse_node.query('DESCRIBE TABLE `test_database`.`test_table`') + + mysql_node.query("ALTER TABLE `test_database`.`test_table` ADD COLUMN `add_column` int(11) COMMENT 'add_column comment'") + assert 'add_column comment' in clickhouse_node.query( + "SELECT comment FROM system.columns WHERE table = 'test_table' AND database = 'test_database'") + + mysql_node.query("DROP DATABASE test_database") + def test_data_types_support_level_for_mysql_database_engine(started_cluster): with contextlib.closing(MySQLNodeInstance('root', 'clickhouse', started_cluster.mysql_ip, started_cluster.mysql_port)) as mysql_node: From 764380f7015e944f6b51898e0b472b3eb40037cd Mon Sep 17 00:00:00 2001 From: Maksim Kita Date: Mon, 28 Jun 2021 19:56:15 +0300 Subject: [PATCH 509/931] Updated tests --- src/DataTypes/getLeastSupertype.cpp | 21 ++++++++-------- src/Functions/DateTimeTransforms.h | 7 +++++- .../01925_date_date_time_comparison.reference | 2 ++ .../01925_date_date_time_comparison.sql | 2 ++ .../01926_date_date_time_supertype.reference | 12 ++++++++++ .../01926_date_date_time_supertype.sql | 24 +++++++++++++++++++ 6 files changed, 57 insertions(+), 11 deletions(-) create mode 100644 tests/queries/0_stateless/01925_date_date_time_comparison.reference create mode 
100644 tests/queries/0_stateless/01925_date_date_time_comparison.sql create mode 100644 tests/queries/0_stateless/01926_date_date_time_supertype.reference create mode 100644 tests/queries/0_stateless/01926_date_date_time_supertype.sql diff --git a/src/DataTypes/getLeastSupertype.cpp b/src/DataTypes/getLeastSupertype.cpp index 4614d65ed8a..33b40abdd47 100644 --- a/src/DataTypes/getLeastSupertype.cpp +++ b/src/DataTypes/getLeastSupertype.cpp @@ -289,33 +289,34 @@ DataTypePtr getLeastSupertype(const DataTypes & types) if (have_datetime64 == 0) { - for (const auto & t : types) + for (const auto & type : types) { - if (const auto * data_type = typeid_cast(t.get())) - return std::make_shared(data_type->getTimeZone().getTimeZone()); + if (isDateTime(type)) + return type; } return std::make_shared(); } UInt8 max_scale = 0; - const DataTypeDateTime64 * max_scale_date_time = nullptr; + size_t max_scale_date_time_index = 0; - for (const auto & t : types) + for (size_t i = 0; i < types.size(); ++i) { - if (const auto * dt64 = typeid_cast(t.get())) + const auto & type = types[i]; + + if (const auto * date_time64_type = typeid_cast(type.get())) { - const auto scale = dt64->getScale(); + const auto scale = date_time64_type->getScale(); if (scale > max_scale) { - max_scale_date_time = dt64; + max_scale_date_time_index = i; max_scale = scale; } } } - assert(max_scale_date_time); - return std::make_shared(max_scale, max_scale_date_time->getTimeZone().getTimeZone()); + return types[max_scale_date_time_index]; } } diff --git a/src/Functions/DateTimeTransforms.h b/src/Functions/DateTimeTransforms.h index 70035cdda30..0f36fe52465 100644 --- a/src/Functions/DateTimeTransforms.h +++ b/src/Functions/DateTimeTransforms.h @@ -878,7 +878,12 @@ struct DateTimeTransformImpl } else { - Op::vector(sources->getData(), col_to->getData(), DateLUT::instance(), transform); + size_t time_zone_argument_position = 1; + if constexpr (std::is_same_v) + time_zone_argument_position = 2; + + const DateLUTImpl & time_zone = extractTimeZoneFromFunctionArguments(arguments, time_zone_argument_position, 0); + Op::vector(sources->getData(), col_to->getData(), time_zone, transform); } return mutable_result_col; diff --git a/tests/queries/0_stateless/01925_date_date_time_comparison.reference b/tests/queries/0_stateless/01925_date_date_time_comparison.reference new file mode 100644 index 00000000000..6ed281c757a --- /dev/null +++ b/tests/queries/0_stateless/01925_date_date_time_comparison.reference @@ -0,0 +1,2 @@ +1 +1 diff --git a/tests/queries/0_stateless/01925_date_date_time_comparison.sql b/tests/queries/0_stateless/01925_date_date_time_comparison.sql new file mode 100644 index 00000000000..99c67816a42 --- /dev/null +++ b/tests/queries/0_stateless/01925_date_date_time_comparison.sql @@ -0,0 +1,2 @@ +SELECT toDate('2000-01-01') < toDateTime('2000-01-01 00:00:01', 'Europe/Moscow'); +SELECT toDate('2000-01-01') < toDateTime64('2000-01-01 00:00:01', 5, 'Europe/Moscow'); diff --git a/tests/queries/0_stateless/01926_date_date_time_supertype.reference b/tests/queries/0_stateless/01926_date_date_time_supertype.reference new file mode 100644 index 00000000000..ec9933dfbd2 --- /dev/null +++ b/tests/queries/0_stateless/01926_date_date_time_supertype.reference @@ -0,0 +1,12 @@ +Array +Array(DateTime(\'Europe/Moscow\')) +Array(DateTime64(5, \'Europe/Moscow\')) +Array(DateTime64(6, \'Europe/Moscow\')) +If +2000-01-01 00:00:00 DateTime(\'Europe/Moscow\') +2000-01-01 00:00:00 DateTime(\'Europe/Moscow\') +2000-01-01 00:00:00.00000 DateTime64(5, 
\'Europe/Moscow\') +2000-01-01 00:00:00.00000 DateTime64(5, \'Europe/Moscow\') +Cast +2000-01-01 00:00:00 DateTime(\'UTC\') +2000-01-01 00:00:00.00000 DateTime64(5, \'UTC\') diff --git a/tests/queries/0_stateless/01926_date_date_time_supertype.sql b/tests/queries/0_stateless/01926_date_date_time_supertype.sql new file mode 100644 index 00000000000..559cd465ebb --- /dev/null +++ b/tests/queries/0_stateless/01926_date_date_time_supertype.sql @@ -0,0 +1,24 @@ +SELECT 'Array'; + +SELECT toTypeName([toDate('2000-01-01'), toDateTime('2000-01-01', 'Europe/Moscow')]); +SELECT toTypeName([toDate('2000-01-01'), toDateTime('2000-01-01', 'Europe/Moscow'), toDateTime64('2000-01-01', 5, 'Europe/Moscow')]); +SELECT toTypeName([toDate('2000-01-01'), toDateTime('2000-01-01', 'Europe/Moscow'), toDateTime64('2000-01-01', 5, 'Europe/Moscow'), toDateTime64('2000-01-01', 6, 'Europe/Moscow')]); + +DROP TABLE IF EXISTS predicate_table; +CREATE TABLE predicate_table (value UInt8) ENGINE=TinyLog; + +INSERT INTO predicate_table VALUES (0), (1); + +SELECT 'If'; + +WITH toDate('2000-01-01') as a, toDateTime('2000-01-01', 'Europe/Moscow') as b +SELECT if(value, b, a) as result, toTypeName(result) +FROM predicate_table; + +WITH toDateTime('2000-01-01') as a, toDateTime64('2000-01-01', 5, 'Europe/Moscow') as b +SELECT if(value, b, a) as result, toTypeName(result) +FROM predicate_table; + +SELECT 'Cast'; +SELECT CAST(toDate('2000-01-01') AS DateTime('UTC')) AS x, toTypeName(x); +SELECT CAST(toDate('2000-01-01') AS DateTime64(5, 'UTC')) AS x, toTypeName(x); From bfc122df64a96abbcefc7203dfe9068e98050513 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ra=C3=BAl=20Mar=C3=ADn?= Date: Mon, 28 Jun 2021 19:02:22 +0200 Subject: [PATCH 510/931] Fix some typos in Storage classes --- src/Storages/StorageDistributed.cpp | 6 +++--- src/Storages/StorageFile.cpp | 4 ++-- src/Storages/StorageInMemoryMetadata.cpp | 6 +++--- src/Storages/StorageJoin.cpp | 6 +++--- src/Storages/StorageMemory.cpp | 2 +- src/Storages/StorageMongoDB.cpp | 4 ++-- src/Storages/StorageMongoDB.h | 4 ++-- src/Storages/StorageMySQL.cpp | 16 ++++++++-------- src/Storages/StoragePostgreSQL.cpp | 2 +- src/Storages/StorageReplicatedMergeTree.cpp | 6 +++--- src/Storages/StorageReplicatedMergeTree.h | 6 +++--- src/Storages/StorageS3.cpp | 4 ++-- src/Storages/StorageS3.h | 6 +++--- src/Storages/StorageS3Cluster.h | 9 +-------- 14 files changed, 37 insertions(+), 44 deletions(-) diff --git a/src/Storages/StorageDistributed.cpp b/src/Storages/StorageDistributed.cpp index 1013d14a453..f4d6ec5c6f7 100644 --- a/src/Storages/StorageDistributed.cpp +++ b/src/Storages/StorageDistributed.cpp @@ -230,7 +230,7 @@ ExpressionActionsPtr buildShardingKeyExpression(const ASTPtr & sharding_key, Con return ExpressionAnalyzer(query, syntax_result, context).getActions(project); } -bool isExpressionActionsDeterministics(const ExpressionActionsPtr & actions) +bool isExpressionActionsDeterministic(const ExpressionActionsPtr & actions) { for (const auto & action : actions->getActions()) { @@ -428,7 +428,7 @@ StorageDistributed::StorageDistributed( { sharding_key_expr = buildShardingKeyExpression(sharding_key_, getContext(), storage_metadata.getColumns().getAllPhysical(), false); sharding_key_column_name = sharding_key_->getColumnName(); - sharding_key_is_deterministic = isExpressionActionsDeterministics(sharding_key_expr); + sharding_key_is_deterministic = isExpressionActionsDeterministic(sharding_key_expr); } if (!relative_data_path.empty()) @@ -524,7 +524,7 @@ QueryProcessingStage::Enum 
StorageDistributed::getQueryProcessingStage(
     else
     {
         /// NOTE: distributed_group_by_no_merge=1 does not respect distributed_push_down_limit
-        /// (since in this case queries processed separatelly and the initiator is just a proxy in this case).
+        /// (since in this case queries are processed separately and the initiator is just a proxy).
         return QueryProcessingStage::Complete;
     }
 }
diff --git a/src/Storages/StorageFile.cpp b/src/Storages/StorageFile.cpp
index b67cd0a0af7..efd59255c9e 100644
--- a/src/Storages/StorageFile.cpp
+++ b/src/Storages/StorageFile.cpp
@@ -538,7 +538,7 @@ public:
         std::unique_ptr naked_buffer = nullptr;
         if (storage.use_table_fd)
         {
-            /** NOTE: Using real file binded to FD may be misleading:
+            /** NOTE: Using real file bound to FD may be misleading:
              * SELECT *; INSERT insert_data; SELECT *; last SELECT returns initial_fd_data + insert_data
              * INSERT data; SELECT *; last SELECT returns only insert_data
              */
@@ -642,7 +642,7 @@ Strings StorageFile::getDataPaths() const
 void StorageFile::rename(const String & new_path_to_table_data, const StorageID & new_table_id)
 {
     if (!is_db_table)
-        throw Exception("Can't rename table " + getStorageID().getNameForLogs() + " binded to user-defined file (or FD)", ErrorCodes::DATABASE_ACCESS_DENIED);
+        throw Exception("Can't rename table " + getStorageID().getNameForLogs() + " bound to user-defined file (or FD)", ErrorCodes::DATABASE_ACCESS_DENIED);
 
     if (paths.size() != 1)
         throw Exception("Can't rename table " + getStorageID().getNameForLogs() + " in readonly mode", ErrorCodes::DATABASE_ACCESS_DENIED);
diff --git a/src/Storages/StorageInMemoryMetadata.cpp b/src/Storages/StorageInMemoryMetadata.cpp
index 39081648248..28574d6fdf1 100644
--- a/src/Storages/StorageInMemoryMetadata.cpp
+++ b/src/Storages/StorageInMemoryMetadata.cpp
@@ -228,12 +228,12 @@ ColumnDependencies StorageInMemoryMetadata::getColumnDependencies(const NameSet
 
     auto add_dependent_columns = [&updated_columns](const auto & expression, auto & to_set)
     {
-        auto requiered_columns = expression->getRequiredColumns();
-        for (const auto & dependency : requiered_columns)
+        auto required_columns = expression->getRequiredColumns();
+        for (const auto & dependency : required_columns)
         {
             if (updated_columns.count(dependency))
             {
-                to_set.insert(requiered_columns.begin(), requiered_columns.end());
+                to_set.insert(required_columns.begin(), required_columns.end());
                 return true;
             }
         }
diff --git a/src/Storages/StorageJoin.cpp b/src/Storages/StorageJoin.cpp
index fd3ca00520a..c3061ce9c51 100644
--- a/src/Storages/StorageJoin.cpp
+++ b/src/Storages/StorageJoin.cpp
@@ -97,8 +97,8 @@ void StorageJoin::checkMutationIsPossible(const MutationCommands & commands, con
 
 void StorageJoin::mutate(const MutationCommands & commands, ContextPtr context)
 {
-    /// Firstly accuire lock for mutation, that locks changes of data.
-    /// We cannot accuire rwlock here, because read lock is needed
+    /// First acquire the lock for mutation, which locks changes of data.
+    /// We cannot acquire rwlock here, because read lock is needed
     /// for execution of mutation interpreter.
     std::lock_guard mutate_lock(mutate_mutex);
 
@@ -128,7 +128,7 @@ void StorageJoin::mutate(const MutationCommands & commands, ContextPtr context)
         in->readSuffix();
     }
 
-    /// Now accuire exclusive lock and modify storage.
+    /// Now acquire exclusive lock and modify storage.
std::unique_lock lock(rwlock); join = std::move(new_data); diff --git a/src/Storages/StorageMemory.cpp b/src/Storages/StorageMemory.cpp index 7f7f68335bb..9e1ae24fc75 100644 --- a/src/Storages/StorageMemory.cpp +++ b/src/Storages/StorageMemory.cpp @@ -263,7 +263,7 @@ void StorageMemory::mutate(const MutationCommands & commands, ContextPtr context auto storage = getStorageID(); auto storage_ptr = DatabaseCatalog::instance().getTable(storage, context); - /// When max_threads > 1, the order of returning blocks is uncentain, + /// When max_threads > 1, the order of returning blocks is uncertain, /// which will lead to inconsistency after updateBlockData. auto new_context = Context::createCopy(context); new_context->setSetting("max_streams_to_max_threads_ratio", 1); diff --git a/src/Storages/StorageMongoDB.cpp b/src/Storages/StorageMongoDB.cpp index f5f604c6f41..e27d16ecc68 100644 --- a/src/Storages/StorageMongoDB.cpp +++ b/src/Storages/StorageMongoDB.cpp @@ -58,7 +58,7 @@ void StorageMongoDB::connectIfNotConnected() if (!connection) connection = std::make_shared(host, port); - if (!authentified) + if (!authenticated) { # if POCO_VERSION >= 0x01070800 Poco::MongoDB::Database poco_db(database_name); @@ -67,7 +67,7 @@ void StorageMongoDB::connectIfNotConnected() # else authenticate(*connection, database_name, username, password); # endif - authentified = true; + authenticated = true; } } diff --git a/src/Storages/StorageMongoDB.h b/src/Storages/StorageMongoDB.h index 6caf6978f1c..2553acdd40c 100644 --- a/src/Storages/StorageMongoDB.h +++ b/src/Storages/StorageMongoDB.h @@ -52,8 +52,8 @@ private: const std::string password; std::shared_ptr connection; - bool authentified = false; - std::mutex connection_mutex; /// Protects the variables `connection` and `authentified`. + bool authenticated = false; + std::mutex connection_mutex; /// Protects the variables `connection` and `authenticated`. 
};
 
 }
diff --git a/src/Storages/StorageMySQL.cpp b/src/Storages/StorageMySQL.cpp
index 1dadcfe986b..5d37806c9ba 100644
--- a/src/Storages/StorageMySQL.cpp
+++ b/src/Storages/StorageMySQL.cpp
@@ -175,28 +175,28 @@ public:
         if (block.rows() <= max_rows)
             return Blocks{std::move(block)};
 
-        const size_t splited_block_size = ceil(block.rows() * 1.0 / max_rows);
-        Blocks splitted_blocks(splited_block_size);
+        const size_t split_block_size = ceil(block.rows() * 1.0 / max_rows);
+        Blocks split_blocks(split_block_size);
 
-        for (size_t idx = 0; idx < splited_block_size; ++idx)
-            splitted_blocks[idx] = block.cloneEmpty();
+        for (size_t idx = 0; idx < split_block_size; ++idx)
+            split_blocks[idx] = block.cloneEmpty();
 
         const size_t columns = block.columns();
         const size_t rows = block.rows();
         size_t offsets = 0;
         UInt64 limits = max_batch_rows;
-        for (size_t idx = 0; idx < splited_block_size; ++idx)
+        for (size_t idx = 0; idx < split_block_size; ++idx)
         {
             /// For the last batch, limits should be the remaining size
-            if (idx == splited_block_size - 1) limits = rows - offsets;
+            if (idx == split_block_size - 1) limits = rows - offsets;
 
             for (size_t col_idx = 0; col_idx < columns; ++col_idx)
             {
-                splitted_blocks[idx].getByPosition(col_idx).column = block.getByPosition(col_idx).column->cut(offsets, limits);
+                split_blocks[idx].getByPosition(col_idx).column = block.getByPosition(col_idx).column->cut(offsets, limits);
             }
 
             offsets += max_batch_rows;
         }
 
-        return splitted_blocks;
+        return split_blocks;
     }
 
     static std::string dumpNamesWithBackQuote(const Block & block)
diff --git a/src/Storages/StoragePostgreSQL.cpp b/src/Storages/StoragePostgreSQL.cpp
index 9f59ddb12c9..72f42ec625b 100644
--- a/src/Storages/StoragePostgreSQL.cpp
+++ b/src/Storages/StoragePostgreSQL.cpp
@@ -162,7 +162,7 @@ public:
     }
 
     /// Cannot just use serializeAsText for array data type even though it converts perfectly
-    /// any dimension number array into text format, because it incloses in '[]' and for postgres it must be '{}'.
+    /// any dimension number array into text format, because it encloses the values in '[]' and for postgres it must be '{}'.
     /// Check if array[...] syntax from PostgreSQL will be applicable.
     void parseArray(const Field & array_field, const DataTypePtr & data_type, WriteBuffer & ostr)
     {
diff --git a/src/Storages/StorageReplicatedMergeTree.cpp b/src/Storages/StorageReplicatedMergeTree.cpp
index 2599040fb80..465aa7e42bc 100644
--- a/src/Storages/StorageReplicatedMergeTree.cpp
+++ b/src/Storages/StorageReplicatedMergeTree.cpp
@@ -414,7 +414,7 @@ StorageReplicatedMergeTree::StorageReplicatedMergeTree(
     /// We have to check granularity on other replicas. If it's fixed we
     /// must create our new replica with fixed granularity and store this
     /// information in /replica/metadata.
-    other_replicas_fixed_granularity = checkFixedGranualrityInZookeeper();
+    other_replicas_fixed_granularity = checkFixedGranularityInZookeeper();
 
     checkTableStructure(zookeeper_path, metadata_snapshot);
 
@@ -449,7 +449,7 @@ StorageReplicatedMergeTree::StorageReplicatedMergeTree(
     if (!replica_metadata_exists || replica_metadata.empty())
     {
         /// We have to check shared node granularity before we create ours.
-        other_replicas_fixed_granularity = checkFixedGranualrityInZookeeper();
+        other_replicas_fixed_granularity = checkFixedGranularityInZookeeper();
 
         ReplicatedMergeTreeTableMetadata current_metadata(*this, metadata_snapshot);
 
@@ -486,7 +486,7 @@ StorageReplicatedMergeTree::StorageReplicatedMergeTree(
 }
 
 
-bool StorageReplicatedMergeTree::checkFixedGranualrityInZookeeper()
+bool StorageReplicatedMergeTree::checkFixedGranularityInZookeeper()
 {
     auto zookeeper = getZooKeeper();
     String metadata_str = zookeeper->get(zookeeper_path + "/metadata");
diff --git a/src/Storages/StorageReplicatedMergeTree.h b/src/Storages/StorageReplicatedMergeTree.h
index b8a78e51240..205dc9687c7 100644
--- a/src/Storages/StorageReplicatedMergeTree.h
+++ b/src/Storages/StorageReplicatedMergeTree.h
@@ -434,7 +434,7 @@ private:
 
     String getChecksumsForZooKeeper(const MergeTreeDataPartChecksums & checksums) const;
 
-    /// Accepts a PreComitted part, atomically checks its checksums with ones on other replicas and commit the part
+    /// Accepts a PreCommitted part, atomically checks its checksums with ones on other replicas and commits the part
     DataPartsVector checkPartChecksumsAndCommit(Transaction & transaction, const DataPartPtr & part);
 
     bool partIsAssignedToBackgroundOperation(const DataPartPtr & part) const override;
@@ -633,7 +633,7 @@ private:
      * Because it effectively waits for other thread that usually has to also acquire a lock to proceed and this yields deadlock.
      * TODO: There are wrong usages of this method that are not fixed yet.
      *
-     * One method for convenient use on current table, another for waiting on foregin shards.
+     * One method for convenient use on current table, another for waiting on foreign shards.
      */
    Strings waitForAllTableReplicasToProcessLogEntry(const String & table_zookeeper_path, const ReplicatedMergeTreeLogEntryData & entry, bool wait_for_non_active = true);
    Strings waitForAllReplicasToProcessLogEntry(const ReplicatedMergeTreeLogEntryData & entry, bool wait_for_non_active = true);
@@ -690,7 +690,7 @@ private:
 
     /// Check granularity of already existing replicated table in zookeeper if it exists
     /// return true if it's fixed
-    bool checkFixedGranualrityInZookeeper();
+    bool checkFixedGranularityInZookeeper();
 
     /// Wait for timeout seconds mutation is finished on replicas
     void waitMutationToFinishOnReplicas(
diff --git a/src/Storages/StorageS3.cpp b/src/Storages/StorageS3.cpp
index 290a585128e..0661081bc5a 100644
--- a/src/Storages/StorageS3.cpp
+++ b/src/Storages/StorageS3.cpp
@@ -62,7 +62,7 @@ public:
 
         const String key_prefix = globbed_uri.key.substr(0, globbed_uri.key.find_first_of("*?{"));
 
-        /// We don't have to list bucket, because there is no asterics.
+        /// We don't have to list the bucket, because there are no asterisks.
if (key_prefix.size() == globbed_uri.key.size()) { buffer.emplace_back(globbed_uri.key); @@ -434,7 +434,7 @@ BlockOutputStreamPtr StorageS3::write(const ASTPtr & /*query*/, const StorageMet max_single_part_upload_size); } -void StorageS3::updateClientAndAuthSettings(ContextPtr ctx, StorageS3::ClientAuthentificaiton & upd) +void StorageS3::updateClientAndAuthSettings(ContextPtr ctx, StorageS3::ClientAuthentication & upd) { auto settings = ctx->getStorageS3Settings().getSettings(upd.uri.uri.toString()); if (upd.client && (!upd.access_key_id.empty() || settings == upd.auth_settings)) diff --git a/src/Storages/StorageS3.h b/src/Storages/StorageS3.h index 73becc2aa57..e33c5d83198 100644 --- a/src/Storages/StorageS3.h +++ b/src/Storages/StorageS3.h @@ -137,7 +137,7 @@ private: friend class StorageS3Cluster; friend class TableFunctionS3Cluster; - struct ClientAuthentificaiton + struct ClientAuthentication { const S3::URI uri; const String access_key_id; @@ -147,7 +147,7 @@ private: S3AuthSettings auth_settings; }; - ClientAuthentificaiton client_auth; + ClientAuthentication client_auth; String format_name; UInt64 max_single_read_retries; @@ -157,7 +157,7 @@ private: String name; const bool distributed_processing; - static void updateClientAndAuthSettings(ContextPtr, ClientAuthentificaiton &); + static void updateClientAndAuthSettings(ContextPtr, ClientAuthentication &); }; } diff --git a/src/Storages/StorageS3Cluster.h b/src/Storages/StorageS3Cluster.h index 8da1ed120ae..821765a3780 100644 --- a/src/Storages/StorageS3Cluster.h +++ b/src/Storages/StorageS3Cluster.h @@ -21,13 +21,6 @@ namespace DB class Context; -struct ClientAuthentificationBuilder -{ - String access_key_id; - String secret_access_key; - UInt64 max_connections; -}; - class StorageS3Cluster : public shared_ptr_helper, public IStorage { friend struct shared_ptr_helper; @@ -59,7 +52,7 @@ protected: private: /// Connections from initiator to other nodes std::vector> connections; - StorageS3::ClientAuthentificaiton client_auth; + StorageS3::ClientAuthentication client_auth; String filename; String cluster_name; From c5e5ebcdf3e6b3db0fac6161b3fc4b5cfb6fa439 Mon Sep 17 00:00:00 2001 From: George Date: Mon, 28 Jun 2021 21:25:52 +0300 Subject: [PATCH 511/931] First draft --- .../functions/logical-functions.md | 179 +++++++++++++++++- docs/en/sql-reference/operators/index.md | 10 +- 2 files changed, 182 insertions(+), 7 deletions(-) diff --git a/docs/en/sql-reference/functions/logical-functions.md b/docs/en/sql-reference/functions/logical-functions.md index 6cce0e4fff5..6a41ac5bebf 100644 --- a/docs/en/sql-reference/functions/logical-functions.md +++ b/docs/en/sql-reference/functions/logical-functions.md @@ -5,15 +5,186 @@ toc_title: Logical # Logical Functions {#logical-functions} -Logical functions accept any numeric types, but return a UInt8 number equal to 0 or 1. +Logical functions accept any numeric types, but return a [UInt8](../../sql-reference/data-types/int-uint.md) number equal to 0, 1 or in some cases `NULL`. Zero as an argument is considered “false,” while any non-zero value is considered “true”. -## and, AND operator {#and-and-operator} +## and {#and-and-operator} -## or, OR operator {#or-or-operator} +Calculates the result of logical conjunction between two or more values. Corresponds to [Logical AND Operator](../../sql-reference/operators/index.md#logical-and-operator). -## not, NOT operator {#not-not-operator} +**Syntax** + +``` sql +and(val1, val2...) +``` + +**Arguments** + +- `val` — list of at least two values. 
Any [Int-UInt]](../../sql-reference/data-types/int-uint.md) type, [float](../../sql-reference/data-types/float.md) or [Nullable](../../sql-reference/data-types/nullable.md).
+
+**Returned value**
+
+- `0`, if there is at least one zero-value argument.
+- `NULL`, if there are no zero-value arguments and there is at least one `NULL` argument.
+- `1`, otherwise.
+
+Type: [UInt8](../../sql-reference/data-types/int-uint.md) or [Nullable](../../sql-reference/data-types/nullable.md)([[UInt8](../../sql-reference/data-types/int-uint.md)]).
+
+**Example**
+
+Query:
+
+``` sql
+SELECT and(0, 1, -2);
+```
+
+Result:
+
+``` text
+┌─and(0, 1, -2)─┐
+│             0 │
+└───────────────┘
+```
+
+With `NULL`:
+
+``` sql
+SELECT and(NULL, 1, 10, -2);
+```
+
+Result:
+
+``` text
+┌─and(NULL, 1, 10, -2)─┐
+│                 ᴺᵁᴸᴸ │
+└──────────────────────┘
+```
+
+## or {#or-or-operator}
+
+Calculates the result of logical disjunction between two or more values. Corresponds to [Logical OR Operator](../../sql-reference/operators/index.md#logical-or-operator).
+
+**Syntax**
+
+``` sql
+or(val1, val2...)
+```
+
+**Arguments**
+
+- `val` — list of at least two values. Any [Int-UInt]](../../sql-reference/data-types/int-uint.md) type, [float](../../sql-reference/data-types/float.md) or [Nullable](../../sql-reference/data-types/nullable.md).
+
+**Returned value**
+
+- `1`, if there is at least one non-zero value.
+- `0`, if there are only zero values.
+- `NULL`, if there is at least one `NULL` value.
+
+Type: [UInt8](../../sql-reference/data-types/int-uint.md) or [Nullable](../../sql-reference/data-types/nullable.md)([[UInt8](../../sql-reference/data-types/int-uint.md)]).
+
+**Example**
+
+Query:
+
+``` sql
+SELECT or(1, 0, 0, 2, NULL);
+```
+
+Result:
+
+``` text
+┌─or(1, 0, 0, 2, NULL)─┐
+│                    1 │
+└──────────────────────┘
+```
+
+With `NULL`:
+
+``` sql
+SELECT or(0, NULL);
+```
+
+Result:
+
+``` text
+┌─or(0, NULL)─┐
+│        ᴺᵁᴸᴸ │
+└─────────────┘
+```
+
+## not {#not-not-operator}
+
+Calculates the result of logical negation of a value. Corresponds to [Logical Negation Operator](../../sql-reference/operators/index.md#logical-negation-operator).
+
+**Syntax**
+
+``` sql
+not(val);
+```
+
+**Arguments**
+
+- `val` — value. Any [Int-UInt]](../../sql-reference/data-types/int-uint.md) type, [float](../../sql-reference/data-types/float.md) or [Nullable](../../sql-reference/data-types/nullable.md).
+
+**Returned value**
+
+- `1`, if `val` is `0`.
+- `0`, if `val` is a non-zero value.
+- `NULL`, if `val` is `NULL`.
+
+Type: [UInt8](../../sql-reference/data-types/int-uint.md) or [Nullable](../../sql-reference/data-types/nullable.md)([[UInt8](../../sql-reference/data-types/int-uint.md)]).
+
+**Example**
+
+Query:
+
+``` sql
+SELECT NOT(1);
+```
+
+Result:
+
+``` text
+┌─not(1)─┐
+│      0 │
+└────────┘
+```
 
 ## xor {#xor}
 
+Calculates the result of logical exclusive disjunction between two or more values. For more than two values the function calculates `XOR` of the first two values and then uses the result with the next value to calculate `XOR` and so on. Corresponds to [Logical XOR Operator](../../sql-reference/operators/index.md#logical-xor-operator).
+
+**Syntax**
+
+``` sql
+xor(val1, val2...)
+```
+
+**Arguments**
+
+- `val` — list of at least two values. Any [Int-UInt]](../../sql-reference/data-types/int-uint.md) type, [float](../../sql-reference/data-types/float.md) or [Nullable](../../sql-reference/data-types/nullable.md).
+
+**Returned value**
+
+- `1`, for two values: if one of the values is zero and the other is not.
+- `0`, for two values: if both values are zero or non-zero at the same time.
+- `NULL`, if there is at least one `NULL` value.
+
+Type: [UInt8](../../sql-reference/data-types/int-uint.md) or [Nullable](../../sql-reference/data-types/nullable.md)([[UInt8](../../sql-reference/data-types/int-uint.md)]).
+
+**Example**
+
+Query:
+
+``` sql
+SELECT xor(0, 1, 1);
+```
+
+Result:
+
+``` text
+┌─xor(0, 1, 1)─┐
+│            0 │
+└──────────────┘
+```
diff --git a/docs/en/sql-reference/operators/index.md b/docs/en/sql-reference/operators/index.md
index 268e56a5034..dd519cb1454 100644
--- a/docs/en/sql-reference/operators/index.md
+++ b/docs/en/sql-reference/operators/index.md
@@ -213,15 +213,19 @@ SELECT toDateTime('2014-10-26 00:00:00', 'Europe/Moscow') AS time, time + 60 * 6
 
 ## Logical Negation Operator {#logical-negation-operator}
 
-`NOT a` – The `not(a)` function.
+Syntax `NOT a` — calculates logical negation of `a` with the function [not](../../sql-reference/functions/logical-functions.md#not-not-operator).
 
 ## Logical AND Operator {#logical-and-operator}
 
-`a AND b` – The`and(a, b)` function.
+Syntax `a AND b` — calculates logical conjunction of `a` and `b` with the function [and](../../sql-reference/functions/logical-functions.md#and-and-operator).
 
 ## Logical OR Operator {#logical-or-operator}
 
-`a OR b` – The `or(a, b)` function.
+Syntax `a OR b` — calculates logical disjunction of `a` and `b` with the function [or](../../sql-reference/functions/logical-functions.md#or-or-operator).
+
+## Logical XOR operator (#logical-xor-operator)
+
+Syntax `a XOR b` — calculates logical exclusive disjunction of `a` and `b` with the function [xor](../../sql-reference/functions/logical-functions.md#xor).
 
 ## Conditional Operator {#conditional-operator}
 

From 538558ccb55e67ad4dfa17d7926d4a3a18cb004e Mon Sep 17 00:00:00 2001
From: George
Date: Mon, 28 Jun 2021 21:44:59 +0300
Subject: [PATCH 512/931] some fixes

---
 .../functions/logical-functions.md | 24 +++++++++----------
 docs/en/sql-reference/operators/index.md | 10 ++++----
 2 files changed, 17 insertions(+), 17 deletions(-)

diff --git a/docs/en/sql-reference/functions/logical-functions.md b/docs/en/sql-reference/functions/logical-functions.md
index 6a41ac5bebf..2896f3bdd33 100644
--- a/docs/en/sql-reference/functions/logical-functions.md
+++ b/docs/en/sql-reference/functions/logical-functions.md
@@ -11,7 +11,7 @@ Zero as an argument is considered “false,” while any non-zero value is consi
 
 ## and {#and-and-operator}
 
-Calculates the result of logical conjunction between two or more values. Corresponds to [Logical AND Operator](../../sql-reference/operators/index.md#logical-and-operator).
+Calculates the result of the logical conjunction between two or more values. Corresponds to [Logical AND Operator](../../sql-reference/operators/index.md#logical-and-operator).
 
 **Syntax**
 
@@ -21,7 +21,7 @@ and(val1, val2...)
 
 **Arguments**
 
-- `val` — list of at least two values. Any [Int-UInt]](../../sql-reference/data-types/int-uint.md) type, [float](../../sql-reference/data-types/float.md) or [Nullable](../../sql-reference/data-types/nullable.md).
+- `val` — list of at least two values. Any [Int-UInt](../../sql-reference/data-types/int-uint.md) type, [float](../../sql-reference/data-types/float.md) or [Nullable](../../sql-reference/data-types/nullable.md).
 
 **Returned value**
 
@@ -29,7 +29,7 @@ and(val1, val2...)
 - `NULL`, if there are no zero-value arguments and there is at least one `NULL` argument.
 - `1`, otherwise.
-Type: [UInt8](../../sql-reference/data-types/int-uint.md) or [Nullable](../../sql-reference/data-types/nullable.md)([[UInt8](../../sql-reference/data-types/int-uint.md)]).
+Type: [UInt8](../../sql-reference/data-types/int-uint.md) or [Nullable](../../sql-reference/data-types/nullable.md)([UInt8](../../sql-reference/data-types/int-uint.md)).
 
 **Example**
 
@@ -63,7 +63,7 @@ Result:
 
 ## or {#or-or-operator}
 
-Calculates the result of logical disjunction between two or more values. Corresponds to [Logical OR Operator](../../sql-reference/operators/index.md#logical-or-operator).
+Calculates the result of the logical disjunction between two or more values. Corresponds to [Logical OR Operator](../../sql-reference/operators/index.md#logical-or-operator).
 
 **Syntax**
 
@@ -73,7 +73,7 @@ or(val1, val2...)
 
 **Arguments**
 
-- `val` — list of at least two values. Any [Int-UInt]](../../sql-reference/data-types/int-uint.md) type, [float](../../sql-reference/data-types/float.md) or [Nullable](../../sql-reference/data-types/nullable.md).
+- `val` — list of at least two values. Any [Int-UInt](../../sql-reference/data-types/int-uint.md) type, [float](../../sql-reference/data-types/float.md) or [Nullable](../../sql-reference/data-types/nullable.md).
 
 **Returned value**
 
@@ -81,7 +81,7 @@ or(val1, val2...)
 
 - `1`, if there is at least one non-zero value.
 - `0`, if there are only zero values.
 - `NULL`, if there is at least one `NULL` value.
 
-Type: [UInt8](../../sql-reference/data-types/int-uint.md) or [Nullable](../../sql-reference/data-types/nullable.md)([[UInt8](../../sql-reference/data-types/int-uint.md)]).
+Type: [UInt8](../../sql-reference/data-types/int-uint.md) or [Nullable](../../sql-reference/data-types/nullable.md)([UInt8](../../sql-reference/data-types/int-uint.md)).
 
 **Example**
 
@@ -115,7 +115,7 @@ Result:
 
 ## not {#not-not-operator}
 
-Calculates the result of logical negation of a value. Corresponds to [Logical Negation Operator](../../sql-reference/operators/index.md#logical-negation-operator).
+Calculates the result of the logical negation of a value. Corresponds to [Logical Negation Operator](../../sql-reference/operators/index.md#logical-negation-operator).
 
 **Syntax**
 
@@ -125,7 +125,7 @@ not(val);
 
 **Arguments**
 
-- `val` — value. Any [Int-UInt]](../../sql-reference/data-types/int-uint.md) type, [float](../../sql-reference/data-types/float.md) or [Nullable](../../sql-reference/data-types/nullable.md).
+- `val` — value. Any [Int-UInt](../../sql-reference/data-types/int-uint.md) type, [float](../../sql-reference/data-types/float.md) or [Nullable](../../sql-reference/data-types/nullable.md).
 
 **Returned value**
 
@@ -133,7 +133,7 @@ not(val);
 
 - `1`, if `val` is `0`.
 - `0`, if `val` is a non-zero value.
 - `NULL`, if `val` is `NULL`.
 
-Type: [UInt8](../../sql-reference/data-types/int-uint.md) or [Nullable](../../sql-reference/data-types/nullable.md)([[UInt8](../../sql-reference/data-types/int-uint.md)]).
+Type: [UInt8](../../sql-reference/data-types/int-uint.md) or [Nullable](../../sql-reference/data-types/nullable.md)([UInt8](../../sql-reference/data-types/int-uint.md)).
 
 **Example**
 
@@ -153,7 +153,7 @@ Result:
 
 ## xor {#xor}
 
-Calculates the result of logical exclusive disjunction between two or more values. For more than two values the function calculates `XOR` of the first two values and then uses the result with the next value to calculate `XOR` and so on. Corresponds to [Logical XOR Operator](../../sql-reference/operators/index.md#logical-xor-operator).
+Calculates the result of the logical exclusive disjunction between two or more values.
For more than two values the function works as if it calculates `XOR` of the first two values and then uses the result with the next value to calculate `XOR` and so on. Corresponds to [Logical XOR Operator](../../sql-reference/operators/index.md#logical-xor-operator).
 
 **Syntax**
 
@@ -163,7 +163,7 @@ xor(val1, val2...)
 
 **Arguments**
 
-- `val` — list of at least two values. Any [Int-UInt]](../../sql-reference/data-types/int-uint.md) type, [float](../../sql-reference/data-types/float.md) or [Nullable](../../sql-reference/data-types/nullable.md).
+- `val` — list of at least two values. Any [Int-UInt](../../sql-reference/data-types/int-uint.md) type, [float](../../sql-reference/data-types/float.md) or [Nullable](../../sql-reference/data-types/nullable.md).
 
 **Returned value**
 
@@ -171,7 +171,7 @@ xor(val1, val2...)
 
 - `1`, for two values: if one of the values is zero and the other is not.
 - `0`, for two values: if both values are zero or non-zero at the same time.
 - `NULL`, if there is at least one `NULL` value.
 
-Type: [UInt8](../../sql-reference/data-types/int-uint.md) or [Nullable](../../sql-reference/data-types/nullable.md)([[UInt8](../../sql-reference/data-types/int-uint.md)]).
+Type: [UInt8](../../sql-reference/data-types/int-uint.md) or [Nullable](../../sql-reference/data-types/nullable.md)([UInt8](../../sql-reference/data-types/int-uint.md)).
 
 **Example**
 
diff --git a/docs/en/sql-reference/operators/index.md b/docs/en/sql-reference/operators/index.md
index dd519cb1454..0c58c8d0353 100644
--- a/docs/en/sql-reference/operators/index.md
+++ b/docs/en/sql-reference/operators/index.md
@@ -213,19 +213,19 @@ SELECT toDateTime('2014-10-26 00:00:00', 'Europe/Moscow') AS time, time + 60 * 6
 
 ## Logical Negation Operator {#logical-negation-operator}
 
-Syntax `NOT a` — calculates logical negation of `a` with the function [not](../../sql-reference/functions/logical-functions.md#not-not-operator).
+Syntax `SELECT NOT a` — calculates logical negation of `a` with the function [not](../../sql-reference/functions/logical-functions.md#not-not-operator).
 
 ## Logical AND Operator {#logical-and-operator}
 
-Syntax `a AND b` — calculates logical conjunction of `a` and `b` with the function [and](../../sql-reference/functions/logical-functions.md#and-and-operator).
+Syntax `SELECT a AND b` — calculates logical conjunction of `a` and `b` with the function [and](../../sql-reference/functions/logical-functions.md#and-and-operator).
 
 ## Logical OR Operator {#logical-or-operator}
 
-Syntax `a OR b` — calculates logical disjunction of `a` and `b` with the function [or](../../sql-reference/functions/logical-functions.md#or-or-operator).
+Syntax `SELECT a OR b` — calculates logical disjunction of `a` and `b` with the function [or](../../sql-reference/functions/logical-functions.md#or-or-operator).
 
-## Logical XOR operator (#logical-xor-operator)
+## Logical XOR operator {#logical-xor-operator}
 
-Syntax `a XOR b` — calculates logical exclusive disjunction of `a` and `b` with the function [xor](../../sql-reference/functions/logical-functions.md#xor).
+Syntax `SELECT a XOR b` — calculates logical exclusive disjunction of `a` and `b` with the function [xor](../../sql-reference/functions/logical-functions.md#xor).
## Conditional Operator {#conditional-operator}
 

From e590d9c8bb79c013da7db9422100cc25114aa33d Mon Sep 17 00:00:00 2001
From: Azat Khuzhin
Date: Mon, 28 Jun 2021 10:44:49 +0300
Subject: [PATCH 513/931] Add comments for VERSION_REVISION vs
 DBMS_TCP_PROTOCOL_VERSION

---
 cmake/autogenerated_versions.txt | 5 ++++-
 src/Common/config_version.h.in | 2 ++
 src/Core/Defines.h | 8 +++++++-
 3 files changed, 13 insertions(+), 2 deletions(-)

diff --git a/cmake/autogenerated_versions.txt b/cmake/autogenerated_versions.txt
index 6214a229da6..49cf30d2556 100644
--- a/cmake/autogenerated_versions.txt
+++ b/cmake/autogenerated_versions.txt
@@ -1,4 +1,7 @@
-# This strings autochanged from release_lib.sh:
+# These variables are autochanged by release_lib.sh:
+
+# NOTE: it has nothing in common with DBMS_TCP_PROTOCOL_VERSION,
+# only DBMS_TCP_PROTOCOL_VERSION should be incremented on protocol changes.
 SET(VERSION_REVISION 54453)
 SET(VERSION_MAJOR 21)
 SET(VERSION_MINOR 8)
diff --git a/src/Common/config_version.h.in b/src/Common/config_version.h.in
index 09f1e5500ad..3b0700b8a8a 100644
--- a/src/Common/config_version.h.in
+++ b/src/Common/config_version.h.in
@@ -2,6 +2,8 @@
 
 // .h autogenerated by cmake!
 
+// NOTE: it has nothing in common with DBMS_TCP_PROTOCOL_VERSION,
+// only DBMS_TCP_PROTOCOL_VERSION should be incremented on protocol changes.
 #cmakedefine VERSION_REVISION @VERSION_REVISION@
 #cmakedefine VERSION_NAME "@VERSION_NAME@"
 #define DBMS_NAME VERSION_NAME
diff --git a/src/Core/Defines.h b/src/Core/Defines.h
index 994478cd8a1..5751f4beeb7 100644
--- a/src/Core/Defines.h
+++ b/src/Core/Defines.h
@@ -85,7 +85,13 @@
 
 #define DBMS_MIN_PROTOCOL_VERSION_WITH_DISTRIBUTED_DEPTH 54448
 
-/// Version of ClickHouse TCP protocol. Increment it manually when you change the protocol.
+/// Version of ClickHouse TCP protocol.
+///
+/// Should be incremented manually on protocol changes.
+///
+/// NOTE: DBMS_TCP_PROTOCOL_VERSION has nothing in common with VERSION_REVISION;
+/// the latter is just a number for the server version (one number instead of a commit SHA)
+/// for simplicity (sometimes it may be more convenient in some use cases).
 #define DBMS_TCP_PROTOCOL_VERSION 54449
 
 #define DBMS_MIN_PROTOCOL_VERSION_WITH_INITIAL_QUERY_START_TIME 54449

From 12798a2c471227e51fb7bcb4f0c6cd7391250305 Mon Sep 17 00:00:00 2001
From: George
Date: Mon, 28 Jun 2021 22:32:37 +0300
Subject: [PATCH 514/931] more fixes

---
 docs/en/sql-reference/functions/logical-functions.md | 2 +-
 docs/en/sql-reference/operators/index.md | 8 ++++----
 2 files changed, 5 insertions(+), 5 deletions(-)

diff --git a/docs/en/sql-reference/functions/logical-functions.md b/docs/en/sql-reference/functions/logical-functions.md
index 2896f3bdd33..5b967b877c7 100644
--- a/docs/en/sql-reference/functions/logical-functions.md
+++ b/docs/en/sql-reference/functions/logical-functions.md
@@ -153,7 +153,7 @@ Result:
 
 ## xor {#xor}
 
-Calculates the result of the logical exclusive disjunction between two or more values. For more than two values the function works as if it calculates `XOR` of the first two values and then uses the result with the next value to calculate `XOR` and so on.
Corresponds to [Logical XOR Operator](../../sql-reference/operators/index.md#logical-xor-operator). **Syntax** diff --git a/docs/en/sql-reference/operators/index.md b/docs/en/sql-reference/operators/index.md index 0c58c8d0353..818edef6a90 100644 --- a/docs/en/sql-reference/operators/index.md +++ b/docs/en/sql-reference/operators/index.md @@ -211,10 +211,6 @@ SELECT toDateTime('2014-10-26 00:00:00', 'Europe/Moscow') AS time, time + 60 * 6 - [Interval](../../sql-reference/data-types/special-data-types/interval.md) data type - [toInterval](../../sql-reference/functions/type-conversion-functions.md#function-tointerval) type conversion functions -## Logical Negation Operator {#logical-negation-operator} - -Syntax `SELECT NOT a` — calculates logical negation of `a` with the function [not](../../sql-reference/functions/logical-functions.md#not-not-operator). - ## Logical AND Operator {#logical-and-operator} Syntax `SELECT a AND b` — calculates logical conjunction of `a` and `b` with the function [and](../../sql-reference/functions/logical-functions.md#and-and-operator). @@ -223,6 +219,10 @@ Syntax `SELECT a AND b` — calculates logical conjunction of `a` and `b` with t Syntax `SELECT a OR b` — calculates logical disjunction of `a` and `b` with the function [or](../../sql-reference/functions/logical-functions.md#or-or-operator). +## Logical Negation Operator {#logical-negation-operator} + +Syntax `SELECT NOT a` — calculates logical negation of `a` with the function [not](../../sql-reference/functions/logical-functions.md#not-not-operator). + ## Logical XOR operator {#logical-xor-operator} Syntax `SELECT a XOR b` — calculates logical exclusive disjunction of `a` and `b` with the function [xor](../../sql-reference/functions/logical-functions.md#xor). From c7def2a76d8a330acf076c322a6eb079f844d910 Mon Sep 17 00:00:00 2001 From: Alexey Date: Mon, 28 Jun 2021 19:56:20 +0000 Subject: [PATCH 515/931] Example fixed --- .../aggregate-functions/reference/quantilebfloat16.md | 9 +++++---- .../aggregate-functions/reference/quantiles.md | 4 ++-- 2 files changed, 7 insertions(+), 6 deletions(-) diff --git a/docs/en/sql-reference/aggregate-functions/reference/quantilebfloat16.md b/docs/en/sql-reference/aggregate-functions/reference/quantilebfloat16.md index 87b7e96dd7e..763cb07bcb1 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/quantilebfloat16.md +++ b/docs/en/sql-reference/aggregate-functions/reference/quantilebfloat16.md @@ -39,21 +39,22 @@ Input table has an integer and a float columns: │ 1 │ 1.001 │ │ 2 │ 1.002 │ │ 3 │ 1.003 │ +│ 4 │ 1.004 │ └───┴───────┘ ``` Query: ``` sql -SELECT quantilesBFloat16(0.75)(a), quantilesBFloat16(0.75)(b) FROM example_table; +SELECT quantileBFloat16(0.75)(a), quantileBFloat16(0.75)(b) FROM example_table; ``` Result: ``` text -┌─quantilesBFloat16(0.75)(a)─┬─quantilesBFloat16(0.75)(b)─┐ -│ [3] │ [1] │ -└────────────────────────────┴────────────────────────────┘ +┌─quantileBFloat16(0.75)(a)─┬─quantileBFloat16(0.75)(b)─┐ +│ 3 │ 1 │ +└───────────────────────────┴───────────────────────────┘ ``` Note that all floating point values in the example are truncated to 1.0 when converting to bfloat16. 
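The truncation mentioned in the note above can be reproduced by hand. A minimal sketch, assuming the standard `toFloat32`, `reinterpretAs*` and `bitAnd` functions behave as documented (zeroing the low 16 bits of a `Float32` is the same conversion to bfloat16 that the function performs):

```sql
-- 1.003 loses its low mantissa bits and collapses to 1.0
SELECT reinterpretAsFloat32(toUInt32(bitAnd(reinterpretAsUInt32(toFloat32(1.003)), 0xFFFF0000))) AS truncated;
```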
diff --git a/docs/en/sql-reference/aggregate-functions/reference/quantiles.md b/docs/en/sql-reference/aggregate-functions/reference/quantiles.md index 766766d2f94..8ed446863a9 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/quantiles.md +++ b/docs/en/sql-reference/aggregate-functions/reference/quantiles.md @@ -6,7 +6,7 @@ toc_priority: 201 Syntax: `quantiles(level1, level2, …)(x)` -All the quantile functions also have corresponding quantiles functions. They calculate quantiles of all listed levels in one pass, and return them as an array. +All the quantile functions also have corresponding quantiles functions. They calculate quantiles of all listed levels in one pass and return them as an array. - `quantiles`; - `quantilesDeterministic`; @@ -15,4 +15,4 @@ All the quantile functions also have corresponding quantiles functions. They cal - `quantilesExact`; - `quantilesExactWeighted`; - `quantilesTDigest`; -- `quantilesBFloat16`; +- `quantilesBFloat16`. From f2c871f7c8368d977ac8ff5261060ea8a757f059 Mon Sep 17 00:00:00 2001 From: Alexey Date: Mon, 28 Jun 2021 20:07:20 +0000 Subject: [PATCH 516/931] Removed implementation details --- .../aggregate-functions/reference/quantilebfloat16.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/en/sql-reference/aggregate-functions/reference/quantilebfloat16.md b/docs/en/sql-reference/aggregate-functions/reference/quantilebfloat16.md index 763cb07bcb1..30dc145ff42 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/quantilebfloat16.md +++ b/docs/en/sql-reference/aggregate-functions/reference/quantilebfloat16.md @@ -5,7 +5,7 @@ toc_priority: 209 # quantileBFloat16 {#quantilebfloat16} Calculates a [quantile](https://en.wikipedia.org/wiki/Quantile#Estimating_quantiles_from_a_sample) of a sample consisting of [bfloat16](https://en.wikipedia.org/wiki/Bfloat16_floating-point_format) numbers. bfloat16 is a floating-point data type with 1 sign bit, 8 exponent bits and 7 fraction bits. -The function converts input values to 32-bit floats and takes the most significant 16 bits. Then it calculates the histogram of these values. Resulting value is converted to 64-bit float by appending zero bits. +The function converts input values to 32-bit floats and takes the most significant 16 bits. Then it calculates bfloat16 quantile value and converts the result to a 64-bit float by appending zero bits. The function is a fast quantile estimator with a relative error no more than 0.390625%. **Syntax** From 538558ccb55e67ad4dfa17d7926d4a3a18cb004e Mon Sep 17 00:00:00 2001 From: Alexey Date: Mon, 28 Jun 2021 20:26:27 +0000 Subject: [PATCH 517/931] Fixed links in median.md --- .../aggregate-functions/reference/median.md | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/docs/en/sql-reference/aggregate-functions/reference/median.md b/docs/en/sql-reference/aggregate-functions/reference/median.md index b309b20fd5f..f2a6080eda3 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/median.md +++ b/docs/en/sql-reference/aggregate-functions/reference/median.md @@ -8,15 +8,15 @@ The `median*` functions are the aliases for the corresponding `quantile*` functi Functions: -- `median` — Alias for [quantile](#quantile). -- `medianDeterministic` — Alias for [quantileDeterministic](#quantiledeterministic). -- `medianExact` — Alias for [quantileExact](#quantileexact). -- `medianExactWeighted` — Alias for [quantileExactWeighted](#quantileexactweighted). 
-- `medianTiming` — Alias for [quantileTiming](#quantiletiming). -- `medianTimingWeighted` — Alias for [quantileTimingWeighted](#quantiletimingweighted). -- `medianTDigest` — Alias for [quantileTDigest](#quantiletdigest). -- `medianTDigestWeighted` — Alias for [quantileTDigestWeighted](#quantiletdigestweighted). -- `medianBFloat16` — Alias for [quantileBFloat16](#quantilebfloat16). +- `median` — Alias for [quantile](quantile.md). +- `medianDeterministic` — Alias for [quantileDeterministic](quantiledeterministic.md). +- `medianExact` — Alias for [quantileExact](quantileexact.md). +- `medianExactWeighted` — Alias for [quantileExactWeighted](quantileexactweighted.md). +- `medianTiming` — Alias for [quantileTiming](quantiletiming.md). +- `medianTimingWeighted` — Alias for [quantileTimingWeighted](quantiletimingweighted.md). +- `medianTDigest` — Alias for [quantileTDigest](quantiletdigest.md). +- `medianTDigestWeighted` — Alias for [quantileTDigestWeighted](quantiletdigestweighted.md). +- `medianBFloat16` — Alias for [quantileBFloat16](quantilebfloat16.md). **Example** From 8ac7e147b76d5044f431378acde0edfaae10d343 Mon Sep 17 00:00:00 2001 From: Alexey Date: Mon, 28 Jun 2021 20:26:57 +0000 Subject: [PATCH 518/931] Text changed to match other quantile descriptions --- .../aggregate-functions/reference/quantilebfloat16.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/en/sql-reference/aggregate-functions/reference/quantilebfloat16.md b/docs/en/sql-reference/aggregate-functions/reference/quantilebfloat16.md index 30dc145ff42..3e3824c3dfb 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/quantilebfloat16.md +++ b/docs/en/sql-reference/aggregate-functions/reference/quantilebfloat16.md @@ -4,7 +4,7 @@ toc_priority: 209 # quantileBFloat16 {#quantilebfloat16} -Calculates a [quantile](https://en.wikipedia.org/wiki/Quantile#Estimating_quantiles_from_a_sample) of a sample consisting of [bfloat16](https://en.wikipedia.org/wiki/Bfloat16_floating-point_format) numbers. bfloat16 is a floating-point data type with 1 sign bit, 8 exponent bits and 7 fraction bits. +Computes an approximate [quantile](https://en.wikipedia.org/wiki/Quantile#Estimating_quantiles_from_a_sample) of a sample consisting of [bfloat16](https://en.wikipedia.org/wiki/Bfloat16_floating-point_format) numbers. bfloat16 is a floating-point data type with 1 sign bit, 8 exponent bits and 7 fraction bits. The function converts input values to 32-bit floats and takes the most significant 16 bits. Then it calculates bfloat16 quantile value and converts the result to a 64-bit float by appending zero bits. The function is a fast quantile estimator with a relative error no more than 0.390625%. 
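For context on the error bound quoted above: 0.390625% is exactly 2⁻⁸, i.e. one part in 256, which is consistent with bfloat16 keeping 8 significant bits of mantissa (7 explicit fraction bits plus the implicit leading bit). A trivial check:

```sql
SELECT pow(2, -8) AS relative_error_bound; -- 0.00390625, i.e. 0.390625%
```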
From 15a14d6da2cb0922eff26c6a4710a4a10ac243c0 Mon Sep 17 00:00:00 2001 From: Azat Khuzhin Date: Mon, 28 Jun 2021 23:08:18 +0300 Subject: [PATCH 519/931] Fix "No available columns" for Merge() storage --- src/Interpreters/TreeRewriter.cpp | 2 +- .../0_stateless/01931_storage_merge_no_columns.reference | 0 tests/queries/0_stateless/01931_storage_merge_no_columns.sql | 4 ++++ 3 files changed, 5 insertions(+), 1 deletion(-) create mode 100644 tests/queries/0_stateless/01931_storage_merge_no_columns.reference create mode 100644 tests/queries/0_stateless/01931_storage_merge_no_columns.sql diff --git a/src/Interpreters/TreeRewriter.cpp b/src/Interpreters/TreeRewriter.cpp index 76093a14d45..fabd2cc5ef6 100644 --- a/src/Interpreters/TreeRewriter.cpp +++ b/src/Interpreters/TreeRewriter.cpp @@ -742,7 +742,7 @@ void TreeRewriterResult::collectUsedColumns(const ASTPtr & query, bool is_select if (!columns.empty()) required.insert(std::min_element(columns.begin(), columns.end())->name); - else + else if (!source_columns.empty()) /// If we have no information about columns sizes, choose a column of minimum size of its data type. required.insert(ExpressionActions::getSmallestColumn(source_columns)); } diff --git a/tests/queries/0_stateless/01931_storage_merge_no_columns.reference b/tests/queries/0_stateless/01931_storage_merge_no_columns.reference new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/queries/0_stateless/01931_storage_merge_no_columns.sql b/tests/queries/0_stateless/01931_storage_merge_no_columns.sql new file mode 100644 index 00000000000..787316e299c --- /dev/null +++ b/tests/queries/0_stateless/01931_storage_merge_no_columns.sql @@ -0,0 +1,4 @@ +drop table if exists data; +create table data (key Int) engine=MergeTree() order by key; +select 1 from merge(currentDatabase(), '^data$') prewhere _table in (NULL); +drop table data; From 60530b4dae2049d22d80b9756652e45d1c3e1ab5 Mon Sep 17 00:00:00 2001 From: Maksim Kita Date: Tue, 29 Jun 2021 00:54:22 +0300 Subject: [PATCH 520/931] Fixed tests --- .../queries/0_stateless/00735_long_conditional.reference | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/tests/queries/0_stateless/00735_long_conditional.reference b/tests/queries/0_stateless/00735_long_conditional.reference index 6308a48218b..082c2d49de9 100644 --- a/tests/queries/0_stateless/00735_long_conditional.reference +++ b/tests/queries/0_stateless/00735_long_conditional.reference @@ -92,8 +92,8 @@ value vs value 0 1 1 UInt64 Decimal(18, 0) Decimal(38, 0) 0 1 1 UInt64 Decimal(38, 0) Decimal(38, 0) 1970-01-01 1970-01-02 1970-01-02 Date Date Date -2000-01-01 2000-01-01 00:00:01 2000-01-01 00:00:01 Date DateTime(\'Europe/Moscow\') DateTime -2000-01-01 00:00:00 2000-01-02 2000-01-02 00:00:00 DateTime(\'Europe/Moscow\') Date DateTime +2000-01-01 2000-01-01 00:00:01 2000-01-01 00:00:01 Date DateTime(\'Europe/Moscow\') DateTime(\'Europe/Moscow\') +2000-01-01 00:00:00 2000-01-02 2000-01-02 00:00:00 DateTime(\'Europe/Moscow\') Date DateTime(\'Europe/Moscow\') 1970-01-01 03:00:00 1970-01-01 03:00:01 1970-01-01 03:00:01 DateTime(\'Europe/Moscow\') DateTime(\'Europe/Moscow\') DateTime(\'Europe/Moscow\') column vs value 0 1 1 Int8 Int8 Int8 @@ -189,6 +189,6 @@ column vs value 0 1 1 UInt64 Decimal(18, 0) Decimal(38, 0) 0 1 1 UInt64 Decimal(38, 0) Decimal(38, 0) 1970-01-01 1970-01-02 1970-01-02 Date Date Date -2000-01-01 2000-01-01 00:00:01 2000-01-01 00:00:01 Date DateTime(\'Europe/Moscow\') DateTime -2000-01-01 00:00:00 2000-01-02 2000-01-02 00:00:00 
DateTime(\'Europe/Moscow\') Date DateTime
+2000-01-01 2000-01-01 00:00:01 2000-01-01 00:00:01 Date DateTime(\'Europe/Moscow\') DateTime(\'Europe/Moscow\')
+2000-01-01 00:00:00 2000-01-02 2000-01-02 00:00:00 DateTime(\'Europe/Moscow\') Date DateTime(\'Europe/Moscow\')
 1970-01-01 03:00:00 1970-01-01 03:00:01 1970-01-01 03:00:01 DateTime(\'Europe/Moscow\') DateTime(\'Europe/Moscow\') DateTime(\'Europe/Moscow\')
From 350445c5f11a0390e0d87c995218030508b0f6ab Mon Sep 17 00:00:00 2001
From: alexey-milovidov
Date: Tue, 29 Jun 2021 01:29:14 +0300
Subject: [PATCH 521/931] Update run.sh

---
 docker/test/stress/run.sh | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/docker/test/stress/run.sh b/docker/test/stress/run.sh
index 846cc4ad53d..03f625039f8 100755
--- a/docker/test/stress/run.sh
+++ b/docker/test/stress/run.sh
@@ -155,7 +155,7 @@ zgrep -Fa "########################################" /test_output/* > /dev/null
# Put logs into /test_output/
for log_file in /var/log/clickhouse-server/clickhouse-server.log*
do
-    pigz < "${log_file}" > /test_output/"$(basename \"${log_file}\")".gz
+    pigz < "${log_file}" > /test_output/"$(basename ${log_file})".gz
done

tar -chf /test_output/coordination.tar /var/lib/clickhouse/coordination ||:
From 5d89907ae9b938e544da8161d16da9cab38e76ad Mon Sep 17 00:00:00 2001
From: Ildus Kurbangaliev
Date: Tue, 29 Jun 2021 00:34:45 +0200
Subject: [PATCH 522/931] Fix style errors

---
 src/Functions/array/mapOp.cpp | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/src/Functions/array/mapOp.cpp b/src/Functions/array/mapOp.cpp
index 5c2637270d5..ac4ac565546 100644
--- a/src/Functions/array/mapOp.cpp
+++ b/src/Functions/array/mapOp.cpp
@@ -68,7 +68,7 @@ private:

        if (which_ch_val.isFloat() != which_val.isFloat())
            throw Exception(
-                "All value types in " + getName() + " should be ether or float or integer", ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT);
+                "All value types in " + getName() + " should be either float or integer", ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT);

        if (!(check_val_type->equals(*promoted_val_type)))
        {
@@ -386,9 +386,9 @@ private:
            }
        }
        else
-            throw Exception{
+            throw Exception(
                "Illegal column type " + arguments[0].type->getName() + " in arguments of function " + getName(),
-                ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT};
+                ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT);
    }

    // we can check const columns before any processing
@@ -439,9 +439,9 @@ private:
            case TypeIndex::String:
                return execute1(row_count, res_type, res_value_type, args);
            default:
-                throw Exception{
+                throw Exception(
                    "Illegal column type " + key_type->getName() + " for keys in arguments of function " + getName(),
-                    ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT};
+                    ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT);
        }
    }
};
From a7ee0b91b19c3f4e774959df31cc250560175fc5 Mon Sep 17 00:00:00 2001
From: Alexey Milovidov
Date: Mon, 28 Jun 2021 00:43:44 +0300
Subject: [PATCH 523/931] Fix strange whitespaces @abyss7

---
 src/Parsers/ASTIdentifier.h | 24 ++++++++++++------------
 1 file changed, 12 insertions(+), 12 deletions(-)

diff --git a/src/Parsers/ASTIdentifier.h b/src/Parsers/ASTIdentifier.h
index 5fc446ae477..323280e07bc 100644
--- a/src/Parsers/ASTIdentifier.h
+++ b/src/Parsers/ASTIdentifier.h
@@ -70,23 +70,23 @@ private:

class ASTTableIdentifier : public ASTIdentifier
{
- public:
-     explicit ASTTableIdentifier(const String & table_name, std::vector<ASTPtr> && name_params = {});
-     explicit ASTTableIdentifier(const StorageID & table_id, std::vector<ASTPtr> && name_params = {});
-     ASTTableIdentifier(const String & database_name,
const String & table_name, std::vector<ASTPtr> && name_params = {});
+public:
+    explicit ASTTableIdentifier(const String & table_name, std::vector<ASTPtr> && name_params = {});
+    explicit ASTTableIdentifier(const StorageID & table_id, std::vector<ASTPtr> && name_params = {});
+    ASTTableIdentifier(const String & database_name, const String & table_name, std::vector<ASTPtr> && name_params = {});

-     String getID(char delim) const override { return "TableIdentifier" + (delim + name()); }
-     ASTPtr clone() const override;
+    String getID(char delim) const override { return "TableIdentifier" + (delim + name()); }
+    ASTPtr clone() const override;

-     UUID uuid = UUIDHelpers::Nil; // FIXME(ilezhankin): make private
+    UUID uuid = UUIDHelpers::Nil; // FIXME(ilezhankin): make private

-     StorageID getTableId() const;
-     String getDatabaseName() const;
+    StorageID getTableId() const;
+    String getDatabaseName() const;

-     // FIXME: used only when it's needed to rewrite distributed table name to real remote table name.
-     void resetTable(const String & database_name, const String & table_name); // TODO(ilezhankin): get rid of this
+    // FIXME: used only when it's needed to rewrite distributed table name to real remote table name.
+    void resetTable(const String & database_name, const String & table_name); // TODO(ilezhankin): get rid of this

-     void updateTreeHashImpl(SipHash & hash_state) const override;
+    void updateTreeHashImpl(SipHash & hash_state) const override;
};


From 7cdde9ed360ef7fed6078209420a8aeae184b9f2 Mon Sep 17 00:00:00 2001
From: Alexey Milovidov
Date: Tue, 29 Jun 2021 01:48:54 +0300
Subject: [PATCH 524/931] Add links to builds on the front page

---
 docs/_includes/install/freebsd.sh | 3 +++
 docs/_includes/install/mac-arm.sh | 3 +++
 docs/_includes/install/mac-x86.sh | 3 +++
 docs/en/getting-started/install.md | 7 ++++---
 website/templates/index/quickstart.html | 22 +++++++++++++++++++---
 5 files changed, 32 insertions(+), 6 deletions(-)
 create mode 100644 docs/_includes/install/freebsd.sh
 create mode 100644 docs/_includes/install/mac-arm.sh
 create mode 100644 docs/_includes/install/mac-x86.sh

diff --git a/docs/_includes/install/freebsd.sh b/docs/_includes/install/freebsd.sh
new file mode 100644
index 00000000000..d664ea19a18
--- /dev/null
+++ b/docs/_includes/install/freebsd.sh
@@ -0,0 +1,3 @@
+wget 'https://builds.clickhouse.tech/master/freebsd/clickhouse'
+chmod a+x ./clickhouse
+sudo ./clickhouse install
diff --git a/docs/_includes/install/mac-arm.sh b/docs/_includes/install/mac-arm.sh
new file mode 100644
index 00000000000..9fc5c0cef22
--- /dev/null
+++ b/docs/_includes/install/mac-arm.sh
@@ -0,0 +1,3 @@
+wget 'https://builds.clickhouse.tech/master/macos-aarch64/clickhouse'
+chmod a+x ./clickhouse
+./clickhouse
diff --git a/docs/_includes/install/mac-x86.sh b/docs/_includes/install/mac-x86.sh
new file mode 100644
index 00000000000..1423769b6d5
--- /dev/null
+++ b/docs/_includes/install/mac-x86.sh
@@ -0,0 +1,3 @@
+wget 'https://builds.clickhouse.tech/master/macos/clickhouse'
+chmod a+x ./clickhouse
+./clickhouse
diff --git a/docs/en/getting-started/install.md b/docs/en/getting-started/install.md
index 4256de49e4a..af4061ad484 100644
--- a/docs/en/getting-started/install.md
+++ b/docs/en/getting-started/install.md
@@ -107,9 +107,10 @@ sudo ./clickhouse install

For non-Linux operating systems and for AArch64 CPU architecture, ClickHouse builds are provided as a cross-compiled binary from the latest commit of the `master` branch (with a few hours delay).
-- [macOS](https://builds.clickhouse.tech/master/macos/clickhouse) — `curl -O 'https://builds.clickhouse.tech/master/macos/clickhouse' && chmod a+x ./clickhouse` -- [FreeBSD](https://builds.clickhouse.tech/master/freebsd/clickhouse) — `curl -O 'https://builds.clickhouse.tech/master/freebsd/clickhouse' && chmod a+x ./clickhouse` -- [AArch64](https://builds.clickhouse.tech/master/aarch64/clickhouse) — `curl -O 'https://builds.clickhouse.tech/master/aarch64/clickhouse' && chmod a+x ./clickhouse` +- [MacOS x86_64](https://builds.clickhouse.tech/master/macos/clickhouse) — `curl -O 'https://builds.clickhouse.tech/master/macos/clickhouse' && chmod a+x ./clickhouse` +- [MacOS Aarch64 (Apple Silicon)](https://builds.clickhouse.tech/master/macos/clickhouse) — `curl -O 'https://builds.clickhouse.tech/master/macos-aarch64/clickhouse' && chmod a+x ./clickhouse` +- [FreeBSD x86_64](https://builds.clickhouse.tech/master/freebsd/clickhouse) — `curl -O 'https://builds.clickhouse.tech/master/freebsd/clickhouse' && chmod a+x ./clickhouse` +- [Linux AArch64](https://builds.clickhouse.tech/master/aarch64/clickhouse) — `curl -O 'https://builds.clickhouse.tech/master/aarch64/clickhouse' && chmod a+x ./clickhouse` After downloading, you can use the `clickhouse client` to connect to the server, or `clickhouse local` to process local data. diff --git a/website/templates/index/quickstart.html b/website/templates/index/quickstart.html index 0d967e7b96c..b74e52905ff 100644 --- a/website/templates/index/quickstart.html +++ b/website/templates/index/quickstart.html @@ -2,9 +2,7 @@

Quick start

-

System requirements for pre-built packages: Linux, x86_64 with SSE 4.2.

- -

For other operating systems the easiest way to get started is using From ffdd5c67ae5f188f71e97b74051c50c3b64c3eff Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Tue, 29 Jun 2021 01:55:04 +0300 Subject: [PATCH 525/931] Fix error in docs --- docs/en/getting-started/install.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/en/getting-started/install.md b/docs/en/getting-started/install.md index af4061ad484..8331870d775 100644 --- a/docs/en/getting-started/install.md +++ b/docs/en/getting-started/install.md @@ -108,7 +108,7 @@ sudo ./clickhouse install For non-Linux operating systems and for AArch64 CPU arhitecture, ClickHouse builds are provided as a cross-compiled binary from the latest commit of the `master` branch (with a few hours delay). - [MacOS x86_64](https://builds.clickhouse.tech/master/macos/clickhouse) — `curl -O 'https://builds.clickhouse.tech/master/macos/clickhouse' && chmod a+x ./clickhouse` -- [MacOS Aarch64 (Apple Silicon)](https://builds.clickhouse.tech/master/macos/clickhouse) — `curl -O 'https://builds.clickhouse.tech/master/macos-aarch64/clickhouse' && chmod a+x ./clickhouse` +- [MacOS Aarch64 (Apple Silicon)](https://builds.clickhouse.tech/master/macos-aarch64/clickhouse) — `curl -O 'https://builds.clickhouse.tech/master/macos-aarch64/clickhouse' && chmod a+x ./clickhouse` - [FreeBSD x86_64](https://builds.clickhouse.tech/master/freebsd/clickhouse) — `curl -O 'https://builds.clickhouse.tech/master/freebsd/clickhouse' && chmod a+x ./clickhouse` - [Linux AArch64](https://builds.clickhouse.tech/master/aarch64/clickhouse) — `curl -O 'https://builds.clickhouse.tech/master/aarch64/clickhouse' && chmod a+x ./clickhouse` From f4f85a387d1fdca1efcb83e375d0484f30456b2a Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Tue, 29 Jun 2021 02:16:19 +0300 Subject: [PATCH 526/931] Fix DateLUT on Darwin --- base/common/DateLUTImpl.h | 124 +++++++++++++++++++------------------- 1 file changed, 63 insertions(+), 61 deletions(-) diff --git a/base/common/DateLUTImpl.h b/base/common/DateLUTImpl.h index 9e60181e802..c327a031918 100644 --- a/base/common/DateLUTImpl.h +++ b/base/common/DateLUTImpl.h @@ -119,11 +119,13 @@ private: } public: + using Time = Int64; + /// The order of fields matters for alignment and sizeof. struct Values { - /// time_t at beginning of the day. - Int64 date; + /// Time at beginning of the day. + Time date; /// Properties of the day. UInt16 year; @@ -182,20 +184,20 @@ private: LUTIndex years_months_lut[DATE_LUT_YEARS * 12]; /// UTC offset at beginning of the Unix epoch. The same as unix timestamp of 1970-01-01 00:00:00 local time. - time_t offset_at_start_of_epoch; + Time offset_at_start_of_epoch; /// UTC offset at the beginning of the first supported year. - time_t offset_at_start_of_lut; + Time offset_at_start_of_lut; bool offset_is_whole_number_of_hours_during_epoch; /// Time zone name. std::string time_zone; - inline LUTIndex findIndex(time_t t) const + inline LUTIndex findIndex(Time t) const { /// First guess. - Int64 guess = (t / 86400) + daynum_offset_epoch; + Time guess = (t / 86400) + daynum_offset_epoch; - /// For negative time_t the integer division was rounded up, so the guess is offset by one. + /// For negative Time the integer division was rounded up, so the guess is offset by one. 
if (unlikely(t < 0)) --guess; @@ -227,7 +229,7 @@ private: return LUTIndex{static_cast(d + daynum_offset_epoch) & date_lut_mask}; } - inline LUTIndex toLUTIndex(time_t t) const + inline LUTIndex toLUTIndex(Time t) const { return findIndex(t); } @@ -280,7 +282,7 @@ public: /// Round down to start of monday. template - inline time_t toFirstDayOfWeek(DateOrTime v) const + inline Time toFirstDayOfWeek(DateOrTime v) const { const LUTIndex i = toLUTIndex(v); return lut[i - (lut[i].day_of_week - 1)].date; @@ -295,7 +297,7 @@ public: /// Round down to start of month. template - inline time_t toFirstDayOfMonth(DateOrTime v) const + inline Time toFirstDayOfMonth(DateOrTime v) const { const LUTIndex i = toLUTIndex(v); return lut[i - (lut[i].day_of_month - 1)].date; @@ -332,13 +334,13 @@ public: } template - inline time_t toFirstDayOfQuarter(DateOrTime v) const + inline Time toFirstDayOfQuarter(DateOrTime v) const { return toDate(toFirstDayOfQuarterIndex(v)); } /// Round down to start of year. - inline time_t toFirstDayOfYear(time_t t) const + inline Time toFirstDayOfYear(Time t) const { return lut[years_lut[lut[findIndex(t)].year - DATE_LUT_MIN_YEAR]].date; } @@ -355,14 +357,14 @@ public: return toDayNum(toFirstDayNumOfYearIndex(v)); } - inline time_t toFirstDayOfNextMonth(time_t t) const + inline Time toFirstDayOfNextMonth(Time t) const { LUTIndex index = findIndex(t); index += 32 - lut[index].day_of_month; return lut[index - (lut[index].day_of_month - 1)].date; } - inline time_t toFirstDayOfPrevMonth(time_t t) const + inline Time toFirstDayOfPrevMonth(Time t) const { LUTIndex index = findIndex(t); index -= lut[index].day_of_month; @@ -389,16 +391,16 @@ public: /** Round to start of day, then shift for specified amount of days. */ - inline time_t toDateAndShift(time_t t, Int32 days) const + inline Time toDateAndShift(Time t, Int32 days) const { return lut[findIndex(t) + days].date; } - inline time_t toTime(time_t t) const + inline Time toTime(Time t) const { const LUTIndex index = findIndex(t); - time_t res = t - lut[index].date; + Time res = t - lut[index].date; if (res >= lut[index].time_at_offset_change()) res += lut[index].amount_of_offset_change(); @@ -406,11 +408,11 @@ public: return res - offset_at_start_of_epoch; /// Starting at 1970-01-01 00:00:00 local time. } - inline unsigned toHour(time_t t) const + inline unsigned toHour(Time t) const { const LUTIndex index = findIndex(t); - time_t time = t - lut[index].date; + Time time = t - lut[index].date; if (time >= lut[index].time_at_offset_change()) time += lut[index].amount_of_offset_change(); @@ -426,7 +428,7 @@ public: * then subtract the former from the latter to get the offset result. * The boundaries when meets DST(daylight saving time) change should be handled very carefully. */ - inline time_t timezoneOffset(time_t t) const + inline Time timezoneOffset(Time t) const { const LUTIndex index = findIndex(t); @@ -434,7 +436,7 @@ public: /// Because the "amount_of_offset_change" in LUT entry only exists in the change day, it's costly to scan it from the very begin. /// but we can figure out all the accumulated offsets from 1970-01-01 to that day just by get the whole difference between lut[].date, /// and then, we can directly subtract multiple 86400s to get the real DST offsets for the leap seconds is not considered now. 
- time_t res = (lut[index].date - lut[daynum_offset_epoch].date) % 86400; + Time res = (lut[index].date - lut[daynum_offset_epoch].date) % 86400; /// As so far to know, the maximal DST offset couldn't be more than 2 hours, so after the modulo operation the remainder /// will sits between [-offset --> 0 --> offset] which respectively corresponds to moving clock forward or backward. @@ -448,7 +450,7 @@ public: } - inline unsigned toSecond(time_t t) const + inline unsigned toSecond(Time t) const { auto res = t % 60; if (likely(res >= 0)) @@ -456,7 +458,7 @@ public: return res + 60; } - inline unsigned toMinute(time_t t) const + inline unsigned toMinute(Time t) const { if (t >= 0 && offset_is_whole_number_of_hours_during_epoch) return (t / 60) % 60; @@ -474,27 +476,27 @@ public: } /// NOTE: Assuming timezone offset is a multiple of 15 minutes. - inline time_t toStartOfMinute(time_t t) const { return roundDown(t, 60); } - inline time_t toStartOfFiveMinute(time_t t) const { return roundDown(t, 300); } - inline time_t toStartOfFifteenMinutes(time_t t) const { return roundDown(t, 900); } + inline Time toStartOfMinute(Time t) const { return roundDown(t, 60); } + inline Time toStartOfFiveMinute(Time t) const { return roundDown(t, 300); } + inline Time toStartOfFifteenMinutes(Time t) const { return roundDown(t, 900); } - inline time_t toStartOfTenMinutes(time_t t) const + inline Time toStartOfTenMinutes(Time t) const { if (t >= 0 && offset_is_whole_number_of_hours_during_epoch) return t / 600 * 600; /// More complex logic is for Nepal - it has offset 05:45. Australia/Eucla is also unfortunate. - Int64 date = find(t).date; + Time date = find(t).date; return date + (t - date) / 600 * 600; } /// NOTE: Assuming timezone transitions are multiple of hours. Lord Howe Island in Australia is a notable exception. - inline time_t toStartOfHour(time_t t) const + inline Time toStartOfHour(Time t) const { if (t >= 0 && offset_is_whole_number_of_hours_during_epoch) return t / 3600 * 3600; - Int64 date = find(t).date; + Time date = find(t).date; return date + (t - date) / 3600 * 3600; } @@ -506,11 +508,11 @@ public: * because the same calendar day starts/ends at different timestamps in different time zones) */ - inline time_t fromDayNum(DayNum d) const { return lut[toLUTIndex(d)].date; } - inline time_t fromDayNum(ExtendedDayNum d) const { return lut[toLUTIndex(d)].date; } + inline Time fromDayNum(DayNum d) const { return lut[toLUTIndex(d)].date; } + inline Time fromDayNum(ExtendedDayNum d) const { return lut[toLUTIndex(d)].date; } template - inline time_t toDate(DateOrTime v) const { return lut[toLUTIndex(v)].date; } + inline Time toDate(DateOrTime v) const { return lut[toLUTIndex(v)].date; } template inline unsigned toMonth(DateOrTime v) const { return lut[toLUTIndex(v)].month; } @@ -578,7 +580,7 @@ public: return toDayNum(toFirstDayNumOfISOYearIndex(v)); } - inline time_t toFirstDayOfISOYear(time_t t) const + inline Time toFirstDayOfISOYear(Time t) const { return lut[toFirstDayNumOfISOYearIndex(t)].date; } @@ -773,7 +775,7 @@ public: } /// We count all hour-length intervals, unrelated to offset changes. 
- inline time_t toRelativeHourNum(time_t t) const + inline Time toRelativeHourNum(Time t) const { if (t >= 0 && offset_is_whole_number_of_hours_during_epoch) return t / 3600; @@ -784,18 +786,18 @@ public: } template - inline time_t toRelativeHourNum(DateOrTime v) const + inline Time toRelativeHourNum(DateOrTime v) const { return toRelativeHourNum(lut[toLUTIndex(v)].date); } - inline time_t toRelativeMinuteNum(time_t t) const + inline Time toRelativeMinuteNum(Time t) const { return (t + DATE_LUT_ADD) / 60 - (DATE_LUT_ADD / 60); } template - inline time_t toRelativeMinuteNum(DateOrTime v) const + inline Time toRelativeMinuteNum(DateOrTime v) const { return toRelativeMinuteNum(lut[toLUTIndex(v)].date); } @@ -842,14 +844,14 @@ public: return ExtendedDayNum(4 + (d - 4) / days * days); } - inline time_t toStartOfDayInterval(ExtendedDayNum d, UInt64 days) const + inline Time toStartOfDayInterval(ExtendedDayNum d, UInt64 days) const { if (days == 1) return toDate(d); return lut[toLUTIndex(ExtendedDayNum(d / days * days))].date; } - inline time_t toStartOfHourInterval(time_t t, UInt64 hours) const + inline Time toStartOfHourInterval(Time t, UInt64 hours) const { if (hours == 1) return toStartOfHour(t); @@ -867,7 +869,7 @@ public: const LUTIndex index = findIndex(t); const Values & values = lut[index]; - time_t time = t - values.date; + Time time = t - values.date; if (time >= values.time_at_offset_change()) { /// Align to new hour numbers before rounding. @@ -892,7 +894,7 @@ public: return values.date + time; } - inline time_t toStartOfMinuteInterval(time_t t, UInt64 minutes) const + inline Time toStartOfMinuteInterval(Time t, UInt64 minutes) const { if (minutes == 1) return toStartOfMinute(t); @@ -909,7 +911,7 @@ public: return roundDown(t, seconds); } - inline time_t toStartOfSecondInterval(time_t t, UInt64 seconds) const + inline Time toStartOfSecondInterval(Time t, UInt64 seconds) const { if (seconds == 1) return t; @@ -934,14 +936,14 @@ public: return toDayNum(makeLUTIndex(year, month, day_of_month)); } - inline time_t makeDate(Int16 year, UInt8 month, UInt8 day_of_month) const + inline Time makeDate(Int16 year, UInt8 month, UInt8 day_of_month) const { return lut[makeLUTIndex(year, month, day_of_month)].date; } /** Does not accept daylight saving time as argument: in case of ambiguity, it choose greater timestamp. 
*/ - inline time_t makeDateTime(Int16 year, UInt8 month, UInt8 day_of_month, UInt8 hour, UInt8 minute, UInt8 second) const + inline Time makeDateTime(Int16 year, UInt8 month, UInt8 day_of_month, UInt8 hour, UInt8 minute, UInt8 second) const { size_t index = makeLUTIndex(year, month, day_of_month); UInt32 time_offset = hour * 3600 + minute * 60 + second; @@ -969,7 +971,7 @@ public: return values.year * 10000 + values.month * 100 + values.day_of_month; } - inline time_t YYYYMMDDToDate(UInt32 num) const + inline Time YYYYMMDDToDate(UInt32 num) const { return makeDate(num / 10000, num / 100 % 100, num % 100); } @@ -1000,13 +1002,13 @@ public: TimeComponents time; }; - inline DateComponents toDateComponents(time_t t) const + inline DateComponents toDateComponents(Time t) const { const Values & values = getValues(t); return { values.year, values.month, values.day_of_month }; } - inline DateTimeComponents toDateTimeComponents(time_t t) const + inline DateTimeComponents toDateTimeComponents(Time t) const { const LUTIndex index = findIndex(t); const Values & values = lut[index]; @@ -1017,7 +1019,7 @@ public: res.date.month = values.month; res.date.day = values.day_of_month; - time_t time = t - values.date; + Time time = t - values.date; if (time >= values.time_at_offset_change()) time += values.amount_of_offset_change(); @@ -1042,7 +1044,7 @@ public: } - inline UInt64 toNumYYYYMMDDhhmmss(time_t t) const + inline UInt64 toNumYYYYMMDDhhmmss(Time t) const { DateTimeComponents components = toDateTimeComponents(t); @@ -1055,7 +1057,7 @@ public: + UInt64(components.date.year) * 10000000000; } - inline time_t YYYYMMDDhhmmssToTime(UInt64 num) const + inline Time YYYYMMDDhhmmssToTime(UInt64 num) const { return makeDateTime( num / 10000000000, @@ -1069,12 +1071,12 @@ public: /// Adding calendar intervals. /// Implementation specific behaviour when delta is too big. - inline NO_SANITIZE_UNDEFINED time_t addDays(time_t t, Int64 delta) const + inline NO_SANITIZE_UNDEFINED Time addDays(Time t, Int64 delta) const { const LUTIndex index = findIndex(t); const Values & values = lut[index]; - time_t time = t - values.date; + Time time = t - values.date; if (time >= values.time_at_offset_change()) time += values.amount_of_offset_change(); @@ -1086,7 +1088,7 @@ public: return lut[new_index].date + time; } - inline NO_SANITIZE_UNDEFINED time_t addWeeks(time_t t, Int64 delta) const + inline NO_SANITIZE_UNDEFINED Time addWeeks(Time t, Int64 delta) const { return addDays(t, delta * 7); } @@ -1131,14 +1133,14 @@ public: /// If resulting month has less deys than source month, then saturation can happen. /// Example: 31 Aug + 1 month = 30 Sep. - inline time_t NO_SANITIZE_UNDEFINED addMonths(time_t t, Int64 delta) const + inline Time NO_SANITIZE_UNDEFINED addMonths(Time t, Int64 delta) const { const auto result_day = addMonthsIndex(t, delta); const LUTIndex index = findIndex(t); const Values & values = lut[index]; - time_t time = t - values.date; + Time time = t - values.date; if (time >= values.time_at_offset_change()) time += values.amount_of_offset_change(); @@ -1153,7 +1155,7 @@ public: return toDayNum(addMonthsIndex(d, delta)); } - inline time_t NO_SANITIZE_UNDEFINED addQuarters(time_t t, Int64 delta) const + inline Time NO_SANITIZE_UNDEFINED addQuarters(Time t, Int64 delta) const { return addMonths(t, delta * 3); } @@ -1180,14 +1182,14 @@ public: } /// Saturation can occur if 29 Feb is mapped to non-leap year. 
- inline time_t addYears(time_t t, Int64 delta) const + inline Time addYears(Time t, Int64 delta) const { auto result_day = addYearsIndex(t, delta); const LUTIndex index = findIndex(t); const Values & values = lut[index]; - time_t time = t - values.date; + Time time = t - values.date; if (time >= values.time_at_offset_change()) time += values.amount_of_offset_change(); @@ -1203,7 +1205,7 @@ public: } - inline std::string timeToString(time_t t) const + inline std::string timeToString(Time t) const { DateTimeComponents components = toDateTimeComponents(t); @@ -1228,7 +1230,7 @@ public: return s; } - inline std::string dateToString(time_t t) const + inline std::string dateToString(Time t) const { const Values & values = getValues(t); From d611297a7f3b8d185430b4fe2f18f7df424b81a1 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Tue, 29 Jun 2021 02:19:09 +0300 Subject: [PATCH 527/931] Add comment --- base/common/DateLUTImpl.h | 3 +++ 1 file changed, 3 insertions(+) diff --git a/base/common/DateLUTImpl.h b/base/common/DateLUTImpl.h index c327a031918..2ccad4be348 100644 --- a/base/common/DateLUTImpl.h +++ b/base/common/DateLUTImpl.h @@ -119,6 +119,9 @@ private: } public: + /// We use Int64 instead of time_t because time_t is mapped to the different types (long or long long) + /// on Linux and Darwin (on both of them, long and long long are 64 bit and behaves identically, + /// but they are different types in C++ and this affects function overload resolution). using Time = Int64; /// The order of fields matters for alignment and sizeof. From 9ebbdb19d5effb6bf546a1fc58cce7e89a7dd85b Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Tue, 29 Jun 2021 03:17:09 +0300 Subject: [PATCH 528/931] Fix obsolete scripts --- docs/tools/build.py | 4 ---- docs/tools/test.py | 30 ------------------------------ website/README.md | 6 +++--- 3 files changed, 3 insertions(+), 37 deletions(-) diff --git a/docs/tools/build.py b/docs/tools/build.py index 39e91f59cc4..dae61eec87e 100755 --- a/docs/tools/build.py +++ b/docs/tools/build.py @@ -154,9 +154,6 @@ def build(args): if not args.skip_website: website.build_website(args) - if not args.skip_test_templates: - test.test_templates(args.website_dir) - if not args.skip_docs: generate_cmake_flags_files() @@ -197,7 +194,6 @@ if __name__ == '__main__': arg_parser.add_argument('--skip-blog', action='store_true') arg_parser.add_argument('--skip-git-log', action='store_true') arg_parser.add_argument('--skip-docs', action='store_true') - arg_parser.add_argument('--skip-test-templates', action='store_true') arg_parser.add_argument('--test-only', action='store_true') arg_parser.add_argument('--minify', action='store_true') arg_parser.add_argument('--htmlproofer', action='store_true') diff --git a/docs/tools/test.py b/docs/tools/test.py index ada4df29644..526294dbe21 100755 --- a/docs/tools/test.py +++ b/docs/tools/test.py @@ -7,36 +7,6 @@ import bs4 import subprocess -def test_template(template_path): - if template_path.endswith('amp.html'): - # Inline CSS/JS is ok for AMP pages - return - - logging.debug(f'Running tests for {template_path} template') - with open(template_path, 'r') as f: - soup = bs4.BeautifulSoup( - f, - features='html.parser' - ) - for tag in soup.find_all(): - style_attr = tag.attrs.get('style') - assert not style_attr, f'Inline CSS is prohibited, found {style_attr} in {template_path}' - - if tag.name == 'script': - if tag.attrs.get('type') == 'application/ld+json': - continue - for content in tag.contents: - assert not content, f'Inline JavaScript is 
prohibited, found "{content}" in {template_path}' - - -def test_templates(base_dir): - logging.info('Running tests for templates') - for root, _, filenames in os.walk(base_dir): - for filename in filenames: - if filename.endswith('.html'): - test_template(os.path.join(root, filename)) - - def test_single_page(input_path, lang): with open(input_path) as f: soup = bs4.BeautifulSoup( diff --git a/website/README.md b/website/README.md index a09a00379d1..9f808c6f658 100644 --- a/website/README.md +++ b/website/README.md @@ -12,7 +12,7 @@ sudo npm install -g purify-css amphtml-validator sudo apt install wkhtmltopdf virtualenv build -./build.py --skip-multi-page --skip-single-page --skip-amp --skip-pdf --skip-blog --skip-git-log --skip-docs --skip-test-templates --livereload 8080 +./build.py --skip-multi-page --skip-single-page --skip-amp --skip-pdf --skip-blog --skip-git-log --skip-docs --livereload 8080 # Open the web browser and go to http://localhost:8080/ ``` @@ -20,11 +20,11 @@ virtualenv build # How to quickly test the blog ``` -./build.py --skip-multi-page --skip-single-page --skip-amp --skip-pdf --skip-git-log --skip-docs --skip-test-templates --livereload 8080 +./build.py --skip-multi-page --skip-single-page --skip-amp --skip-pdf --skip-git-log --skip-docs --livereload 8080 ``` # How to quickly test the ugly annoying broken links in docs ``` -./build.py --skip-multi-page --skip-amp --skip-pdf --skip-blog --skip-git-log --skip-test-templates --lang en --livereload 8080 +./build.py --skip-multi-page --skip-amp --skip-pdf --skip-blog --skip-git-log --lang en --livereload 8080 ``` From aaf39189f1e5ca67ca102cbe432ff7a91669ccce Mon Sep 17 00:00:00 2001 From: George Date: Tue, 29 Jun 2021 03:59:19 +0300 Subject: [PATCH 529/931] Edited dateName --- .../en/sql-reference/functions/date-time-functions.md | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/docs/en/sql-reference/functions/date-time-functions.md b/docs/en/sql-reference/functions/date-time-functions.md index 5d72bb099fe..36ec8cbe728 100644 --- a/docs/en/sql-reference/functions/date-time-functions.md +++ b/docs/en/sql-reference/functions/date-time-functions.md @@ -80,6 +80,7 @@ SELECT toDateTime('2019-01-01 00:00:00', 'UTC') AS time_utc, toInt32(time_samoa) AS int32samoa FORMAT Vertical; ``` + Result: ```text @@ -1014,7 +1015,7 @@ Result: ## dateName {#dataname} -Returns part of date with specified date part. +Returns specified part of the date. **Syntax** @@ -1024,13 +1025,13 @@ dateName(date_part, date) **Arguments** -- `date_part` - Date part. Possible values . -- `date` — Date [Date](../../sql-reference/data-types/date.md) or DateTime [DateTime](../../sql-reference/data-types/datetime.md), [DateTime64](../../sql-reference/data-types/datetime64.md). - +- `date_part` — Date part. Possible values: `year`, `quarter`, `month`, `week`, `dayofyear`, `day`, `weekday`, `hour`, `minute`, `second`. [String](../../sql-reference/data-types/string.md). +- `date` — Date with the type [Date](../../sql-reference/data-types/date.md), [DateTime](../../sql-reference/data-types/datetime.md) or [DateTime64](../../sql-reference/data-types/datetime64.md). +- `timezone` — Timezone. Optional. [String](../../sql-reference/data-types/string.md). **Returned value** -- Specified date part of date. +- Specified part of the date. 
Type: [String](../../sql-reference/data-types/string.md#string) From 5b8fad1329c2f97736174d2a6b9847beadee804e Mon Sep 17 00:00:00 2001 From: George Date: Tue, 29 Jun 2021 04:01:13 +0300 Subject: [PATCH 530/931] Added translation for dateName --- .../functions/date-time-functions.md | 70 +++++++++++++++---- 1 file changed, 55 insertions(+), 15 deletions(-) diff --git a/docs/ru/sql-reference/functions/date-time-functions.md b/docs/ru/sql-reference/functions/date-time-functions.md index 69f9a89f4cb..9a965d6bc80 100644 --- a/docs/ru/sql-reference/functions/date-time-functions.md +++ b/docs/ru/sql-reference/functions/date-time-functions.md @@ -27,25 +27,25 @@ SELECT Возвращает часовой пояс сервера. -**Синтаксис** +**Синтаксис** ``` sql timeZone() ``` -Псевдоним: `timezone`. +Псевдоним: `timezone`. **Возвращаемое значение** -- Часовой пояс. +- Часовой пояс. Тип: [String](../../sql-reference/data-types/string.md). ## toTimeZone {#totimezone} -Переводит дату или дату с временем в указанный часовой пояс. Часовой пояс - это атрибут типов `Date` и `DateTime`. Внутреннее значение (количество секунд) поля таблицы или результирующего столбца не изменяется, изменяется тип поля и, соответственно, его текстовое отображение. +Переводит дату или дату с временем в указанный часовой пояс. Часовой пояс - это атрибут типов `Date` и `DateTime`. Внутреннее значение (количество секунд) поля таблицы или результирующего столбца не изменяется, изменяется тип поля и, соответственно, его текстовое отображение. -**Синтаксис** +**Синтаксис** ``` sql toTimezone(value, timezone) @@ -53,14 +53,14 @@ toTimezone(value, timezone) Псевдоним: `toTimezone`. -**Аргументы** +**Аргументы** - `value` — время или дата с временем. [DateTime64](../../sql-reference/data-types/datetime64.md). - `timezone` — часовой пояс для возвращаемого значения. [String](../../sql-reference/data-types/string.md). **Возвращаемое значение** -- Дата с временем. +- Дата с временем. Тип: [DateTime](../../sql-reference/data-types/datetime.md). @@ -80,6 +80,7 @@ SELECT toDateTime('2019-01-01 00:00:00', 'UTC') AS time_utc, toInt32(time_samoa) AS int32samoa FORMAT Vertical; ``` + Результат: ```text @@ -102,21 +103,21 @@ int32samoa: 1546300800 Возвращает название часового пояса для значений типа [DateTime](../../sql-reference/data-types/datetime.md) и [DateTime64](../../sql-reference/data-types/datetime64.md). -**Синтаксис** +**Синтаксис** ``` sql timeZoneOf(value) ``` -Псевдоним: `timezoneOf`. +Псевдоним: `timezoneOf`. **Аргументы** -- `value` — Дата с временем. [DateTime](../../sql-reference/data-types/datetime.md) или [DateTime64](../../sql-reference/data-types/datetime64.md). +- `value` — Дата с временем. [DateTime](../../sql-reference/data-types/datetime.md) или [DateTime64](../../sql-reference/data-types/datetime64.md). **Возвращаемое значение** -- Название часового пояса. +- Название часового пояса. Тип: [String](../../sql-reference/data-types/string.md). @@ -145,15 +146,15 @@ SELECT timezoneOf(now()); timeZoneOffset(value) ``` -Псевдоним: `timezoneOffset`. +Псевдоним: `timezoneOffset`. **Аргументы** -- `value` — Дата с временем. [DateTime](../../sql-reference/data-types/datetime.md) or [DateTime64](../../sql-reference/data-types/datetime64.md). +- `value` — Дата с временем. [DateTime](../../sql-reference/data-types/datetime.md) or [DateTime64](../../sql-reference/data-types/datetime64.md). **Возвращаемое значение** -- Смещение в секундах от UTC. +- Смещение в секундах от UTC. Тип: [Int32](../../sql-reference/data-types/int-uint.md). 
@@ -626,7 +627,7 @@ SELECT now(), date_trunc('hour', now(), 'Europe/Moscow'); Добавляет интервал времени или даты к указанной дате или дате со временем. -**Синтаксис** +**Синтаксис** ``` sql date_add(unit, value, date) @@ -1025,6 +1026,45 @@ SELECT formatDateTime(toDate('2010-01-04'), '%g'); └────────────────────────────────────────────┘ ``` +## dateName {#dataname} + +Возвращает указанную часть даты. + +**Синтаксис** + +``` sql +dateName(date_part, date) +``` + +**Аргументы** + +- `date_part` — часть даты. Возможные значения: `year`, `quarter`, `month`, `week`, `dayofyear`, `day`, `weekday`, `hour`, `minute`, `second`. [String](../../sql-reference/data-types/string.md). +- `date` — дата типа [Date](../../sql-reference/data-types/date.md), [DateTime](../../sql-reference/data-types/datetime.md) или [DateTime64](../../sql-reference/data-types/datetime64.md). +- `timezone` — часовой пояс. Необязательный параметр. [String](../../sql-reference/data-types/string.md). + +**Возвращаемое значение** + +- Указанная часть даты. + +Тип: [String](../../sql-reference/data-types/string.md#string) + +**Пример** + +Запрос: + +```sql +WITH toDateTime('2021-04-14 11:22:33') AS date_value +SELECT dateName('year', date_value), dateName('month', date_value), dateName('day', date_value); +``` + +Результат: + +```text +┌─dateName('year', date_value)─┬─dateName('month', date_value)─┬─dateName('day', date_value)─┐ +│ 2021 │ April │ 14 │ +└──────────────────────────────┴───────────────────────────────┴───────────────────────────── +``` + ## FROM\_UNIXTIME {#fromunixtime} Функция преобразует Unix timestamp в календарную дату и время. From ed3d5d8d162572440513bc543b9b999539ca0623 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Tue, 29 Jun 2021 04:28:42 +0300 Subject: [PATCH 531/931] Edited bare Linux example: configs are not necessary for clickhouse binary to run --- docker/bare/prepare | 1 - 1 file changed, 1 deletion(-) diff --git a/docker/bare/prepare b/docker/bare/prepare index 10d791cac73..912b16634c7 100755 --- a/docker/bare/prepare +++ b/docker/bare/prepare @@ -12,7 +12,6 @@ mkdir root pushd root mkdir lib lib64 etc tmp root cp ${BUILD_DIR}/programs/clickhouse . -cp ${SRC_DIR}/programs/server/{config,users}.xml . cp /lib/x86_64-linux-gnu/{libc.so.6,libdl.so.2,libm.so.6,libpthread.so.0,librt.so.1,libnss_dns.so.2,libresolv.so.2} lib cp /lib64/ld-linux-x86-64.so.2 lib64 cp /etc/resolv.conf ./etc From 6458a51ccfcee073e389c04c4a304dcbb9d80b66 Mon Sep 17 00:00:00 2001 From: George Date: Tue, 29 Jun 2021 04:38:33 +0300 Subject: [PATCH 532/931] Edited bitPositionsToArray(num) --- docs/en/sql-reference/functions/encoding-functions.md | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/docs/en/sql-reference/functions/encoding-functions.md b/docs/en/sql-reference/functions/encoding-functions.md index b464e070acc..92eb864455f 100644 --- a/docs/en/sql-reference/functions/encoding-functions.md +++ b/docs/en/sql-reference/functions/encoding-functions.md @@ -224,7 +224,7 @@ Accepts an integer. Returns an array of UInt64 numbers containing the list of po ## bitPositionsToArray(num) {#bitpositionstoarraynum} -Accepts an integer, argument will be converted to unsigned integer type. Returns an array of UInt64 numbers containing the list of positions of bits that equals 1. Numbers in the array are in ascending order. +Accepts an integer and converts it to an unsigned integer. Returns an array of `UInt64` numbers containing the list of positions of bits of `arg` that equals `1` in ascending order. 
**Syntax** @@ -234,11 +234,13 @@ bitPositionsToArray(arg) **Arguments** -- `arg` — Integer value.Types: [Int/UInt](../../sql-reference/data-types/int-uint.md) +- `arg` — Integer value. Type: [Int/UInt](../../sql-reference/data-types/int-uint.md). **Returned value** -An array of UInt64 numbers containing the list of positions of bits that equals 1. Numbers in the array are in ascending order. +- An array containing a list of positions of bits that equals `1` in ascending order. + +Type: [Array](../../sql-reference/data-types/array.md)([UInt64](../../sql-reference/data-types/int-uint.md)). **Example** From 8ab6f71b69e74eefb7d1336f28625de2270dc886 Mon Sep 17 00:00:00 2001 From: George Date: Tue, 29 Jun 2021 04:39:32 +0300 Subject: [PATCH 533/931] Added translation of bitPositionsToArray(num) --- .../functions/encoding-functions.md | 50 +++++++++++++++++++ 1 file changed, 50 insertions(+) diff --git a/docs/ru/sql-reference/functions/encoding-functions.md b/docs/ru/sql-reference/functions/encoding-functions.md index 23e840a7898..8e5069ed4a0 100644 --- a/docs/ru/sql-reference/functions/encoding-functions.md +++ b/docs/ru/sql-reference/functions/encoding-functions.md @@ -223,3 +223,53 @@ SELECT reinterpretAsUInt64(reverse(unhex('FFF'))) AS num; ## bitmaskToArray(num) {#bitmasktoarraynum} Принимает целое число. Возвращает массив чисел типа UInt64, содержащий степени двойки, в сумме дающих исходное число; числа в массиве идут по возрастанию. + +## bitPositionsToArray(num) {#bitpositionstoarraynum} + +Принимает целое число и приводит его к беззнаковому виду. Возвращает массив `UInt64` чисел, который содержит список позиций битов `arg`, равных `1`, в порядке возрастания. + +**Синтаксис** + +```sql +bitPositionsToArray(arg) +``` + +**Аргументы** + +- `arg` — целое значения. Тип: [Int/UInt](../../sql-reference/data-types/int-uint.md). + +**Возвращаемое значение** + +- Массив, содержащий список позиций битов, равных `1`, в порядке возрастания. + +Тип: [Array](../../sql-reference/data-types/array.md)([UInt64](../../sql-reference/data-types/int-uint.md)). + +**Example** + +Запрос: + +``` sql +SELECT bitPositionsToArray(toInt8(1)) AS bit_positions; +``` + +Результат: + +``` text +┌─bit_positions─┐ +│ [0] │ +└───────────────┘ +``` + +Запрос: + +``` sql +select bitPositionsToArray(toInt8(-1)) as bit_positions; +``` + +Результат: + +``` text +┌─bit_positions─────┐ +│ [0,1,2,3,4,5,6,7] │ +└───────────────────┘ +``` From 2af628f9632e08f518a32e7b27a14e32352e4b4a Mon Sep 17 00:00:00 2001 From: George Date: Tue, 29 Jun 2021 04:55:31 +0300 Subject: [PATCH 534/931] Small update --- docs/en/sql-reference/functions/date-time-functions.md | 2 +- docs/en/sql-reference/functions/encoding-functions.md | 2 +- docs/ru/sql-reference/functions/date-time-functions.md | 2 +- docs/ru/sql-reference/functions/encoding-functions.md | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/docs/en/sql-reference/functions/date-time-functions.md b/docs/en/sql-reference/functions/date-time-functions.md index 36ec8cbe728..ec744038e3f 100644 --- a/docs/en/sql-reference/functions/date-time-functions.md +++ b/docs/en/sql-reference/functions/date-time-functions.md @@ -1026,7 +1026,7 @@ dateName(date_part, date) **Arguments** - `date_part` — Date part. Possible values: `year`, `quarter`, `month`, `week`, `dayofyear`, `day`, `weekday`, `hour`, `minute`, `second`. [String](../../sql-reference/data-types/string.md). 
-- `date` — Date with the type [Date](../../sql-reference/data-types/date.md), [DateTime](../../sql-reference/data-types/datetime.md) or [DateTime64](../../sql-reference/data-types/datetime64.md). +- `date` — Date. [Date](../../sql-reference/data-types/date.md), [DateTime](../../sql-reference/data-types/datetime.md) or [DateTime64](../../sql-reference/data-types/datetime64.md). - `timezone` — Timezone. Optional. [String](../../sql-reference/data-types/string.md). **Returned value** diff --git a/docs/en/sql-reference/functions/encoding-functions.md b/docs/en/sql-reference/functions/encoding-functions.md index 92eb864455f..defad57bdaa 100644 --- a/docs/en/sql-reference/functions/encoding-functions.md +++ b/docs/en/sql-reference/functions/encoding-functions.md @@ -234,7 +234,7 @@ bitPositionsToArray(arg) **Arguments** -- `arg` — Integer value. Type: [Int/UInt](../../sql-reference/data-types/int-uint.md). +- `arg` — Integer value. [Int/UInt](../../sql-reference/data-types/int-uint.md). **Returned value** diff --git a/docs/ru/sql-reference/functions/date-time-functions.md b/docs/ru/sql-reference/functions/date-time-functions.md index 9a965d6bc80..63e169a4216 100644 --- a/docs/ru/sql-reference/functions/date-time-functions.md +++ b/docs/ru/sql-reference/functions/date-time-functions.md @@ -1039,7 +1039,7 @@ dateName(date_part, date) **Аргументы** - `date_part` — часть даты. Возможные значения: `year`, `quarter`, `month`, `week`, `dayofyear`, `day`, `weekday`, `hour`, `minute`, `second`. [String](../../sql-reference/data-types/string.md). -- `date` — дата типа [Date](../../sql-reference/data-types/date.md), [DateTime](../../sql-reference/data-types/datetime.md) или [DateTime64](../../sql-reference/data-types/datetime64.md). +- `date` — дата. [Date](../../sql-reference/data-types/date.md), [DateTime](../../sql-reference/data-types/datetime.md) или [DateTime64](../../sql-reference/data-types/datetime64.md). - `timezone` — часовой пояс. Необязательный параметр. [String](../../sql-reference/data-types/string.md). **Возвращаемое значение** diff --git a/docs/ru/sql-reference/functions/encoding-functions.md b/docs/ru/sql-reference/functions/encoding-functions.md index 8e5069ed4a0..fd028222c8f 100644 --- a/docs/ru/sql-reference/functions/encoding-functions.md +++ b/docs/ru/sql-reference/functions/encoding-functions.md @@ -236,7 +236,7 @@ bitPositionsToArray(arg) **Аргументы** -- `arg` — целое значения. Тип: [Int/UInt](../../sql-reference/data-types/int-uint.md). +- `arg` — целое значения. [Int/UInt](../../sql-reference/data-types/int-uint.md). 
**Возвращаемое значение** From 296593716dc7905f4f25538a105236b3717d9877 Mon Sep 17 00:00:00 2001 From: Kostiantyn Storozhuk Date: Tue, 29 Jun 2021 14:37:53 +0800 Subject: [PATCH 535/931] Reverted constructor change --- src/Databases/MySQL/FetchTablesColumnsList.cpp | 10 ++++++---- src/Interpreters/MySQL/InterpretersMySQLDDLQuery.cpp | 8 ++++++-- src/Storages/ColumnsDescription.cpp | 4 ++-- src/Storages/ColumnsDescription.h | 2 +- 4 files changed, 15 insertions(+), 9 deletions(-) diff --git a/src/Databases/MySQL/FetchTablesColumnsList.cpp b/src/Databases/MySQL/FetchTablesColumnsList.cpp index e792385d12f..d668e79198c 100644 --- a/src/Databases/MySQL/FetchTablesColumnsList.cpp +++ b/src/Databases/MySQL/FetchTablesColumnsList.cpp @@ -102,8 +102,7 @@ std::map fetchTablesColumnsList( for (size_t i = 0; i < rows; ++i) { String table_name = table_name_col[i].safeGet(); - tables_and_columns[table_name].add( - ColumnDescription( + ColumnDescription column_description( column_name_col[i].safeGet(), convertMySQLDataType( type_support, @@ -112,8 +111,11 @@ std::map fetchTablesColumnsList( is_unsigned_col[i].safeGet(), char_max_length_col[i].safeGet(), precision_col[i].safeGet(), - scale_col[i].safeGet()), - column_comment_col[i].safeGet())); + scale_col[i].safeGet()) + ); + column_description.comment = column_comment_col[i].safeGet(); + + tables_and_columns[table_name].add(column_description); } } return tables_and_columns; diff --git a/src/Interpreters/MySQL/InterpretersMySQLDDLQuery.cpp b/src/Interpreters/MySQL/InterpretersMySQLDDLQuery.cpp index 7ebc4f1feb9..bf4dfee2780 100644 --- a/src/Interpreters/MySQL/InterpretersMySQLDDLQuery.cpp +++ b/src/Interpreters/MySQL/InterpretersMySQLDDLQuery.cpp @@ -137,8 +137,12 @@ static ColumnsDescription createColumnsDescription(const NamesAndTypesList & col if (const auto * options = declare_column->column_options->as()) if (options->changes.count("comment")) comment = options->changes.at("comment")->as()->value.safeGet(); - - columns_description.add(ColumnDescription(column_name_and_type->name, column_name_and_type->type, comment)); + + ColumnDescription column_description(column_name_and_type->name, column_name_and_type->type); + if(!comment.empty()) + column_description.comment = std::move(comment); + + columns_description.add(column_description); } return columns_description; diff --git a/src/Storages/ColumnsDescription.cpp b/src/Storages/ColumnsDescription.cpp index cd7afae8a78..4a904c96432 100644 --- a/src/Storages/ColumnsDescription.cpp +++ b/src/Storages/ColumnsDescription.cpp @@ -43,8 +43,8 @@ namespace ErrorCodes extern const int LOGICAL_ERROR; } -ColumnDescription::ColumnDescription(String name_, DataTypePtr type_, String comment_) - : name(std::move(name_)), type(std::move(type_)), comment(std::move(comment_)) +ColumnDescription::ColumnDescription(String name_, DataTypePtr type_) + : name(std::move(name_)), type(std::move(type_)) { } diff --git a/src/Storages/ColumnsDescription.h b/src/Storages/ColumnsDescription.h index 338b519cee6..7fff22abf71 100644 --- a/src/Storages/ColumnsDescription.h +++ b/src/Storages/ColumnsDescription.h @@ -39,7 +39,7 @@ struct ColumnDescription ColumnDescription() = default; ColumnDescription(ColumnDescription &&) = default; ColumnDescription(const ColumnDescription &) = default; - ColumnDescription(String name_, DataTypePtr type_, String comment_ = ""); + ColumnDescription(String name_, DataTypePtr type_); bool operator==(const ColumnDescription & other) const; bool operator!=(const ColumnDescription & other) const { 
return !(*this == other); } From 090d1360808d86b1651a1071952156e9398a85e3 Mon Sep 17 00:00:00 2001 From: hcz Date: Tue, 29 Jun 2021 15:18:40 +0800 Subject: [PATCH 536/931] Fix links in median.md --- .../aggregate-functions/reference/median.md | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/docs/en/sql-reference/aggregate-functions/reference/median.md b/docs/en/sql-reference/aggregate-functions/reference/median.md index b4f38a9b562..5d681389eb0 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/median.md +++ b/docs/en/sql-reference/aggregate-functions/reference/median.md @@ -4,14 +4,14 @@ The `median*` functions are the aliases for the corresponding `quantile*` functi Functions: -- `median` — Alias for [quantile](#quantile). -- `medianDeterministic` — Alias for [quantileDeterministic](#quantiledeterministic). -- `medianExact` — Alias for [quantileExact](#quantileexact). -- `medianExactWeighted` — Alias for [quantileExactWeighted](#quantileexactweighted). -- `medianTiming` — Alias for [quantileTiming](#quantiletiming). -- `medianTimingWeighted` — Alias for [quantileTimingWeighted](#quantiletimingweighted). -- `medianTDigest` — Alias for [quantileTDigest](#quantiletdigest). -- `medianTDigestWeighted` — Alias for [quantileTDigestWeighted](#quantiletdigestweighted). +- `median` — Alias for [quantile](../../../sql-reference/aggregate-functions/reference/quantile#quantile). +- `medianDeterministic` — Alias for [quantileDeterministic](../../../sql-reference/aggregate-functions/reference/quantiledeterministic#quantiledeterministic). +- `medianExact` — Alias for [quantileExact](../../../sql-reference/aggregate-functions/reference/quantileexact#quantileexact). +- `medianExactWeighted` — Alias for [quantileExactWeighted](../../../sql-reference/aggregate-functions/reference/quantileexactweighted#quantileexactweighted). +- `medianTiming` — Alias for [quantileTiming](../../../sql-reference/aggregate-functions/reference/quantiletiming#quantiletiming). +- `medianTimingWeighted` — Alias for [quantileTimingWeighted](../../../sql-reference/aggregate-functions/reference/quantiletimingweighted#quantiletimingweighted). +- `medianTDigest` — Alias for [quantileTDigest](../../../sql-reference/aggregate-functions/reference/quantiletdigest#quantiletdigest). +- `medianTDigestWeighted` — Alias for [quantileTDigestWeighted](../../../sql-reference/aggregate-functions/reference/quantiletdigestweighted#quantiletdigestweighted). 
**Example** From a9510f25f9e9c6bdc0e143346a3f829d68164c3b Mon Sep 17 00:00:00 2001 From: Kostiantyn Storozhuk Date: Tue, 29 Jun 2021 15:22:08 +0800 Subject: [PATCH 537/931] Style fix --- src/Databases/MySQL/FetchTablesColumnsList.cpp | 2 +- src/Interpreters/MySQL/InterpretersMySQLDDLQuery.cpp | 6 +++--- tests/integration/test_mysql_database_engine/test.py | 4 ++-- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/src/Databases/MySQL/FetchTablesColumnsList.cpp b/src/Databases/MySQL/FetchTablesColumnsList.cpp index d668e79198c..bbd187090df 100644 --- a/src/Databases/MySQL/FetchTablesColumnsList.cpp +++ b/src/Databases/MySQL/FetchTablesColumnsList.cpp @@ -114,7 +114,7 @@ std::map fetchTablesColumnsList( scale_col[i].safeGet()) ); column_description.comment = column_comment_col[i].safeGet(); - + tables_and_columns[table_name].add(column_description); } } diff --git a/src/Interpreters/MySQL/InterpretersMySQLDDLQuery.cpp b/src/Interpreters/MySQL/InterpretersMySQLDDLQuery.cpp index bf4dfee2780..503e5c0e707 100644 --- a/src/Interpreters/MySQL/InterpretersMySQLDDLQuery.cpp +++ b/src/Interpreters/MySQL/InterpretersMySQLDDLQuery.cpp @@ -137,11 +137,11 @@ static ColumnsDescription createColumnsDescription(const NamesAndTypesList & col if (const auto * options = declare_column->column_options->as()) if (options->changes.count("comment")) comment = options->changes.at("comment")->as()->value.safeGet(); - + ColumnDescription column_description(column_name_and_type->name, column_name_and_type->type); - if(!comment.empty()) + if (!comment.empty()) column_description.comment = std::move(comment); - + columns_description.add(column_description); } diff --git a/tests/integration/test_mysql_database_engine/test.py b/tests/integration/test_mysql_database_engine/test.py index e1891aebf05..39198f6d56d 100644 --- a/tests/integration/test_mysql_database_engine/test.py +++ b/tests/integration/test_mysql_database_engine/test.py @@ -183,9 +183,9 @@ def test_column_comments_for_mysql_database_engine(started_cluster): mysql_node.query("ALTER TABLE `test_database`.`test_table` ADD COLUMN `add_column` int(11) COMMENT 'add_column comment'") assert 'add_column comment' in clickhouse_node.query( "SELECT comment FROM system.columns WHERE table = 'test_table' AND database = 'test_database'") - + mysql_node.query("DROP DATABASE test_database") - + def test_data_types_support_level_for_mysql_database_engine(started_cluster): with contextlib.closing(MySQLNodeInstance('root', 'clickhouse', started_cluster.mysql_ip, started_cluster.mysql_port)) as mysql_node: From 941a6e539fcd3d169b7a425731db07da31afb591 Mon Sep 17 00:00:00 2001 From: alesapin Date: Tue, 29 Jun 2021 10:36:02 +0300 Subject: [PATCH 538/931] Fix --- src/Storages/MergeTree/MergeTreeData.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Storages/MergeTree/MergeTreeData.cpp b/src/Storages/MergeTree/MergeTreeData.cpp index 2f4c2efc7ba..104bf8fb9c9 100644 --- a/src/Storages/MergeTree/MergeTreeData.cpp +++ b/src/Storages/MergeTree/MergeTreeData.cpp @@ -2334,7 +2334,7 @@ MergeTreeData::DataPartsVector MergeTreeData::removePartsInRangeFromWorkingSet(c /// It's a DROP PART and it's already executed by fetching some covering part bool is_drop_part = !drop_range.isFakeDropRangePart(); - if (is_drop_part && part->info != drop_range) + if (is_drop_part && part->info.min_block != drop_range.min_block && part->info.max_block != drop_range.max_block) { /// Why we check only min and max blocks here without checking merge /// level? 
It's a tricky situation which can happen on a stale From 41620a5661335f0c5d89a81c2b795180b976bf8e Mon Sep 17 00:00:00 2001 From: Maksim Kita Date: Tue, 29 Jun 2021 11:05:07 +0300 Subject: [PATCH 539/931] Fixed tests --- src/DataTypes/getLeastSupertype.cpp | 2 +- tests/queries/0_stateless/01925_date_date_time_comparison.sql | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/DataTypes/getLeastSupertype.cpp b/src/DataTypes/getLeastSupertype.cpp index 33b40abdd47..a950d18b50a 100644 --- a/src/DataTypes/getLeastSupertype.cpp +++ b/src/DataTypes/getLeastSupertype.cpp @@ -308,7 +308,7 @@ DataTypePtr getLeastSupertype(const DataTypes & types) if (const auto * date_time64_type = typeid_cast(type.get())) { const auto scale = date_time64_type->getScale(); - if (scale > max_scale) + if (scale >= max_scale) { max_scale_date_time_index = i; max_scale = scale; diff --git a/tests/queries/0_stateless/01925_date_date_time_comparison.sql b/tests/queries/0_stateless/01925_date_date_time_comparison.sql index 99c67816a42..13e856384d2 100644 --- a/tests/queries/0_stateless/01925_date_date_time_comparison.sql +++ b/tests/queries/0_stateless/01925_date_date_time_comparison.sql @@ -1,2 +1,2 @@ SELECT toDate('2000-01-01') < toDateTime('2000-01-01 00:00:01', 'Europe/Moscow'); -SELECT toDate('2000-01-01') < toDateTime64('2000-01-01 00:00:01', 5, 'Europe/Moscow'); +SELECT toDate('2000-01-01') < toDateTime64('2000-01-01 00:00:01', 0, 'Europe/Moscow'); From bcbfbae2a94c14e9c2819c8c849063390aa54ec6 Mon Sep 17 00:00:00 2001 From: Maksim Kita Date: Tue, 29 Jun 2021 11:25:27 +0300 Subject: [PATCH 540/931] Update median.md --- .../aggregate-functions/reference/median.md | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/docs/en/sql-reference/aggregate-functions/reference/median.md b/docs/en/sql-reference/aggregate-functions/reference/median.md index 5d681389eb0..8ab2273b32e 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/median.md +++ b/docs/en/sql-reference/aggregate-functions/reference/median.md @@ -4,14 +4,14 @@ The `median*` functions are the aliases for the corresponding `quantile*` functi Functions: -- `median` — Alias for [quantile](../../../sql-reference/aggregate-functions/reference/quantile#quantile). -- `medianDeterministic` — Alias for [quantileDeterministic](../../../sql-reference/aggregate-functions/reference/quantiledeterministic#quantiledeterministic). -- `medianExact` — Alias for [quantileExact](../../../sql-reference/aggregate-functions/reference/quantileexact#quantileexact). -- `medianExactWeighted` — Alias for [quantileExactWeighted](../../../sql-reference/aggregate-functions/reference/quantileexactweighted#quantileexactweighted). -- `medianTiming` — Alias for [quantileTiming](../../../sql-reference/aggregate-functions/reference/quantiletiming#quantiletiming). -- `medianTimingWeighted` — Alias for [quantileTimingWeighted](../../../sql-reference/aggregate-functions/reference/quantiletimingweighted#quantiletimingweighted). -- `medianTDigest` — Alias for [quantileTDigest](../../../sql-reference/aggregate-functions/reference/quantiletdigest#quantiletdigest). -- `medianTDigestWeighted` — Alias for [quantileTDigestWeighted](../../../sql-reference/aggregate-functions/reference/quantiletdigestweighted#quantiletdigestweighted). +- `median` — Alias for [quantile](../../../sql-reference/aggregate-functions/reference/quantile.md#quantile). 
+- `medianDeterministic` — Alias for [quantileDeterministic](../../../sql-reference/aggregate-functions/reference/quantiledeterministic.md#quantiledeterministic). +- `medianExact` — Alias for [quantileExact](../../../sql-reference/aggregate-functions/reference/quantileexact.md#quantileexact). +- `medianExactWeighted` — Alias for [quantileExactWeighted](../../../sql-reference/aggregate-functions/reference/quantileexactweighted.md#quantileexactweighted). +- `medianTiming` — Alias for [quantileTiming](../../../sql-reference/aggregate-functions/reference/quantiletiming.md#quantiletiming). +- `medianTimingWeighted` — Alias for [quantileTimingWeighted](../../../sql-reference/aggregate-functions/reference/quantiletimingweighted.md#quantiletimingweighted). +- `medianTDigest` — Alias for [quantileTDigest](../../../sql-reference/aggregate-functions/reference/quantiletdigest.md#quantiletdigest). +- `medianTDigestWeighted` — Alias for [quantileTDigestWeighted](../../../sql-reference/aggregate-functions/reference/quantiletdigestweighted.md#quantiletdigestweighted). **Example** From 44f1a96ad6eab155e7841e7e0f920d46b55819dd Mon Sep 17 00:00:00 2001 From: alesapin Date: Tue, 29 Jun 2021 12:49:25 +0300 Subject: [PATCH 541/931] Better test_version_update_after_mutation --- .../integration/test_version_update_after_mutation/test.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/tests/integration/test_version_update_after_mutation/test.py b/tests/integration/test_version_update_after_mutation/test.py index a1ae17b8451..4f8a61a5bf0 100644 --- a/tests/integration/test_version_update_after_mutation/test.py +++ b/tests/integration/test_version_update_after_mutation/test.py @@ -39,8 +39,8 @@ def test_mutate_and_upgrade(start_cluster): node2.restart_with_latest_version(signal=9) # After hard restart table can be in readonly mode - exec_query_with_retry(node2, "INSERT INTO mt VALUES ('2020-02-13', 3)") - exec_query_with_retry(node1, "SYSTEM SYNC REPLICA mt") + exec_query_with_retry(node2, "INSERT INTO mt VALUES ('2020-02-13', 3)", retry_count=60) + exec_query_with_retry(node1, "SYSTEM SYNC REPLICA mt", retry_count=60) assert node1.query("SELECT COUNT() FROM mt") == "2\n" assert node2.query("SELECT COUNT() FROM mt") == "2\n" @@ -79,7 +79,8 @@ def test_upgrade_while_mutation(start_cluster): node3.restart_with_latest_version(signal=9) - exec_query_with_retry(node3, "SYSTEM RESTART REPLICA mt1") + # checks for readonly + exec_query_with_retry(node3, "OPTIMIZE TABLE mt1", retry_count=60) node3.query("ALTER TABLE mt1 DELETE WHERE id > 100000", settings={"mutations_sync": "2"}) # will delete nothing, but previous async mutation will finish with this query From 7680eab0e4c122aafe7f90c5a7239c6bc127fefe Mon Sep 17 00:00:00 2001 From: alesapin Date: Tue, 29 Jun 2021 13:03:00 +0300 Subject: [PATCH 542/931] one more fix --- src/Storages/MergeTree/MergeTreeData.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Storages/MergeTree/MergeTreeData.cpp b/src/Storages/MergeTree/MergeTreeData.cpp index 104bf8fb9c9..f897d833096 100644 --- a/src/Storages/MergeTree/MergeTreeData.cpp +++ b/src/Storages/MergeTree/MergeTreeData.cpp @@ -2334,7 +2334,7 @@ MergeTreeData::DataPartsVector MergeTreeData::removePartsInRangeFromWorkingSet(c /// It's a DROP PART and it's already executed by fetching some covering part bool is_drop_part = !drop_range.isFakeDropRangePart(); - if (is_drop_part && part->info.min_block != drop_range.min_block && part->info.max_block != drop_range.max_block) + if 
(is_drop_part && (part->info.min_block != drop_range.min_block || part->info.max_block != drop_range.max_block)) { /// Why we check only min and max blocks here without checking merge /// level? It's a tricky situation which can happen on a stale From a9fad56b7a794e52f8e2b530cd9db5338ac17fe4 Mon Sep 17 00:00:00 2001 From: Kostiantyn Storozhuk Date: Tue, 29 Jun 2021 18:07:18 +0800 Subject: [PATCH 543/931] Flaky test fixed --- tests/integration/test_mysql_database_engine/test.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tests/integration/test_mysql_database_engine/test.py b/tests/integration/test_mysql_database_engine/test.py index 39198f6d56d..b3c057e5a93 100644 --- a/tests/integration/test_mysql_database_engine/test.py +++ b/tests/integration/test_mysql_database_engine/test.py @@ -180,6 +180,8 @@ def test_column_comments_for_mysql_database_engine(started_cluster): "CREATE TABLE `test_database`.`test_table` ( `id` int(11) NOT NULL, PRIMARY KEY (`id`), `test` int COMMENT 'test comment') ENGINE=InnoDB;") assert 'test comment' in clickhouse_node.query('DESCRIBE TABLE `test_database`.`test_table`') + time.sleep( + 3) # Because the unit of MySQL modification time is seconds, modifications made in the same second cannot be obtained mysql_node.query("ALTER TABLE `test_database`.`test_table` ADD COLUMN `add_column` int(11) COMMENT 'add_column comment'") assert 'add_column comment' in clickhouse_node.query( "SELECT comment FROM system.columns WHERE table = 'test_table' AND database = 'test_database'") From 27d3251aa277636f0750730e4844d4229c76d901 Mon Sep 17 00:00:00 2001 From: Nikita Mikhaylov Date: Tue, 29 Jun 2021 14:51:11 +0400 Subject: [PATCH 544/931] Update s3Cluster.md --- docs/ru/sql-reference/table-functions/s3Cluster.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/ru/sql-reference/table-functions/s3Cluster.md b/docs/ru/sql-reference/table-functions/s3Cluster.md index 32916a2b122..826f1a5b25b 100644 --- a/docs/ru/sql-reference/table-functions/s3Cluster.md +++ b/docs/ru/sql-reference/table-functions/s3Cluster.md @@ -16,7 +16,7 @@ s3Cluster(cluster_name, source, [access_key_id, secret_access_key,] format, stru **Аргументы** - `cluster_name` — имя кластера, используемое для создания набора адресов и параметров подключения к удаленным и локальным серверам. -- `source` — URL-адрес бакета с указанием пути к файлу. Поддерживает следующие подстановочные символы в режиме "только чтение": `*`, `?`, `{'abc','def'}` и `{N..M}`, где `N`, `M` — числа, `abc`, `def` — строки. Подробнее смотрите в разделе [Символы подстановки](../../engines/table-engines/integrations/s3.md#wildcards-in-path). +- `source` — URL файла или нескольких файлов. Поддерживает следующие символы подстановки: `*`, `?`, `{'abc','def'}` и `{N..M}`, где `N`, `M` — числа, `abc`, `def` — строки. Подробнее смотрите в разделе [Символы подстановки](../../engines/table-engines/integrations/s3.md#wildcards-in-path). - `access_key_id` и `secret_access_key` — ключи, указывающие на учетные данные для использования с точкой приема запроса. Необязательные параметры. - `format` — [формат](../../interfaces/formats.md#formats) файла. - `structure` — структура таблицы. Формат `'column1_name column1_type, column2_name column2_type, ...'`. 
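For reference, a minimal call of the shape these arguments describe — the cluster name, bucket URL, and schema below are placeholders, not values taken from the docs:

``` sql
SELECT count(*)
FROM s3Cluster(
    'my_cluster',                                                -- cluster name (placeholder)
    'https://my-bucket.s3.amazonaws.com/data/file-{1..10}.csv',  -- {N..M} wildcard as documented above
    'CSV',
    'id UInt32, name String');
```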
From 504db73840de89f806659b41a744210ec6745353 Mon Sep 17 00:00:00 2001 From: Nikita Mikhaylov Date: Tue, 29 Jun 2021 14:51:36 +0400 Subject: [PATCH 545/931] Update docs/en/sql-reference/table-functions/s3Cluster.md --- docs/en/sql-reference/table-functions/s3Cluster.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/en/sql-reference/table-functions/s3Cluster.md b/docs/en/sql-reference/table-functions/s3Cluster.md index 794d009f644..65565aa92cb 100644 --- a/docs/en/sql-reference/table-functions/s3Cluster.md +++ b/docs/en/sql-reference/table-functions/s3Cluster.md @@ -16,7 +16,7 @@ s3Cluster(cluster_name, source, [access_key_id, secret_access_key,] format, stru **Arguments** - `cluster_name` — Name of a cluster that is used to build a set of addresses and connection parameters to remote and local servers. -- `source` — Bucket url with path to file. Supports following wildcards in readonly mode: `*`, `?`, `{'abc','def'}` and `{N..M}` where `N`, `M` — numbers, `abc`, `def` — strings. For more information see [Wildcards In Path](../../engines/table-engines/integrations/s3.md#wildcards-in-path). +- `source` — URL to a file or a bunch of files. Supports following wildcards in readonly mode: `*`, `?`, `{'abc','def'}` and `{N..M}` where `N`, `M` — numbers, `abc`, `def` — strings. For more information see [Wildcards In Path](../../engines/table-engines/integrations/s3.md#wildcards-in-path). - `access_key_id` and `secret_access_key` — Keys that specify credentials to use with given endpoint. Optional. - `format` — The [format](../../interfaces/formats.md#formats) of the file. - `structure` — Structure of the table. Format `'column1_name column1_type, column2_name column2_type, ...'`. From bf827936b78a9db2594c6f99975a20dd29c92db7 Mon Sep 17 00:00:00 2001 From: Nikolai Kochetov Date: Tue, 29 Jun 2021 14:53:34 +0300 Subject: [PATCH 546/931] Rename PrewhereActions --- src/Storages/MergeTree/MergeTreeBaseSelectProcessor.cpp | 2 +- src/Storages/MergeTree/MergeTreeBaseSelectProcessor.h | 4 ++-- src/Storages/MergeTree/MergeTreeRangeReader.cpp | 2 +- src/Storages/MergeTree/MergeTreeRangeReader.h | 7 ++++--- 4 files changed, 8 insertions(+), 7 deletions(-) diff --git a/src/Storages/MergeTree/MergeTreeBaseSelectProcessor.cpp b/src/Storages/MergeTree/MergeTreeBaseSelectProcessor.cpp index 68f754b08fb..9334baef964 100644 --- a/src/Storages/MergeTree/MergeTreeBaseSelectProcessor.cpp +++ b/src/Storages/MergeTree/MergeTreeBaseSelectProcessor.cpp @@ -53,7 +53,7 @@ MergeTreeBaseSelectProcessor::MergeTreeBaseSelectProcessor( if (prewhere_info) { - prewhere_actions = std::make_unique(); + prewhere_actions = std::make_unique(); if (prewhere_info->alias_actions) prewhere_actions->alias_actions = std::make_shared(prewhere_info->alias_actions, actions_settings); diff --git a/src/Storages/MergeTree/MergeTreeBaseSelectProcessor.h b/src/Storages/MergeTree/MergeTreeBaseSelectProcessor.h index 2ae39dbb058..8da9b002e16 100644 --- a/src/Storages/MergeTree/MergeTreeBaseSelectProcessor.h +++ b/src/Storages/MergeTree/MergeTreeBaseSelectProcessor.h @@ -13,7 +13,7 @@ namespace DB class IMergeTreeReader; class UncompressedCache; class MarkCache; -struct PrewhereActions; +struct PrewhereExprInfo; /// Base class for MergeTreeThreadSelectProcessor and MergeTreeSelectProcessor class MergeTreeBaseSelectProcessor : public SourceWithProgress @@ -60,7 +60,7 @@ protected: StorageMetadataPtr metadata_snapshot; PrewhereInfoPtr prewhere_info; - std::unique_ptr prewhere_actions; + std::unique_ptr prewhere_actions; UInt64 
max_block_size_rows; UInt64 preferred_block_size_bytes; diff --git a/src/Storages/MergeTree/MergeTreeRangeReader.cpp b/src/Storages/MergeTree/MergeTreeRangeReader.cpp index 8072aa6a3dc..2347280a4a0 100644 --- a/src/Storages/MergeTree/MergeTreeRangeReader.cpp +++ b/src/Storages/MergeTree/MergeTreeRangeReader.cpp @@ -520,7 +520,7 @@ size_t MergeTreeRangeReader::ReadResult::countBytesInResultFilter(const IColumn: MergeTreeRangeReader::MergeTreeRangeReader( IMergeTreeReader * merge_tree_reader_, MergeTreeRangeReader * prev_reader_, - const PrewhereActions * prewhere_info_, + const PrewhereExprInfo * prewhere_info_, bool last_reader_in_chain_) : merge_tree_reader(merge_tree_reader_) , index_granularity(&(merge_tree_reader->data_part->index_granularity)) diff --git a/src/Storages/MergeTree/MergeTreeRangeReader.h b/src/Storages/MergeTree/MergeTreeRangeReader.h index 7c36ca49c99..8cdf485ff1e 100644 --- a/src/Storages/MergeTree/MergeTreeRangeReader.h +++ b/src/Storages/MergeTree/MergeTreeRangeReader.h @@ -18,7 +18,8 @@ using PrewhereInfoPtr = std::shared_ptr; class ExpressionActions; using ExpressionActionsPtr = std::shared_ptr; -struct PrewhereActions +/// The same as PrewhereInfo, but with ExpressionActions instead of ActionsDAG +struct PrewhereExprInfo { /// Actions which are executed in order to alias columns are used for prewhere actions. ExpressionActionsPtr alias_actions; @@ -42,7 +43,7 @@ public: MergeTreeRangeReader( IMergeTreeReader * merge_tree_reader_, MergeTreeRangeReader * prev_reader_, - const PrewhereActions * prewhere_info_, + const PrewhereExprInfo * prewhere_info_, bool last_reader_in_chain_); MergeTreeRangeReader() = default; @@ -235,7 +236,7 @@ private: IMergeTreeReader * merge_tree_reader = nullptr; const MergeTreeIndexGranularity * index_granularity = nullptr; MergeTreeRangeReader * prev_reader = nullptr; /// If not nullptr, read from prev_reader firstly. - const PrewhereActions * prewhere_info; + const PrewhereExprInfo * prewhere_info; Stream stream; From 5a746b61f30607bb74b9dbee153154cb63c1b6c1 Mon Sep 17 00:00:00 2001 From: alexey-milovidov Date: Tue, 29 Jun 2021 15:14:12 +0300 Subject: [PATCH 547/931] Update website/templates/index/quickstart.html Co-authored-by: Ivan Blinkov --- website/templates/index/quickstart.html | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/templates/index/quickstart.html b/website/templates/index/quickstart.html index b74e52905ff..1c99de63354 100644 --- a/website/templates/index/quickstart.html +++ b/website/templates/index/quickstart.html @@ -2,7 +2,7 @@

Quick start

Date: Thu, 1 Jul 2021 16:59:16 +0100 Subject: [PATCH 621/931] CLICKHOUSE-1194: add skipping index to the beginning of the list add the FIRST keyword to the ADD INDEX command to be able to add index in the beginning of the list. Signed-off-by: Aleksei Semiglazov --- .../statements/alter/index/index.md | 2 +- docs/ja/sql-reference/statements/alter.md | 2 +- .../statements/alter/index/index.md | 2 +- docs/zh/sql-reference/statements/alter.md | 2 +- src/Parsers/ASTAlterQuery.cpp | 5 ++-- src/Parsers/ParserAlterQuery.cpp | 4 ++- src/Storages/AlterCommands.cpp | 5 ++++ src/Storages/AlterCommands.h | 2 +- .../01932_alter_index_with_order.reference | 9 ++++++ .../01932_alter_index_with_order.sql | 28 +++++++++++++++++++ 10 files changed, 53 insertions(+), 8 deletions(-) create mode 100644 tests/queries/0_stateless/01932_alter_index_with_order.reference create mode 100644 tests/queries/0_stateless/01932_alter_index_with_order.sql diff --git a/docs/en/sql-reference/statements/alter/index/index.md b/docs/en/sql-reference/statements/alter/index/index.md index 56d81aaf52f..fd5657c3666 100644 --- a/docs/en/sql-reference/statements/alter/index/index.md +++ b/docs/en/sql-reference/statements/alter/index/index.md @@ -8,7 +8,7 @@ toc_title: INDEX The following operations are available: -- `ALTER TABLE [db].name ADD INDEX name expression TYPE type GRANULARITY value AFTER name [AFTER name2]` - Adds index description to tables metadata. +- `ALTER TABLE [db].name ADD INDEX name expression TYPE type GRANULARITY value [FIRST|AFTER name]` - Adds index description to tables metadata. - `ALTER TABLE [db].name DROP INDEX name` - Removes index description from tables metadata and deletes index files from disk. diff --git a/docs/ja/sql-reference/statements/alter.md b/docs/ja/sql-reference/statements/alter.md index 226565dd226..0967f60e06a 100644 --- a/docs/ja/sql-reference/statements/alter.md +++ b/docs/ja/sql-reference/statements/alter.md @@ -175,7 +175,7 @@ MODIFY ORDER BY new_expression [複製](../../engines/table-engines/mergetree-family/replication.md) テーブル)。 次の操作 利用できます: -- `ALTER TABLE [db].name ADD INDEX name expression TYPE type GRANULARITY value AFTER name [AFTER name2]` -付加価指数の説明をテーブルメタデータを指すものとします。 +- `ALTER TABLE [db].name ADD INDEX name expression TYPE type GRANULARITY value [FIRST|AFTER name]` -付加価指数の説明をテーブルメタデータを指すものとします。 - `ALTER TABLE [db].name DROP INDEX name` -除去す指標の説明からテーブルメタデータを削除を行指数のファイルからディスク。 diff --git a/docs/ru/sql-reference/statements/alter/index/index.md b/docs/ru/sql-reference/statements/alter/index/index.md index 632f11ed906..1f6bbea5c4b 100644 --- a/docs/ru/sql-reference/statements/alter/index/index.md +++ b/docs/ru/sql-reference/statements/alter/index/index.md @@ -9,7 +9,7 @@ toc_title: "Манипуляции с индексами" Добавить или удалить индекс можно с помощью операций ``` sql -ALTER TABLE [db.]name ADD INDEX name expression TYPE type GRANULARITY value [AFTER name] +ALTER TABLE [db.]name ADD INDEX name expression TYPE type GRANULARITY value [FIRST|AFTER name] ALTER TABLE [db.]name DROP INDEX name ALTER TABLE [db.]table MATERIALIZE INDEX name IN PARTITION partition_name ``` diff --git a/docs/zh/sql-reference/statements/alter.md b/docs/zh/sql-reference/statements/alter.md index 446feac96ce..4d1cdca71e5 100644 --- a/docs/zh/sql-reference/statements/alter.md +++ b/docs/zh/sql-reference/statements/alter.md @@ -174,7 +174,7 @@ MODIFY ORDER BY new_expression 该操作仅支持 [`MergeTree`](../../engines/table-engines/mergetree-family/mergetree.md) 系列表 (含 
[replicated](../../engines/table-engines/mergetree-family/replication.md) 表)。 下列操作是允许的: -- `ALTER TABLE [db].name ADD INDEX name expression TYPE type GRANULARITY value AFTER name [AFTER name2]` - 在表的元数据中增加索引说明 +- `ALTER TABLE [db].name ADD INDEX name expression TYPE type GRANULARITY value [FIRST|AFTER name]` - 在表的元数据中增加索引说明 - `ALTER TABLE [db].name DROP INDEX name` - 从表的元数据中删除索引描述,并从磁盘上删除索引文件 diff --git a/src/Parsers/ASTAlterQuery.cpp b/src/Parsers/ASTAlterQuery.cpp index 918abc39037..7e60d1175e2 100644 --- a/src/Parsers/ASTAlterQuery.cpp +++ b/src/Parsers/ASTAlterQuery.cpp @@ -137,8 +137,9 @@ void ASTAlterCommand::formatImpl( settings.ostr << (settings.hilite ? hilite_keyword : "") << indent_str << "ADD INDEX " << (if_not_exists ? "IF NOT EXISTS " : "") << (settings.hilite ? hilite_none : ""); index_decl->formatImpl(settings, state, frame); - /// AFTER - if (index) + if (first) + settings.ostr << (settings.hilite ? hilite_keyword : "") << indent_str << " FIRST " << (settings.hilite ? hilite_none : ""); + else if (index) /// AFTER { settings.ostr << (settings.hilite ? hilite_keyword : "") << indent_str << " AFTER " << (settings.hilite ? hilite_none : ""); index->formatImpl(settings, state, frame); diff --git a/src/Parsers/ParserAlterQuery.cpp b/src/Parsers/ParserAlterQuery.cpp index d659db64b83..2908b171ca6 100644 --- a/src/Parsers/ParserAlterQuery.cpp +++ b/src/Parsers/ParserAlterQuery.cpp @@ -231,7 +231,9 @@ bool ParserAlterCommand::parseImpl(Pos & pos, ASTPtr & node, Expected & expected if (!parser_idx_decl.parse(pos, command->index_decl, expected)) return false; - if (s_after.ignore(pos, expected)) + if (s_first.ignore(pos, expected)) + command->first = true; + else if (s_after.ignore(pos, expected)) { if (!parser_name.parse(pos, command->index, expected)) return false; diff --git a/src/Storages/AlterCommands.cpp b/src/Storages/AlterCommands.cpp index 1cb936cbb84..3ac457e52d6 100644 --- a/src/Storages/AlterCommands.cpp +++ b/src/Storages/AlterCommands.cpp @@ -211,6 +211,7 @@ std::optional AlterCommand::parse(const ASTAlterCommand * command_ command.after_index_name = command_ast->index->as().name(); command.if_not_exists = command_ast->if_not_exists; + command.first = command_ast->first; return command; } @@ -454,6 +455,10 @@ void AlterCommand::apply(StorageInMemoryMetadata & metadata, ContextPtr context) auto insert_it = metadata.secondary_indices.end(); + /// insert the index in the beginning of the indices list + if (first) + insert_it = metadata.secondary_indices.begin(); + if (!after_index_name.empty()) { insert_it = std::find_if( diff --git a/src/Storages/AlterCommands.h b/src/Storages/AlterCommands.h index 4e9c9764753..d523bb2783e 100644 --- a/src/Storages/AlterCommands.h +++ b/src/Storages/AlterCommands.h @@ -77,7 +77,7 @@ struct AlterCommand /// For ADD or MODIFY - after which column to add a new one. If an empty string, add to the end. String after_column; - /// For ADD_COLUMN, MODIFY_COLUMN - Add to the begin if it is true. + /// For ADD_COLUMN, MODIFY_COLUMN, ADD_INDEX - Add to the begin if it is true. 
bool first = false; /// For DROP_COLUMN, MODIFY_COLUMN, COMMENT_COLUMN diff --git a/tests/queries/0_stateless/01932_alter_index_with_order.reference b/tests/queries/0_stateless/01932_alter_index_with_order.reference new file mode 100644 index 00000000000..07e1aab3df9 --- /dev/null +++ b/tests/queries/0_stateless/01932_alter_index_with_order.reference @@ -0,0 +1,9 @@ +default alter_index_test index_a set a 1 +default alter_index_test index_b minmax b 1 +default alter_index_test index_c set c 2 +default alter_index_test index_a set a 1 +default alter_index_test index_d set d 1 +default alter_index_test index_b minmax b 1 +default alter_index_test index_c set c 2 +default alter_index_test index_a set a 1 +default alter_index_test index_d set d 1 diff --git a/tests/queries/0_stateless/01932_alter_index_with_order.sql b/tests/queries/0_stateless/01932_alter_index_with_order.sql new file mode 100644 index 00000000000..0f2953b53f9 --- /dev/null +++ b/tests/queries/0_stateless/01932_alter_index_with_order.sql @@ -0,0 +1,28 @@ +DROP TABLE IF EXISTS alter_index_test; + +CREATE TABLE alter_index_test ( + a UInt32, + b Date, + c UInt32, + d UInt32, + INDEX index_a a TYPE set(0) GRANULARITY 1 +) +ENGINE = MergeTree() +ORDER BY tuple(); + +SELECT * FROM system.data_skipping_indices WHERE table = 'alter_index_test' AND database = currentDatabase(); + +ALTER TABLE alter_index_test ADD INDEX index_b b type minmax granularity 1 FIRST; + +ALTER TABLE alter_index_test ADD INDEX index_c c type set(0) granularity 2 AFTER index_b; + +ALTER TABLE alter_index_test ADD INDEX index_d d type set(0) granularity 1; + +SELECT * FROM system.data_skipping_indices WHERE table = 'alter_index_test' AND database = currentDatabase(); + +DETACH TABLE alter_index_test; +ATTACH TABLE alter_index_test; + +SELECT * FROM system.data_skipping_indices WHERE table = 'alter_index_test' AND database = currentDatabase(); + +DROP TABLE IF EXISTS alter_index_test; From 9a5365fc41fd703daa3ff36df318943953b81e13 Mon Sep 17 00:00:00 2001 From: Aleksei Semiglazov Date: Wed, 2 Dec 2020 21:18:25 +0000 Subject: [PATCH 622/931] CLICKHOUSE-784: reset merge tree setting Add an ability to reset custom setting to default and remove it from table's metadata. This will allow to rollback the change without knowing the system/config's default. 
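A minimal usage sketch of the new command (the table name `t` is illustrative; the tests added below exercise the real cases):

``` sql
ALTER TABLE t MODIFY SETTING parts_to_throw_insert = 1;
ALTER TABLE t RESET SETTING parts_to_throw_insert; -- back to the default, removed from table metadata
```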
Signed-off-by: Aleksei Semiglazov --- src/Access/AccessType.h | 2 +- src/Interpreters/InterpreterAlterQuery.cpp | 1 + src/Parsers/ASTAlterQuery.cpp | 10 ++++ src/Parsers/ASTAlterQuery.h | 4 ++ src/Parsers/ParserAlterQuery.cpp | 10 ++++ src/Parsers/ParserAlterQuery.h | 1 + src/Storages/AlterCommands.cpp | 35 +++++++++++++- src/Storages/AlterCommands.h | 6 ++- src/Storages/MergeTree/MergeTreeData.cpp | 27 +++++++++++ .../00980_merge_alter_settings.reference | 6 +++ .../00980_merge_alter_settings.sql | 47 ++++++++++++++++++ ...keeper_merge_tree_alter_settings.reference | 9 ++++ ...80_zookeeper_merge_tree_alter_settings.sql | 48 +++++++++++++++++++ .../01271_show_privileges.reference | 2 +- ...4_explain_ast_of_nonselect_query.reference | 2 +- 15 files changed, 204 insertions(+), 6 deletions(-) diff --git a/src/Access/AccessType.h b/src/Access/AccessType.h index 0e295985303..47153b5ab63 100644 --- a/src/Access/AccessType.h +++ b/src/Access/AccessType.h @@ -66,7 +66,7 @@ enum class AccessType M(ALTER_TTL, "ALTER MODIFY TTL, MODIFY TTL", TABLE, ALTER_TABLE) /* allows to execute ALTER MODIFY TTL */\ M(ALTER_MATERIALIZE_TTL, "MATERIALIZE TTL", TABLE, ALTER_TABLE) /* allows to execute ALTER MATERIALIZE TTL; enabled implicitly by the grant ALTER_TABLE */\ - M(ALTER_SETTINGS, "ALTER SETTING, ALTER MODIFY SETTING, MODIFY SETTING", TABLE, ALTER_TABLE) /* allows to execute ALTER MODIFY SETTING */\ + M(ALTER_SETTINGS, "ALTER SETTING, ALTER MODIFY SETTING, MODIFY SETTING, RESET SETTING", TABLE, ALTER_TABLE) /* allows to execute ALTER MODIFY SETTING */\ M(ALTER_MOVE_PARTITION, "ALTER MOVE PART, MOVE PARTITION, MOVE PART", TABLE, ALTER_TABLE) \ M(ALTER_FETCH_PARTITION, "ALTER FETCH PART, FETCH PARTITION", TABLE, ALTER_TABLE) \ M(ALTER_FREEZE_PARTITION, "FREEZE PARTITION, UNFREEZE", TABLE, ALTER_TABLE) \ diff --git a/src/Interpreters/InterpreterAlterQuery.cpp b/src/Interpreters/InterpreterAlterQuery.cpp index f9bf5a70ef9..6f0af049d05 100644 --- a/src/Interpreters/InterpreterAlterQuery.cpp +++ b/src/Interpreters/InterpreterAlterQuery.cpp @@ -269,6 +269,7 @@ AccessRightsElements InterpreterAlterQuery::getRequiredAccessForCommand(const AS required_access.emplace_back(AccessType::ALTER_MATERIALIZE_TTL, database, table); break; } + case ASTAlterCommand::RESET_SETTING: [[fallthrough]]; case ASTAlterCommand::MODIFY_SETTING: { required_access.emplace_back(AccessType::ALTER_SETTINGS, database, table); diff --git a/src/Parsers/ASTAlterQuery.cpp b/src/Parsers/ASTAlterQuery.cpp index 918abc39037..459fa0f81b3 100644 --- a/src/Parsers/ASTAlterQuery.cpp +++ b/src/Parsers/ASTAlterQuery.cpp @@ -52,6 +52,11 @@ ASTPtr ASTAlterCommand::clone() const res->settings_changes = settings_changes->clone(); res->children.push_back(res->settings_changes); } + if (settings_resets) + { + res->settings_resets = settings_resets->clone(); + res->children.push_back(res->settings_resets); + } if (values) { res->values = values->clone(); @@ -378,6 +383,11 @@ void ASTAlterCommand::formatImpl( settings.ostr << (settings.hilite ? hilite_keyword : "") << indent_str << "MODIFY SETTING " << (settings.hilite ? hilite_none : ""); settings_changes->formatImpl(settings, state, frame); } + else if (type == ASTAlterCommand::RESET_SETTING) + { + settings.ostr << (settings.hilite ? hilite_keyword : "") << indent_str << "RESET SETTING " << (settings.hilite ? hilite_none : ""); + settings_resets->formatImpl(settings, state, frame); + } else if (type == ASTAlterCommand::MODIFY_QUERY) { settings.ostr << (settings.hilite ? 
hilite_keyword : "") << indent_str << "MODIFY QUERY " << settings.nl_or_ws << (settings.hilite ? hilite_none : ""); diff --git a/src/Parsers/ASTAlterQuery.h b/src/Parsers/ASTAlterQuery.h index f8677c10a7b..5fc146a3072 100644 --- a/src/Parsers/ASTAlterQuery.h +++ b/src/Parsers/ASTAlterQuery.h @@ -36,6 +36,7 @@ public: MODIFY_TTL, MATERIALIZE_TTL, MODIFY_SETTING, + RESET_SETTING, MODIFY_QUERY, REMOVE_TTL, @@ -141,6 +142,9 @@ public: /// FOR MODIFY_SETTING ASTPtr settings_changes; + /// FOR RESET_SETTING + ASTPtr settings_resets; + /// For MODIFY_QUERY ASTPtr select; diff --git a/src/Parsers/ParserAlterQuery.cpp b/src/Parsers/ParserAlterQuery.cpp index d659db64b83..fdfaef3df6a 100644 --- a/src/Parsers/ParserAlterQuery.cpp +++ b/src/Parsers/ParserAlterQuery.cpp @@ -33,6 +33,7 @@ bool ParserAlterCommand::parseImpl(Pos & pos, ASTPtr & node, Expected & expected ParserKeyword s_modify_ttl("MODIFY TTL"); ParserKeyword s_materialize_ttl("MATERIALIZE TTL"); ParserKeyword s_modify_setting("MODIFY SETTING"); + ParserKeyword s_reset_setting("RESET SETTING"); ParserKeyword s_modify_query("MODIFY QUERY"); ParserKeyword s_add_index("ADD INDEX"); @@ -115,6 +116,9 @@ bool ParserAlterCommand::parseImpl(Pos & pos, ASTPtr & node, Expected & expected std::make_unique(), std::make_unique(TokenType::Comma), /* allow_empty = */ false); ParserSetQuery parser_settings(true); + ParserList parser_reset_setting( + std::make_unique(), std::make_unique(TokenType::Comma), + /* allow_empty = */ false); ParserNameList values_p; ParserSelectWithUnionQuery select_p; ParserTTLExpressionList parser_ttl_list; @@ -703,6 +707,12 @@ bool ParserAlterCommand::parseImpl(Pos & pos, ASTPtr & node, Expected & expected return false; command->type = ASTAlterCommand::MODIFY_SETTING; } + else if (s_reset_setting.ignore(pos, expected)) + { + if (!parser_reset_setting.parse(pos, command->settings_resets, expected)) + return false; + command->type = ASTAlterCommand::RESET_SETTING; + } else if (s_modify_query.ignore(pos, expected)) { if (!select_p.parse(pos, command->select, expected)) diff --git a/src/Parsers/ParserAlterQuery.h b/src/Parsers/ParserAlterQuery.h index b22b1c6ded2..2e54c4ddbaf 100644 --- a/src/Parsers/ParserAlterQuery.h +++ b/src/Parsers/ParserAlterQuery.h @@ -15,6 +15,7 @@ namespace DB * [RENAME COLUMN [IF EXISTS] col_name TO col_name] * [MODIFY PRIMARY KEY (a, b, c...)] * [MODIFY SETTING setting_name=setting_value, ...] + * [RESET SETTING setting_name, ...] * [COMMENT COLUMN [IF EXISTS] col_name string] * [DROP|DETACH|ATTACH PARTITION|PART partition, ...] * [FETCH PARTITION partition FROM ...] 
diff --git a/src/Storages/AlterCommands.cpp b/src/Storages/AlterCommands.cpp index 1cb936cbb84..d4ee299a539 100644 --- a/src/Storages/AlterCommands.cpp +++ b/src/Storages/AlterCommands.cpp @@ -311,6 +311,21 @@ std::optional AlterCommand::parse(const ASTAlterCommand * command_ command.settings_changes = command_ast->settings_changes->as().changes; return command; } + else if (command_ast->type == ASTAlterCommand::RESET_SETTING) + { + AlterCommand command; + command.ast = command_ast->clone(); + command.type = AlterCommand::RESET_SETTING; + for (const ASTPtr & identifier_ast : command_ast->settings_resets->children) + { + const auto & identifier = identifier_ast->as(); + auto insertion = command.settings_resets.emplace(identifier.name()); + if (!insertion.second) + throw Exception("Duplicate setting name " + backQuote(identifier.name()), + ErrorCodes::BAD_ARGUMENTS); + } + return command; + } else if (command_ast->type == ASTAlterCommand::MODIFY_QUERY) { AlterCommand command; @@ -570,6 +585,20 @@ void AlterCommand::apply(StorageInMemoryMetadata & metadata, ContextPtr context) settings_from_storage.push_back(change); } } + else if (type == RESET_SETTING) + { + auto & settings_from_storage = metadata.settings_changes->as().changes; + for (const auto & setting_name : settings_resets) + { + auto finder = [&setting_name](const SettingChange & c) { return c.name == setting_name; }; + auto it = std::find_if(settings_from_storage.begin(), settings_from_storage.end(), finder); + + if (it != settings_from_storage.end()) + settings_from_storage.erase(it); + + /// Intentionally ignore if there is no such setting name + } + } else if (type == RENAME_COLUMN) { metadata.columns.rename(column_name, rename_to); @@ -678,7 +707,7 @@ bool isMetadataOnlyConversion(const IDataType * from, const IDataType * to) bool AlterCommand::isSettingsAlter() const { - return type == MODIFY_SETTING; + return type == MODIFY_SETTING || type == RESET_SETTING; } bool AlterCommand::isRequireMutationStage(const StorageInMemoryMetadata & metadata) const @@ -838,6 +867,8 @@ String alterTypeToString(const AlterCommand::Type type) return "MODIFY TTL"; case AlterCommand::Type::MODIFY_SETTING: return "MODIFY SETTING"; + case AlterCommand::Type::RESET_SETTING: + return "RESET SETTING"; case AlterCommand::Type::MODIFY_QUERY: return "MODIFY QUERY"; case AlterCommand::Type::RENAME_COLUMN: @@ -1123,7 +1154,7 @@ void AlterCommands::validate(const StorageInMemoryMetadata & metadata, ContextPt ErrorCodes::NOT_FOUND_COLUMN_IN_BLOCK}; } } - else if (command.type == AlterCommand::MODIFY_SETTING) + else if (command.type == AlterCommand::MODIFY_SETTING || command.type == AlterCommand::RESET_SETTING) { if (metadata.settings_changes == nullptr) throw Exception{"Cannot alter settings, because table engine doesn't support settings changes", ErrorCodes::BAD_ARGUMENTS}; diff --git a/src/Storages/AlterCommands.h b/src/Storages/AlterCommands.h index 4e9c9764753..3c42f0fc890 100644 --- a/src/Storages/AlterCommands.h +++ b/src/Storages/AlterCommands.h @@ -38,6 +38,7 @@ struct AlterCommand DROP_PROJECTION, MODIFY_TTL, MODIFY_SETTING, + RESET_SETTING, MODIFY_QUERY, RENAME_COLUMN, REMOVE_TTL, @@ -80,7 +81,7 @@ struct AlterCommand /// For ADD_COLUMN, MODIFY_COLUMN - Add to the begin if it is true. 
bool first = false; - /// For DROP_COLUMN, MODIFY_COLUMN, COMMENT_COLUMN + /// For DROP_COLUMN, MODIFY_COLUMN, COMMENT_COLUMN, RESET_SETTING bool if_exists = false; /// For ADD_COLUMN @@ -127,6 +128,9 @@ struct AlterCommand /// For MODIFY SETTING SettingsChanges settings_changes; + /// For RESET SETTING + std::set settings_resets; + /// For MODIFY_QUERY ASTPtr select = nullptr; diff --git a/src/Storages/MergeTree/MergeTreeData.cpp b/src/Storages/MergeTree/MergeTreeData.cpp index dc09a783a29..c2ba01deed6 100644 --- a/src/Storages/MergeTree/MergeTreeData.cpp +++ b/src/Storages/MergeTree/MergeTreeData.cpp @@ -1802,6 +1802,31 @@ void MergeTreeData::checkAlterIsPossible(const AlterCommands & commands, Context if (setting_name == "storage_policy") checkStoragePolicy(getContext()->getStoragePolicy(new_value.safeGet())); } + + /// Check if it is safe to reset the settings + for (const auto & current_setting : current_changes) + { + const auto & setting_name = current_setting.name; + const Field * new_value = new_changes.tryGet(setting_name); + /// Prevent unsetting readonly setting + if (MergeTreeSettings::isReadonlySetting(setting_name) && !new_value) + { + throw Exception{"Setting '" + setting_name + "' is readonly for storage '" + getName() + "'", + ErrorCodes::READONLY_SETTING}; + } + + if (MergeTreeSettings::isPartFormatSetting(setting_name) && !new_value) + { + /// Use default settings + new and check if doesn't affect part format settings + MergeTreeSettings copy = *getSettings(); + copy.resetToDefault(); + copy.applyChanges(new_changes); + String reason; + if (!canUsePolymorphicParts(copy, &reason) && !reason.empty()) + throw Exception("Can't change settings. Reason: " + reason, ErrorCodes::NOT_IMPLEMENTED); + } + + } } for (const auto & part : getDataPartsVector()) @@ -1960,6 +1985,8 @@ void MergeTreeData::changeSettings( } MergeTreeSettings copy = *getSettings(); + /// reset to default settings before applying existing + copy.resetToDefault(); copy.applyChanges(new_changes); copy.sanityCheck(getContext()->getSettingsRef()); diff --git a/tests/queries/0_stateless/00980_merge_alter_settings.reference b/tests/queries/0_stateless/00980_merge_alter_settings.reference index 20146ed9d1e..7a958c40651 100644 --- a/tests/queries/0_stateless/00980_merge_alter_settings.reference +++ b/tests/queries/0_stateless/00980_merge_alter_settings.reference @@ -4,3 +4,9 @@ CREATE TABLE default.table_for_alter\n(\n `id` UInt64,\n `Data` String\n)\ 2 CREATE TABLE default.table_for_alter\n(\n `id` UInt64,\n `Data` String\n)\nENGINE = MergeTree\nORDER BY id\nSETTINGS index_granularity = 4096, parts_to_throw_insert = 100, parts_to_delay_insert = 100, check_delay_period = 30 CREATE TABLE default.table_for_alter\n(\n `id` UInt64,\n `Data` String,\n `Data2` UInt64\n)\nENGINE = MergeTree\nORDER BY id\nSETTINGS index_granularity = 4096, parts_to_throw_insert = 100, parts_to_delay_insert = 100, check_delay_period = 15 +CREATE TABLE default.table_for_reset_setting\n(\n `id` UInt64,\n `Data` String\n)\nENGINE = MergeTree\nORDER BY id\nSETTINGS index_granularity = 4096 +CREATE TABLE default.table_for_reset_setting\n(\n `id` UInt64,\n `Data` String\n)\nENGINE = MergeTree\nORDER BY id\nSETTINGS index_granularity = 4096, parts_to_throw_insert = 1, parts_to_delay_insert = 1 +CREATE TABLE default.table_for_reset_setting\n(\n `id` UInt64,\n `Data` String\n)\nENGINE = MergeTree\nORDER BY id\nSETTINGS index_granularity = 4096 +CREATE TABLE default.table_for_reset_setting\n(\n `id` UInt64,\n `Data` String\n)\nENGINE = 
MergeTree\nORDER BY id\nSETTINGS index_granularity = 4096 +CREATE TABLE default.table_for_reset_setting\n(\n `id` UInt64,\n `Data` String\n)\nENGINE = MergeTree\nORDER BY id\nSETTINGS index_granularity = 4096, merge_with_ttl_timeout = 300, max_concurrent_queries = 1 +CREATE TABLE default.table_for_reset_setting\n(\n `id` UInt64,\n `Data` String\n)\nENGINE = MergeTree\nORDER BY id\nSETTINGS index_granularity = 4096 diff --git a/tests/queries/0_stateless/00980_merge_alter_settings.sql b/tests/queries/0_stateless/00980_merge_alter_settings.sql index 6bf043fe4c8..755983ac62b 100644 --- a/tests/queries/0_stateless/00980_merge_alter_settings.sql +++ b/tests/queries/0_stateless/00980_merge_alter_settings.sql @@ -53,3 +53,50 @@ SHOW CREATE TABLE table_for_alter; DROP TABLE IF EXISTS table_for_alter; + +DROP TABLE IF EXISTS table_for_reset_setting; + +CREATE TABLE table_for_reset_setting ( + id UInt64, + Data String +) ENGINE = MergeTree() ORDER BY id SETTINGS index_granularity=4096; + +ALTER TABLE table_for_reset_setting MODIFY SETTING index_granularity=555; -- { serverError 472 } + +SHOW CREATE TABLE table_for_reset_setting; + +INSERT INTO table_for_reset_setting VALUES (1, '1'); +INSERT INTO table_for_reset_setting VALUES (2, '2'); + +ALTER TABLE table_for_reset_setting MODIFY SETTING parts_to_throw_insert = 1, parts_to_delay_insert = 1; + +SHOW CREATE TABLE table_for_reset_setting; + +INSERT INTO table_for_reset_setting VALUES (1, '1'); -- { serverError 252 } + +ALTER TABLE table_for_reset_setting RESET SETTING parts_to_delay_insert, parts_to_throw_insert; + +SHOW CREATE TABLE table_for_reset_setting; + +INSERT INTO table_for_reset_setting VALUES (1, '1'); +INSERT INTO table_for_reset_setting VALUES (2, '2'); + +DETACH TABLE table_for_reset_setting; +ATTACH TABLE table_for_reset_setting; + +SHOW CREATE TABLE table_for_reset_setting; + +ALTER TABLE table_for_reset_setting RESET SETTING index_granularity; -- { serverError 472 } + +-- ignore undefined setting +ALTER TABLE table_for_reset_setting RESET SETTING merge_with_ttl_timeout, unknown_setting; + +ALTER TABLE table_for_reset_setting MODIFY SETTING merge_with_ttl_timeout = 300, max_concurrent_queries = 1; + +SHOW CREATE TABLE table_for_reset_setting; + +ALTER TABLE table_for_reset_setting RESET SETTING max_concurrent_queries, merge_with_ttl_timeout; + +SHOW CREATE TABLE table_for_reset_setting; + +DROP TABLE IF EXISTS table_for_reset_setting; \ No newline at end of file diff --git a/tests/queries/0_stateless/00980_zookeeper_merge_tree_alter_settings.reference b/tests/queries/0_stateless/00980_zookeeper_merge_tree_alter_settings.reference index ee5a8024a92..357d1bef78d 100644 --- a/tests/queries/0_stateless/00980_zookeeper_merge_tree_alter_settings.reference +++ b/tests/queries/0_stateless/00980_zookeeper_merge_tree_alter_settings.reference @@ -10,3 +10,12 @@ CREATE TABLE default.replicated_table_for_alter1\n(\n `id` UInt64,\n `Data CREATE TABLE default.replicated_table_for_alter2\n(\n `id` UInt64,\n `Data` String\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test_00980/replicated_table_for_alter\', \'2\')\nORDER BY id\nSETTINGS index_granularity = 8192, parts_to_throw_insert = 1, parts_to_delay_insert = 1 CREATE TABLE default.replicated_table_for_alter1\n(\n `id` UInt64,\n `Data` String,\n `Data2` UInt64\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test_00980/replicated_table_for_alter\', \'1\')\nORDER BY id\nSETTINGS index_granularity = 8192, use_minimalistic_part_header_in_zookeeper = 1, check_delay_period = 15 CREATE TABLE 
default.replicated_table_for_alter2\n(\n `id` UInt64,\n `Data` String,\n `Data2` UInt64\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test_00980/replicated_table_for_alter\', \'2\')\nORDER BY id\nSETTINGS index_granularity = 8192, parts_to_throw_insert = 1, parts_to_delay_insert = 1 +CREATE TABLE default.replicated_table_for_reset_setting1\n(\n `id` UInt64,\n `Data` String\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test_00980/replicated_table_for_reset_setting\', \'1\')\nORDER BY id\nSETTINGS index_granularity = 8192 +CREATE TABLE default.replicated_table_for_reset_setting2\n(\n `id` UInt64,\n `Data` String\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test_00980/replicated_table_for_reset_setting\', \'2\')\nORDER BY id\nSETTINGS index_granularity = 8192 +CREATE TABLE default.replicated_table_for_reset_setting1\n(\n `id` UInt64,\n `Data` String\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test_00980/replicated_table_for_reset_setting\', \'1\')\nORDER BY id\nSETTINGS index_granularity = 8192 +CREATE TABLE default.replicated_table_for_reset_setting1\n(\n `id` UInt64,\n `Data` String\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test_00980/replicated_table_for_reset_setting\', \'1\')\nORDER BY id\nSETTINGS index_granularity = 8192, merge_with_ttl_timeout = 100 +CREATE TABLE default.replicated_table_for_reset_setting2\n(\n `id` UInt64,\n `Data` String\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test_00980/replicated_table_for_reset_setting\', \'2\')\nORDER BY id\nSETTINGS index_granularity = 8192, merge_with_ttl_timeout = 200 +CREATE TABLE default.replicated_table_for_reset_setting1\n(\n `id` UInt64,\n `Data` String\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test_00980/replicated_table_for_reset_setting\', \'1\')\nORDER BY id\nSETTINGS index_granularity = 8192, merge_with_ttl_timeout = 100 +CREATE TABLE default.replicated_table_for_reset_setting2\n(\n `id` UInt64,\n `Data` String\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test_00980/replicated_table_for_reset_setting\', \'2\')\nORDER BY id\nSETTINGS index_granularity = 8192, merge_with_ttl_timeout = 200 +CREATE TABLE default.replicated_table_for_reset_setting1\n(\n `id` UInt64,\n `Data` String\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test_00980/replicated_table_for_reset_setting\', \'1\')\nORDER BY id\nSETTINGS index_granularity = 8192 +CREATE TABLE default.replicated_table_for_reset_setting2\n(\n `id` UInt64,\n `Data` String\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test_00980/replicated_table_for_reset_setting\', \'2\')\nORDER BY id\nSETTINGS index_granularity = 8192 diff --git a/tests/queries/0_stateless/00980_zookeeper_merge_tree_alter_settings.sql b/tests/queries/0_stateless/00980_zookeeper_merge_tree_alter_settings.sql index 60e4cfff4e1..6ad8860227d 100644 --- a/tests/queries/0_stateless/00980_zookeeper_merge_tree_alter_settings.sql +++ b/tests/queries/0_stateless/00980_zookeeper_merge_tree_alter_settings.sql @@ -67,3 +67,51 @@ SHOW CREATE TABLE replicated_table_for_alter2; DROP TABLE IF EXISTS replicated_table_for_alter2; DROP TABLE IF EXISTS replicated_table_for_alter1; + +DROP TABLE IF EXISTS replicated_table_for_reset_setting1; +DROP TABLE IF EXISTS replicated_table_for_reset_setting2; + +SET replication_alter_partitions_sync = 2; + +CREATE TABLE replicated_table_for_reset_setting1 ( + id UInt64, + Data String +) ENGINE = ReplicatedMergeTree('/clickhouse/tables/test_00980/replicated_table_for_reset_setting', '1') ORDER BY id; + 
+CREATE TABLE replicated_table_for_reset_setting2 ( + id UInt64, + Data String +) ENGINE = ReplicatedMergeTree('/clickhouse/tables/test_00980/replicated_table_for_reset_setting', '2') ORDER BY id; + +SHOW CREATE TABLE replicated_table_for_reset_setting1; +SHOW CREATE TABLE replicated_table_for_reset_setting2; + +ALTER TABLE replicated_table_for_reset_setting1 MODIFY SETTING index_granularity = 4096; -- { serverError 472 } + +SHOW CREATE TABLE replicated_table_for_reset_setting1; + +ALTER TABLE replicated_table_for_reset_setting1 MODIFY SETTING merge_with_ttl_timeout = 100; +ALTER TABLE replicated_table_for_reset_setting2 MODIFY SETTING merge_with_ttl_timeout = 200; + +SHOW CREATE TABLE replicated_table_for_reset_setting1; +SHOW CREATE TABLE replicated_table_for_reset_setting2; + +DETACH TABLE replicated_table_for_reset_setting2; +ATTACH TABLE replicated_table_for_reset_setting2; + +DETACH TABLE replicated_table_for_reset_setting1; +ATTACH TABLE replicated_table_for_reset_setting1; + +SHOW CREATE TABLE replicated_table_for_reset_setting1; +SHOW CREATE TABLE replicated_table_for_reset_setting2; + +-- ignore undefined setting +ALTER TABLE replicated_table_for_reset_setting1 RESET SETTING check_delay_period, unknown_setting; +ALTER TABLE replicated_table_for_reset_setting1 RESET SETTING merge_with_ttl_timeout; +ALTER TABLE replicated_table_for_reset_setting2 RESET SETTING merge_with_ttl_timeout; + +SHOW CREATE TABLE replicated_table_for_reset_setting1; +SHOW CREATE TABLE replicated_table_for_reset_setting2; + +DROP TABLE IF EXISTS replicated_table_for_reset_setting2; +DROP TABLE IF EXISTS replicated_table_for_reset_setting1; diff --git a/tests/queries/0_stateless/01271_show_privileges.reference b/tests/queries/0_stateless/01271_show_privileges.reference index 343d8ceeca3..035cb902bff 100644 --- a/tests/queries/0_stateless/01271_show_privileges.reference +++ b/tests/queries/0_stateless/01271_show_privileges.reference @@ -31,7 +31,7 @@ ALTER DROP CONSTRAINT ['DROP CONSTRAINT'] TABLE ALTER CONSTRAINT ALTER CONSTRAINT ['CONSTRAINT'] \N ALTER TABLE ALTER TTL ['ALTER MODIFY TTL','MODIFY TTL'] TABLE ALTER TABLE ALTER MATERIALIZE TTL ['MATERIALIZE TTL'] TABLE ALTER TABLE -ALTER SETTINGS ['ALTER SETTING','ALTER MODIFY SETTING','MODIFY SETTING'] TABLE ALTER TABLE +ALTER SETTINGS ['ALTER SETTING','ALTER MODIFY SETTING','MODIFY SETTING','RESET SETTING'] TABLE ALTER TABLE ALTER MOVE PARTITION ['ALTER MOVE PART','MOVE PARTITION','MOVE PART'] TABLE ALTER TABLE ALTER FETCH PARTITION ['ALTER FETCH PART','FETCH PARTITION'] TABLE ALTER TABLE ALTER FREEZE PARTITION ['FREEZE PARTITION','UNFREEZE'] TABLE ALTER TABLE diff --git a/tests/queries/0_stateless/01604_explain_ast_of_nonselect_query.reference b/tests/queries/0_stateless/01604_explain_ast_of_nonselect_query.reference index 8863d3b57c7..63f00b6f9c5 100644 --- a/tests/queries/0_stateless/01604_explain_ast_of_nonselect_query.reference +++ b/tests/queries/0_stateless/01604_explain_ast_of_nonselect_query.reference @@ -1,6 +1,6 @@ AlterQuery t1 (children 1) ExpressionList (children 1) - AlterCommand 30 (children 1) + AlterCommand 31 (children 1) Function equals (children 1) ExpressionList (children 2) Identifier date From 3fe559b31fe4482633f0e5a279936f733ce592c4 Mon Sep 17 00:00:00 2001 From: Maksim Kita Date: Mon, 31 May 2021 11:05:40 +0300 Subject: [PATCH 623/931] Compile aggregate functions --- base/common/FunctorToStaticMethodAdaptor.h | 39 ++++ src/AggregateFunctions/AggregateFunctionAvg.h | 39 ++++ src/AggregateFunctions/AggregateFunctionSum.h | 26 +++ 
src/AggregateFunctions/IAggregateFunction.h | 21 ++ src/Common/MemorySanitizer.h | 5 + src/Core/Settings.h | 2 + src/DataStreams/TTLAggregationAlgorithm.cpp | 3 +- src/Interpreters/Aggregator.cpp | 154 +++++++++++++-- src/Interpreters/Aggregator.h | 12 +- src/Interpreters/InterpreterSelectQuery.cpp | 8 +- src/Interpreters/JIT/compileFunction.cpp | 185 ++++++++++++++++++ src/Interpreters/JIT/compileFunction.h | 17 ++ .../MergeTree/MergeTreeDataSelectExecutor.cpp | 6 +- 13 files changed, 499 insertions(+), 18 deletions(-) create mode 100644 base/common/FunctorToStaticMethodAdaptor.h diff --git a/base/common/FunctorToStaticMethodAdaptor.h b/base/common/FunctorToStaticMethodAdaptor.h new file mode 100644 index 00000000000..f0609a0faa2 --- /dev/null +++ b/base/common/FunctorToStaticMethodAdaptor.h @@ -0,0 +1,39 @@ +#include + +template +class FunctorToStaticMethodAdaptor : public FunctorToStaticMethodAdaptor +{ +public: +}; + +template +class FunctorToStaticMethodAdaptor +{ +public: + static R call(C * ptr, Args... arguments) + { + return std::invoke(&C::operator(), ptr, arguments...); + } + + static R unsafeCall(char * ptr, Args... arguments) + { + C * ptr_typed = reinterpret_cast(ptr); + return std::invoke(&C::operator(), ptr_typed, arguments...); + } +}; + +template +class FunctorToStaticMethodAdaptor +{ +public: + static R call(C * ptr, Args... arguments) + { + return std::invoke(&C::operator(), ptr, arguments...); + } + + static R unsafeCall(char * ptr, Args... arguments) + { + C * ptr_typed = static_cast(ptr); + return std::invoke(&C::operator(), ptr_typed, arguments...); + } +}; diff --git a/src/AggregateFunctions/AggregateFunctionAvg.h b/src/AggregateFunctions/AggregateFunctionAvg.h index 7cdef3bfe69..a440325c64b 100644 --- a/src/AggregateFunctions/AggregateFunctionAvg.h +++ b/src/AggregateFunctions/AggregateFunctionAvg.h @@ -9,6 +9,14 @@ #include #include +#if !defined(ARCADIA_BUILD) +# include +#endif + +#if USE_EMBEDDED_COMPILER +# include +# include +#endif namespace DB { @@ -157,6 +165,37 @@ public: ++this->data(place).denominator; } +#if USE_EMBEDDED_COMPILER + + virtual bool isCompilable() const override + { + using AverageFieldType = AvgFieldType; + return std::is_same_v || std::is_same_v; + } + + virtual void compile(llvm::IRBuilderBase & builder, llvm::Value * aggregate_function_place, const DataTypePtr & value_type, llvm::Value * value) const override + { + llvm::IRBuilder<> & b = static_cast &>(builder); + + llvm::Type * numerator_type = b.getInt64Ty(); + llvm::Type * denominator_type = b.getInt64Ty(); + + auto * numerator_value_ptr = b.CreatePointerCast(aggregate_function_place, numerator_type->getPointerTo()); + auto * numerator_value = b.CreateLoad(numerator_type, numerator_value_ptr); + auto * value_cast_to_result = nativeCast(b, value_type, value, numerator_type); + auto * sum_result_value = numerator_value->getType()->isIntegerTy() ? 
b.CreateAdd(numerator_value, value_cast_to_result) : b.CreateFAdd(numerator_value, value_cast_to_result); + b.CreateStore(sum_result_value, numerator_value_ptr); + + auto * denominator_place_ptr_untyped = b.CreateConstInBoundsGEP1_32(nullptr, aggregate_function_place, 8); + auto * denominator_place_ptr = b.CreatePointerCast(denominator_place_ptr_untyped, denominator_type->getPointerTo()); + auto * denominator_value = b.CreateLoad(denominator_place_ptr, numerator_value_ptr); + auto * increate_denominator_value = b.CreateAdd(denominator_value, llvm::ConstantInt::get(denominator_type, 1)); + b.CreateStore(increate_denominator_value, denominator_place_ptr); + } + +#endif + + String getName() const final { return "avg"; } }; } diff --git a/src/AggregateFunctions/AggregateFunctionSum.h b/src/AggregateFunctions/AggregateFunctionSum.h index 7261e56c044..609cb18429f 100644 --- a/src/AggregateFunctions/AggregateFunctionSum.h +++ b/src/AggregateFunctions/AggregateFunctionSum.h @@ -12,6 +12,14 @@ #include +#if !defined(ARCADIA_BUILD) +# include +#endif + +#if USE_EMBEDDED_COMPILER +# include +# include +#endif namespace DB { @@ -385,6 +393,24 @@ public: column.getData().push_back(this->data(place).get()); } + #if USE_EMBEDDED_COMPILER + + virtual bool isCompilable() const override { return Type == AggregateFunctionTypeSum; } + + virtual void compile(llvm::IRBuilderBase & builder, llvm::Value * aggregate_function_place, const DataTypePtr & value_type, llvm::Value * value) const override + { + llvm::IRBuilder<> & b = static_cast &>(builder); + + auto * return_native_type = toNativeType(b, removeNullable(getReturnType())); + auto * sum_value_ptr = b.CreatePointerCast(aggregate_function_place, return_native_type->getPointerTo()); + auto * sum_value = b.CreateLoad(return_native_type, sum_value_ptr); + auto * value_cast_to_result = nativeCast(b, value_type, value, return_native_type); + auto * sum_result_value = sum_value->getType()->isIntegerTy() ? b.CreateAdd(sum_value, value_cast_to_result) : b.CreateFAdd(sum_value, value_cast_to_result); + b.CreateStore(sum_result_value, sum_value_ptr); + } + + #endif + private: UInt32 scale; }; diff --git a/src/AggregateFunctions/IAggregateFunction.h b/src/AggregateFunctions/IAggregateFunction.h index e39ca460ea9..928d212dad4 100644 --- a/src/AggregateFunctions/IAggregateFunction.h +++ b/src/AggregateFunctions/IAggregateFunction.h @@ -9,11 +9,21 @@ #include #include +#if !defined(ARCADIA_BUILD) +# include "config_core.h" +#endif + #include #include #include #include +namespace llvm +{ + class LLVMContext; + class Value; + class IRBuilderBase; +} namespace DB { @@ -241,6 +251,17 @@ public: // of true window functions, so this hack-ish interface suffices. 
virtual bool isOnlyWindowFunction() const { return false; } + #if USE_EMBEDDED_COMPILER + + virtual bool isCompilable() const { return false; } + + virtual void compile(llvm::IRBuilderBase & /*builder*/, llvm::Value * /*aggregate_function_place*/, const DataTypePtr & /*value_type*/, llvm::Value * /*value*/) const + { + throw Exception(getName() + " is not JIT-compilable", ErrorCodes::NOT_IMPLEMENTED); + } + + #endif + protected: DataTypes argument_types; Array parameters; diff --git a/src/Common/MemorySanitizer.h b/src/Common/MemorySanitizer.h index 9e34e454090..b52f1d74a2b 100644 --- a/src/Common/MemorySanitizer.h +++ b/src/Common/MemorySanitizer.h @@ -7,6 +7,11 @@ #pragma clang diagnostic ignored "-Wreserved-id-macro" #endif +#undef __msan_unpoison +#undef __msan_test_shadow +#undef __msan_print_shadow +#undef __msan_unpoison_string + #define __msan_unpoison(X, Y) #define __msan_test_shadow(X, Y) (false) #define __msan_print_shadow(X, Y) diff --git a/src/Core/Settings.h b/src/Core/Settings.h index 0197bfac7e4..c260aa41230 100644 --- a/src/Core/Settings.h +++ b/src/Core/Settings.h @@ -106,6 +106,8 @@ class IColumn; M(Bool, allow_suspicious_low_cardinality_types, false, "In CREATE TABLE statement allows specifying LowCardinality modifier for types of small fixed size (8 or less). Enabling this may increase merge times and memory consumption.", 0) \ M(Bool, compile_expressions, true, "Compile some scalar functions and operators to native code.", 0) \ M(UInt64, min_count_to_compile_expression, 3, "The number of identical expressions before they are JIT-compiled", 0) \ + M(Bool, compile_aggregate_expressions, true, "Compile aggregate functions to native code.", 0) \ + M(UInt64, min_count_to_compile_aggregate_expression, 0, "The number of identical aggregate expressions before they are JIT-compiled", 0) \ M(UInt64, group_by_two_level_threshold, 100000, "From what number of keys, a two-level aggregation starts. 0 - the threshold is not set.", 0) \ M(UInt64, group_by_two_level_threshold_bytes, 50000000, "From what size of the aggregation state in bytes, a two-level aggregation begins to be used. 0 - the threshold is not set.
Two-level aggregation is used when at least one of the thresholds is triggered.", 0) \ M(Bool, distributed_aggregation_memory_efficient, true, "Is the memory-saving mode of distributed aggregation enabled.", 0) \ diff --git a/src/DataStreams/TTLAggregationAlgorithm.cpp b/src/DataStreams/TTLAggregationAlgorithm.cpp index 9a1cf45772f..66792dcfdb2 100644 --- a/src/DataStreams/TTLAggregationAlgorithm.cpp +++ b/src/DataStreams/TTLAggregationAlgorithm.cpp @@ -33,7 +33,8 @@ TTLAggregationAlgorithm::TTLAggregationAlgorithm( Aggregator::Params params(header, keys, aggregates, false, settings.max_rows_to_group_by, settings.group_by_overflow_mode, 0, 0, settings.max_bytes_before_external_group_by, settings.empty_result_for_aggregation_by_empty_set, - storage_.getContext()->getTemporaryVolume(), settings.max_threads, settings.min_free_disk_space_for_temporary_data); + storage_.getContext()->getTemporaryVolume(), settings.max_threads, settings.min_free_disk_space_for_temporary_data, + settings.compile_aggregate_expressions, settings.min_count_to_compile_aggregate_expression); aggregator = std::make_unique(params); } diff --git a/src/Interpreters/Aggregator.cpp b/src/Interpreters/Aggregator.cpp index da3d54ce21a..8549926fc3c 100644 --- a/src/Interpreters/Aggregator.cpp +++ b/src/Interpreters/Aggregator.cpp @@ -1,5 +1,7 @@ #include #include + +#include #include #include #include @@ -21,6 +23,7 @@ #include #include #include +#include namespace ProfileEvents @@ -477,6 +480,11 @@ void NO_INLINE Aggregator::executeImpl( executeImplBatch(method, state, aggregates_pool, rows, aggregate_instructions, overflow_row); } +static CHJIT & getJITInstance() +{ + static CHJIT jit; + return jit; +} template void NO_INLINE Aggregator::executeImplBatch( @@ -537,16 +545,13 @@ void NO_INLINE Aggregator::executeImplBatch( /// Generic case. - std::unique_ptr places(new AggregateDataPtr[rows]); - - /// For all rows. - for (size_t i = 0; i < rows; ++i) + auto get_aggregate_data = [&](size_t row) -> AggregateDataPtr { - AggregateDataPtr aggregate_data = nullptr; + AggregateDataPtr aggregate_data; if constexpr (!no_more_keys) { - auto emplace_result = state.emplaceKey(method.data, i, *aggregates_pool); + auto emplace_result = state.emplaceKey(method.data, row, *aggregates_pool); /// If a new key is inserted, initialize the states of the aggregate functions, and possibly something related to the key. if (emplace_result.isInserted()) @@ -567,23 +572,148 @@ void NO_INLINE Aggregator::executeImplBatch( else { /// Add only if the key already exists. - auto find_result = state.findKey(method.data, i, *aggregates_pool); + auto find_result = state.findKey(method.data, row, *aggregates_pool); if (find_result.isFound()) aggregate_data = find_result.getMapped(); else aggregate_data = overflow_row; } - places[i] = aggregate_data; - } + // std::cerr << "Row " << row << " returned place " << static_cast(aggregate_data) << std::endl; + return aggregate_data; + }; + + #if USE_EMBEDDED_COMPILER + std::vector columns_data; + std::vector functions_to_compile; + size_t aggregate_instructions_size = 0; /// Add values to the aggregate functions. 
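+        /// First pass over the instructions: remember each compilable function together
+        /// with its state offset, and grab the raw data of its first argument column.
+        /// The JIT branch below is taken only when compile_aggregate_expressions is set
+        /// and every instruction turned out to be compilable; otherwise the scalar
+        /// addBatch path further down runs unchanged.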
for (AggregateFunctionInstruction * inst = aggregate_instructions; inst->that; ++inst) { - if (inst->offsets) - inst->batch_that->addBatchArray(rows, places.get(), inst->state_offset, inst->batch_arguments, inst->offsets, aggregates_pool); + const auto * function = inst->that; + if (function && function->isCompilable()) + { + AggregateFunctionToCompile function_to_compile + { + .function = inst->that, + .aggregate_data_offset = inst->state_offset + }; + + columns_data.emplace_back(getColumnData(inst->batch_arguments[0])); + functions_to_compile.emplace_back(std::move(function_to_compile)); + } + + ++aggregate_instructions_size; + } + + if (params.compile_aggregate_expressions && functions_to_compile.size() == aggregate_instructions_size) + { + std::string functions_dump; + + for (const auto & func : functions_to_compile) + { + const auto * function = func.function; + + std::string function_dump; + + auto return_type_name = function->getReturnType()->getName(); + + function_dump += return_type_name; + function_dump += ' '; + function_dump += function->getName(); + function_dump += '('; + + const auto & argument_types = function->getArgumentTypes(); + for (const auto & argument_type : argument_types) + { + function_dump += argument_type->getName(); + function_dump += ','; + } + + if (!argument_types.empty()) + function_dump.pop_back(); + + function_dump += ')'; + + functions_dump += function_dump; + functions_dump += ' '; + } + + static std::unordered_map aggregation_functions_dump_to_compiled_module_info; + CHJIT::CompiledModuleInfo compiled_module; + + auto it = aggregation_functions_dump_to_compiled_module_info.find(functions_dump); + if (it != aggregation_functions_dump_to_compiled_module_info.end()) + { + compiled_module = it->second; + LOG_TRACE(log, "Get compiled aggregate functions {} from cache", functions_dump); + + } else - inst->batch_that->addBatch(rows, places.get(), inst->state_offset, inst->batch_arguments, aggregates_pool); + { + compiled_module = compileAggregateFunctons(getJITInstance(), functions_to_compile, functions_dump); + aggregation_functions_dump_to_compiled_module_info[functions_dump] = compiled_module; + } + + LOG_TRACE(log, "Use compiled expression {}", functions_dump); + + JITCompiledAggregateFunction aggregate_function = reinterpret_cast(getJITInstance().findCompiledFunction(compiled_module, functions_dump)); + GetAggregateDataFunction get_aggregate_data_function = FunctorToStaticMethodAdaptor::unsafeCall; + GetAggregateDataContext get_aggregate_data_context = reinterpret_cast(&get_aggregate_data); + aggregate_function(rows, columns_data.data(), get_aggregate_data_function, get_aggregate_data_context); + } + else + #endif + { + std::unique_ptr places(new AggregateDataPtr[rows]); + + /// For all rows. + for (size_t i = 0; i < rows; ++i) + { + AggregateDataPtr aggregate_data; + + if constexpr (!no_more_keys) + { + auto emplace_result = state.emplaceKey(method.data, i, *aggregates_pool); + + /// If a new key is inserted, initialize the states of the aggregate functions, and possibly something related to the key. + if (emplace_result.isInserted()) + { + /// exception-safety - if you can not allocate memory or create states, then destructors will not be called. 
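+                    /// Publish nullptr into the slot first: if alignedAlloc or
+                    /// createAggregateStates throws below, the key maps to nullptr and is
+                    /// skipped during cleanup instead of pointing at a half-built state.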
+ emplace_result.setMapped(nullptr); + + aggregate_data = aggregates_pool->alignedAlloc(total_size_of_aggregate_states, align_aggregate_states); + createAggregateStates(aggregate_data); + + emplace_result.setMapped(aggregate_data); + } + else + aggregate_data = emplace_result.getMapped(); + + assert(aggregate_data != nullptr); + } + else + { + /// Add only if the key already exists. + auto find_result = state.findKey(method.data, i, *aggregates_pool); + if (find_result.isFound()) + aggregate_data = find_result.getMapped(); + else + aggregate_data = overflow_row; + } + + places[i] = aggregate_data; + } + + /// Add values to the aggregate functions. + for (AggregateFunctionInstruction * inst = aggregate_instructions; inst->that; ++inst) + { + if (inst->offsets) + inst->batch_that->addBatchArray(rows, places.get(), inst->state_offset, inst->batch_arguments, inst->offsets, aggregates_pool); + else + inst->batch_that->addBatch(rows, places.get(), inst->state_offset, inst->batch_arguments, aggregates_pool); + } } } diff --git a/src/Interpreters/Aggregator.h b/src/Interpreters/Aggregator.h index c8ab2d207a4..b279ebd4038 100644 --- a/src/Interpreters/Aggregator.h +++ b/src/Interpreters/Aggregator.h @@ -907,6 +907,10 @@ public: size_t max_threads; const size_t min_free_disk_space; + + bool compile_aggregate_expressions; + size_t min_count_to_compile_aggregate_expression; + Params( const Block & src_header_, const ColumnNumbers & keys_, const AggregateDescriptions & aggregates_, @@ -916,6 +920,8 @@ public: bool empty_result_for_aggregation_by_empty_set_, VolumePtr tmp_volume_, size_t max_threads_, size_t min_free_disk_space_, + bool compile_aggregate_expressions_, + size_t min_count_to_compile_aggregate_expression_, const Block & intermediate_header_ = {}) : src_header(src_header_), intermediate_header(intermediate_header_), @@ -925,14 +931,16 @@ public: max_bytes_before_external_group_by(max_bytes_before_external_group_by_), empty_result_for_aggregation_by_empty_set(empty_result_for_aggregation_by_empty_set_), tmp_volume(tmp_volume_), max_threads(max_threads_), - min_free_disk_space(min_free_disk_space_) + min_free_disk_space(min_free_disk_space_), + compile_aggregate_expressions(compile_aggregate_expressions_), + min_count_to_compile_aggregate_expression(min_count_to_compile_aggregate_expression_) { } /// Only parameters that matter during merge. 
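+        /// The merge-only constructor below forwards `false, 0` for the two new JIT
+        /// parameters, so an Aggregator created through it never compiles its
+        /// aggregate functions.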
Params(const Block & intermediate_header_, const ColumnNumbers & keys_, const AggregateDescriptions & aggregates_, bool overflow_row_, size_t max_threads_) - : Params(Block(), keys_, aggregates_, overflow_row_, 0, OverflowMode::THROW, 0, 0, 0, false, nullptr, max_threads_, 0) + : Params(Block(), keys_, aggregates_, overflow_row_, 0, OverflowMode::THROW, 0, 0, 0, false, nullptr, max_threads_, 0, false, 0) { intermediate_header = intermediate_header_; } diff --git a/src/Interpreters/InterpreterSelectQuery.cpp b/src/Interpreters/InterpreterSelectQuery.cpp index 181b60b7bf3..900820fb209 100644 --- a/src/Interpreters/InterpreterSelectQuery.cpp +++ b/src/Interpreters/InterpreterSelectQuery.cpp @@ -2038,7 +2038,9 @@ void InterpreterSelectQuery::executeAggregation(QueryPlan & query_plan, const Ac settings.empty_result_for_aggregation_by_empty_set, context->getTemporaryVolume(), settings.max_threads, - settings.min_free_disk_space_for_temporary_data); + settings.min_free_disk_space_for_temporary_data, + settings.compile_aggregate_expressions, + settings.min_count_to_compile_aggregate_expression); SortDescription group_by_sort_description; @@ -2140,7 +2142,9 @@ void InterpreterSelectQuery::executeRollupOrCube(QueryPlan & query_plan, Modific settings.empty_result_for_aggregation_by_empty_set, context->getTemporaryVolume(), settings.max_threads, - settings.min_free_disk_space_for_temporary_data); + settings.min_free_disk_space_for_temporary_data, + settings.compile_aggregate_expressions, + settings.min_count_to_compile_aggregate_expression); auto transform_params = std::make_shared(params, true); diff --git a/src/Interpreters/JIT/compileFunction.cpp b/src/Interpreters/JIT/compileFunction.cpp index 384b1a4a781..d17ec844c68 100644 --- a/src/Interpreters/JIT/compileFunction.cpp +++ b/src/Interpreters/JIT/compileFunction.cpp @@ -266,6 +266,191 @@ CHJIT::CompiledModuleInfo compileFunction(CHJIT & jit, const IFunctionBase & fun return compiled_module_info; } +CHJIT::CompiledModuleInfo compileAggregateFunctons(CHJIT & jit, const std::vector & functions, const std::string & result_name) +{ + auto compiled_module_info = jit.compileModule([&](llvm::Module & module) + { + auto & context = module.getContext(); + llvm::IRBuilder<> b (context); + + auto * size_type = b.getIntNTy(sizeof(size_t) * 8); + + auto * column_data_type = llvm::StructType::get(b.getInt8PtrTy(), b.getInt8PtrTy()); + auto * get_place_func_declaration = llvm::FunctionType::get(b.getInt8Ty()->getPointerTo(), { b.getInt8Ty()->getPointerTo(), size_type }, /*isVarArg=*/false); + auto * aggregate_loop_func_declaration = llvm::FunctionType::get(b.getVoidTy(), { size_type, column_data_type->getPointerTo(), get_place_func_declaration->getPointerTo(), b.getInt8Ty()->getPointerTo() }, false); + + auto * aggregate_loop_func_definition = llvm::Function::Create(aggregate_loop_func_declaration, llvm::Function::ExternalLinkage, result_name, module); + + auto * arguments = aggregate_loop_func_definition->args().begin(); + llvm::Value * rows_count_arg = &*arguments++; + llvm::Value * columns_arg = &*arguments++; + llvm::Value * get_place_function_arg = &*arguments++; + llvm::Value * get_place_function_context_arg = &*arguments++; + + /// Initialize ColumnDataPlaceholder llvm representation of ColumnData + + auto * entry = llvm::BasicBlock::Create(b.getContext(), "entry", aggregate_loop_func_definition); + b.SetInsertPoint(entry); + + std::vector columns(functions.size()); + for (size_t i = 0; i < functions.size(); ++i) + { + auto argument_type = 
functions[i].function->getArgumentTypes()[0]; + auto * data = b.CreateLoad(column_data_type, b.CreateConstInBoundsGEP1_32(column_data_type, columns_arg, i)); + columns[i].data_init = b.CreatePointerCast(b.CreateExtractValue(data, {0}), toNativeType(b, removeNullable(argument_type))->getPointerTo()); + } + + /// Initialize loop + + auto * end = llvm::BasicBlock::Create(b.getContext(), "end", aggregate_loop_func_definition); + auto * loop = llvm::BasicBlock::Create(b.getContext(), "loop", aggregate_loop_func_definition); + + b.CreateCondBr(b.CreateICmpEQ(rows_count_arg, llvm::ConstantInt::get(size_type, 0)), end, loop); + + b.SetInsertPoint(loop); + + auto * counter_phi = b.CreatePHI(rows_count_arg->getType(), 2); + counter_phi->addIncoming(llvm::ConstantInt::get(size_type, 0), entry); + + for (auto & col : columns) + { + col.data = b.CreatePHI(col.data_init->getType(), 2); + col.data->addIncoming(col.data_init, entry); + } + + auto * aggregation_place = b.CreateCall(get_place_func_declaration, get_place_function_arg, { get_place_function_context_arg, counter_phi }); + + for (size_t i = 0; i < functions.size(); ++i) + { + size_t aggregate_function_offset = functions[i].aggregate_data_offset; + const auto * aggregate_function_ptr = functions[i].function; + + auto * aggregation_place_with_offset = b.CreateConstInBoundsGEP1_32(nullptr, aggregation_place, aggregate_function_offset); + + auto column_type = functions[i].function->getArgumentTypes()[0]; + auto * column_data = b.CreateLoad(toNativeType(b, column_type), columns[i].data); + aggregate_function_ptr->compile(b, aggregation_place_with_offset, column_type, column_data); + } + + /// End of loop + + auto * cur_block = b.GetInsertBlock(); + for (auto & col : columns) + { + col.data->addIncoming(b.CreateConstInBoundsGEP1_32(nullptr, col.data, 1), cur_block); + if (col.null) + col.null->addIncoming(b.CreateConstInBoundsGEP1_32(nullptr, col.null, 1), cur_block); + } + + auto * value = b.CreateAdd(counter_phi, llvm::ConstantInt::get(size_type, 1)); + counter_phi->addIncoming(value, loop); + + b.CreateCondBr(b.CreateICmpEQ(value, rows_count_arg), end, loop); + + b.SetInsertPoint(end); + b.CreateRetVoid(); + + llvm::errs() << "Module before optimizations \n"; + module.print(llvm::errs(), nullptr); + }); + + return compiled_module_info; +} + +CHJIT::CompiledModuleInfo compileAggregateFunctonsV2(CHJIT & jit, const std::vector & functions, const std::string & result_name) +{ + auto compiled_module_info = jit.compileModule([&](llvm::Module & module) + { + auto & context = module.getContext(); + llvm::IRBuilder<> b (context); + + auto * size_type = b.getIntNTy(sizeof(size_t) * 8); + + auto * column_data_type = llvm::StructType::get(b.getInt8PtrTy(), b.getInt8PtrTy()); + auto * aggregate_data_places_type = b.getInt8Ty()->getPointerTo()->getPointerTo(); + auto * aggregate_loop_func_declaration = llvm::FunctionType::get(b.getVoidTy(), { size_type, column_data_type->getPointerTo(), aggregate_data_places_type }, false); + + auto * aggregate_loop_func_definition = llvm::Function::Create(aggregate_loop_func_declaration, llvm::Function::ExternalLinkage, result_name, module); + + auto * arguments = aggregate_loop_func_definition->args().begin(); + llvm::Value * rows_count_arg = &*arguments++; + llvm::Value * columns_arg = &*arguments++; + llvm::Value * aggregate_data_places_arg = &*arguments++; + + /// Initialize ColumnDataPlaceholder llvm representation of ColumnData + + auto * entry = llvm::BasicBlock::Create(b.getContext(), "entry", 
aggregate_loop_func_definition); + b.SetInsertPoint(entry); + + std::vector columns(functions.size()); + for (size_t i = 0; i < functions.size(); ++i) + { + auto argument_type = functions[i].function->getArgumentTypes()[0]; + auto * data = b.CreateLoad(column_data_type, b.CreateConstInBoundsGEP1_32(column_data_type, columns_arg, i)); + columns[i].data_init = b.CreatePointerCast(b.CreateExtractValue(data, {0}), toNativeType(b, removeNullable(argument_type))->getPointerTo()); + } + + /// Initialize loop + + auto * end = llvm::BasicBlock::Create(b.getContext(), "end", aggregate_loop_func_definition); + auto * loop = llvm::BasicBlock::Create(b.getContext(), "loop", aggregate_loop_func_definition); + + b.CreateCondBr(b.CreateICmpEQ(rows_count_arg, llvm::ConstantInt::get(size_type, 0)), end, loop); + + b.SetInsertPoint(loop); + + auto * counter_phi = b.CreatePHI(rows_count_arg->getType(), 2); + counter_phi->addIncoming(llvm::ConstantInt::get(size_type, 0), entry); + + auto * aggregate_data_place_phi = b.CreatePHI(aggregate_data_places_type, 2); + aggregate_data_place_phi->addIncoming(aggregate_data_places_arg, entry); + + for (auto & col : columns) + { + col.data = b.CreatePHI(col.data_init->getType(), 2); + col.data->addIncoming(col.data_init, entry); + } + + for (size_t i = 0; i < functions.size(); ++i) + { + size_t aggregate_function_offset = functions[i].aggregate_data_offset; + const auto * aggregate_function_ptr = functions[i].function; + + auto * aggregate_data_place = b.CreateLoad(b.getInt8Ty()->getPointerTo(), aggregate_data_place_phi); + auto * aggregation_place_with_offset = b.CreateConstInBoundsGEP1_32(nullptr, aggregate_data_place, aggregate_function_offset); + + auto column_type = functions[i].function->getArgumentTypes()[0]; + auto * column_data = b.CreateLoad(toNativeType(b, column_type), columns[i].data); + aggregate_function_ptr->compile(b, aggregation_place_with_offset, column_type, column_data); + } + + /// End of loop + + auto * cur_block = b.GetInsertBlock(); + for (auto & col : columns) + { + col.data->addIncoming(b.CreateConstInBoundsGEP1_32(nullptr, col.data, 1), cur_block); + if (col.null) + col.null->addIncoming(b.CreateConstInBoundsGEP1_32(nullptr, col.null, 1), cur_block); + } + + auto * value = b.CreateAdd(counter_phi, llvm::ConstantInt::get(size_type, 1), "", true, true); + counter_phi->addIncoming(value, loop); + + aggregate_data_place_phi->addIncoming(b.CreateConstInBoundsGEP1_32(nullptr, aggregate_data_place_phi, 1), loop); + + b.CreateCondBr(b.CreateICmpEQ(value, rows_count_arg), end, loop); + + b.SetInsertPoint(end); + b.CreateRetVoid(); + + llvm::errs() << "Module before optimizations \n"; + module.print(llvm::errs(), nullptr); + }); + + return compiled_module_info; +} + } #endif diff --git a/src/Interpreters/JIT/compileFunction.h b/src/Interpreters/JIT/compileFunction.h index e74d35e7aa8..4c918d54aa3 100644 --- a/src/Interpreters/JIT/compileFunction.h +++ b/src/Interpreters/JIT/compileFunction.h @@ -7,6 +7,7 @@ #if USE_EMBEDDED_COMPILER #include +#include #include namespace DB @@ -28,6 +29,7 @@ struct ColumnData ColumnData getColumnData(const IColumn * column); using ColumnDataRowsSize = size_t; + using JITCompiledFunction = void (*)(ColumnDataRowsSize, ColumnData *); /** Compile function to native jit code using CHJIT instance. 
@@ -41,6 +43,21 @@ using JITCompiledFunction = void (*)(ColumnDataRowsSize, ColumnData *); */ CHJIT::CompiledModuleInfo compileFunction(CHJIT & jit, const IFunctionBase & function); +using GetAggregateDataContext = char *; +using GetAggregateDataFunction = AggregateDataPtr (*)(GetAggregateDataContext, size_t); +using JITCompiledAggregateFunction = void (*)(ColumnDataRowsSize, ColumnData *, GetAggregateDataFunction, GetAggregateDataContext); + +struct AggregateFunctionToCompile +{ + const IAggregateFunction * function; + size_t aggregate_data_offset; +}; + +CHJIT::CompiledModuleInfo compileAggregateFunctons(CHJIT & jit, const std::vector & functions, const std::string & result_name); + +using JITCompiledAggregateFunctionV2 = void (*)(ColumnDataRowsSize, ColumnData *, AggregateDataPtr *); +CHJIT::CompiledModuleInfo compileAggregateFunctonsV2(CHJIT & jit, const std::vector & functions, const std::string & result_name); + } #endif diff --git a/src/Storages/MergeTree/MergeTreeDataSelectExecutor.cpp b/src/Storages/MergeTree/MergeTreeDataSelectExecutor.cpp index ae3b533918d..cffedf44823 100644 --- a/src/Storages/MergeTree/MergeTreeDataSelectExecutor.cpp +++ b/src/Storages/MergeTree/MergeTreeDataSelectExecutor.cpp @@ -301,6 +301,8 @@ QueryPlanPtr MergeTreeDataSelectExecutor::read( context->getTemporaryVolume(), settings.max_threads, settings.min_free_disk_space_for_temporary_data, + settings.compile_expressions, + settings.min_count_to_compile_aggregate_expression, header_before_aggregation); // The source header is also an intermediate header transform_params = std::make_shared(std::move(params), query_info.projection->aggregate_final); @@ -329,7 +331,9 @@ QueryPlanPtr MergeTreeDataSelectExecutor::read( settings.empty_result_for_aggregation_by_empty_set, context->getTemporaryVolume(), settings.max_threads, - settings.min_free_disk_space_for_temporary_data); + settings.min_free_disk_space_for_temporary_data, + settings.compile_aggregate_expressions, + settings.min_count_to_compile_aggregate_expression); transform_params = std::make_shared(std::move(params), query_info.projection->aggregate_final); } From 9b71b1040adf985804918916ea4caef88ddd8c43 Mon Sep 17 00:00:00 2001 From: Maksim Kita Date: Thu, 3 Jun 2021 22:20:53 +0300 Subject: [PATCH 624/931] Aggregate functions update compile interface --- base/common/FunctorToStaticMethodAdaptor.h | 4 +- src/AggregateFunctions/AggregateFunctionAvg.h | 31 -- src/AggregateFunctions/AggregateFunctionSum.h | 59 ++- src/AggregateFunctions/IAggregateFunction.h | 17 +- src/Interpreters/Aggregator.cpp | 477 +++++++++++------- src/Interpreters/Aggregator.h | 42 ++ src/Interpreters/ExpressionJIT.cpp | 55 +- src/Interpreters/ExpressionJIT.h | 10 +- src/Interpreters/JIT/CHJIT.cpp | 45 +- src/Interpreters/JIT/CHJIT.h | 26 +- src/Interpreters/JIT/compileFunction.cpp | 389 ++++++++------ src/Interpreters/JIT/compileFunction.h | 37 +- src/Interpreters/examples/jit_example.cpp | 7 +- 13 files changed, 718 insertions(+), 481 deletions(-) diff --git a/base/common/FunctorToStaticMethodAdaptor.h b/base/common/FunctorToStaticMethodAdaptor.h index f0609a0faa2..273c436e9d7 100644 --- a/base/common/FunctorToStaticMethodAdaptor.h +++ b/base/common/FunctorToStaticMethodAdaptor.h @@ -1,9 +1,11 @@ #include +/** Adapt functor to static method where functor passed as context. + * Main use case to convert lambda into function that can be passed into JIT code. 
+ */ template class FunctorToStaticMethodAdaptor : public FunctorToStaticMethodAdaptor { -public: }; template diff --git a/src/AggregateFunctions/AggregateFunctionAvg.h b/src/AggregateFunctions/AggregateFunctionAvg.h index a440325c64b..897306a7d32 100644 --- a/src/AggregateFunctions/AggregateFunctionAvg.h +++ b/src/AggregateFunctions/AggregateFunctionAvg.h @@ -165,37 +165,6 @@ public: ++this->data(place).denominator; } -#if USE_EMBEDDED_COMPILER - - virtual bool isCompilable() const override - { - using AverageFieldType = AvgFieldType; - return std::is_same_v || std::is_same_v; - } - - virtual void compile(llvm::IRBuilderBase & builder, llvm::Value * aggregate_function_place, const DataTypePtr & value_type, llvm::Value * value) const override - { - llvm::IRBuilder<> & b = static_cast &>(builder); - - llvm::Type * numerator_type = b.getInt64Ty(); - llvm::Type * denominator_type = b.getInt64Ty(); - - auto * numerator_value_ptr = b.CreatePointerCast(aggregate_function_place, numerator_type->getPointerTo()); - auto * numerator_value = b.CreateLoad(numerator_type, numerator_value_ptr); - auto * value_cast_to_result = nativeCast(b, value_type, value, numerator_type); - auto * sum_result_value = numerator_value->getType()->isIntegerTy() ? b.CreateAdd(numerator_value, value_cast_to_result) : b.CreateFAdd(numerator_value, value_cast_to_result); - b.CreateStore(sum_result_value, numerator_value_ptr); - - auto * denominator_place_ptr_untyped = b.CreateConstInBoundsGEP1_32(nullptr, aggregate_function_place, 8); - auto * denominator_place_ptr = b.CreatePointerCast(denominator_place_ptr_untyped, denominator_type->getPointerTo()); - auto * denominator_value = b.CreateLoad(denominator_place_ptr, numerator_value_ptr); - auto * increate_denominator_value = b.CreateAdd(denominator_value, llvm::ConstantInt::get(denominator_type, 1)); - b.CreateStore(increate_denominator_value, denominator_place_ptr); - } - -#endif - - String getName() const final { return "avg"; } }; } diff --git a/src/AggregateFunctions/AggregateFunctionSum.h b/src/AggregateFunctions/AggregateFunctionSum.h index 609cb18429f..18c78f2e8b5 100644 --- a/src/AggregateFunctions/AggregateFunctionSum.h +++ b/src/AggregateFunctions/AggregateFunctionSum.h @@ -395,20 +395,67 @@ public: #if USE_EMBEDDED_COMPILER - virtual bool isCompilable() const override { return Type == AggregateFunctionTypeSum; } + bool isCompilable() const override + { + if constexpr (Type == AggregateFunctionTypeSumKahan) + return false; - virtual void compile(llvm::IRBuilderBase & builder, llvm::Value * aggregate_function_place, const DataTypePtr & value_type, llvm::Value * value) const override + auto return_type = getReturnType(); + + return canBeNativeType(*return_type); + } + + void compileCreate(llvm::IRBuilderBase & builder, llvm::Value * aggregate_data_ptr) const override { llvm::IRBuilder<> & b = static_cast &>(builder); - auto * return_native_type = toNativeType(b, removeNullable(getReturnType())); - auto * sum_value_ptr = b.CreatePointerCast(aggregate_function_place, return_native_type->getPointerTo()); - auto * sum_value = b.CreateLoad(return_native_type, sum_value_ptr); - auto * value_cast_to_result = nativeCast(b, value_type, value, return_native_type); + auto * return_type = toNativeType(b, removeNullable(getReturnType())); + auto * aggregate_sum_ptr = b.CreatePointerCast(aggregate_data_ptr, return_type->getPointerTo()); + + b.CreateStore(llvm::ConstantInt::get(return_type, 0), aggregate_sum_ptr); + } + + void compileAdd(llvm::IRBuilderBase & builder, llvm::Value * 
aggregate_data_ptr, const DataTypePtr & value_type, llvm::Value * value) const override + { + llvm::IRBuilder<> & b = static_cast &>(builder); + + auto * return_type = toNativeType(b, removeNullable(getReturnType())); + + auto * sum_value_ptr = b.CreatePointerCast(aggregate_data_ptr, return_type->getPointerTo()); + auto * sum_value = b.CreateLoad(return_type, sum_value_ptr); + + auto * value_cast_to_result = nativeCast(b, value_type, value, return_type); auto * sum_result_value = sum_value->getType()->isIntegerTy() ? b.CreateAdd(sum_value, value_cast_to_result) : b.CreateFAdd(sum_value, value_cast_to_result); + b.CreateStore(sum_result_value, sum_value_ptr); } + void compileMerge(llvm::IRBuilderBase & builder, llvm::Value * aggregate_data_dst_ptr, llvm::Value * aggregate_data_src_ptr) const override + { + llvm::IRBuilder<> & b = static_cast &>(builder); + + auto * return_type = toNativeType(b, removeNullable(getReturnType())); + + auto * sum_value_dst_ptr = b.CreatePointerCast(aggregate_data_dst_ptr, return_type->getPointerTo()); + auto * sum_value_dst = b.CreateLoad(return_type, sum_value_dst_ptr); + + auto * sum_value_src_ptr = b.CreatePointerCast(aggregate_data_src_ptr, return_type->getPointerTo()); + auto * sum_value_src = b.CreateLoad(return_type, sum_value_src_ptr); + + auto * sum_return_value = b.CreateAdd(sum_value_dst, sum_value_src); + b.CreateStore(sum_return_value, sum_value_dst_ptr); + } + + llvm::Value * compileGetResult(llvm::IRBuilderBase & builder, llvm::Value * aggregate_data_ptr) const override + { + llvm::IRBuilder<> & b = static_cast &>(builder); + + auto * return_type = toNativeType(b, removeNullable(getReturnType())); + auto * sum_value_ptr = b.CreatePointerCast(aggregate_data_ptr, return_type->getPointerTo()); + + return b.CreateLoad(return_type, sum_value_ptr); + } + #endif private: diff --git a/src/AggregateFunctions/IAggregateFunction.h b/src/AggregateFunctions/IAggregateFunction.h index 928d212dad4..dc1e1b234dd 100644 --- a/src/AggregateFunctions/IAggregateFunction.h +++ b/src/AggregateFunctions/IAggregateFunction.h @@ -255,7 +255,22 @@ public: virtual bool isCompilable() const { return false; } - virtual void compile(llvm::IRBuilderBase & /*builder*/, llvm::Value * /*aggregate_function_place*/, const DataTypePtr & /*value_type*/, llvm::Value * /*value*/) const + virtual void compileCreate(llvm::IRBuilderBase & /*builder*/, llvm::Value * /*aggregate_data_ptr*/) const + { + throw Exception(getName() + " is not JIT-compilable", ErrorCodes::NOT_IMPLEMENTED); + } + + virtual void compileAdd(llvm::IRBuilderBase & /*builder*/, llvm::Value * /*aggregate_data_ptr*/, const DataTypePtr & /*value_type*/, llvm::Value * /*value*/) const + { + throw Exception(getName() + " is not JIT-compilable", ErrorCodes::NOT_IMPLEMENTED); + } + + virtual void compileMerge(llvm::IRBuilderBase & /*builder*/, llvm::Value * /*aggregate_data_dst_ptr*/, llvm::Value * /*aggregate_data_src_ptr*/) const + { + throw Exception(getName() + " is not JIT-compilable", ErrorCodes::NOT_IMPLEMENTED); + } + + virtual llvm::Value * compileGetResult(llvm::IRBuilderBase & /*builder*/, llvm::Value * /*aggregate_data_ptr*/) const { throw Exception(getName() + " is not JIT-compilable", ErrorCodes::NOT_IMPLEMENTED); } diff --git a/src/Interpreters/Aggregator.cpp b/src/Interpreters/Aggregator.cpp index 8549926fc3c..b89cd182c5c 100644 --- a/src/Interpreters/Aggregator.cpp +++ b/src/Interpreters/Aggregator.cpp @@ -214,6 +214,38 @@ void Aggregator::Params::explain(JSONBuilder::JSONMap & map) const } } +static CHJIT & 
getJITInstance() +{ + static CHJIT jit; + return jit; +} + +static std::string dumpAggregateFunction(const IAggregateFunction * function) +{ + std::string function_dump; + + auto return_type_name = function->getReturnType()->getName(); + + function_dump += return_type_name; + function_dump += ' '; + function_dump += function->getName(); + function_dump += '('; + + const auto & argument_types = function->getArgumentTypes(); + for (const auto & argument_type : argument_types) + { + function_dump += argument_type->getName(); + function_dump += ','; + } + + if (!argument_types.empty()) + function_dump.pop_back(); + + function_dump += ')'; + + return function_dump; +} + Aggregator::Aggregator(const Params & params_) : params(params_) { @@ -265,8 +297,70 @@ Aggregator::Aggregator(const Params & params_) HashMethodContext::Settings cache_settings; cache_settings.max_threads = params.max_threads; aggregation_state_cache = AggregatedDataVariants::createCache(method_chosen, cache_settings); + compileAggregateFunctions(); } +void Aggregator::compileAggregateFunctions() +{ + if (!params.compile_aggregate_expressions || + params.overflow_row) + return; + + std::vector functions_to_compile; + size_t aggregate_instructions_size = 0; + std::string functions_dump; + + /// Add values to the aggregate functions. + for (size_t i = 0; i < aggregate_functions.size(); ++i) + { + const auto * function = aggregate_functions[i]; + size_t offset_of_aggregate_function = offsets_of_aggregate_states[i]; + + if (function && function->isCompilable()) + { + AggregateFunctionWithOffset function_to_compile + { + .function = function, + .aggregate_data_offset = offset_of_aggregate_function + }; + + std::string function_dump = dumpAggregateFunction(function); + functions_dump += function_dump; + functions_dump += ' '; + + functions_to_compile.emplace_back(std::move(function_to_compile)); + } + + ++aggregate_instructions_size; + } + + if (functions_to_compile.size() != aggregate_instructions_size) + return; + + CompiledAggregateFunctions compiled_aggregate_functions; + + { + static std::unordered_map aggregation_functions_dump_to_add_compiled; + static std::mutex mtx; + + std::lock_guard lock(mtx); + + auto it = aggregation_functions_dump_to_add_compiled.find(functions_dump); + if (it != aggregation_functions_dump_to_add_compiled.end()) + { + compiled_aggregate_functions = it->second; + } + else + { + LOG_TRACE(log, "Compile expression {}", functions_dump); + + compiled_aggregate_functions = compileAggregateFunctons(getJITInstance(), functions_to_compile, functions_dump); + aggregation_functions_dump_to_add_compiled[functions_dump] = compiled_aggregate_functions; + } + } + + compiled_functions.emplace(std::move(compiled_aggregate_functions)); +} AggregatedDataVariants::Type Aggregator::chooseAggregationMethod() { @@ -480,10 +574,124 @@ void NO_INLINE Aggregator::executeImpl( executeImplBatch(method, state, aggregates_pool, rows, aggregate_instructions, overflow_row); } -static CHJIT & getJITInstance() +template +void NO_INLINE Aggregator::handleAggregationJIT( + Method & method, + typename Method::State & state, + Arena * aggregates_pool, + size_t rows, + AggregateFunctionInstruction * aggregate_instructions) const { - static CHJIT jit; - return jit; + std::vector columns_data; + columns_data.reserve(aggregate_functions.size()); + + /// Add values to the aggregate functions. 
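+    /// Only the first argument column of every instruction is handed to the compiled
+    /// kernel as ColumnData; compileAggregateFunctions() has already guaranteed that
+    /// each function here is compilable, so no scalar fallback is mixed into this loop.
+    /// The generated kernel has the shape
+    ///   void(size_t rows, ColumnData * columns, GetAggregateDataFunction, GetAggregateDataContext)
+    /// and calls back into get_aggregate_data below for each row's state pointer.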
+ for (AggregateFunctionInstruction * inst = aggregate_instructions; inst->that; ++inst) + columns_data.emplace_back(getColumnData(inst->batch_arguments[0])); + + auto add_into_aggregate_states_function = compiled_functions->add_into_aggregate_states_function; + auto create_aggregate_states_function = compiled_functions->create_aggregate_states_function; + + auto get_aggregate_data = [&](size_t row) -> AggregateDataPtr + { + AggregateDataPtr aggregate_data; + + if constexpr (!no_more_keys) + { + auto emplace_result = state.emplaceKey(method.data, row, *aggregates_pool); + + /// If a new key is inserted, initialize the states of the aggregate functions, and possibly something related to the key. + if (emplace_result.isInserted()) + { + /// exception-safety - if you can not allocate memory or create states, then destructors will not be called. + emplace_result.setMapped(nullptr); + + aggregate_data = aggregates_pool->alignedAlloc(total_size_of_aggregate_states, align_aggregate_states); + create_aggregate_states_function(aggregate_data); + + emplace_result.setMapped(aggregate_data); + } + else + aggregate_data = emplace_result.getMapped(); + + assert(aggregate_data != nullptr); + } + else + { + /// Add only if the key already exists. + /// Overflow row is disabled for JIT. + auto find_result = state.findKey(method.data, row, *aggregates_pool); + assert(find_result.getMapped() != nullptr); + + aggregate_data = find_result.getMapped(); + } + + return aggregate_data; + }; + + GetAggregateDataFunction get_aggregate_data_function = FunctorToStaticMethodAdaptor::unsafeCall; + GetAggregateDataContext get_aggregate_data_context = reinterpret_cast(&get_aggregate_data); + + add_into_aggregate_states_function(rows, columns_data.data(), get_aggregate_data_function, get_aggregate_data_context); +} + +template +void NO_INLINE Aggregator::handleAggregationDefault( + Method & method, + typename Method::State & state, + Arena * aggregates_pool, + size_t rows, + AggregateFunctionInstruction * aggregate_instructions, + AggregateDataPtr overflow_row) const +{ + std::unique_ptr places(new AggregateDataPtr[rows]); + + /// For all rows. + for (size_t i = 0; i < rows; ++i) + { + AggregateDataPtr aggregate_data; + + if constexpr (!no_more_keys) + { + auto emplace_result = state.emplaceKey(method.data, i, *aggregates_pool); + + /// If a new key is inserted, initialize the states of the aggregate functions, and possibly something related to the key. + if (emplace_result.isInserted()) + { + /// exception-safety - if you can not allocate memory or create states, then destructors will not be called. + emplace_result.setMapped(nullptr); + + aggregate_data = aggregates_pool->alignedAlloc(total_size_of_aggregate_states, align_aggregate_states); + createAggregateStates(aggregate_data); + + emplace_result.setMapped(aggregate_data); + } + else + aggregate_data = emplace_result.getMapped(); + + assert(aggregate_data != nullptr); + } + else + { + /// Add only if the key already exists. + auto find_result = state.findKey(method.data, i, *aggregates_pool); + if (find_result.isFound()) + aggregate_data = find_result.getMapped(); + else + aggregate_data = overflow_row; + } + + places[i] = aggregate_data; + } + + /// Add values to the aggregate functions. 
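+    /// Second pass: with every row's aggregate data place resolved, each function
+    /// consumes the whole batch at once (addBatchArray for -Array combinators,
+    /// addBatch otherwise), paying the virtual call per batch rather than per row.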
+ for (AggregateFunctionInstruction * inst = aggregate_instructions; inst->that; ++inst) + { + if (inst->offsets) + inst->batch_that->addBatchArray(rows, places.get(), inst->state_offset, inst->batch_arguments, inst->offsets, aggregates_pool); + else + inst->batch_that->addBatch(rows, places.get(), inst->state_offset, inst->batch_arguments, aggregates_pool); + } } template @@ -508,7 +716,7 @@ void NO_INLINE Aggregator::executeImplBatch( return; } - /// Optimization for special case when aggregating by 8bit key. + /// Optimization for special case when aggregating by 8bit key.` if constexpr (!no_more_keys && std::is_same_v) { /// We use another method if there are aggregate functions with -Array combinator. @@ -543,178 +751,10 @@ void NO_INLINE Aggregator::executeImplBatch( } } - /// Generic case. - - auto get_aggregate_data = [&](size_t row) -> AggregateDataPtr - { - AggregateDataPtr aggregate_data; - - if constexpr (!no_more_keys) - { - auto emplace_result = state.emplaceKey(method.data, row, *aggregates_pool); - - /// If a new key is inserted, initialize the states of the aggregate functions, and possibly something related to the key. - if (emplace_result.isInserted()) - { - /// exception-safety - if you can not allocate memory or create states, then destructors will not be called. - emplace_result.setMapped(nullptr); - - aggregate_data = aggregates_pool->alignedAlloc(total_size_of_aggregate_states, align_aggregate_states); - createAggregateStates(aggregate_data); - - emplace_result.setMapped(aggregate_data); - } - else - aggregate_data = emplace_result.getMapped(); - - assert(aggregate_data != nullptr); - } - else - { - /// Add only if the key already exists. - auto find_result = state.findKey(method.data, row, *aggregates_pool); - if (find_result.isFound()) - aggregate_data = find_result.getMapped(); - else - aggregate_data = overflow_row; - } - - // std::cerr << "Row " << row << " returned place " << static_cast(aggregate_data) << std::endl; - return aggregate_data; - }; - - #if USE_EMBEDDED_COMPILER - std::vector columns_data; - std::vector functions_to_compile; - size_t aggregate_instructions_size = 0; - - /// Add values to the aggregate functions. 
- for (AggregateFunctionInstruction * inst = aggregate_instructions; inst->that; ++inst) - { - const auto * function = inst->that; - if (function && function->isCompilable()) - { - AggregateFunctionToCompile function_to_compile - { - .function = inst->that, - .aggregate_data_offset = inst->state_offset - }; - - columns_data.emplace_back(getColumnData(inst->batch_arguments[0])); - functions_to_compile.emplace_back(std::move(function_to_compile)); - } - - ++aggregate_instructions_size; - } - - if (params.compile_aggregate_expressions && functions_to_compile.size() == aggregate_instructions_size) - { - std::string functions_dump; - - for (const auto & func : functions_to_compile) - { - const auto * function = func.function; - - std::string function_dump; - - auto return_type_name = function->getReturnType()->getName(); - - function_dump += return_type_name; - function_dump += ' '; - function_dump += function->getName(); - function_dump += '('; - - const auto & argument_types = function->getArgumentTypes(); - for (const auto & argument_type : argument_types) - { - function_dump += argument_type->getName(); - function_dump += ','; - } - - if (!argument_types.empty()) - function_dump.pop_back(); - - function_dump += ')'; - - functions_dump += function_dump; - functions_dump += ' '; - } - - static std::unordered_map aggregation_functions_dump_to_compiled_module_info; - CHJIT::CompiledModuleInfo compiled_module; - - auto it = aggregation_functions_dump_to_compiled_module_info.find(functions_dump); - if (it != aggregation_functions_dump_to_compiled_module_info.end()) - { - compiled_module = it->second; - LOG_TRACE(log, "Get compiled aggregate functions {} from cache", functions_dump); - - } - else - { - compiled_module = compileAggregateFunctons(getJITInstance(), functions_to_compile, functions_dump); - aggregation_functions_dump_to_compiled_module_info[functions_dump] = compiled_module; - } - - LOG_TRACE(log, "Use compiled expression {}", functions_dump); - - JITCompiledAggregateFunction aggregate_function = reinterpret_cast(getJITInstance().findCompiledFunction(compiled_module, functions_dump)); - GetAggregateDataFunction get_aggregate_data_function = FunctorToStaticMethodAdaptor::unsafeCall; - GetAggregateDataContext get_aggregate_data_context = reinterpret_cast(&get_aggregate_data); - aggregate_function(rows, columns_data.data(), get_aggregate_data_function, get_aggregate_data_context); - } + if (compiled_functions) + handleAggregationJIT(method, state, aggregates_pool, rows, aggregate_instructions); else - #endif - { - std::unique_ptr places(new AggregateDataPtr[rows]); - - /// For all rows. - for (size_t i = 0; i < rows; ++i) - { - AggregateDataPtr aggregate_data; - - if constexpr (!no_more_keys) - { - auto emplace_result = state.emplaceKey(method.data, i, *aggregates_pool); - - /// If a new key is inserted, initialize the states of the aggregate functions, and possibly something related to the key. - if (emplace_result.isInserted()) - { - /// exception-safety - if you can not allocate memory or create states, then destructors will not be called. - emplace_result.setMapped(nullptr); - - aggregate_data = aggregates_pool->alignedAlloc(total_size_of_aggregate_states, align_aggregate_states); - createAggregateStates(aggregate_data); - - emplace_result.setMapped(aggregate_data); - } - else - aggregate_data = emplace_result.getMapped(); - - assert(aggregate_data != nullptr); - } - else - { - /// Add only if the key already exists. 
- auto find_result = state.findKey(method.data, i, *aggregates_pool); - if (find_result.isFound()) - aggregate_data = find_result.getMapped(); - else - aggregate_data = overflow_row; - } - - places[i] = aggregate_data; - } - - /// Add values to the aggregate functions. - for (AggregateFunctionInstruction * inst = aggregate_instructions; inst->that; ++inst) - { - if (inst->offsets) - inst->batch_that->addBatchArray(rows, places.get(), inst->state_offset, inst->batch_arguments, inst->offsets, aggregates_pool); - else - inst->batch_that->addBatch(rows, places.get(), inst->state_offset, inst->batch_arguments, aggregates_pool); - } - } + handleAggregationDefault(method, state, aggregates_pool, rows, aggregate_instructions, overflow_row); } @@ -1251,11 +1291,38 @@ void NO_INLINE Aggregator::convertToBlockImplFinal( auto shuffled_key_sizes = method.shuffleKeyColumns(key_columns, key_sizes); const auto & key_sizes_ref = shuffled_key_sizes ? *shuffled_key_sizes : key_sizes; - data.forEachValue([&](const auto & key, auto & mapped) + if (compiled_functions) { - method.insertKeyIntoColumns(key, key_columns, key_sizes_ref); - insertAggregatesIntoColumns(mapped, final_aggregate_columns, arena); - }); + std::unique_ptr places(new AggregateDataPtr[data.size()]); + size_t place_index = 0; + + data.forEachValue([&](const auto & key, auto & mapped) + { + method.insertKeyIntoColumns(key, key_columns, key_sizes_ref); + places[place_index] = mapped; + ++place_index; + }); + + std::vector columns_data; + columns_data.reserve(final_aggregate_columns.size()); + + for (auto & final_aggregate_column : final_aggregate_columns) + { + final_aggregate_column = final_aggregate_column->cloneResized(data.size()); + columns_data.emplace_back(getColumnData(final_aggregate_column.get())); + } + + auto insert_aggregate_states_function = compiled_functions->insert_aggregates_into_columns_function; + insert_aggregate_states_function(data.size(), columns_data.data(), places.get()); + } + else + { + data.forEachValue([&](const auto & key, auto & mapped) + { + method.insertKeyIntoColumns(key, key_columns, key_sizes_ref); + insertAggregatesIntoColumns(mapped, final_aggregate_columns, arena); + }); + } } template @@ -1684,27 +1751,45 @@ void NO_INLINE Aggregator::mergeDataImpl( if constexpr (Method::low_cardinality_optimization) mergeDataNullKey(table_dst, table_src, arena); - table_src.mergeToViaEmplace(table_dst, - [&](AggregateDataPtr & __restrict dst, AggregateDataPtr & __restrict src, bool inserted) + if (compiled_functions) { - if (!inserted) - { - for (size_t i = 0; i < params.aggregates_size; ++i) - aggregate_functions[i]->merge( - dst + offsets_of_aggregate_states[i], - src + offsets_of_aggregate_states[i], - arena); + auto merge_aggregate_states_function_typed = compiled_functions->merge_aggregate_states_function; - for (size_t i = 0; i < params.aggregates_size; ++i) - aggregate_functions[i]->destroy(src + offsets_of_aggregate_states[i]); - } - else + table_src.mergeToViaEmplace(table_dst, [&](AggregateDataPtr & __restrict dst, AggregateDataPtr & __restrict src, bool inserted) { - dst = src; - } + if (!inserted) + { + merge_aggregate_states_function_typed(dst, src); + } + else + { + dst = src; + } + + src = nullptr; + }); + } + else + { + table_src.mergeToViaEmplace(table_dst, [&](AggregateDataPtr & __restrict dst, AggregateDataPtr & __restrict src, bool inserted) + { + if (!inserted) + { + for (size_t i = 0; i < params.aggregates_size; ++i) + aggregate_functions[i]->merge(dst + offsets_of_aggregate_states[i], src + 
offsets_of_aggregate_states[i], arena); + + for (size_t i = 0; i < params.aggregates_size; ++i) + aggregate_functions[i]->destroy(src + offsets_of_aggregate_states[i]); + } + else + { + dst = src; + } + + src = nullptr; + }); + } - src = nullptr; - }); table_src.clearAndShrink(); } diff --git a/src/Interpreters/Aggregator.h b/src/Interpreters/Aggregator.h index b279ebd4038..91065a266b5 100644 --- a/src/Interpreters/Aggregator.h +++ b/src/Interpreters/Aggregator.h @@ -26,6 +26,7 @@ #include #include +#include #include #include @@ -1082,6 +1083,12 @@ private: /// For external aggregation. TemporaryFiles temporary_files; + std::optional compiled_functions; + + /** Try to compile aggregate functions. + */ + void compileAggregateFunctions(); + /** Select the aggregation method based on the number and types of keys. */ AggregatedDataVariants::Type chooseAggregationMethod(); @@ -1116,6 +1123,41 @@ private: AggregateFunctionInstruction * aggregate_instructions, AggregateDataPtr overflow_row) const; + template + void handleAggregationJIT( + Method & method, + typename Method::State & state, + Arena * aggregates_pool, + size_t rows, + AggregateFunctionInstruction * aggregate_instructions) const; + + // template + // void handleAggregationJITV2( + // Method & method, + // typename Method::State & state, + // Arena * aggregates_pool, + // size_t rows, + // AggregateFunctionInstruction * aggregate_instructions, + // AggregateDataPtr overflow_row) const; + + // template + // void handleAggregationJITV3( + // Method & method, + // typename Method::State & state, + // Arena * aggregates_pool, + // size_t rows, + // AggregateFunctionInstruction * aggregate_instructions, + // AggregateDataPtr overflow_row) const; + + template + void handleAggregationDefault( + Method & method, + typename Method::State & state, + Arena * aggregates_pool, + size_t rows, + AggregateFunctionInstruction * aggregate_instructions, + AggregateDataPtr overflow_row) const; + /// For case when there are no keys (all aggregate into one row). 
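+    /// (The no-keys case keeps the interpreted addBatch route; only the keyed
+    /// executeImplBatch dispatches to handleAggregationJIT above.)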
static void executeWithoutKeyImpl( AggregatedDataWithoutKey & res, diff --git a/src/Interpreters/ExpressionJIT.cpp b/src/Interpreters/ExpressionJIT.cpp index b9fb1ae89d8..d30bab0e6df 100644 --- a/src/Interpreters/ExpressionJIT.cpp +++ b/src/Interpreters/ExpressionJIT.cpp @@ -42,36 +42,29 @@ static Poco::Logger * getLogger() return &logger; } -class CompiledFunction +class CompiledFunctionHolder { public: - CompiledFunction(void * compiled_function_, CHJIT::CompiledModuleInfo module_info_) + explicit CompiledFunctionHolder(CompiledFunction compiled_function_) : compiled_function(compiled_function_) - , module_info(std::move(module_info_)) {} - void * getCompiledFunction() const { return compiled_function; } - - ~CompiledFunction() + ~CompiledFunctionHolder() { - getJITInstance().deleteCompiledModule(module_info); + getJITInstance().deleteCompiledModule(compiled_function.compiled_module); } -private: - - void * compiled_function; - - CHJIT::CompiledModuleInfo module_info; + CompiledFunction compiled_function; }; class LLVMExecutableFunction : public IExecutableFunction { public: - explicit LLVMExecutableFunction(const std::string & name_, std::shared_ptr compiled_function_) + explicit LLVMExecutableFunction(const std::string & name_, std::shared_ptr compiled_function_holder_) : name(name_) - , compiled_function(compiled_function_) + , compiled_function_holder(compiled_function_holder_) { } @@ -104,8 +97,8 @@ public: columns[arguments.size()] = getColumnData(result_column.get()); - JITCompiledFunction jit_compiled_function_typed = reinterpret_cast(compiled_function->getCompiledFunction()); - jit_compiled_function_typed(input_rows_count, columns.data()); + auto jit_compiled_function = compiled_function_holder->compiled_function.compiled_function; + jit_compiled_function(input_rows_count, columns.data()); #if defined(MEMORY_SANITIZER) /// Memory sanitizer don't know about stores from JIT-ed code. 
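+        /// The result buffers were written by native code outside MSan instrumentation,
+        /// so they have to be unpoisoned explicitly before the columns are handed back.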
@@ -135,7 +128,7 @@ public: private: std::string name; - std::shared_ptr compiled_function; + std::shared_ptr compiled_function_holder; }; class LLVMFunction : public IFunctionBase @@ -157,9 +150,9 @@ public: } } - void setCompiledFunction(std::shared_ptr compiled_function_) + void setCompiledFunction(std::shared_ptr compiled_function_holder_) { - compiled_function = compiled_function_; + compiled_function_holder = compiled_function_holder_; } bool isCompilable() const override { return true; } @@ -177,10 +170,10 @@ public: ExecutableFunctionPtr prepare(const ColumnsWithTypeAndName &) const override { - if (!compiled_function) + if (!compiled_function_holder) throw Exception(ErrorCodes::LOGICAL_ERROR, "Compiled function was not initialized {}", name); - return std::make_unique(name, compiled_function); + return std::make_unique(name, compiled_function_holder); } bool isDeterministic() const override @@ -269,7 +262,7 @@ private: CompileDAG dag; DataTypes argument_types; std::vector nested_functions; - std::shared_ptr compiled_function; + std::shared_ptr compiled_function_holder; }; static FunctionBasePtr compile( @@ -293,22 +286,20 @@ static FunctionBasePtr compile( auto [compiled_function_cache_entry, _] = compilation_cache->getOrSet(hash_key, [&] () { LOG_TRACE(getLogger(), "Compile expression {}", llvm_function->getName()); - CHJIT::CompiledModuleInfo compiled_module_info = compileFunction(getJITInstance(), *llvm_function); - auto * compiled_jit_function = getJITInstance().findCompiledFunction(compiled_module_info, llvm_function->getName()); - auto compiled_function = std::make_shared(compiled_jit_function, compiled_module_info); + auto compiled_function = compileFunction(getJITInstance(), *llvm_function); + auto compiled_function_holder = std::make_shared(compiled_function); - return std::make_shared(std::move(compiled_function), compiled_module_info.size); + return std::make_shared(std::move(compiled_function_holder), compiled_function.compiled_module.size); }); - llvm_function->setCompiledFunction(compiled_function_cache_entry->getCompiledFunction()); + llvm_function->setCompiledFunction(compiled_function_cache_entry->getCompiledFunctionHolder()); } else { - LOG_TRACE(getLogger(), "Compile expression {}", llvm_function->getName()); - CHJIT::CompiledModuleInfo compiled_module_info = compileFunction(getJITInstance(), *llvm_function); - auto * compiled_jit_function = getJITInstance().findCompiledFunction(compiled_module_info, llvm_function->getName()); - auto compiled_function = std::make_shared(compiled_jit_function, compiled_module_info); - llvm_function->setCompiledFunction(compiled_function); + auto compiled_function = compileFunction(getJITInstance(), *llvm_function); + auto compiled_function_ptr = std::make_shared(compiled_function); + + llvm_function->setCompiledFunction(compiled_function_ptr); } return llvm_function; diff --git a/src/Interpreters/ExpressionJIT.h b/src/Interpreters/ExpressionJIT.h index 6b39acae799..4f724d2edf3 100644 --- a/src/Interpreters/ExpressionJIT.h +++ b/src/Interpreters/ExpressionJIT.h @@ -11,22 +11,22 @@ namespace DB { -class CompiledFunction; +class CompiledFunctionHolder; class CompiledFunctionCacheEntry { public: - CompiledFunctionCacheEntry(std::shared_ptr compiled_function_, size_t compiled_function_size_) - : compiled_function(std::move(compiled_function_)) + CompiledFunctionCacheEntry(std::shared_ptr compiled_function_holder_, size_t compiled_function_size_) + : compiled_function_holder(std::move(compiled_function_holder_)) , 
compiled_function_size(compiled_function_size_) {} - std::shared_ptr getCompiledFunction() const { return compiled_function; } + std::shared_ptr getCompiledFunctionHolder() const { return compiled_function_holder; } size_t getCompiledFunctionSize() const { return compiled_function_size; } private: - std::shared_ptr compiled_function; + std::shared_ptr compiled_function_holder; size_t compiled_function_size; }; diff --git a/src/Interpreters/JIT/CHJIT.cpp b/src/Interpreters/JIT/CHJIT.cpp index 72a2ede5853..55dfc0b2e62 100644 --- a/src/Interpreters/JIT/CHJIT.cpp +++ b/src/Interpreters/JIT/CHJIT.cpp @@ -189,7 +189,7 @@ CHJIT::CHJIT() CHJIT::~CHJIT() = default; -CHJIT::CompiledModuleInfo CHJIT::compileModule(std::function compile_function) +CHJIT::CompiledModule CHJIT::compileModule(std::function compile_function) { std::lock_guard lock(jit_lock); @@ -210,12 +210,15 @@ std::unique_ptr CHJIT::createModuleForCompilation() return module; } -CHJIT::CompiledModuleInfo CHJIT::compileModule(std::unique_ptr module) +CHJIT::CompiledModule CHJIT::compileModule(std::unique_ptr module) { runOptimizationPassesOnModule(*module); auto buffer = compiler->compile(*module); + // llvm::errs() << "Module after optimizations " << "\n"; + // module->print(llvm::errs(), nullptr); + llvm::Expected> object = llvm::object::ObjectFile::createObjectFile(*buffer); if (!object) @@ -234,7 +237,7 @@ CHJIT::CompiledModuleInfo CHJIT::compileModule(std::unique_ptr mod dynamic_linker.resolveRelocations(); module_memory_manager->getManager().finalizeMemory(); - CompiledModuleInfo module_info; + CompiledModule compiled_module; for (const auto & function : *module) { @@ -250,47 +253,29 @@ CHJIT::CompiledModuleInfo CHJIT::compileModule(std::unique_ptr mod throw Exception(ErrorCodes::CANNOT_COMPILE_CODE, "DynamicLinker could not found symbol {} after compilation", function_name); auto * jit_symbol_address = reinterpret_cast(jit_symbol.getAddress()); - - std::string symbol_name = std::to_string(current_module_key) + '_' + function_name; - name_to_symbol[symbol_name] = jit_symbol_address; - module_info.compiled_functions.emplace_back(std::move(function_name)); + compiled_module.function_name_to_symbol.emplace(std::move(function_name), jit_symbol_address); } - module_info.size = module_memory_manager->getAllocatedSize(); - module_info.identifier = current_module_key; + compiled_module.size = module_memory_manager->getAllocatedSize(); + compiled_module.identifier = current_module_key; module_identifier_to_memory_manager[current_module_key] = std::move(module_memory_manager); - compiled_code_size.fetch_add(module_info.size, std::memory_order_relaxed); + compiled_code_size.fetch_add(compiled_module.size, std::memory_order_relaxed); - return module_info; + return compiled_module; } -void CHJIT::deleteCompiledModule(const CHJIT::CompiledModuleInfo & module_info) +void CHJIT::deleteCompiledModule(const CHJIT::CompiledModule & module) { std::lock_guard lock(jit_lock); - auto module_it = module_identifier_to_memory_manager.find(module_info.identifier); + auto module_it = module_identifier_to_memory_manager.find(module.identifier); if (module_it == module_identifier_to_memory_manager.end()) - throw Exception(ErrorCodes::LOGICAL_ERROR, "There is no compiled module with identifier {}", module_info.identifier); - - for (const auto & function : module_info.compiled_functions) - name_to_symbol.erase(function); + throw Exception(ErrorCodes::LOGICAL_ERROR, "There is no compiled module with identifier {}", module.identifier); 
    module_identifier_to_memory_manager.erase(module_it);
-    compiled_code_size.fetch_sub(module_info.size, std::memory_order_relaxed);
-}
-
-void * CHJIT::findCompiledFunction(const CompiledModuleInfo & module_info, const std::string & function_name) const
-{
-    std::lock_guard lock(jit_lock);
-
-    std::string symbol_name = std::to_string(module_info.identifier) + '_' + function_name;
-    auto it = name_to_symbol.find(symbol_name);
-    if (it != name_to_symbol.end())
-        return it->second;
-
-    return nullptr;
+    compiled_code_size.fetch_sub(module.size, std::memory_order_relaxed);
 }
 
 void CHJIT::registerExternalSymbol(const std::string & symbol_name, void * address)
diff --git a/src/Interpreters/JIT/CHJIT.h b/src/Interpreters/JIT/CHJIT.h
index 45a8aef50af..3e53f83b92d 100644
--- a/src/Interpreters/JIT/CHJIT.h
+++ b/src/Interpreters/JIT/CHJIT.h
@@ -52,32 +52,31 @@ public:
 
     ~CHJIT();
 
-    struct CompiledModuleInfo
+    struct CompiledModule
     {
         /// Size of compiled module code in bytes
         size_t size;
+
         /// Module identifier. Should not be changed by client
         uint64_t identifier;
-        /// Vector of compiled function nameds. Should not be changed by client
-        std::vector<std::string> compiled_functions;
+
+        /// Map of compiled function name to function symbol. Should not be changed by client.
+        /// It is the client's responsibility to cast the result function to the right signature.
+        /// After a call to deleteCompiledModule, compiled functions from the module become invalid.
+        std::unordered_map<std::string, void *> function_name_to_symbol;
+
     };
 
     /** Compile module. In compile function client responsibility is to fill module with necessary
       * IR code, then it will be compiled by CHJIT instance.
-      * Return compiled module info.
+      * Return compiled module.
       */
-    CompiledModuleInfo compileModule(std::function<void (llvm::Module &)> compile_function);
+    CompiledModule compileModule(std::function<void (llvm::Module &)> compile_function);
 
     /** Delete compiled module. Pointers to functions from module become invalid after this call.
       * It is client responsibility to be sure that there are no pointers to compiled module code.
      */
-    void deleteCompiledModule(const CompiledModuleInfo & module_info);
-
-    /** Find compiled function using module_info, and function_name.
-      * It is client responsibility to case result function to right signature.
-      * After call to deleteCompiledModule compiled functions from module become invalid.
-      */
-    void * findCompiledFunction(const CompiledModuleInfo & module_info, const std::string & function_name) const;
+    void deleteCompiledModule(const CompiledModule & module);
 
     /** Register external symbol for CHJIT instance to use, during linking.
      * It can be function, or global constant.
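With the symbol map moved into CompiledModule, the lookup-by-name step happens on the returned value instead of through a separate findCompiledFunction call under the JIT lock. A usage sketch under the interface declared above, mirroring the style of jit_example.cpp; the function name "square" and its IR body are hypothetical, and error handling is elided:

    #include <cassert>
    #include <cstdint>
    #include <iostream>

    #include <llvm/IR/IRBuilder.h>

    #include <Interpreters/JIT/CHJIT.h>

    using JITSquareFunction = int64_t (*)(int64_t);

    int main()
    {
        DB::CHJIT jit;

        /// The client fills the module with IR; here a trivial int64 square function.
        auto module = jit.compileModule([](llvm::Module & m)
        {
            llvm::IRBuilder<> b(m.getContext());

            auto * func_type = llvm::FunctionType::get(b.getInt64Ty(), { b.getInt64Ty() }, /*isVarArg=*/false);
            auto * func = llvm::Function::Create(func_type, llvm::Function::ExternalLinkage, "square", m);

            b.SetInsertPoint(llvm::BasicBlock::Create(m.getContext(), "entry", func));
            llvm::Value * argument = func->args().begin();
            b.CreateRet(b.CreateMul(argument, argument));
        });

        /// The compiled module carries name -> symbol directly.
        auto it = module.function_name_to_symbol.find("square");
        assert(it != module.function_name_to_symbol.end());

        auto square = reinterpret_cast<JITSquareFunction>(it->second);
        std::cout << square(8) << '\n';

        jit.deleteCompiledModule(module); /// 'square' must not be called after this
    }
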
@@ -93,7 +92,7 @@ private: std::unique_ptr createModuleForCompilation(); - CompiledModuleInfo compileModule(std::unique_ptr module); + CompiledModule compileModule(std::unique_ptr module); std::string getMangledName(const std::string & name_to_mangle) const; @@ -107,7 +106,6 @@ private: std::unique_ptr compiler; std::unique_ptr symbol_resolver; - std::unordered_map name_to_symbol; std::unordered_map> module_identifier_to_memory_manager; uint64_t current_module_key = 0; std::atomic compiled_code_size = 0; diff --git a/src/Interpreters/JIT/compileFunction.cpp b/src/Interpreters/JIT/compileFunction.cpp index d17ec844c68..ea3f7c646f1 100644 --- a/src/Interpreters/JIT/compileFunction.cpp +++ b/src/Interpreters/JIT/compileFunction.cpp @@ -250,205 +250,288 @@ static void compileFunction(llvm::Module & module, const IFunctionBase & functio b.CreateRetVoid(); } -CHJIT::CompiledModuleInfo compileFunction(CHJIT & jit, const IFunctionBase & function) +CompiledFunction compileFunction(CHJIT & jit, const IFunctionBase & function) { Stopwatch watch; - auto compiled_module_info = jit.compileModule([&](llvm::Module & module) + auto compiled_module = jit.compileModule([&](llvm::Module & module) { compileFunction(module, function); }); ProfileEvents::increment(ProfileEvents::CompileExpressionsMicroseconds, watch.elapsedMicroseconds()); - ProfileEvents::increment(ProfileEvents::CompileExpressionsBytes, compiled_module_info.size); + ProfileEvents::increment(ProfileEvents::CompileExpressionsBytes, compiled_module.size); ProfileEvents::increment(ProfileEvents::CompileFunction); - return compiled_module_info; + auto compiled_function_ptr = reinterpret_cast(compiled_module.function_name_to_symbol[function.getName()]); + assert(compiled_function_ptr); + + CompiledFunction result_compiled_function + { + .compiled_function = compiled_function_ptr, + .compiled_module = compiled_module + }; + + return result_compiled_function; } -CHJIT::CompiledModuleInfo compileAggregateFunctons(CHJIT & jit, const std::vector & functions, const std::string & result_name) +static void compileCreateAggregateStatesFunctions(llvm::Module & module, const std::vector & functions, const std::string & name) { - auto compiled_module_info = jit.compileModule([&](llvm::Module & module) + auto & context = module.getContext(); + llvm::IRBuilder<> b(context); + + auto * aggregate_data_places_type = b.getInt8Ty()->getPointerTo(); + auto * create_aggregate_states_function_type = llvm::FunctionType::get(b.getVoidTy(), { aggregate_data_places_type }, false); + auto * create_aggregate_states_function = llvm::Function::Create(create_aggregate_states_function_type, llvm::Function::ExternalLinkage, name, module); + + auto * arguments = create_aggregate_states_function->args().begin(); + llvm::Value * aggregate_data_place_arg = arguments++; + + auto * entry = llvm::BasicBlock::Create(b.getContext(), "entry", create_aggregate_states_function); + b.SetInsertPoint(entry); + + std::vector columns(functions.size()); + for (const auto & function_to_compile : functions) { - auto & context = module.getContext(); - llvm::IRBuilder<> b (context); + size_t aggregate_function_offset = function_to_compile.aggregate_data_offset; + const auto * aggregate_function = function_to_compile.function; + auto * aggregation_place_with_offset = b.CreateConstInBoundsGEP1_32(nullptr, aggregate_data_place_arg, aggregate_function_offset); + aggregate_function->compileCreate(b, aggregation_place_with_offset); + } - auto * size_type = b.getIntNTy(sizeof(size_t) * 8); + 
module.print(llvm::errs(), nullptr); - auto * column_data_type = llvm::StructType::get(b.getInt8PtrTy(), b.getInt8PtrTy()); - auto * get_place_func_declaration = llvm::FunctionType::get(b.getInt8Ty()->getPointerTo(), { b.getInt8Ty()->getPointerTo(), size_type }, /*isVarArg=*/false); - auto * aggregate_loop_func_declaration = llvm::FunctionType::get(b.getVoidTy(), { size_type, column_data_type->getPointerTo(), get_place_func_declaration->getPointerTo(), b.getInt8Ty()->getPointerTo() }, false); - - auto * aggregate_loop_func_definition = llvm::Function::Create(aggregate_loop_func_declaration, llvm::Function::ExternalLinkage, result_name, module); - - auto * arguments = aggregate_loop_func_definition->args().begin(); - llvm::Value * rows_count_arg = &*arguments++; - llvm::Value * columns_arg = &*arguments++; - llvm::Value * get_place_function_arg = &*arguments++; - llvm::Value * get_place_function_context_arg = &*arguments++; - - /// Initialize ColumnDataPlaceholder llvm representation of ColumnData - - auto * entry = llvm::BasicBlock::Create(b.getContext(), "entry", aggregate_loop_func_definition); - b.SetInsertPoint(entry); - - std::vector columns(functions.size()); - for (size_t i = 0; i < functions.size(); ++i) - { - auto argument_type = functions[i].function->getArgumentTypes()[0]; - auto * data = b.CreateLoad(column_data_type, b.CreateConstInBoundsGEP1_32(column_data_type, columns_arg, i)); - columns[i].data_init = b.CreatePointerCast(b.CreateExtractValue(data, {0}), toNativeType(b, removeNullable(argument_type))->getPointerTo()); - } - - /// Initialize loop - - auto * end = llvm::BasicBlock::Create(b.getContext(), "end", aggregate_loop_func_definition); - auto * loop = llvm::BasicBlock::Create(b.getContext(), "loop", aggregate_loop_func_definition); - - b.CreateCondBr(b.CreateICmpEQ(rows_count_arg, llvm::ConstantInt::get(size_type, 0)), end, loop); - - b.SetInsertPoint(loop); - - auto * counter_phi = b.CreatePHI(rows_count_arg->getType(), 2); - counter_phi->addIncoming(llvm::ConstantInt::get(size_type, 0), entry); - - for (auto & col : columns) - { - col.data = b.CreatePHI(col.data_init->getType(), 2); - col.data->addIncoming(col.data_init, entry); - } - - auto * aggregation_place = b.CreateCall(get_place_func_declaration, get_place_function_arg, { get_place_function_context_arg, counter_phi }); - - for (size_t i = 0; i < functions.size(); ++i) - { - size_t aggregate_function_offset = functions[i].aggregate_data_offset; - const auto * aggregate_function_ptr = functions[i].function; - - auto * aggregation_place_with_offset = b.CreateConstInBoundsGEP1_32(nullptr, aggregation_place, aggregate_function_offset); - - auto column_type = functions[i].function->getArgumentTypes()[0]; - auto * column_data = b.CreateLoad(toNativeType(b, column_type), columns[i].data); - aggregate_function_ptr->compile(b, aggregation_place_with_offset, column_type, column_data); - } - - /// End of loop - - auto * cur_block = b.GetInsertBlock(); - for (auto & col : columns) - { - col.data->addIncoming(b.CreateConstInBoundsGEP1_32(nullptr, col.data, 1), cur_block); - if (col.null) - col.null->addIncoming(b.CreateConstInBoundsGEP1_32(nullptr, col.null, 1), cur_block); - } - - auto * value = b.CreateAdd(counter_phi, llvm::ConstantInt::get(size_type, 1)); - counter_phi->addIncoming(value, loop); - - b.CreateCondBr(b.CreateICmpEQ(value, rows_count_arg), end, loop); - - b.SetInsertPoint(end); - b.CreateRetVoid(); - - llvm::errs() << "Module before optimizations \n"; - module.print(llvm::errs(), nullptr); - }); - - return 
compiled_module_info; + b.CreateRetVoid(); } -CHJIT::CompiledModuleInfo compileAggregateFunctonsV2(CHJIT & jit, const std::vector & functions, const std::string & result_name) +static void compileAddIntoAggregateStatesFunctions(llvm::Module & module, const std::vector & functions, const std::string & name) { - auto compiled_module_info = jit.compileModule([&](llvm::Module & module) + auto & context = module.getContext(); + llvm::IRBuilder<> b(context); + + auto * size_type = b.getIntNTy(sizeof(size_t) * 8); + + auto * column_data_type = llvm::StructType::get(b.getInt8PtrTy(), b.getInt8PtrTy()); + auto * get_place_func_declaration = llvm::FunctionType::get(b.getInt8Ty()->getPointerTo(), { b.getInt8Ty()->getPointerTo(), size_type }, /*isVarArg=*/false); + auto * aggregate_loop_func_declaration = llvm::FunctionType::get(b.getVoidTy(), { size_type, column_data_type->getPointerTo(), get_place_func_declaration->getPointerTo(), b.getInt8Ty()->getPointerTo() }, false); + + auto * aggregate_loop_func_definition = llvm::Function::Create(aggregate_loop_func_declaration, llvm::Function::ExternalLinkage, name, module); + + auto * arguments = aggregate_loop_func_definition->args().begin(); + llvm::Value * rows_count_arg = arguments++; + llvm::Value * columns_arg = arguments++; + llvm::Value * get_place_function_arg = arguments++; + llvm::Value * get_place_function_context_arg = arguments++; + + /// Initialize ColumnDataPlaceholder llvm representation of ColumnData + + auto * entry = llvm::BasicBlock::Create(b.getContext(), "entry", aggregate_loop_func_definition); + b.SetInsertPoint(entry); + + std::vector columns(functions.size()); + for (size_t i = 0; i < functions.size(); ++i) { - auto & context = module.getContext(); - llvm::IRBuilder<> b (context); + auto argument_type = functions[i].function->getArgumentTypes()[0]; + auto * data = b.CreateLoad(column_data_type, b.CreateConstInBoundsGEP1_32(column_data_type, columns_arg, i)); + columns[i].data_init = b.CreatePointerCast(b.CreateExtractValue(data, {0}), toNativeType(b, removeNullable(argument_type))->getPointerTo()); + } - auto * size_type = b.getIntNTy(sizeof(size_t) * 8); + /// Initialize loop - auto * column_data_type = llvm::StructType::get(b.getInt8PtrTy(), b.getInt8PtrTy()); - auto * aggregate_data_places_type = b.getInt8Ty()->getPointerTo()->getPointerTo(); - auto * aggregate_loop_func_declaration = llvm::FunctionType::get(b.getVoidTy(), { size_type, column_data_type->getPointerTo(), aggregate_data_places_type }, false); + auto * end = llvm::BasicBlock::Create(b.getContext(), "end", aggregate_loop_func_definition); + auto * loop = llvm::BasicBlock::Create(b.getContext(), "loop", aggregate_loop_func_definition); - auto * aggregate_loop_func_definition = llvm::Function::Create(aggregate_loop_func_declaration, llvm::Function::ExternalLinkage, result_name, module); + b.CreateCondBr(b.CreateICmpEQ(rows_count_arg, llvm::ConstantInt::get(size_type, 0)), end, loop); - auto * arguments = aggregate_loop_func_definition->args().begin(); - llvm::Value * rows_count_arg = &*arguments++; - llvm::Value * columns_arg = &*arguments++; - llvm::Value * aggregate_data_places_arg = &*arguments++; + b.SetInsertPoint(loop); - /// Initialize ColumnDataPlaceholder llvm representation of ColumnData + auto * counter_phi = b.CreatePHI(rows_count_arg->getType(), 2); + counter_phi->addIncoming(llvm::ConstantInt::get(size_type, 0), entry); - auto * entry = llvm::BasicBlock::Create(b.getContext(), "entry", aggregate_loop_func_definition); - b.SetInsertPoint(entry); + for 
(auto & col : columns) + { + col.data = b.CreatePHI(col.data_init->getType(), 2); + col.data->addIncoming(col.data_init, entry); + } - std::vector columns(functions.size()); - for (size_t i = 0; i < functions.size(); ++i) - { - auto argument_type = functions[i].function->getArgumentTypes()[0]; - auto * data = b.CreateLoad(column_data_type, b.CreateConstInBoundsGEP1_32(column_data_type, columns_arg, i)); - columns[i].data_init = b.CreatePointerCast(b.CreateExtractValue(data, {0}), toNativeType(b, removeNullable(argument_type))->getPointerTo()); - } + auto * aggregation_place = b.CreateCall(get_place_func_declaration, get_place_function_arg, { get_place_function_context_arg, counter_phi }); - /// Initialize loop + for (size_t i = 0; i < functions.size(); ++i) + { + size_t aggregate_function_offset = functions[i].aggregate_data_offset; + const auto * aggregate_function_ptr = functions[i].function; - auto * end = llvm::BasicBlock::Create(b.getContext(), "end", aggregate_loop_func_definition); - auto * loop = llvm::BasicBlock::Create(b.getContext(), "loop", aggregate_loop_func_definition); + auto * aggregation_place_with_offset = b.CreateConstInBoundsGEP1_32(nullptr, aggregation_place, aggregate_function_offset); - b.CreateCondBr(b.CreateICmpEQ(rows_count_arg, llvm::ConstantInt::get(size_type, 0)), end, loop); + auto column_type = functions[i].function->getArgumentTypes()[0]; + auto * column_data = b.CreateLoad(toNativeType(b, column_type), columns[i].data); + aggregate_function_ptr->compileAdd(b, aggregation_place_with_offset, column_type, column_data); + } - b.SetInsertPoint(loop); + /// End of loop - auto * counter_phi = b.CreatePHI(rows_count_arg->getType(), 2); - counter_phi->addIncoming(llvm::ConstantInt::get(size_type, 0), entry); + auto * cur_block = b.GetInsertBlock(); + for (auto & col : columns) + { + col.data->addIncoming(b.CreateConstInBoundsGEP1_32(nullptr, col.data, 1), cur_block); + if (col.null) + col.null->addIncoming(b.CreateConstInBoundsGEP1_32(nullptr, col.null, 1), cur_block); + } - auto * aggregate_data_place_phi = b.CreatePHI(aggregate_data_places_type, 2); - aggregate_data_place_phi->addIncoming(aggregate_data_places_arg, entry); + auto * value = b.CreateAdd(counter_phi, llvm::ConstantInt::get(size_type, 1)); + counter_phi->addIncoming(value, loop); - for (auto & col : columns) - { - col.data = b.CreatePHI(col.data_init->getType(), 2); - col.data->addIncoming(col.data_init, entry); - } + b.CreateCondBr(b.CreateICmpEQ(value, rows_count_arg), end, loop); - for (size_t i = 0; i < functions.size(); ++i) - { - size_t aggregate_function_offset = functions[i].aggregate_data_offset; - const auto * aggregate_function_ptr = functions[i].function; + b.SetInsertPoint(end); + b.CreateRetVoid(); +} - auto * aggregate_data_place = b.CreateLoad(b.getInt8Ty()->getPointerTo(), aggregate_data_place_phi); - auto * aggregation_place_with_offset = b.CreateConstInBoundsGEP1_32(nullptr, aggregate_data_place, aggregate_function_offset); +static void compileMergeAggregatesStates(llvm::Module & module, const std::vector & functions, const std::string & name) +{ + auto & context = module.getContext(); + llvm::IRBuilder<> b(context); - auto column_type = functions[i].function->getArgumentTypes()[0]; - auto * column_data = b.CreateLoad(toNativeType(b, column_type), columns[i].data); - aggregate_function_ptr->compile(b, aggregation_place_with_offset, column_type, column_data); - } + auto * aggregate_data_places_type = b.getInt8Ty()->getPointerTo(); + auto * aggregate_loop_func_declaration = 
llvm::FunctionType::get(b.getVoidTy(), { aggregate_data_places_type, aggregate_data_places_type }, false); + auto * aggregate_loop_func = llvm::Function::Create(aggregate_loop_func_declaration, llvm::Function::ExternalLinkage, name, module); - /// End of loop + auto * arguments = aggregate_loop_func->args().begin(); + llvm::Value * aggregate_data_place_dst_arg = arguments++; + llvm::Value * aggregate_data_place_src_arg = arguments++; - auto * cur_block = b.GetInsertBlock(); - for (auto & col : columns) - { - col.data->addIncoming(b.CreateConstInBoundsGEP1_32(nullptr, col.data, 1), cur_block); - if (col.null) - col.null->addIncoming(b.CreateConstInBoundsGEP1_32(nullptr, col.null, 1), cur_block); - } + auto * entry = llvm::BasicBlock::Create(b.getContext(), "entry", aggregate_loop_func); + b.SetInsertPoint(entry); - auto * value = b.CreateAdd(counter_phi, llvm::ConstantInt::get(size_type, 1), "", true, true); - counter_phi->addIncoming(value, loop); + for (const auto & function_to_compile : functions) + { + size_t aggregate_function_offset = function_to_compile.aggregate_data_offset; + const auto * aggregate_function_ptr = function_to_compile.function; - aggregate_data_place_phi->addIncoming(b.CreateConstInBoundsGEP1_32(nullptr, aggregate_data_place_phi, 1), loop); + auto * aggregate_data_place_merge_dst_with_offset = b.CreateConstInBoundsGEP1_32(nullptr, aggregate_data_place_dst_arg, aggregate_function_offset); + auto * aggregate_data_place_merge_src_with_offset = b.CreateConstInBoundsGEP1_32(nullptr, aggregate_data_place_src_arg, aggregate_function_offset); - b.CreateCondBr(b.CreateICmpEQ(value, rows_count_arg), end, loop); + aggregate_function_ptr->compileMerge(b, aggregate_data_place_merge_dst_with_offset, aggregate_data_place_merge_src_with_offset); + } - b.SetInsertPoint(end); - b.CreateRetVoid(); + b.CreateRetVoid(); +} - llvm::errs() << "Module before optimizations \n"; - module.print(llvm::errs(), nullptr); +static void compileInsertAggregatesIntoResultColumns(llvm::Module & module, const std::vector & functions, const std::string & name) +{ + auto & context = module.getContext(); + llvm::IRBuilder<> b(context); + + auto * size_type = b.getIntNTy(sizeof(size_t) * 8); + + auto * column_data_type = llvm::StructType::get(b.getInt8PtrTy(), b.getInt8PtrTy()); + auto * aggregate_data_places_type = b.getInt8Ty()->getPointerTo()->getPointerTo(); + auto * aggregate_loop_func_declaration = llvm::FunctionType::get(b.getVoidTy(), { size_type, column_data_type->getPointerTo(), aggregate_data_places_type }, false); + auto * aggregate_loop_func = llvm::Function::Create(aggregate_loop_func_declaration, llvm::Function::ExternalLinkage, name, module); + + auto * arguments = aggregate_loop_func->args().begin(); + llvm::Value * rows_count_arg = &*arguments++; + llvm::Value * columns_arg = &*arguments++; + llvm::Value * aggregate_data_places_arg = &*arguments++; + + auto * entry = llvm::BasicBlock::Create(b.getContext(), "entry", aggregate_loop_func); + b.SetInsertPoint(entry); + + std::vector columns(functions.size()); + for (size_t i = 0; i < functions.size(); ++i) + { + auto return_type = functions[i].function->getReturnType(); + auto * data = b.CreateLoad(column_data_type, b.CreateConstInBoundsGEP1_32(column_data_type, columns_arg, i)); + columns[i].data_init = b.CreatePointerCast(b.CreateExtractValue(data, {0}), toNativeType(b, removeNullable(return_type))->getPointerTo()); + } + + auto * end = llvm::BasicBlock::Create(b.getContext(), "end", aggregate_loop_func); + auto * loop = 
llvm::BasicBlock::Create(b.getContext(), "loop", aggregate_loop_func); + + b.CreateCondBr(b.CreateICmpEQ(rows_count_arg, llvm::ConstantInt::get(size_type, 0)), end, loop); + + b.SetInsertPoint(loop); + + auto * counter_phi = b.CreatePHI(rows_count_arg->getType(), 2); + counter_phi->addIncoming(llvm::ConstantInt::get(size_type, 0), entry); + + auto * aggregate_data_place_phi = b.CreatePHI(aggregate_data_places_type, 2); + aggregate_data_place_phi->addIncoming(aggregate_data_places_arg, entry); + + for (auto & col : columns) + { + col.data = b.CreatePHI(col.data_init->getType(), 2); + col.data->addIncoming(col.data_init, entry); + } + + for (size_t i = 0; i < functions.size(); ++i) + { + size_t aggregate_function_offset = functions[i].aggregate_data_offset; + const auto * aggregate_function_ptr = functions[i].function; + + auto * aggregate_data_place = b.CreateLoad(b.getInt8Ty()->getPointerTo(), aggregate_data_place_phi); + auto * aggregation_place_with_offset = b.CreateConstInBoundsGEP1_32(nullptr, aggregate_data_place, aggregate_function_offset); + + auto column_type = functions[i].function->getArgumentTypes()[0]; + auto * final_value = aggregate_function_ptr->compileGetResult(b, aggregation_place_with_offset); + b.CreateStore(final_value, columns[i].data); + } + + /// End of loop + + auto * cur_block = b.GetInsertBlock(); + for (auto & col : columns) + { + col.data->addIncoming(b.CreateConstInBoundsGEP1_32(nullptr, col.data, 1), cur_block); + if (col.null) + col.null->addIncoming(b.CreateConstInBoundsGEP1_32(nullptr, col.null, 1), cur_block); + } + + auto * value = b.CreateAdd(counter_phi, llvm::ConstantInt::get(size_type, 1), "", true, true); + counter_phi->addIncoming(value, loop); + + aggregate_data_place_phi->addIncoming(b.CreateConstInBoundsGEP1_32(nullptr, aggregate_data_place_phi, 1), loop); + + b.CreateCondBr(b.CreateICmpEQ(value, rows_count_arg), end, loop); + + b.SetInsertPoint(end); + b.CreateRetVoid(); +} + +CompiledAggregateFunctions compileAggregateFunctons(CHJIT & jit, const std::vector & functions, std::string functions_dump_name) +{ + std::string create_aggregate_states_functions_name = functions_dump_name + "_create"; + std::string add_aggregate_states_functions_name = functions_dump_name + "_add"; + std::string merge_aggregate_states_functions_name = functions_dump_name + "_merge"; + std::string insert_aggregate_states_functions_name = functions_dump_name + "_insert"; + + auto compiled_module = jit.compileModule([&](llvm::Module & module) + { + compileCreateAggregateStatesFunctions(module, functions, create_aggregate_states_functions_name); + compileAddIntoAggregateStatesFunctions(module, functions, add_aggregate_states_functions_name); + compileMergeAggregatesStates(module, functions, merge_aggregate_states_functions_name); + compileInsertAggregatesIntoResultColumns(module, functions, insert_aggregate_states_functions_name); }); - return compiled_module_info; + auto create_aggregate_states_function = reinterpret_cast(compiled_module.function_name_to_symbol[create_aggregate_states_functions_name]); + auto add_into_aggregate_states_function = reinterpret_cast(compiled_module.function_name_to_symbol[add_aggregate_states_functions_name]); + auto merge_aggregate_states_function = reinterpret_cast(compiled_module.function_name_to_symbol[merge_aggregate_states_functions_name]); + auto insert_aggregate_states_function = reinterpret_cast(compiled_module.function_name_to_symbol[insert_aggregate_states_functions_name]); + + assert(create_aggregate_states_function); + 
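+    /// Editor's sketch of the contract behind these four symbols (illustrative
+    /// pseudo-usage, not the real call sites; the pointer types are the
+    /// JIT*Function aliases declared in compileFunction.h):
+    ///
+    ///     create_aggregate_states_function(place);                /// init every state at its offset
+    ///     add_into_aggregate_states_function(rows, columns,
+    ///         get_place_function, get_place_context);             /// one fused pass per block
+    ///     merge_aggregate_states_function(dst_place, src_place);  /// combine two states
+    ///     insert_aggregate_states_function(rows, result_columns, places); /// write final values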
assert(add_into_aggregate_states_function); + assert(merge_aggregate_states_function); + assert(insert_aggregate_states_function); + + CompiledAggregateFunctions compiled_aggregate_functions + { + .create_aggregate_states_function = create_aggregate_states_function, + .add_into_aggregate_states_function = add_into_aggregate_states_function, + .merge_aggregate_states_function = merge_aggregate_states_function, + .insert_aggregates_into_columns_function = insert_aggregate_states_function + }; + + return compiled_aggregate_functions; } } diff --git a/src/Interpreters/JIT/compileFunction.h b/src/Interpreters/JIT/compileFunction.h index 4c918d54aa3..788b614e551 100644 --- a/src/Interpreters/JIT/compileFunction.h +++ b/src/Interpreters/JIT/compileFunction.h @@ -32,6 +32,14 @@ using ColumnDataRowsSize = size_t; using JITCompiledFunction = void (*)(ColumnDataRowsSize, ColumnData *); +struct CompiledFunction +{ + + JITCompiledFunction compiled_function; + + CHJIT::CompiledModule compiled_module; +}; + /** Compile function to native jit code using CHJIT instance. * Function is compiled as single module. * After this function execution, code for function will be compiled and can be queried using @@ -41,22 +49,33 @@ using JITCompiledFunction = void (*)(ColumnDataRowsSize, ColumnData *); * It is important that ColumnData parameter of JITCompiledFunction is result column, * and will be filled by compiled function. */ -CHJIT::CompiledModuleInfo compileFunction(CHJIT & jit, const IFunctionBase & function); +CompiledFunction compileFunction(CHJIT & jit, const IFunctionBase & function); -using GetAggregateDataContext = char *; -using GetAggregateDataFunction = AggregateDataPtr (*)(GetAggregateDataContext, size_t); -using JITCompiledAggregateFunction = void (*)(ColumnDataRowsSize, ColumnData *, GetAggregateDataFunction, GetAggregateDataContext); - -struct AggregateFunctionToCompile +struct AggregateFunctionWithOffset { const IAggregateFunction * function; size_t aggregate_data_offset; }; -CHJIT::CompiledModuleInfo compileAggregateFunctons(CHJIT & jit, const std::vector & functions, const std::string & result_name); +using GetAggregateDataContext = char *; +using GetAggregateDataFunction = AggregateDataPtr (*)(GetAggregateDataContext, size_t); -using JITCompiledAggregateFunctionV2 = void (*)(ColumnDataRowsSize, ColumnData *, AggregateDataPtr *); -CHJIT::CompiledModuleInfo compileAggregateFunctonsV2(CHJIT & jit, const std::vector & functions, const std::string & result_name); +using JITCreateAggregateStatesFunction = void (*)(AggregateDataPtr); +using JITAddIntoAggregateStatesFunction = void (*)(ColumnDataRowsSize, ColumnData *, GetAggregateDataFunction, GetAggregateDataContext); +using JITMergeAggregateStatesFunction = void (*)(AggregateDataPtr, AggregateDataPtr); +using JITInsertAggregatesIntoColumnsFunction = void (*)(ColumnDataRowsSize, ColumnData *, AggregateDataPtr *); + +struct CompiledAggregateFunctions +{ + JITCreateAggregateStatesFunction create_aggregate_states_function; + JITAddIntoAggregateStatesFunction add_into_aggregate_states_function; + JITMergeAggregateStatesFunction merge_aggregate_states_function; + JITInsertAggregatesIntoColumnsFunction insert_aggregates_into_columns_function; + + CHJIT::CompiledModule compiled_module; +}; + +CompiledAggregateFunctions compileAggregateFunctons(CHJIT & jit, const std::vector & functions, std::string functions_dump_name); } diff --git a/src/Interpreters/examples/jit_example.cpp b/src/Interpreters/examples/jit_example.cpp index 9694314b820..8a4dce3ca1b 
100644 --- a/src/Interpreters/examples/jit_example.cpp +++ b/src/Interpreters/examples/jit_example.cpp @@ -18,7 +18,7 @@ int main(int argc, char **argv) jit.registerExternalSymbol("test_function", reinterpret_cast(&test_function)); - auto compiled_module_info = jit.compileModule([](llvm::Module & module) + auto compiled_module = jit.compileModule([](llvm::Module & module) { auto & context = module.getContext(); llvm::IRBuilder<> b (context); @@ -43,13 +43,14 @@ int main(int argc, char **argv) b.CreateRet(value); }); - for (const auto & compiled_function_name : compiled_module_info.compiled_functions) + for (const auto & [compiled_function_name, _] : compiled_module.function_name_to_symbol) { std::cerr << compiled_function_name << std::endl; } int64_t value = 5; - auto * test_name_function = reinterpret_cast(jit.findCompiledFunction(compiled_module_info, "test_name")); + auto * symbol = compiled_module.function_name_to_symbol["test_name"]; + auto * test_name_function = reinterpret_cast(symbol); auto result = test_name_function(&value); std::cerr << "Result " << result << std::endl; From a5ef0067b8da1e13c92fb1595152b94cc159bfe8 Mon Sep 17 00:00:00 2001 From: Maksim Kita Date: Fri, 4 Jun 2021 13:43:11 +0300 Subject: [PATCH 625/931] Compile AggregateFunctionIf --- src/AggregateFunctions/AggregateFunctionIf.h | 78 ++++++++++++++++ src/AggregateFunctions/AggregateFunctionSum.h | 11 ++- src/AggregateFunctions/IAggregateFunction.h | 2 +- src/Interpreters/Aggregator.cpp | 93 ++++++++++++++++++- src/Interpreters/Aggregator.h | 2 + src/Interpreters/JIT/compileFunction.cpp | 54 ++++++++--- src/Interpreters/examples/jit_example.cpp | 17 ++++ 7 files changed, 235 insertions(+), 22 deletions(-) diff --git a/src/AggregateFunctions/AggregateFunctionIf.h b/src/AggregateFunctions/AggregateFunctionIf.h index 5ef8e3bc75a..e01ee982bd0 100644 --- a/src/AggregateFunctions/AggregateFunctionIf.h +++ b/src/AggregateFunctions/AggregateFunctionIf.h @@ -5,6 +5,14 @@ #include #include +#if !defined(ARCADIA_BUILD) +# include +#endif + +#if USE_EMBEDDED_COMPILER +# include +# include +#endif namespace DB { @@ -154,6 +162,76 @@ public: const Array & params, const AggregateFunctionProperties & properties) const override; AggregateFunctionPtr getNestedFunction() const override { return nested_func; } + + +#if USE_EMBEDDED_COMPILER + + bool isCompilable() const override + { + return nested_func->isCompilable(); + } + + void compileCreate(llvm::IRBuilderBase & builder, llvm::Value * aggregate_data_ptr) const override + { + nested_func->compileCreate(builder, aggregate_data_ptr); + } + + void compileAdd(llvm::IRBuilderBase & builder, llvm::Value * aggregate_data_ptr, const DataTypes & arguments_types, const std::vector & argument_values) const override + { + llvm::IRBuilder<> & b = static_cast &>(builder); + + const auto & predicate_type = arguments_types[argument_values.size() - 1]; + auto * predicate_value = argument_values[argument_values.size() - 1]; + + auto * head = b.GetInsertBlock(); + + auto * join_block = llvm::BasicBlock::Create(head->getContext(), "join_block", head->getParent()); + auto * if_true = llvm::BasicBlock::Create(head->getContext(), "if_true", head->getParent()); + auto * if_false = llvm::BasicBlock::Create(head->getContext(), "if_false", head->getParent()); + + auto * is_predicate_true = nativeBoolCast(b, predicate_type, predicate_value); + + b.CreateCondBr(is_predicate_true, if_true, if_false); + + b.SetInsertPoint(if_true); + + size_t arguments_size = arguments_types.size(); + + DataTypes 
argument_types_without_predicate; + std::vector argument_values_without_predicate; + + argument_types_without_predicate.resize(arguments_size - 1); + argument_values_without_predicate.resize(arguments_size - 1); + + for (size_t i = 0; i < arguments_types.size() - 1; ++i) + { + argument_types_without_predicate[i] = arguments_types[i]; + argument_values_without_predicate[i] = argument_values[i]; + } + + nested_func->compileAdd(builder, aggregate_data_ptr, argument_types_without_predicate, argument_values_without_predicate); + + b.CreateBr(join_block); + + b.SetInsertPoint(if_false); + b.CreateBr(join_block); + + b.SetInsertPoint(join_block); + } + + void compileMerge(llvm::IRBuilderBase & builder, llvm::Value * aggregate_data_dst_ptr, llvm::Value * aggregate_data_src_ptr) const override + { + nested_func->compileMerge(builder, aggregate_data_dst_ptr, aggregate_data_src_ptr); + } + + llvm::Value * compileGetResult(llvm::IRBuilderBase & builder, llvm::Value * aggregate_data_ptr) const override + { + return nested_func->compileGetResult(builder, aggregate_data_ptr); + } + +#endif + + }; } diff --git a/src/AggregateFunctions/AggregateFunctionSum.h b/src/AggregateFunctions/AggregateFunctionSum.h index 18c78f2e8b5..06b43d0551d 100644 --- a/src/AggregateFunctions/AggregateFunctionSum.h +++ b/src/AggregateFunctions/AggregateFunctionSum.h @@ -393,7 +393,7 @@ public: column.getData().push_back(this->data(place).get()); } - #if USE_EMBEDDED_COMPILER +#if USE_EMBEDDED_COMPILER bool isCompilable() const override { @@ -415,7 +415,7 @@ public: b.CreateStore(llvm::ConstantInt::get(return_type, 0), aggregate_sum_ptr); } - void compileAdd(llvm::IRBuilderBase & builder, llvm::Value * aggregate_data_ptr, const DataTypePtr & value_type, llvm::Value * value) const override + void compileAdd(llvm::IRBuilderBase & builder, llvm::Value * aggregate_data_ptr, const DataTypes & arguments_types, const std::vector & argument_values) const override { llvm::IRBuilder<> & b = static_cast &>(builder); @@ -424,7 +424,10 @@ public: auto * sum_value_ptr = b.CreatePointerCast(aggregate_data_ptr, return_type->getPointerTo()); auto * sum_value = b.CreateLoad(return_type, sum_value_ptr); - auto * value_cast_to_result = nativeCast(b, value_type, value, return_type); + const auto & argument_type = arguments_types[0]; + const auto & argument_value = argument_values[0]; + + auto * value_cast_to_result = nativeCast(b, argument_type, argument_value, return_type); auto * sum_result_value = sum_value->getType()->isIntegerTy() ? 
b.CreateAdd(sum_value, value_cast_to_result) : b.CreateFAdd(sum_value, value_cast_to_result); b.CreateStore(sum_result_value, sum_value_ptr); @@ -456,7 +459,7 @@ public: return b.CreateLoad(return_type, sum_value_ptr); } - #endif +#endif private: UInt32 scale; diff --git a/src/AggregateFunctions/IAggregateFunction.h b/src/AggregateFunctions/IAggregateFunction.h index dc1e1b234dd..726ab727a5d 100644 --- a/src/AggregateFunctions/IAggregateFunction.h +++ b/src/AggregateFunctions/IAggregateFunction.h @@ -260,7 +260,7 @@ public: throw Exception(getName() + " is not JIT-compilable", ErrorCodes::NOT_IMPLEMENTED); } - virtual void compileAdd(llvm::IRBuilderBase & /*builder*/, llvm::Value * /*aggregate_data_ptr*/, const DataTypePtr & /*value_type*/, llvm::Value * /*value*/) const + virtual void compileAdd(llvm::IRBuilderBase & /*builder*/, llvm::Value * /*aggregate_data_ptr*/, const DataTypes & /*arguments_types*/, const std::vector & /*arguments_values*/) const { throw Exception(getName() + " is not JIT-compilable", ErrorCodes::NOT_IMPLEMENTED); } diff --git a/src/Interpreters/Aggregator.cpp b/src/Interpreters/Aggregator.cpp index b89cd182c5c..676d9a984ab 100644 --- a/src/Interpreters/Aggregator.cpp +++ b/src/Interpreters/Aggregator.cpp @@ -214,6 +214,8 @@ void Aggregator::Params::explain(JSONBuilder::JSONMap & map) const } } +#if USE_EMBEDDED_COMPILER + static CHJIT & getJITInstance() { static CHJIT jit; @@ -246,6 +248,8 @@ static std::string dumpAggregateFunction(const IAggregateFunction * function) return function_dump; } +#endif + Aggregator::Aggregator(const Params & params_) : params(params_) { @@ -297,13 +301,18 @@ Aggregator::Aggregator(const Params & params_) HashMethodContext::Settings cache_settings; cache_settings.max_threads = params.max_threads; aggregation_state_cache = AggregatedDataVariants::createCache(method_chosen, cache_settings); + +#if USE_EMBEDDED_COMPILER compileAggregateFunctions(); +#endif + } +#if USE_EMBEDDED_COMPILER + void Aggregator::compileAggregateFunctions() { - if (!params.compile_aggregate_expressions || - params.overflow_row) + if (!params.compile_aggregate_expressions || params.overflow_row) return; std::vector functions_to_compile; @@ -334,7 +343,7 @@ void Aggregator::compileAggregateFunctions() ++aggregate_instructions_size; } - if (functions_to_compile.size() != aggregate_instructions_size) + if (functions_to_compile.empty() || functions_to_compile.size() != aggregate_instructions_size) return; CompiledAggregateFunctions compiled_aggregate_functions; @@ -362,6 +371,8 @@ void Aggregator::compileAggregateFunctions() compiled_functions.emplace(std::move(compiled_aggregate_functions)); } +#endif + AggregatedDataVariants::Type Aggregator::chooseAggregationMethod() { /// If no keys. All aggregating to single row. @@ -574,6 +585,8 @@ void NO_INLINE Aggregator::executeImpl( executeImplBatch(method, state, aggregates_pool, rows, aggregate_instructions, overflow_row); } +#if USE_EMBEDDED_COMPILER + template void NO_INLINE Aggregator::handleAggregationJIT( Method & method, @@ -587,7 +600,11 @@ void NO_INLINE Aggregator::handleAggregationJIT( /// Add values to the aggregate functions. 
    for (AggregateFunctionInstruction * inst = aggregate_instructions; inst->that; ++inst)
-        columns_data.emplace_back(getColumnData(inst->batch_arguments[0]));
+    {
+        size_t arguments_size = inst->that->getArgumentTypes().size();
+        for (size_t i = 0; i < arguments_size; ++i)
+            columns_data.emplace_back(getColumnData(inst->batch_arguments[i]));
+    }
 
     auto add_into_aggregate_states_function = compiled_functions->add_into_aggregate_states_function;
     auto create_aggregate_states_function = compiled_functions->create_aggregate_states_function;
@@ -635,6 +652,8 @@ void NO_INLINE Aggregator::handleAggregationJIT(
     add_into_aggregate_states_function(rows, columns_data.data(), get_aggregate_data_function, get_aggregate_data_context);
 }
 
+#endif
+
 template
 void NO_INLINE Aggregator::handleAggregationDefault(
@@ -751,10 +770,16 @@ void NO_INLINE Aggregator::executeImplBatch(
         }
     }
 
+#if USE_EMBEDDED_COMPILER
     if (compiled_functions)
+    {
         handleAggregationJIT(method, state, aggregates_pool, rows, aggregate_instructions);
+    }
     else
+#endif
+    {
         handleAggregationDefault(method, state, aggregates_pool, rows, aggregate_instructions, overflow_row);
+    }
 }
 
@@ -890,6 +948,7 @@
             }
         }
     }
+
     NestedColumnsHolder nested_columns_holder;
     AggregateFunctionInstructions aggregate_functions_instructions;
     prepareAggregateInstructions(columns, aggregate_columns, materialized_columns, aggregate_functions_instructions, nested_columns_holder);
@@ -1291,6 +1372,7 @@ void NO_INLINE Aggregator::convertToBlockImplFinal(
     auto shuffled_key_sizes = method.shuffleKeyColumns(key_columns, key_sizes);
     const auto & key_sizes_ref = shuffled_key_sizes ? *shuffled_key_sizes : key_sizes;
 
+#if USE_EMBEDDED_COMPILER
     if (compiled_functions)
     {
         std::unique_ptr<AggregateDataPtr[]> places(new AggregateDataPtr[data.size()]);
@@ -1316,6 +1398,7 @@
         insert_aggregate_states_function(data.size(), columns_data.data(), places.get());
     }
     else
+#endif
     {
         data.forEachValue([&](const auto & key, auto & mapped)
         {
@@ -1751,6 +1834,7 @@ void NO_INLINE Aggregator::mergeDataImpl(
     if constexpr (Method::low_cardinality_optimization)
         mergeDataNullKey(table_dst, table_src, arena);
 
+#if USE_EMBEDDED_COMPILER
     if (compiled_functions)
     {
         auto merge_aggregate_states_function_typed = compiled_functions->merge_aggregate_states_function;
@@ -1770,6 +1854,7 @@
         });
     }
     else
+#endif
     {
         table_src.mergeToViaEmplace(table_dst, [&](AggregateDataPtr & __restrict dst, AggregateDataPtr & __restrict src, bool inserted)
         {
diff --git a/src/Interpreters/Aggregator.h b/src/Interpreters/Aggregator.h
index 91065a266b5..3a67f9fd9a1 100644
--- a/src/Interpreters/Aggregator.h
+++ b/src/Interpreters/Aggregator.h
@@ -1083,7 +1083,9 @@ private:
     /// For external aggregation.
     TemporaryFiles temporary_files;
 
+#if USE_EMBEDDED_COMPILER
     std::optional<CompiledAggregateFunctions> compiled_functions;
+#endif
 
     /** Try to compile aggregate functions.
*/ diff --git a/src/Interpreters/JIT/compileFunction.cpp b/src/Interpreters/JIT/compileFunction.cpp index ea3f7c646f1..f79272ac4ff 100644 --- a/src/Interpreters/JIT/compileFunction.cpp +++ b/src/Interpreters/JIT/compileFunction.cpp @@ -299,8 +299,6 @@ static void compileCreateAggregateStatesFunctions(llvm::Module & module, const s aggregate_function->compileCreate(b, aggregation_place_with_offset); } - module.print(llvm::errs(), nullptr); - b.CreateRetVoid(); } @@ -328,12 +326,29 @@ static void compileAddIntoAggregateStatesFunctions(llvm::Module & module, const auto * entry = llvm::BasicBlock::Create(b.getContext(), "entry", aggregate_loop_func_definition); b.SetInsertPoint(entry); - std::vector columns(functions.size()); + std::vector columns; + size_t previous_columns_size = 0; + for (size_t i = 0; i < functions.size(); ++i) { - auto argument_type = functions[i].function->getArgumentTypes()[0]; - auto * data = b.CreateLoad(column_data_type, b.CreateConstInBoundsGEP1_32(column_data_type, columns_arg, i)); - columns[i].data_init = b.CreatePointerCast(b.CreateExtractValue(data, {0}), toNativeType(b, removeNullable(argument_type))->getPointerTo()); + auto argument_types = functions[i].function->getArgumentTypes(); + + ColumnDataPlaceholder data_placeholder; + + std::cerr << "Function " << functions[i].function->getName() << std::endl; + + size_t function_arguments_size = argument_types.size(); + + for (size_t column_argument_index = 0; column_argument_index < function_arguments_size; ++column_argument_index) + { + const auto & argument_type = argument_types[previous_columns_size + column_argument_index]; + auto * data = b.CreateLoad(column_data_type, b.CreateConstInBoundsGEP1_32(column_data_type, columns_arg, column_argument_index)); + std::cerr << "Argument type " << argument_type->getName() << std::endl; + data_placeholder.data_init = b.CreatePointerCast(b.CreateExtractValue(data, {0}), toNativeType(b, removeNullable(argument_type))->getPointerTo()); + columns.emplace_back(data_placeholder); + } + + previous_columns_size += function_arguments_size; } /// Initialize loop @@ -356,16 +371,28 @@ static void compileAddIntoAggregateStatesFunctions(llvm::Module & module, const auto * aggregation_place = b.CreateCall(get_place_func_declaration, get_place_function_arg, { get_place_function_context_arg, counter_phi }); - for (size_t i = 0; i < functions.size(); ++i) + previous_columns_size = 0; + for (const auto & function : functions) { - size_t aggregate_function_offset = functions[i].aggregate_data_offset; - const auto * aggregate_function_ptr = functions[i].function; + size_t aggregate_function_offset = function.aggregate_data_offset; + const auto * aggregate_function_ptr = function.function; + + auto arguments_types = function.function->getArgumentTypes(); + std::vector arguments_values; + + size_t function_arguments_size = arguments_types.size(); + arguments_values.resize(function_arguments_size); + + for (size_t column_argument_index = 0; column_argument_index < function_arguments_size; ++column_argument_index) + { + auto * column_argument_data = columns[previous_columns_size + column_argument_index].data; + arguments_values[column_argument_index] = b.CreateLoad(toNativeType(b, arguments_types[column_argument_index]), column_argument_data); + } auto * aggregation_place_with_offset = b.CreateConstInBoundsGEP1_32(nullptr, aggregation_place, aggregate_function_offset); + aggregate_function_ptr->compileAdd(b, aggregation_place_with_offset, arguments_types, arguments_values); - auto column_type = 
functions[i].function->getArgumentTypes()[0];
-        auto * column_data = b.CreateLoad(toNativeType(b, column_type), columns[i].data);
-        aggregate_function_ptr->compileAdd(b, aggregation_place_with_offset, column_type, column_data);
+        aggregate_function_ptr->compileAdd(b, aggregation_place_with_offset, arguments_types, arguments_values);
+
+        previous_columns_size += function_arguments_size;
     }
 
     /// End of loop
@@ -374,12 +401,13 @@ static void compileAddIntoAggregateStatesFunctions(llvm::Module & module, const
     for (auto & col : columns)
     {
         col.data->addIncoming(b.CreateConstInBoundsGEP1_32(nullptr, col.data, 1), cur_block);
+
         if (col.null)
             col.null->addIncoming(b.CreateConstInBoundsGEP1_32(nullptr, col.null, 1), cur_block);
     }
 
     auto * value = b.CreateAdd(counter_phi, llvm::ConstantInt::get(size_type, 1));
-    counter_phi->addIncoming(value, loop);
+    counter_phi->addIncoming(value, cur_block);
 
     b.CreateCondBr(b.CreateICmpEQ(value, rows_count_arg), end, loop);
 
diff --git a/src/Interpreters/examples/jit_example.cpp b/src/Interpreters/examples/jit_example.cpp
index 8a4dce3ca1b..3fa2a901eac 100644
--- a/src/Interpreters/examples/jit_example.cpp
+++ b/src/Interpreters/examples/jit_example.cpp
@@ -1,5 +1,11 @@
 #include
 
+#if !defined(ARCADIA_BUILD)
+#    include "config_core.h"
+#endif
+
+#if USE_EMBEDDED_COMPILER
+
 #include
 #include
 
@@ -56,3 +62,14 @@ int main(int argc, char **argv)
 
     return 0;
 }
+
+#else
+
+int main(int argc, char **argv)
+{
+    (void)(argc);
+    (void)(argv);
+    return 0;
+}
+
+#endif

From b491e75a5632c2ca720ead46fdcf106fbbd09e2a Mon Sep 17 00:00:00 2001
From: Maksim Kita
Date: Fri, 4 Jun 2021 15:42:46 +0300
Subject: [PATCH 626/931] Fix compile functions data type offset

---
 src/Interpreters/JIT/CHJIT.cpp           | 25 +++++++++++++++++++++---
 src/Interpreters/JIT/compileFunction.cpp | 11 ++++-------
 2 files changed, 26 insertions(+), 10 deletions(-)

diff --git a/src/Interpreters/JIT/CHJIT.cpp b/src/Interpreters/JIT/CHJIT.cpp
index 55dfc0b2e62..c06b4132309 100644
--- a/src/Interpreters/JIT/CHJIT.cpp
+++ b/src/Interpreters/JIT/CHJIT.cpp
@@ -216,9 +238,6 @@ CHJIT::CompiledModule CHJIT::compileModule(std::unique_ptr<llvm::Module> module)
 
     auto buffer = compiler->compile(*module);
 
-    // llvm::errs() << "Module after optimizations " << "\n";
-    // module->print(llvm::errs(), nullptr);
-
     llvm::Expected<std::unique_ptr<llvm::object::ObjectFile>> object = llvm::object::ObjectFile::createObjectFile(*buffer);
 
     if (!object)
diff --git a/src/Interpreters/JIT/compileFunction.cpp b/src/Interpreters/JIT/compileFunction.cpp
index f79272ac4ff..813d8abc7df 100644
--- a/src/Interpreters/JIT/compileFunction.cpp
+++ b/src/Interpreters/JIT/compileFunction.cpp
@@ -329,21 +329,18 @@ static void compileAddIntoAggregateStatesFunctions(llvm::Module & module, const
     std::vector<ColumnDataPlaceholder> columns;
     size_t previous_columns_size = 0;
 
-    for (size_t i = 0; i < functions.size(); ++i)
+    for (const auto & function : functions)
     {
-        auto argument_types = functions[i].function->getArgumentTypes();
+        auto argument_types = function.function->getArgumentTypes();
 
         ColumnDataPlaceholder data_placeholder;
 
-        std::cerr << "Function " << functions[i].function->getName() << std::endl;
-
         size_t function_arguments_size = argument_types.size();
 
         for (size_t column_argument_index = 0; column_argument_index < function_arguments_size; ++column_argument_index)
         {
-            const auto & argument_type = argument_types[previous_columns_size + column_argument_index];
-            auto * data = b.CreateLoad(column_data_type, b.CreateConstInBoundsGEP1_32(column_data_type, columns_arg, column_argument_index));
-            std::cerr << "Argument type " << argument_type->getName() << std::endl;
+            const auto & argument_type = argument_types[column_argument_index];
+            auto * data = b.CreateLoad(column_data_type, b.CreateConstInBoundsGEP1_32(column_data_type, columns_arg, previous_columns_size + column_argument_index));
             data_placeholder.data_init = b.CreatePointerCast(b.CreateExtractValue(data, {0}), toNativeType(b, removeNullable(argument_type))->getPointerTo());
             columns.emplace_back(data_placeholder);
         }
 
         previous_columns_size += function_arguments_size;
     }

From f88d8ccb1fdd486c42c505a9c54f569c0db82a71 Mon Sep 17 00:00:00 2001
From: Maksim Kita
Date: Fri, 4 Jun 2021 17:39:54 +0300
Subject: [PATCH 627/931] Compile AggregateFunctionAvg

---
 src/AggregateFunctions/AggregateFunctionAvg.h | 105 ++++++++++++++++++
 src/DataTypes/Native.h                        |  71 +++++++++---
 2 files changed, 161 insertions(+), 15 deletions(-)

diff --git a/src/AggregateFunctions/AggregateFunctionAvg.h b/src/AggregateFunctions/AggregateFunctionAvg.h
index 897306a7d32..2ac0a0935a8 100644
--- a/src/AggregateFunctions/AggregateFunctionAvg.h
+++ b/src/AggregateFunctions/AggregateFunctionAvg.h
@@ -166,5 +166,110 @@ public:
     }
 
     String getName() const final { return "avg"; }
+
+#if USE_EMBEDDED_COMPILER
+
+    bool isCompilable() const override
+    {
+        bool can_be_compiled = true;
+
+        for (const auto & argument : this->argument_types)
+            can_be_compiled &= canBeNativeType(*argument);
+
+        return can_be_compiled;
+    }
+
+    void compileCreate(llvm::IRBuilderBase & builder, llvm::Value * aggregate_data_ptr) const override
+    {
+        llvm::IRBuilder<> & b = static_cast<llvm::IRBuilder<> &>(builder);
+
+        auto * numerator_type = toNativeType<AvgFieldType<T>>(b);
+        auto * numerator_ptr = b.CreatePointerCast(aggregate_data_ptr, numerator_type->getPointerTo());
+
+        auto * denominator_type = toNativeType<UInt64>(b);
+        auto * denominator_offset_ptr = b.CreateConstGEP1_32(nullptr, aggregate_data_ptr, sizeof(AvgFieldType<T>));
+        auto * denominator_ptr = b.CreatePointerCast(denominator_offset_ptr, denominator_type->getPointerTo());
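+        /// Editor's note: the byte offsets used here assume the aggregate state
+        /// is laid out like this sketch (the real state is the avg
+        /// numerator/denominator pair; AvgFieldType<T> is the numerator type):
+        ///
+        ///     struct AvgState
+        ///     {
+        ///         AvgFieldType<T> numerator;   /// offset 0
+        ///         UInt64 denominator;          /// offset sizeof(AvgFieldType<T>)
+        ///     };
+        ///
+        /// compileCreate therefore zero-initializes both fields through pointers
+        /// derived from those offsets, matching what the C++ constructor does.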
b.CreateStore(llvm::Constant::getNullValue(numerator_type), numerator_ptr); + b.CreateStore(llvm::Constant::getNullValue(denominator_type), denominator_ptr); + } + + void compileAdd(llvm::IRBuilderBase & builder, llvm::Value * aggregate_data_ptr, const DataTypes & arguments_types, const std::vector & argument_values) const override + { + llvm::IRBuilder<> & b = static_cast &>(builder); + + auto * numerator_type = toNativeType>(b); + + auto * numerator_ptr = b.CreatePointerCast(aggregate_data_ptr, numerator_type->getPointerTo()); + auto * numerator_value = b.CreateLoad(numerator_type, numerator_ptr); + + const auto & argument_type = arguments_types[0]; + const auto & argument_value = argument_values[0]; + auto * value_cast_to_numerator = nativeCast(b, argument_type, argument_value, numerator_type); + auto * numerator_result_value = numerator_type->isIntegerTy() ? b.CreateAdd(numerator_value, value_cast_to_numerator) : b.CreateFAdd(numerator_value, value_cast_to_numerator); + b.CreateStore(numerator_result_value, numerator_ptr); + + auto * denominator_type = toNativeType(b); + + auto * denominator_offset_ptr = b.CreateConstGEP1_32(nullptr, aggregate_data_ptr, sizeof(AvgFieldType)); + auto * denominator_ptr = b.CreatePointerCast(denominator_offset_ptr, denominator_type->getPointerTo()); + + auto * denominator_value = b.CreateLoad(denominator_type, denominator_ptr); + auto * denominator_value_updated = b.CreateAdd(denominator_value, llvm::ConstantInt::get(denominator_type, 1)); + + b.CreateStore(denominator_value_updated, denominator_ptr); + } + + void compileMerge(llvm::IRBuilderBase & builder, llvm::Value * aggregate_data_dst_ptr, llvm::Value * aggregate_data_src_ptr) const override + { + llvm::IRBuilder<> & b = static_cast &>(builder); + + auto * numerator_type = toNativeType>(b); + + auto * numerator_dst_ptr = b.CreatePointerCast(aggregate_data_dst_ptr, numerator_type->getPointerTo()); + auto * numerator_dst_value = b.CreateLoad(numerator_type, numerator_dst_ptr); + + auto * numerator_src_ptr = b.CreatePointerCast(aggregate_data_dst_ptr, numerator_type->getPointerTo()); + auto * numerator_src_value = b.CreateLoad(numerator_type, numerator_src_ptr); + + auto * numerator_result_value = numerator_type->isIntegerTy() ? 
b.CreateAdd(numerator_dst_value, numerator_src_value) : b.CreateFAdd(numerator_dst_value, numerator_src_value); + b.CreateStore(numerator_result_value, numerator_dst_ptr); + + auto * denominator_type = toNativeType(b); + + auto * denominator_dst_offset_ptr = b.CreateConstGEP1_32(nullptr, aggregate_data_dst_ptr, sizeof(AvgFieldType)); + auto * denominator_src_offset_ptr = b.CreateConstGEP1_32(nullptr, aggregate_data_src_ptr, sizeof(AvgFieldType)); + + auto * denominator_dst_ptr = b.CreatePointerCast(denominator_dst_offset_ptr, denominator_type->getPointerTo()); + auto * denominator_src_ptr = b.CreatePointerCast(denominator_src_offset_ptr, denominator_type->getPointerTo()); + + auto * denominator_dst_value = b.CreateLoad(denominator_type, denominator_dst_ptr); + auto * denominator_src_value = b.CreateLoad(denominator_type, denominator_src_ptr); + + auto * denominator_result_value = b.CreateAdd(denominator_src_value, denominator_dst_value); + b.CreateStore(denominator_result_value, denominator_dst_ptr); + } + + llvm::Value * compileGetResult(llvm::IRBuilderBase & builder, llvm::Value * aggregate_data_ptr) const override + { + llvm::IRBuilder<> & b = static_cast &>(builder); + + auto * numerator_type = toNativeType>(b); + auto * numerator_ptr = b.CreatePointerCast(aggregate_data_ptr, numerator_type->getPointerTo()); + auto * numerator_value = b.CreateLoad(numerator_type, numerator_ptr); + + auto * denominator_type = toNativeType(b); + auto * denominator_offset_ptr = b.CreateConstGEP1_32(nullptr, aggregate_data_ptr, sizeof(AvgFieldType)); + auto * denominator_ptr = b.CreatePointerCast(denominator_offset_ptr, denominator_type->getPointerTo()); + auto * denominator_value = b.CreateLoad(denominator_type, denominator_ptr); + + auto * double_numerator = nativeCast>(b, numerator_value, b.getDoubleTy()); + auto * double_denominator = nativeCast(b, denominator_value, b.getDoubleTy()); + + return b.CreateFDiv(double_numerator, double_denominator); + } + +#endif + }; } diff --git a/src/DataTypes/Native.h b/src/DataTypes/Native.h index 0a13ce83590..a62c73fa352 100644 --- a/src/DataTypes/Native.h +++ b/src/DataTypes/Native.h @@ -61,6 +61,25 @@ static inline llvm::Type * toNativeType(llvm::IRBuilderBase & builder, const IDa return nullptr; } +template +static inline llvm::Type * toNativeType(llvm::IRBuilderBase & builder) +{ + if constexpr (std::is_same_v || std::is_same_v) + return builder.getInt8Ty(); + else if constexpr (std::is_same_v || std::is_same_v) + return builder.getInt16Ty(); + else if constexpr (std::is_same_v || std::is_same_v) + return builder.getInt32Ty(); + else if constexpr (std::is_same_v || std::is_same_v) + return builder.getInt64Ty(); + else if constexpr (std::is_same_v) + return builder.getFloatTy(); + else if constexpr (std::is_same_v) + return builder.getDoubleTy(); + + return nullptr; +} + static inline bool canBeNativeType(const IDataType & type) { WhichDataType data_type(type); @@ -79,40 +98,62 @@ static inline llvm::Type * toNativeType(llvm::IRBuilderBase & builder, const Dat return toNativeType(builder, *type); } -static inline llvm::Value * nativeBoolCast(llvm::IRBuilder<> & b, const DataTypePtr & from, llvm::Value * value) +static inline llvm::Value * nativeBoolCast(llvm::IRBuilder<> & b, const DataTypePtr & from_type, llvm::Value * value) { - if (from->isNullable()) + if (from_type->isNullable()) { - auto * inner = nativeBoolCast(b, removeNullable(from), b.CreateExtractValue(value, {0})); + auto * inner = nativeBoolCast(b, removeNullable(from_type), 
b.CreateExtractValue(value, {0})); return b.CreateAnd(b.CreateNot(b.CreateExtractValue(value, {1})), inner); } auto * zero = llvm::Constant::getNullValue(value->getType()); + if (value->getType()->isIntegerTy()) return b.CreateICmpNE(value, zero); if (value->getType()->isFloatingPointTy()) return b.CreateFCmpONE(value, zero); /// QNaN is false - throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Cannot cast non-number {} to bool", from->getName()); + throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Cannot cast non-number {} to bool", from_type->getName()); } -static inline llvm::Value * nativeCast(llvm::IRBuilder<> & b, const DataTypePtr & from, llvm::Value * value, llvm::Type * to) +static inline llvm::Value * nativeCast(llvm::IRBuilder<> & b, const DataTypePtr & from, llvm::Value * value, llvm::Type * to_type) { - auto * n_from = value->getType(); + auto * from_type = value->getType(); - if (n_from == to) + if (from_type == to_type) return value; - else if (n_from->isIntegerTy() && to->isFloatingPointTy()) - return typeIsSigned(*from) ? b.CreateSIToFP(value, to) : b.CreateUIToFP(value, to); - else if (n_from->isFloatingPointTy() && to->isIntegerTy()) - return typeIsSigned(*from) ? b.CreateFPToSI(value, to) : b.CreateFPToUI(value, to); - else if (n_from->isIntegerTy() && to->isIntegerTy()) - return b.CreateIntCast(value, to, typeIsSigned(*from)); - else if (n_from->isFloatingPointTy() && to->isFloatingPointTy()) - return b.CreateFPCast(value, to); + else if (from_type->isIntegerTy() && to_type->isFloatingPointTy()) + return typeIsSigned(*from) ? b.CreateSIToFP(value, to_type) : b.CreateUIToFP(value, to_type); + else if (from_type->isFloatingPointTy() && to_type->isIntegerTy()) + return typeIsSigned(*from) ? b.CreateFPToSI(value, to_type) : b.CreateFPToUI(value, to_type); + else if (from_type->isIntegerTy() && to_type->isIntegerTy()) + return b.CreateIntCast(value, to_type, typeIsSigned(*from)); + else if (from_type->isFloatingPointTy() && to_type->isFloatingPointTy()) + return b.CreateFPCast(value, to_type); throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Cannot cast {} to requested type", from->getName()); } +template +static inline llvm::Value * nativeCast(llvm::IRBuilder<> & b, llvm::Value * value, llvm::Type * to_type) +{ + auto * from_type = value->getType(); + + static constexpr bool from_type_is_signed = std::numeric_limits::is_signed; + + if (from_type == to_type) + return value; + else if (from_type->isIntegerTy() && to_type->isFloatingPointTy()) + return from_type_is_signed ? b.CreateSIToFP(value, to_type) : b.CreateUIToFP(value, to_type); + else if (from_type->isFloatingPointTy() && to_type->isIntegerTy()) + return from_type_is_signed ? 
b.CreateFPToSI(value, to_type) : b.CreateFPToUI(value, to_type); + else if (from_type->isIntegerTy() && to_type->isIntegerTy()) + return b.CreateIntCast(value, to_type, from_type_is_signed); + else if (from_type->isFloatingPointTy() && to_type->isFloatingPointTy()) + return b.CreateFPCast(value, to_type); + + throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Cannot cast {} to requested type", TypeName); +} + static inline llvm::Value * nativeCast(llvm::IRBuilder<> & b, const DataTypePtr & from, llvm::Value * value, const DataTypePtr & to) { auto * n_to = toNativeType(b, to); From f93da5ed2bbe19acba4e0d4bccca7ba1ccb9b6d3 Mon Sep 17 00:00:00 2001 From: Maksim Kita Date: Sat, 5 Jun 2021 18:10:16 +0300 Subject: [PATCH 628/931] Compile AggregateFunctionNullUnary --- src/AggregateFunctions/AggregateFunctionIf.h | 8 +- .../AggregateFunctionNull.h | 128 ++++++++++++++++++ src/Interpreters/JIT/compileFunction.cpp | 46 ++++++- 3 files changed, 174 insertions(+), 8 deletions(-) diff --git a/src/AggregateFunctions/AggregateFunctionIf.h b/src/AggregateFunctions/AggregateFunctionIf.h index e01ee982bd0..153c80e87b2 100644 --- a/src/AggregateFunctions/AggregateFunctionIf.h +++ b/src/AggregateFunctions/AggregateFunctionIf.h @@ -195,15 +195,15 @@ public: b.SetInsertPoint(if_true); - size_t arguments_size = arguments_types.size(); + size_t arguments_size_without_predicate = arguments_types.size() - 1; DataTypes argument_types_without_predicate; std::vector argument_values_without_predicate; - argument_types_without_predicate.resize(arguments_size - 1); - argument_values_without_predicate.resize(arguments_size - 1); + argument_types_without_predicate.resize(arguments_size_without_predicate); + argument_values_without_predicate.resize(arguments_size_without_predicate); - for (size_t i = 0; i < arguments_types.size() - 1; ++i) + for (size_t i = 0; i < arguments_size_without_predicate; ++i) { argument_types_without_predicate[i] = arguments_types[i]; argument_values_without_predicate[i] = argument_values[i]; diff --git a/src/AggregateFunctions/AggregateFunctionNull.h b/src/AggregateFunctions/AggregateFunctionNull.h index a0e36d1bc3d..db262506066 100644 --- a/src/AggregateFunctions/AggregateFunctionNull.h +++ b/src/AggregateFunctions/AggregateFunctionNull.h @@ -6,9 +6,18 @@ #include #include #include +#include #include #include +#if !defined(ARCADIA_BUILD) +# include +#endif + +#if USE_EMBEDDED_COMPILER +# include +# include +#endif namespace DB { @@ -226,6 +235,125 @@ public: if (!memoryIsByte(null_map, batch_size, 1)) this->setFlag(place); } + + +#if USE_EMBEDDED_COMPILER + + bool isCompilable() const override + { + return this->nested_function->isCompilable(); + } + + void compileCreate(llvm::IRBuilderBase & builder, llvm::Value * aggregate_data_ptr) const override + { + llvm::IRBuilder<> & b = static_cast &>(builder); + + if constexpr (result_is_nullable) + { + auto alignemnt = llvm::assumeAligned(this->alignOfData()); + b.CreateMemSet(aggregate_data_ptr, llvm::ConstantInt::get(b.getInt8Ty(), 0), this->prefix_size, alignemnt); + } + + auto * aggregate_data_ptr_with_prefix_size_offset = b.CreateConstGEP1_32(nullptr, aggregate_data_ptr, this->prefix_size); + this->nested_function->compileCreate(b, aggregate_data_ptr_with_prefix_size_offset); + } + + void compileAdd(llvm::IRBuilderBase & builder, llvm::Value * aggregate_data_ptr, const DataTypes & arguments_types, const std::vector & argument_values) const override + { + + llvm::IRBuilder<> & b = static_cast &>(builder); + + const auto & nullable_type = 
arguments_types[0]; + const auto & nullable_value = argument_values[0]; + + auto * wrapped_value = b.CreateExtractValue(nullable_value, {0}); + auto * is_null_value = b.CreateExtractValue(nullable_value, {1}); + + auto * head = b.GetInsertBlock(); + + auto * join_block = llvm::BasicBlock::Create(head->getContext(), "join_block", head->getParent()); + auto * if_null = llvm::BasicBlock::Create(head->getContext(), "if_null", head->getParent()); + auto * if_not_null = llvm::BasicBlock::Create(head->getContext(), "if_not_null", head->getParent()); + + b.CreateCondBr(is_null_value, if_null, if_not_null); + + b.SetInsertPoint(if_null); + b.CreateBr(join_block); + + b.SetInsertPoint(if_not_null); + b.CreateStore(llvm::ConstantInt::get(b.getInt8Ty(), 1), aggregate_data_ptr); + auto * aggregate_data_ptr_with_prefix_size_offset = b.CreateConstGEP1_32(nullptr, aggregate_data_ptr, this->prefix_size); + this->nested_function->compileAdd(b, aggregate_data_ptr_with_prefix_size_offset, { removeNullable(nullable_type) }, { wrapped_value }); + b.CreateBr(join_block); + + b.SetInsertPoint(join_block); + + } + + void compileMerge(llvm::IRBuilderBase & builder, llvm::Value * aggregate_data_dst_ptr, llvm::Value * aggregate_data_src_ptr) const override + { + llvm::IRBuilder<> & b = static_cast &>(builder); + + if constexpr (result_is_nullable) + { + auto alignment = llvm::assumeAligned(this->alignOfData()); + b.CreateMemCpy(aggregate_data_dst_ptr, alignment, aggregate_data_src_ptr, alignment, this->prefix_size); + } + + auto * aggregate_data_dst_ptr_with_prefix_size_offset = b.CreateConstGEP1_32(nullptr, aggregate_data_dst_ptr, this->prefix_size); + auto * aggregate_data_src_ptr_with_prefix_size_offset = b.CreateConstGEP1_32(nullptr, aggregate_data_src_ptr, this->prefix_size); + + this->nested_function->compileMerge(b, aggregate_data_dst_ptr_with_prefix_size_offset, aggregate_data_src_ptr_with_prefix_size_offset); + } + + llvm::Value * compileGetResult(llvm::IRBuilderBase & builder, llvm::Value * aggregate_data_ptr) const override + { + llvm::IRBuilder<> & b = static_cast &>(builder); + + auto * return_type = toNativeType(b, this->getReturnType()); + + llvm::Value * result = nullptr; + + if constexpr (result_is_nullable) + { + auto * place = b.CreateLoad(b.getInt8Ty(), aggregate_data_ptr); + + auto * head = b.GetInsertBlock(); + + auto * join_block = llvm::BasicBlock::Create(head->getContext(), "join_block", head->getParent()); + auto * if_null = llvm::BasicBlock::Create(head->getContext(), "if_null", head->getParent()); + auto * if_not_null = llvm::BasicBlock::Create(head->getContext(), "if_not_null", head->getParent()); + + auto * nullable_value_ptr = b.CreateAlloca(return_type); + b.CreateStore(llvm::ConstantInt::getNullValue(return_type), nullable_value_ptr); + auto * nullable_value = b.CreateLoad(return_type, nullable_value_ptr); + + b.CreateCondBr(nativeBoolCast(b, std::make_shared(), place), if_not_null, if_null); + + b.SetInsertPoint(if_null); + b.CreateStore(b.CreateInsertValue(nullable_value, b.getInt1(true), {1}), nullable_value_ptr); + b.CreateBr(join_block); + + b.SetInsertPoint(if_not_null); + auto * aggregate_data_ptr_with_prefix_size_offset = b.CreateConstGEP1_32(nullptr, aggregate_data_ptr, this->prefix_size); + auto * nested_result = this->nested_function->compileGetResult(builder, aggregate_data_ptr_with_prefix_size_offset); + b.CreateStore(b.CreateInsertValue(nullable_value, nested_result, {0}), nullable_value_ptr); + b.CreateBr(join_block); + + b.SetInsertPoint(join_block); + + result = 
b.CreateLoad(return_type, nullable_value_ptr); + } + else + { + result = this->nested_function->compileGetResult(b, aggregate_data_ptr); + } + + return result; + } + +#endif + }; diff --git a/src/Interpreters/JIT/compileFunction.cpp b/src/Interpreters/JIT/compileFunction.cpp index 813d8abc7df..8c033c8a5e6 100644 --- a/src/Interpreters/JIT/compileFunction.cpp +++ b/src/Interpreters/JIT/compileFunction.cpp @@ -342,6 +342,7 @@ static void compileAddIntoAggregateStatesFunctions(llvm::Module & module, const const auto & argument_type = argument_types[column_argument_index]; auto * data = b.CreateLoad(column_data_type, b.CreateConstInBoundsGEP1_32(column_data_type, columns_arg, previous_columns_size + column_argument_index)); data_placeholder.data_init = b.CreatePointerCast(b.CreateExtractValue(data, {0}), toNativeType(b, removeNullable(argument_type))->getPointerTo()); + data_placeholder.null_init = argument_type->isNullable() ? b.CreateExtractValue(data, {1}) : nullptr; columns.emplace_back(data_placeholder); } @@ -364,6 +365,12 @@ static void compileAddIntoAggregateStatesFunctions(llvm::Module & module, const { col.data = b.CreatePHI(col.data_init->getType(), 2); col.data->addIncoming(col.data_init, entry); + + if (col.null_init) + { + col.null = b.CreatePHI(col.null_init->getType(), 2); + col.null->addIncoming(col.null_init, entry); + } } auto * aggregation_place = b.CreateCall(get_place_func_declaration, get_place_function_arg, { get_place_function_context_arg, counter_phi }); @@ -383,7 +390,21 @@ static void compileAddIntoAggregateStatesFunctions(llvm::Module & module, const for (size_t column_argument_index = 0; column_argument_index < function_arguments_size; ++column_argument_index) { auto * column_argument_data = columns[previous_columns_size + column_argument_index].data; - arguments_values[column_argument_index] = b.CreateLoad(toNativeType(b, arguments_types[column_argument_index]), column_argument_data); + auto * column_argument_null_data = columns[previous_columns_size + column_argument_index].null; + + auto & argument_type = arguments_types[column_argument_index]; + + auto * value = b.CreateLoad(toNativeType(b, removeNullable(argument_type)), column_argument_data); + if (!argument_type->isNullable()) + { + arguments_values[column_argument_index] = value; + continue; + } + + auto * is_null = b.CreateICmpNE(b.CreateLoad(b.getInt8Ty(), column_argument_null_data), b.getInt8(0)); + auto * nullable_unitilized = llvm::Constant::getNullValue(toNativeType(b, argument_type)); + auto * nullable_value = b.CreateInsertValue(b.CreateInsertValue(nullable_unitilized, value, {0}), is_null, {1}); + arguments_values[column_argument_index] = nullable_value; } auto * aggregation_place_with_offset = b.CreateConstInBoundsGEP1_32(nullptr, aggregation_place, aggregate_function_offset); @@ -468,6 +489,7 @@ static void compileInsertAggregatesIntoResultColumns(llvm::Module & module, cons auto return_type = functions[i].function->getReturnType(); auto * data = b.CreateLoad(column_data_type, b.CreateConstInBoundsGEP1_32(column_data_type, columns_arg, i)); columns[i].data_init = b.CreatePointerCast(b.CreateExtractValue(data, {0}), toNativeType(b, removeNullable(return_type))->getPointerTo()); + columns[i].null_init = return_type->isNullable() ? 
b.CreateExtractValue(data, {1}) : nullptr; } auto * end = llvm::BasicBlock::Create(b.getContext(), "end", aggregate_loop_func); @@ -487,6 +509,12 @@ static void compileInsertAggregatesIntoResultColumns(llvm::Module & module, cons { col.data = b.CreatePHI(col.data_init->getType(), 2); col.data->addIncoming(col.data_init, entry); + + if (col.null_init) + { + col.null = b.CreatePHI(col.null_init->getType(), 2); + col.null->addIncoming(col.null_init, entry); + } } for (size_t i = 0; i < functions.size(); ++i) @@ -499,7 +527,16 @@ static void compileInsertAggregatesIntoResultColumns(llvm::Module & module, cons auto column_type = functions[i].function->getArgumentTypes()[0]; auto * final_value = aggregate_function_ptr->compileGetResult(b, aggregation_place_with_offset); - b.CreateStore(final_value, columns[i].data); + + if (columns[i].null_init) + { + b.CreateStore(b.CreateExtractValue(final_value, {0}), columns[i].data); + b.CreateStore(b.CreateSelect(b.CreateExtractValue(final_value, {1}), b.getInt8(1), b.getInt8(0)), columns[i].null); + } + else + { + b.CreateStore(final_value, columns[i].data); + } } /// End of loop @@ -508,14 +545,15 @@ static void compileInsertAggregatesIntoResultColumns(llvm::Module & module, cons for (auto & col : columns) { col.data->addIncoming(b.CreateConstInBoundsGEP1_32(nullptr, col.data, 1), cur_block); + if (col.null) col.null->addIncoming(b.CreateConstInBoundsGEP1_32(nullptr, col.null, 1), cur_block); } auto * value = b.CreateAdd(counter_phi, llvm::ConstantInt::get(size_type, 1), "", true, true); - counter_phi->addIncoming(value, loop); + counter_phi->addIncoming(value, cur_block); - aggregate_data_place_phi->addIncoming(b.CreateConstInBoundsGEP1_32(nullptr, aggregate_data_place_phi, 1), loop); + aggregate_data_place_phi->addIncoming(b.CreateConstInBoundsGEP1_32(nullptr, aggregate_data_place_phi, 1), cur_block); b.CreateCondBr(b.CreateICmpEQ(value, rows_count_arg), end, loop); From 905f48ccce454f57f7ebed13c2e237b1a107fd08 Mon Sep 17 00:00:00 2001 From: Maksim Kita Date: Sat, 5 Jun 2021 19:56:50 +0300 Subject: [PATCH 629/931] Compile AggregateFunctionAny --- .../AggregateFunctionMinMaxAny.h | 209 ++++++++++++++++++ .../AggregateFunctionNull.h | 4 +- 2 files changed, 211 insertions(+), 2 deletions(-) diff --git a/src/AggregateFunctions/AggregateFunctionMinMaxAny.h b/src/AggregateFunctions/AggregateFunctionMinMaxAny.h index d4cb23e75e6..dfa553ff8d8 100644 --- a/src/AggregateFunctions/AggregateFunctionMinMaxAny.h +++ b/src/AggregateFunctions/AggregateFunctionMinMaxAny.h @@ -7,11 +7,20 @@ #include #include #include +#include #include #include #include +#if !defined(ARCADIA_BUILD) +# include +#endif + +#if USE_EMBEDDED_COMPILER +# include +# include +#endif namespace DB { @@ -177,6 +186,93 @@ public: { return false; } + +#if USE_EMBEDDED_COMPILER + + static constexpr bool is_compilable = true; + + static void compileChange(llvm::IRBuilderBase & builder, llvm::Value * aggregate_data_ptr, llvm::Value * value_to_check) + { + llvm::IRBuilder<> & b = static_cast &>(builder); + + auto * has_value_ptr = b.CreatePointerCast(aggregate_data_ptr, b.getInt1Ty()->getPointerTo()); + b.CreateStore(b.getInt1(true), has_value_ptr); + + static constexpr size_t value_offset_from_structure = offsetof(SingleValueDataFixed, value); + + auto * type = toNativeType(builder); + auto * value_ptr_with_offset = b.CreateConstGEP1_32(nullptr, aggregate_data_ptr, value_offset_from_structure); + auto * value_ptr = b.CreatePointerCast(value_ptr_with_offset, type->getPointerTo()); + 
b.CreateStore(value_to_check, value_ptr); + } + + static void compileChangeMerge(llvm::IRBuilderBase & builder, llvm::Value * aggregate_data_dst_ptr, llvm::Value * aggregate_data_src_ptr) + { + llvm::IRBuilder<> & b = static_cast &>(builder); + + static constexpr size_t value_offset_from_structure = offsetof(SingleValueDataFixed, value); + + auto * type = toNativeType(b); + auto * value_src_ptr_with_offset = b.CreateConstGEP1_32(nullptr, aggregate_data_src_ptr, value_offset_from_structure); + auto * value_src_ptr = b.CreatePointerCast(value_src_ptr_with_offset, type->getPointerTo()); + auto * value_src = b.CreateLoad(type, value_src_ptr); + + compileChange(builder, aggregate_data_dst_ptr, value_src); + } + + static void compileChangeFirstTime(llvm::IRBuilderBase & builder, llvm::Value * aggregate_data_ptr, llvm::Value * value_to_check) + { + llvm::IRBuilder<> & b = static_cast &>(builder); + + auto * has_value_ptr = b.CreatePointerCast(aggregate_data_ptr, b.getInt1Ty()->getPointerTo()); + auto * has_value_value = b.CreateLoad(b.getInt1Ty(), has_value_ptr); + + auto * head = b.GetInsertBlock(); + + auto * join_block = llvm::BasicBlock::Create(head->getContext(), "join_block", head->getParent()); + auto * if_has = llvm::BasicBlock::Create(head->getContext(), "if_has", head->getParent()); + auto * if_has_not = llvm::BasicBlock::Create(head->getContext(), "if_has_not", head->getParent()); + + b.CreateCondBr(has_value_value, if_has, if_has_not); + + b.SetInsertPoint(if_has); + b.CreateBr(join_block); + + b.SetInsertPoint(if_has_not); + compileChange(builder, aggregate_data_ptr, value_to_check); + b.CreateBr(join_block); + + b.SetInsertPoint(join_block); + } + + static void compileChangeFirstTimeMerge(llvm::IRBuilderBase & builder, llvm::Value * aggregate_data_dst_ptr, llvm::Value * aggregate_data_src_ptr) + { + llvm::IRBuilder<> & b = static_cast &>(builder); + + static constexpr size_t value_offset_from_structure = offsetof(SingleValueDataFixed, value); + + auto * type = toNativeType(b); + auto * value_src_ptr_with_offset = b.CreateConstGEP1_32(nullptr, aggregate_data_src_ptr, value_offset_from_structure); + auto * value_src_ptr = b.CreatePointerCast(value_src_ptr_with_offset, type->getPointerTo()); + auto * value_src = b.CreateLoad(type, value_src_ptr); + + compileChangeFirstTime(builder, aggregate_data_dst_ptr, value_src); + } + + static llvm::Value * compileGetResult(llvm::IRBuilderBase & builder, llvm::Value * aggregate_data_ptr) + { + llvm::IRBuilder<> & b = static_cast &>(builder); + + static constexpr size_t value_offset_from_structure = offsetof(SingleValueDataFixed, value); + + auto * type = toNativeType(builder); + auto * value_ptr_with_offset = b.CreateConstGEP1_32(nullptr, aggregate_data_ptr, value_offset_from_structure); + auto * value_ptr = b.CreatePointerCast(value_ptr_with_offset, type->getPointerTo()); + return b.CreateLoad(type, value_ptr); + } + +#endif + }; @@ -400,6 +496,13 @@ public: { return true; } + +#if USE_EMBEDDED_COMPILER + + static constexpr bool is_compilable = false; + +#endif + }; static_assert( @@ -576,6 +679,13 @@ public: { return false; } + +#if USE_EMBEDDED_COMPILER + + static constexpr bool is_compilable = false; + +#endif + }; @@ -593,6 +703,12 @@ struct AggregateFunctionMinData : Data bool changeIfBetter(const Self & to, Arena * arena) { return this->changeIfLess(to, arena); } static const char * name() { return "min"; } + +#if USE_EMBEDDED_COMPILER + + static constexpr bool is_compilable = false; + +#endif }; template @@ -604,6 +720,12 @@ struct 
AggregateFunctionMaxData : Data bool changeIfBetter(const Self & to, Arena * arena) { return this->changeIfGreater(to, arena); } static const char * name() { return "max"; } + +#if USE_EMBEDDED_COMPILER + + static constexpr bool is_compilable = false; + +#endif }; template @@ -615,6 +737,22 @@ struct AggregateFunctionAnyData : Data bool changeIfBetter(const Self & to, Arena * arena) { return this->changeFirstTime(to, arena); } static const char * name() { return "any"; } + +#if USE_EMBEDDED_COMPILER + + static constexpr bool is_compilable = Data::is_compilable; + + static void compileChangeIfBetter(llvm::IRBuilderBase & builder, llvm::Value * aggregate_data_ptr, llvm::Value * value_to_check) + { + Data::compileChangeFirstTime(builder, aggregate_data_ptr, value_to_check); + } + + static void compileChangeIfBetterMerge(llvm::IRBuilderBase & builder, llvm::Value * aggregate_data_dst_ptr, llvm::Value * aggregate_data_src_ptr) + { + Data::compileChangeFirstTimeMerge(builder, aggregate_data_dst_ptr, aggregate_data_src_ptr); + } + +#endif }; template @@ -626,6 +764,12 @@ struct AggregateFunctionAnyLastData : Data bool changeIfBetter(const Self & to, Arena * arena) { return this->changeEveryTime(to, arena); } static const char * name() { return "anyLast"; } + +#if USE_EMBEDDED_COMPILER + + static constexpr bool is_compilable = false; + +#endif }; @@ -693,6 +837,13 @@ struct AggregateFunctionAnyHeavyData : Data } static const char * name() { return "anyHeavy"; } + +#if USE_EMBEDDED_COMPILER + + static constexpr bool is_compilable = false; + +#endif + }; @@ -725,6 +876,7 @@ public: void add(AggregateDataPtr __restrict place, const IColumn ** columns, size_t row_num, Arena * arena) const override { + std::cerr << "AggregateFunctionSingleValue::add sizeof data " << this->sizeOfData() << " align of data " << this->alignOfData() << std::endl; this->data(place).changeIfBetter(*columns[0], row_num, arena); } @@ -752,6 +904,63 @@ public: { this->data(place).insertResultInto(to); } + +#if USE_EMBEDDED_COMPILER + + bool isCompilable() const override + { + if constexpr (!Data::is_compilable) + return false; + + return canBeNativeType(*type); + } + + + void compileCreate(llvm::IRBuilderBase & builder, llvm::Value * aggregate_data_ptr) const override + { + llvm::IRBuilder<> & b = static_cast &>(builder); + + auto alignment = llvm::assumeAligned(this->alignOfData()); + b.CreateMemSet(aggregate_data_ptr, llvm::ConstantInt::get(b.getInt8Ty(), 0), this->sizeOfData(), alignment); + } + + void compileAdd(llvm::IRBuilderBase & builder, llvm::Value * aggregate_data_ptr, const DataTypes &, const std::vector & argument_values) const override + { + if constexpr (Data::is_compilable) + { + Data::compileChangeIfBetter(builder, aggregate_data_ptr, argument_values[0]); + } + else + { + throw Exception(getName() + " is not JIT-compilable", ErrorCodes::NOT_IMPLEMENTED); + } + } + + void compileMerge(llvm::IRBuilderBase & builder, llvm::Value * aggregate_data_dst_ptr, llvm::Value * aggregate_data_src_ptr) const override + { + if constexpr (Data::is_compilable) + { + Data::compileChangeIfBetterMerge(builder, aggregate_data_dst_ptr, aggregate_data_src_ptr); + } + else + { + throw Exception(getName() + " is not JIT-compilable", ErrorCodes::NOT_IMPLEMENTED); + } + } + + llvm::Value * compileGetResult(llvm::IRBuilderBase & builder, llvm::Value * aggregate_data_ptr) const override + { + if constexpr (Data::is_compilable) + { + return Data::compileGetResult(builder, aggregate_data_ptr); + } + else + { + throw Exception(getName() + " is 
not JIT-compilable", ErrorCodes::NOT_IMPLEMENTED); + } + } + +#endif }; } diff --git a/src/AggregateFunctions/AggregateFunctionNull.h b/src/AggregateFunctions/AggregateFunctionNull.h index db262506066..9cccb5b3ce9 100644 --- a/src/AggregateFunctions/AggregateFunctionNull.h +++ b/src/AggregateFunctions/AggregateFunctionNull.h @@ -250,8 +250,8 @@ public: if constexpr (result_is_nullable) { - auto alignemnt = llvm::assumeAligned(this->alignOfData()); - b.CreateMemSet(aggregate_data_ptr, llvm::ConstantInt::get(b.getInt8Ty(), 0), this->prefix_size, alignemnt); + auto alignment = llvm::assumeAligned(this->alignOfData()); + b.CreateMemSet(aggregate_data_ptr, llvm::ConstantInt::get(b.getInt8Ty(), 0), this->prefix_size, alignment); } auto * aggregate_data_ptr_with_prefix_size_offset = b.CreateConstGEP1_32(nullptr, aggregate_data_ptr, this->prefix_size); From fdfb17370d011c57497966f1dcad65952db1da04 Mon Sep 17 00:00:00 2001 From: Maksim Kita Date: Sat, 5 Jun 2021 20:03:47 +0300 Subject: [PATCH 630/931] Compile AggregateFunctionAnyLast --- .../AggregateFunctionMinMaxAny.h | 31 ++++++++++++++++++- 1 file changed, 30 insertions(+), 1 deletion(-) diff --git a/src/AggregateFunctions/AggregateFunctionMinMaxAny.h b/src/AggregateFunctions/AggregateFunctionMinMaxAny.h index dfa553ff8d8..d205a326b4f 100644 --- a/src/AggregateFunctions/AggregateFunctionMinMaxAny.h +++ b/src/AggregateFunctions/AggregateFunctionMinMaxAny.h @@ -259,6 +259,25 @@ public: compileChangeFirstTime(builder, aggregate_data_dst_ptr, value_src); } + static void compileChangeEveryTime(llvm::IRBuilderBase & builder, llvm::Value * aggregate_data_ptr, llvm::Value * value_to_check) + { + compileChange(builder, aggregate_data_ptr, value_to_check); + } + + static void compileChangeEveryTimeMerge(llvm::IRBuilderBase & builder, llvm::Value * aggregate_data_dst_ptr, llvm::Value * aggregate_data_src_ptr) + { + llvm::IRBuilder<> & b = static_cast &>(builder); + + static constexpr size_t value_offset_from_structure = offsetof(SingleValueDataFixed, value); + + auto * type = toNativeType(b); + auto * value_src_ptr_with_offset = b.CreateConstGEP1_32(nullptr, aggregate_data_src_ptr, value_offset_from_structure); + auto * value_src_ptr = b.CreatePointerCast(value_src_ptr_with_offset, type->getPointerTo()); + auto * value_src = b.CreateLoad(type, value_src_ptr); + + compileChangeEveryTime(builder, aggregate_data_dst_ptr, value_src); + } + static llvm::Value * compileGetResult(llvm::IRBuilderBase & builder, llvm::Value * aggregate_data_ptr) { llvm::IRBuilder<> & b = static_cast &>(builder); @@ -767,7 +786,17 @@ struct AggregateFunctionAnyLastData : Data #if USE_EMBEDDED_COMPILER - static constexpr bool is_compilable = false; + static constexpr bool is_compilable = Data::is_compilable; + + static void compileChangeIfBetter(llvm::IRBuilderBase & builder, llvm::Value * aggregate_data_ptr, llvm::Value * value_to_check) + { + Data::compileChangeEveryTime(builder, aggregate_data_ptr, value_to_check); + } + + static void compileChangeIfBetterMerge(llvm::IRBuilderBase & builder, llvm::Value * aggregate_data_dst_ptr, llvm::Value * aggregate_data_src_ptr) + { + Data::compileChangeEveryTimeMerge(builder, aggregate_data_dst_ptr, aggregate_data_src_ptr); + } #endif }; From 88d536ea5cf36dd15b207afc01bbb9510c042cfa Mon Sep 17 00:00:00 2001 From: Maksim Kita Date: Sat, 5 Jun 2021 21:54:50 +0300 Subject: [PATCH 631/931] Update compilation for AggregateFunctionAny, AggregateFunctionAnyLast --- .../AggregateFunctionMinMaxAny.h | 106 +++++++++++------- 1 file changed, 68 
insertions(+), 38 deletions(-) diff --git a/src/AggregateFunctions/AggregateFunctionMinMaxAny.h b/src/AggregateFunctions/AggregateFunctionMinMaxAny.h index d205a326b4f..73d05f08031 100644 --- a/src/AggregateFunctions/AggregateFunctionMinMaxAny.h +++ b/src/AggregateFunctions/AggregateFunctionMinMaxAny.h @@ -191,6 +191,29 @@ public: static constexpr bool is_compilable = true; + static llvm::Value * getValuePtrFromAggregateDataPtr(llvm::IRBuilderBase & builder, llvm::Value * aggregate_data_ptr) + { + llvm::IRBuilder<> & b = static_cast &>(builder); + + static constexpr size_t value_offset_from_structure = offsetof(SingleValueDataFixed, value); + + auto * type = toNativeType(builder); + auto * value_ptr_with_offset = b.CreateConstGEP1_32(nullptr, aggregate_data_ptr, value_offset_from_structure); + auto * value_ptr = b.CreatePointerCast(value_ptr_with_offset, type->getPointerTo()); + + return value_ptr; + } + + static llvm::Value * getValueFromAggregateDataPtr(llvm::IRBuilderBase & builder, llvm::Value * aggregate_data_ptr) + { + llvm::IRBuilder<> & b = static_cast &>(builder); + + auto * type = toNativeType(builder); + auto * value_ptr = getValuePtrFromAggregateDataPtr(builder, aggregate_data_ptr); + + return b.CreateLoad(type, value_ptr); + } + static void compileChange(llvm::IRBuilderBase & builder, llvm::Value * aggregate_data_ptr, llvm::Value * value_to_check) { llvm::IRBuilder<> & b = static_cast &>(builder); @@ -198,24 +221,13 @@ public: auto * has_value_ptr = b.CreatePointerCast(aggregate_data_ptr, b.getInt1Ty()->getPointerTo()); b.CreateStore(b.getInt1(true), has_value_ptr); - static constexpr size_t value_offset_from_structure = offsetof(SingleValueDataFixed, value); - - auto * type = toNativeType(builder); - auto * value_ptr_with_offset = b.CreateConstGEP1_32(nullptr, aggregate_data_ptr, value_offset_from_structure); - auto * value_ptr = b.CreatePointerCast(value_ptr_with_offset, type->getPointerTo()); + auto * value_ptr = getValuePtrFromAggregateDataPtr(b, aggregate_data_ptr); b.CreateStore(value_to_check, value_ptr); } static void compileChangeMerge(llvm::IRBuilderBase & builder, llvm::Value * aggregate_data_dst_ptr, llvm::Value * aggregate_data_src_ptr) { - llvm::IRBuilder<> & b = static_cast &>(builder); - - static constexpr size_t value_offset_from_structure = offsetof(SingleValueDataFixed, value); - - auto * type = toNativeType(b); - auto * value_src_ptr_with_offset = b.CreateConstGEP1_32(nullptr, aggregate_data_src_ptr, value_offset_from_structure); - auto * value_src_ptr = b.CreatePointerCast(value_src_ptr_with_offset, type->getPointerTo()); - auto * value_src = b.CreateLoad(type, value_src_ptr); + auto * value_src = getValueFromAggregateDataPtr(builder, aggregate_data_src_ptr); compileChange(builder, aggregate_data_dst_ptr, value_src); } @@ -230,15 +242,15 @@ public: auto * head = b.GetInsertBlock(); auto * join_block = llvm::BasicBlock::Create(head->getContext(), "join_block", head->getParent()); - auto * if_has = llvm::BasicBlock::Create(head->getContext(), "if_has", head->getParent()); - auto * if_has_not = llvm::BasicBlock::Create(head->getContext(), "if_has_not", head->getParent()); + auto * if_should_change = llvm::BasicBlock::Create(head->getContext(), "if_should_change", head->getParent()); + auto * if_should_not_change = llvm::BasicBlock::Create(head->getContext(), "if_should_not_change", head->getParent()); - b.CreateCondBr(has_value_value, if_has, if_has_not); + b.CreateCondBr(has_value_value, if_should_not_change, if_should_change); - b.SetInsertPoint(if_has); + 
b.SetInsertPoint(if_should_not_change); b.CreateBr(join_block); - b.SetInsertPoint(if_has_not); + b.SetInsertPoint(if_should_change); compileChange(builder, aggregate_data_ptr, value_to_check); b.CreateBr(join_block); @@ -249,14 +261,28 @@ public: { llvm::IRBuilder<> & b = static_cast &>(builder); - static constexpr size_t value_offset_from_structure = offsetof(SingleValueDataFixed, value); + auto * has_value_dst_ptr = b.CreatePointerCast(aggregate_data_dst_ptr, b.getInt1Ty()->getPointerTo()); + auto * has_value_dst = b.CreateLoad(b.getInt1Ty(), has_value_dst_ptr); - auto * type = toNativeType(b); - auto * value_src_ptr_with_offset = b.CreateConstGEP1_32(nullptr, aggregate_data_src_ptr, value_offset_from_structure); - auto * value_src_ptr = b.CreatePointerCast(value_src_ptr_with_offset, type->getPointerTo()); - auto * value_src = b.CreateLoad(type, value_src_ptr); + auto * has_value_src_ptr = b.CreatePointerCast(aggregate_data_src_ptr, b.getInt1Ty()->getPointerTo()); + auto * has_value_src = b.CreateLoad(b.getInt1Ty(), has_value_src_ptr); - compileChangeFirstTime(builder, aggregate_data_dst_ptr, value_src); + auto * head = b.GetInsertBlock(); + + auto * join_block = llvm::BasicBlock::Create(head->getContext(), "join_block", head->getParent()); + auto * if_should_change = llvm::BasicBlock::Create(head->getContext(), "if_should_change", head->getParent()); + auto * if_should_not_change = llvm::BasicBlock::Create(head->getContext(), "if_should_not_change", head->getParent()); + + b.CreateCondBr(b.CreateAnd(b.CreateNot(has_value_dst), has_value_src), if_should_change, if_should_not_change); + + b.SetInsertPoint(if_should_change); + compileChangeMerge(builder, aggregate_data_dst_ptr, aggregate_data_src_ptr); + b.CreateBr(join_block); + + b.SetInsertPoint(if_should_not_change); + b.CreateBr(join_block); + + b.SetInsertPoint(join_block); } static void compileChangeEveryTime(llvm::IRBuilderBase & builder, llvm::Value * aggregate_data_ptr, llvm::Value * value_to_check) @@ -268,26 +294,30 @@ public: { llvm::IRBuilder<> & b = static_cast &>(builder); - static constexpr size_t value_offset_from_structure = offsetof(SingleValueDataFixed, value); + auto * has_value_src_ptr = b.CreatePointerCast(aggregate_data_src_ptr, b.getInt1Ty()->getPointerTo()); + auto * has_value_src = b.CreateLoad(b.getInt1Ty(), has_value_src_ptr); - auto * type = toNativeType(b); - auto * value_src_ptr_with_offset = b.CreateConstGEP1_32(nullptr, aggregate_data_src_ptr, value_offset_from_structure); - auto * value_src_ptr = b.CreatePointerCast(value_src_ptr_with_offset, type->getPointerTo()); - auto * value_src = b.CreateLoad(type, value_src_ptr); + auto * head = b.GetInsertBlock(); - compileChangeEveryTime(builder, aggregate_data_dst_ptr, value_src); + auto * join_block = llvm::BasicBlock::Create(head->getContext(), "join_block", head->getParent()); + auto * if_should_change = llvm::BasicBlock::Create(head->getContext(), "if_should_change", head->getParent()); + auto * if_should_not_change = llvm::BasicBlock::Create(head->getContext(), "if_should_not_change", head->getParent()); + + b.CreateCondBr(has_value_src, if_should_change, if_should_not_change); + + b.SetInsertPoint(if_should_change); + compileChangeMerge(builder, aggregate_data_dst_ptr, aggregate_data_src_ptr); + b.CreateBr(join_block); + + b.SetInsertPoint(if_should_not_change); + b.CreateBr(join_block); + + b.SetInsertPoint(join_block); } static llvm::Value * compileGetResult(llvm::IRBuilderBase & builder, llvm::Value * aggregate_data_ptr) { - llvm::IRBuilder<> & b = 
static_cast &>(builder); - - static constexpr size_t value_offset_from_structure = offsetof(SingleValueDataFixed, value); - - auto * type = toNativeType(builder); - auto * value_ptr_with_offset = b.CreateConstGEP1_32(nullptr, aggregate_data_ptr, value_offset_from_structure); - auto * value_ptr = b.CreatePointerCast(value_ptr_with_offset, type->getPointerTo()); - return b.CreateLoad(type, value_ptr); + return getValueFromAggregateDataPtr(builder, aggregate_data_ptr); } #endif From ddc96374c842f96502f1daa7342017c8759c3e06 Mon Sep 17 00:00:00 2001 From: Maksim Kita Date: Sat, 5 Jun 2021 22:21:26 +0300 Subject: [PATCH 632/931] Compile AggregateFunctionMin --- .../AggregateFunctionMinMaxAny.h | 90 ++++++++++++++++++- 1 file changed, 88 insertions(+), 2 deletions(-) diff --git a/src/AggregateFunctions/AggregateFunctionMinMaxAny.h b/src/AggregateFunctions/AggregateFunctionMinMaxAny.h index 73d05f08031..88f77dfd278 100644 --- a/src/AggregateFunctions/AggregateFunctionMinMaxAny.h +++ b/src/AggregateFunctions/AggregateFunctionMinMaxAny.h @@ -315,6 +315,83 @@ public: b.SetInsertPoint(join_block); } + static void compileChangeIfLess(llvm::IRBuilderBase & builder, llvm::Value * aggregate_data_ptr, llvm::Value * value_to_check) + { + llvm::IRBuilder<> & b = static_cast &>(builder); + + auto * has_value_ptr = b.CreatePointerCast(aggregate_data_ptr, b.getInt1Ty()->getPointerTo()); + auto * has_value_value = b.CreateLoad(b.getInt1Ty(), has_value_ptr); + + auto * value = getValueFromAggregateDataPtr(b, aggregate_data_ptr); + + auto * head = b.GetInsertBlock(); + + auto * join_block = llvm::BasicBlock::Create(head->getContext(), "join_block", head->getParent()); + auto * if_should_change = llvm::BasicBlock::Create(head->getContext(), "if_should_change", head->getParent()); + auto * if_should_not_change = llvm::BasicBlock::Create(head->getContext(), "if_should_not_change", head->getParent()); + + auto is_signed = std::numeric_limits::is_signed; + + llvm::Value * is_current_value_less = nullptr; + + if (value_to_check->getType()->isIntegerTy()) + is_current_value_less = is_signed ? 
b.CreateICmpSLT(value_to_check, value) : b.CreateICmpULT(value_to_check, value); + else + is_current_value_less = b.CreateFCmpOLT(value_to_check, value); + + b.CreateCondBr(b.CreateOr(b.CreateNot(has_value_value), is_current_value_less), if_should_change, if_should_not_change); + + b.SetInsertPoint(if_should_change); + compileChange(builder, aggregate_data_ptr, value_to_check); + b.CreateBr(join_block); + + b.SetInsertPoint(if_should_not_change); + b.CreateBr(join_block); + + b.SetInsertPoint(join_block); + } + + static void compileChangeIfLessMerge(llvm::IRBuilderBase & builder, llvm::Value * aggregate_data_dst_ptr, llvm::Value * aggregate_data_src_ptr) + { + llvm::IRBuilder<> & b = static_cast &>(builder); + + auto * has_value_dst_ptr = b.CreatePointerCast(aggregate_data_dst_ptr, b.getInt1Ty()->getPointerTo()); + auto * has_value_dst = b.CreateLoad(b.getInt1Ty(), has_value_dst_ptr); + + auto * value_dst = getValueFromAggregateDataPtr(b, aggregate_data_dst_ptr); + + auto * has_value_src_ptr = b.CreatePointerCast(aggregate_data_src_ptr, b.getInt1Ty()->getPointerTo()); + auto * has_value_src = b.CreateLoad(b.getInt1Ty(), has_value_src_ptr); + + auto * value_src = getValueFromAggregateDataPtr(b, aggregate_data_src_ptr); + + auto * head = b.GetInsertBlock(); + + auto * join_block = llvm::BasicBlock::Create(head->getContext(), "join_block", head->getParent()); + auto * if_should_change = llvm::BasicBlock::Create(head->getContext(), "if_should_change", head->getParent()); + auto * if_should_not_change = llvm::BasicBlock::Create(head->getContext(), "if_should_not_change", head->getParent()); + + auto is_signed = std::numeric_limits::is_signed; + + llvm::Value * is_current_value_less = nullptr; + + if (value_src->getType()->isIntegerTy()) + is_current_value_less = is_signed ? 
b.CreateICmpSLT(value_dst, value_src) : b.CreateICmpULT(value_dst, value_src); + else + is_current_value_less = b.CreateFCmpOLT(value_dst, value_src); + + b.CreateCondBr(b.CreateAnd(has_value_src, b.CreateOr(b.CreateNot(has_value_dst), is_current_value_less)), if_should_change, if_should_not_change); + + b.SetInsertPoint(if_should_change); + compileChangeMerge(builder, aggregate_data_dst_ptr, aggregate_data_src_ptr); + b.CreateBr(join_block); + + b.SetInsertPoint(if_should_not_change); + b.CreateBr(join_block); + + b.SetInsertPoint(join_block); + } + static llvm::Value * compileGetResult(llvm::IRBuilderBase & builder, llvm::Value * aggregate_data_ptr) { return getValueFromAggregateDataPtr(builder, aggregate_data_ptr); @@ -755,7 +832,17 @@ struct AggregateFunctionMinData : Data #if USE_EMBEDDED_COMPILER - static constexpr bool is_compilable = false; + static constexpr bool is_compilable = Data::is_compilable; + + static void compileChangeIfBetter(llvm::IRBuilderBase & builder, llvm::Value * aggregate_data_ptr, llvm::Value * value_to_check) + { + Data::compileChangeIfLess(builder, aggregate_data_ptr, value_to_check); + } + + static void compileChangeIfBetterMerge(llvm::IRBuilderBase & builder, llvm::Value * aggregate_data_dst_ptr, llvm::Value * aggregate_data_src_ptr) + { + Data::compileChangeIfLessMerge(builder, aggregate_data_dst_ptr, aggregate_data_src_ptr); + } #endif }; @@ -935,7 +1022,6 @@ public: void add(AggregateDataPtr __restrict place, const IColumn ** columns, size_t row_num, Arena * arena) const override { - std::cerr << "AggregateFunctionSingleValue::add sizeof data " << this->sizeOfData() << " align of data " << this->alignOfData() << std::endl; this->data(place).changeIfBetter(*columns[0], row_num, arena); } From 44259736ed913abe0cae2a9254d512fcecc258ea Mon Sep 17 00:00:00 2001 From: Maksim Kita Date: Sat, 5 Jun 2021 22:31:11 +0300 Subject: [PATCH 633/931] Compile AggregateFunctionMax --- .../AggregateFunctionMinMaxAny.h | 84 +++++++++++++++---- 1 file changed, 70 insertions(+), 14 deletions(-) diff --git a/src/AggregateFunctions/AggregateFunctionMinMaxAny.h b/src/AggregateFunctions/AggregateFunctionMinMaxAny.h index 88f77dfd278..fa416d80396 100644 --- a/src/AggregateFunctions/AggregateFunctionMinMaxAny.h +++ b/src/AggregateFunctions/AggregateFunctionMinMaxAny.h @@ -315,7 +315,8 @@ public: b.SetInsertPoint(join_block); } - static void compileChangeIfLess(llvm::IRBuilderBase & builder, llvm::Value * aggregate_data_ptr, llvm::Value * value_to_check) + template + static void compileChangeComparison(llvm::IRBuilderBase & builder, llvm::Value * aggregate_data_ptr, llvm::Value * value_to_check) { llvm::IRBuilder<> & b = static_cast &>(builder); @@ -332,14 +333,24 @@ public: auto is_signed = std::numeric_limits::is_signed; - llvm::Value * is_current_value_less = nullptr; + llvm::Value * should_change_after_comparison = nullptr; - if (value_to_check->getType()->isIntegerTy()) - is_current_value_less = is_signed ? b.CreateICmpSLT(value_to_check, value) : b.CreateICmpULT(value_to_check, value); + if constexpr (is_less) + { + if (value_to_check->getType()->isIntegerTy()) + should_change_after_comparison = is_signed ? b.CreateICmpSLT(value_to_check, value) : b.CreateICmpULT(value_to_check, value); + else + should_change_after_comparison = b.CreateFCmpOLT(value_to_check, value); + } else - is_current_value_less = b.CreateFCmpOLT(value_to_check, value); + { + if (value_to_check->getType()->isIntegerTy()) + should_change_after_comparison = is_signed ? 
b.CreateICmpSGT(value_to_check, value) : b.CreateICmpUGT(value_to_check, value); + else + should_change_after_comparison = b.CreateFCmpOGT(value_to_check, value); + } - b.CreateCondBr(b.CreateOr(b.CreateNot(has_value_value), is_current_value_less), if_should_change, if_should_not_change); + b.CreateCondBr(b.CreateOr(b.CreateNot(has_value_value), should_change_after_comparison), if_should_change, if_should_not_change); b.SetInsertPoint(if_should_change); compileChange(builder, aggregate_data_ptr, value_to_check); @@ -351,9 +362,10 @@ public: b.SetInsertPoint(join_block); } - static void compileChangeIfLessMerge(llvm::IRBuilderBase & builder, llvm::Value * aggregate_data_dst_ptr, llvm::Value * aggregate_data_src_ptr) + template + static void compileChangeComparisonMerge(llvm::IRBuilderBase & builder, llvm::Value * aggregate_data_dst_ptr, llvm::Value * aggregate_data_src_ptr) { - llvm::IRBuilder<> & b = static_cast &>(builder); + llvm::IRBuilder<> & b = static_cast &>(builder); auto * has_value_dst_ptr = b.CreatePointerCast(aggregate_data_dst_ptr, b.getInt1Ty()->getPointerTo()); auto * has_value_dst = b.CreateLoad(b.getInt1Ty(), has_value_dst_ptr); @@ -373,14 +385,24 @@ public: auto is_signed = std::numeric_limits::is_signed; - llvm::Value * is_current_value_less = nullptr; + llvm::Value * should_change_after_comparison = nullptr; - if (value_src->getType()->isIntegerTy()) - is_current_value_less = is_signed ? b.CreateICmpSLT(value_dst, value_src) : b.CreateICmpULT(value_dst, value_src); + if constexpr (is_less) + { + if (value_src->getType()->isIntegerTy()) + should_change_after_comparison = is_signed ? b.CreateICmpSLT(value_dst, value_src) : b.CreateICmpULT(value_dst, value_src); + else + should_change_after_comparison = b.CreateFCmpOLT(value_dst, value_src); + } else - is_current_value_less = b.CreateFCmpOLT(value_dst, value_src); + { + if (value_src->getType()->isIntegerTy()) + should_change_after_comparison = is_signed ? 
b.CreateICmpSGT(value_dst, value_src) : b.CreateICmpUGT(value_dst, value_src); + else + should_change_after_comparison = b.CreateFCmpOGT(value_dst, value_src); + } - b.CreateCondBr(b.CreateAnd(has_value_src, b.CreateOr(b.CreateNot(has_value_dst), is_current_value_less)), if_should_change, if_should_not_change); + b.CreateCondBr(b.CreateAnd(has_value_src, b.CreateOr(b.CreateNot(has_value_dst), should_change_after_comparison)), if_should_change, if_should_not_change); b.SetInsertPoint(if_should_change); compileChangeMerge(builder, aggregate_data_dst_ptr, aggregate_data_src_ptr); @@ -392,6 +414,30 @@ public: b.SetInsertPoint(join_block); } + static void compileChangeIfLess(llvm::IRBuilderBase & builder, llvm::Value * aggregate_data_ptr, llvm::Value * value_to_check) + { + static constexpr bool is_less = true; + compileChangeComparison(builder, aggregate_data_ptr, value_to_check); + } + + static void compileChangeIfLessMerge(llvm::IRBuilderBase & builder, llvm::Value * aggregate_data_dst_ptr, llvm::Value * aggregate_data_src_ptr) + { + static constexpr bool is_less = true; + compileChangeComparisonMerge(builder, aggregate_data_dst_ptr, aggregate_data_src_ptr); + } + + static void compileChangeIfGreater(llvm::IRBuilderBase & builder, llvm::Value * aggregate_data_ptr, llvm::Value * value_to_check) + { + static constexpr bool is_less = false; + compileChangeComparison(builder, aggregate_data_ptr, value_to_check); + } + + static void compileChangeIfGreaterMerge(llvm::IRBuilderBase & builder, llvm::Value * aggregate_data_dst_ptr, llvm::Value * aggregate_data_src_ptr) + { + static constexpr bool is_less = false; + compileChangeComparisonMerge(builder, aggregate_data_dst_ptr, aggregate_data_src_ptr); + } + static llvm::Value * compileGetResult(llvm::IRBuilderBase & builder, llvm::Value * aggregate_data_ptr) { return getValueFromAggregateDataPtr(builder, aggregate_data_ptr); @@ -859,7 +905,17 @@ struct AggregateFunctionMaxData : Data #if USE_EMBEDDED_COMPILER - static constexpr bool is_compilable = false; + static constexpr bool is_compilable = Data::is_compilable; + + static void compileChangeIfBetter(llvm::IRBuilderBase & builder, llvm::Value * aggregate_data_ptr, llvm::Value * value_to_check) + { + Data::compileChangeIfGreater(builder, aggregate_data_ptr, value_to_check); + } + + static void compileChangeIfBetterMerge(llvm::IRBuilderBase & builder, llvm::Value * aggregate_data_dst_ptr, llvm::Value * aggregate_data_src_ptr) + { + Data::compileChangeIfGreaterMerge(builder, aggregate_data_dst_ptr, aggregate_data_src_ptr); + } #endif }; From a7a7623b36ea107966595e91042e759d3aea0725 Mon Sep 17 00:00:00 2001 From: Maksim Kita Date: Sat, 5 Jun 2021 22:36:48 +0300 Subject: [PATCH 634/931] Fixed style check --- src/AggregateFunctions/AggregateFunctionMinMaxAny.h | 1 + 1 file changed, 1 insertion(+) diff --git a/src/AggregateFunctions/AggregateFunctionMinMaxAny.h b/src/AggregateFunctions/AggregateFunctionMinMaxAny.h index fa416d80396..d2a91d6f086 100644 --- a/src/AggregateFunctions/AggregateFunctionMinMaxAny.h +++ b/src/AggregateFunctions/AggregateFunctionMinMaxAny.h @@ -29,6 +29,7 @@ struct Settings; namespace ErrorCodes { extern const int ILLEGAL_TYPE_OF_ARGUMENT; + extern const int NOT_IMPLEMENTED; } /** Aggregate functions that store one of passed values. 
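
A scalar restatement of what the IR built above computes may be easier to audit than
the builder calls themselves. The sketch below is illustrative C++ only, not code from
any of these commits; SingleValueSketch is a made-up stand-in for SingleValueDataFixed<T>,
whose state is a has_value flag followed by the stored value:

    /// Illustrative scalar model of the emitted IR (not part of the patches).
    template <typename T>
    struct SingleValueSketch
    {
        bool has_value = false;
        T value{};

        void change(T v) { has_value = true; value = v; }                     /// compileChange
        void changeFirstTime(T v) { if (!has_value) change(v); }              /// any
        void changeEveryTime(T v) { change(v); }                              /// anyLast
        void changeIfLess(T v) { if (!has_value || v < value) change(v); }    /// min
        void changeIfGreater(T v) { if (!has_value || v > value) change(v); } /// max

        /// The *Merge helpers additionally gate on the source state holding a value,
        /// e.g. the counterpart of compileChangeFirstTimeMerge:
        void changeFirstTimeMerge(const SingleValueSketch & src)
        {
            if (!has_value && src.has_value)
                change(src.value);
        }
    };

The signed/unsigned/float split in the IR (CreateICmpSLT, CreateICmpULT, CreateFCmpOLT)
collapses to the plain < and > above; compileChangeComparison picks the right predicate
at compile time from std::numeric_limits is_signed and the native type.
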
From 56c1a4e4478d70c103d43098710ee8637a8129b0 Mon Sep 17 00:00:00 2001 From: Maksim Kita Date: Sun, 6 Jun 2021 11:33:31 +0300 Subject: [PATCH 635/931] Added tests --- src/AggregateFunctions/AggregateFunctionSum.h | 12 +- ...890_jit_aggregation_function_sum.reference | 26 ++++ .../01890_jit_aggregation_function_sum.sql | 119 ++++++++++++++++++ ...891_jit_aggregation_function_any.reference | 26 ++++ .../01891_jit_aggregation_function_any.sql | 119 ++++++++++++++++++ ...it_aggregation_function_any_last.reference | 26 ++++ ...1892_jit_aggregation_function_any_last.sql | 119 ++++++++++++++++++ ...893_jit_aggregation_function_min.reference | 26 ++++ .../01893_jit_aggregation_function_min.sql | 119 ++++++++++++++++++ ...894_jit_aggregation_function_max.reference | 26 ++++ .../01894_jit_aggregation_function_max.sql | 119 ++++++++++++++++++ ...895_jit_aggregation_function_avg.reference | 26 ++++ .../01895_jit_aggregation_function_avg.sql | 119 ++++++++++++++++++ 13 files changed, 876 insertions(+), 6 deletions(-) create mode 100644 tests/queries/0_stateless/01890_jit_aggregation_function_sum.reference create mode 100644 tests/queries/0_stateless/01890_jit_aggregation_function_sum.sql create mode 100644 tests/queries/0_stateless/01891_jit_aggregation_function_any.reference create mode 100644 tests/queries/0_stateless/01891_jit_aggregation_function_any.sql create mode 100644 tests/queries/0_stateless/01892_jit_aggregation_function_any_last.reference create mode 100644 tests/queries/0_stateless/01892_jit_aggregation_function_any_last.sql create mode 100644 tests/queries/0_stateless/01893_jit_aggregation_function_min.reference create mode 100644 tests/queries/0_stateless/01893_jit_aggregation_function_min.sql create mode 100644 tests/queries/0_stateless/01894_jit_aggregation_function_max.reference create mode 100644 tests/queries/0_stateless/01894_jit_aggregation_function_max.sql create mode 100644 tests/queries/0_stateless/01895_jit_aggregation_function_avg.reference create mode 100644 tests/queries/0_stateless/01895_jit_aggregation_function_avg.sql diff --git a/src/AggregateFunctions/AggregateFunctionSum.h b/src/AggregateFunctions/AggregateFunctionSum.h index 06b43d0551d..49f95781994 100644 --- a/src/AggregateFunctions/AggregateFunctionSum.h +++ b/src/AggregateFunctions/AggregateFunctionSum.h @@ -409,17 +409,17 @@ public: { llvm::IRBuilder<> & b = static_cast &>(builder); - auto * return_type = toNativeType(b, removeNullable(getReturnType())); + auto * return_type = toNativeType(b, getReturnType()); auto * aggregate_sum_ptr = b.CreatePointerCast(aggregate_data_ptr, return_type->getPointerTo()); - b.CreateStore(llvm::ConstantInt::get(return_type, 0), aggregate_sum_ptr); + b.CreateStore(llvm::Constant::getNullValue(return_type), aggregate_sum_ptr); } void compileAdd(llvm::IRBuilderBase & builder, llvm::Value * aggregate_data_ptr, const DataTypes & arguments_types, const std::vector & argument_values) const override { llvm::IRBuilder<> & b = static_cast &>(builder); - auto * return_type = toNativeType(b, removeNullable(getReturnType())); + auto * return_type = toNativeType(b, getReturnType()); auto * sum_value_ptr = b.CreatePointerCast(aggregate_data_ptr, return_type->getPointerTo()); auto * sum_value = b.CreateLoad(return_type, sum_value_ptr); @@ -437,7 +437,7 @@ public: { llvm::IRBuilder<> & b = static_cast &>(builder); - auto * return_type = toNativeType(b, removeNullable(getReturnType())); + auto * return_type = toNativeType(b, getReturnType()); auto * sum_value_dst_ptr = 
b.CreatePointerCast(aggregate_data_dst_ptr, return_type->getPointerTo()); auto * sum_value_dst = b.CreateLoad(return_type, sum_value_dst_ptr); @@ -445,7 +445,7 @@ public: auto * sum_value_src_ptr = b.CreatePointerCast(aggregate_data_src_ptr, return_type->getPointerTo()); auto * sum_value_src = b.CreateLoad(return_type, sum_value_src_ptr); - auto * sum_return_value = b.CreateAdd(sum_value_dst, sum_value_src); + auto * sum_return_value = sum_value_dst->getType()->isIntegerTy() ? b.CreateAdd(sum_value_dst, sum_value_src) : b.CreateFAdd(sum_value_dst, sum_value_src); b.CreateStore(sum_return_value, sum_value_dst_ptr); } @@ -453,7 +453,7 @@ public: { llvm::IRBuilder<> & b = static_cast &>(builder); - auto * return_type = toNativeType(b, removeNullable(getReturnType())); + auto * return_type = toNativeType(b, getReturnType()); auto * sum_value_ptr = b.CreatePointerCast(aggregate_data_ptr, return_type->getPointerTo()); return b.CreateLoad(return_type, sum_value_ptr); diff --git a/tests/queries/0_stateless/01890_jit_aggregation_function_sum.reference b/tests/queries/0_stateless/01890_jit_aggregation_function_sum.reference new file mode 100644 index 00000000000..4897a71df3c --- /dev/null +++ b/tests/queries/0_stateless/01890_jit_aggregation_function_sum.reference @@ -0,0 +1,26 @@ +Test unsigned integer values +0 2340 2340 2340 2340 +1 2380 2380 2380 2380 +2 2420 2420 2420 2420 +Test signed integer values +0 2340 2340 2340 2340 +1 2380 2380 2380 2380 +2 2420 2420 2420 2420 +Test float values +0 2340 2340 +1 2380 2380 +2 2420 2420 +Test nullable unsigned integer values +0 2340 2340 2340 2340 +1 2380 2380 2380 2380 +2 2420 2420 2420 2420 +Test nullable signed integer values +0 2340 2340 2340 2340 +1 2380 2380 2380 2380 +2 2420 2420 2420 2420 +Test nullable float values +0 2340 2340 +1 2380 2380 +2 2420 2420 +Test null specifics +0 6 4 \N diff --git a/tests/queries/0_stateless/01890_jit_aggregation_function_sum.sql b/tests/queries/0_stateless/01890_jit_aggregation_function_sum.sql new file mode 100644 index 00000000000..0f61ab168f5 --- /dev/null +++ b/tests/queries/0_stateless/01890_jit_aggregation_function_sum.sql @@ -0,0 +1,119 @@ +SET compile_aggregate_expressions = 1; +SET min_count_to_compile_aggregate_expression = 0; + +SELECT 'Test unsigned integer values'; + +DROP TABLE IF EXISTS test_table_unsigned_values; +CREATE TABLE test_table_unsigned_values +( + id UInt64, + + value1 UInt8, + value2 UInt16, + value3 UInt32, + value4 UInt64 +) ENGINE=TinyLog; + +INSERT INTO test_table_unsigned_values SELECT number % 3, number, number, number, number FROM system.numbers LIMIT 120; +SELECT id, sum(value1), sum(value2), sum(value3), sum(value4) FROM test_table_unsigned_values GROUP BY id ORDER BY id; +DROP TABLE test_table_unsigned_values; + +SELECT 'Test signed integer values'; + +DROP TABLE IF EXISTS test_table_signed_values; +CREATE TABLE test_table_signed_values +( + id UInt64, + + value1 Int8, + value2 Int16, + value3 Int32, + value4 Int64 +) ENGINE=TinyLog; + +INSERT INTO test_table_signed_values SELECT number % 3, number, number, number, number FROM system.numbers LIMIT 120; +SELECT id, sum(value1), sum(value2), sum(value3), sum(value4)FROM test_table_signed_values GROUP BY id ORDER BY id; +DROP TABLE test_table_signed_values; + +SELECT 'Test float values'; + +DROP TABLE IF EXISTS test_table_float_values; +CREATE TABLE test_table_float_values +( + id UInt64, + + value1 Float32, + value2 Float64 +) ENGINE=TinyLog; + +INSERT INTO test_table_float_values SELECT number % 3, number, number FROM 
system.numbers LIMIT 120; +SELECT id, sum(value1), sum(value2) FROM test_table_float_values GROUP BY id ORDER BY id; +DROP TABLE test_table_float_values; + +SELECT 'Test nullable unsigned integer values'; + +DROP TABLE IF EXISTS test_table_nullable_unsigned_values; +CREATE TABLE test_table_nullable_unsigned_values +( + id UInt64, + + value1 Nullable(UInt8), + value2 Nullable(UInt16), + value3 Nullable(UInt32), + value4 Nullable(UInt64) +) ENGINE=TinyLog; + +INSERT INTO test_table_nullable_unsigned_values SELECT number % 3, number, number, number, number FROM system.numbers LIMIT 120; +SELECT id, sum(value1), sum(value2), sum(value3), sum(value4) FROM test_table_nullable_unsigned_values GROUP BY id ORDER BY id; +DROP TABLE test_table_nullable_unsigned_values; + +SELECT 'Test nullable signed integer values'; + +DROP TABLE IF EXISTS test_table_nullable_signed_values; +CREATE TABLE test_table_nullable_signed_values +( + id UInt64, + + value1 Nullable(Int8), + value2 Nullable(Int16), + value3 Nullable(Int32), + value4 Nullable(Int64) +) ENGINE=TinyLog; + +INSERT INTO test_table_nullable_signed_values SELECT number % 3, number, number, number, number FROM system.numbers LIMIT 120; +SELECT id, sum(value1), sum(value2), sum(value3), sum(value4) FROM test_table_nullable_signed_values GROUP BY id ORDER BY id; +DROP TABLE test_table_nullable_signed_values; + +SELECT 'Test nullable float values'; + +DROP TABLE IF EXISTS test_table_nullable_float_values; +CREATE TABLE test_table_nullable_float_values +( + id UInt64, + + value1 Nullable(Float32), + value2 Nullable(Float64) +) ENGINE=TinyLog; + +INSERT INTO test_table_nullable_float_values SELECT number % 3, number, number FROM system.numbers LIMIT 120; +SELECT id, sum(value1), sum(value2) FROM test_table_nullable_float_values GROUP BY id ORDER BY id; +DROP TABLE test_table_nullable_float_values; + +SELECT 'Test null specifics'; + +DROP TABLE IF EXISTS test_table_null_specifics; +CREATE TABLE test_table_null_specifics +( + id UInt64, + + value1 Nullable(UInt64), + value2 Nullable(UInt64), + value3 Nullable(UInt64) +) ENGINE=TinyLog; + +INSERT INTO test_table_null_specifics VALUES (0, 1, 1, NULL); +INSERT INTO test_table_null_specifics VALUES (0, 2, NULL, NULL); +INSERT INTO test_table_null_specifics VALUES (0, 3, 3, NULL); + +SELECT id, sum(value1), sum(value2), sum(value3) FROM test_table_null_specifics GROUP BY id ORDER BY id; +DROP TABLE IF EXISTS test_table_null_specifics; diff --git a/tests/queries/0_stateless/01891_jit_aggregation_function_any.reference b/tests/queries/0_stateless/01891_jit_aggregation_function_any.reference new file mode 100644 index 00000000000..d3ea3d46376 --- /dev/null +++ b/tests/queries/0_stateless/01891_jit_aggregation_function_any.reference @@ -0,0 +1,26 @@ +Test unsigned integer values +0 0 0 0 0 +1 1 1 1 1 +2 2 2 2 2 +Test signed integer values +0 0 0 0 0 +1 1 1 1 1 +2 2 2 2 2 +Test float values +0 0 0 +1 1 1 +2 2 2 +Test nullable unsigned integer values +0 0 0 0 0 +1 1 1 1 1 +2 2 2 2 2 +Test nullable signed integer values +0 0 0 0 0 +1 1 1 1 1 +2 2 2 2 2 +Test nullable float values +0 0 0 +1 1 1 +2 2 2 +Test null specifics +0 1 1 \N diff --git a/tests/queries/0_stateless/01891_jit_aggregation_function_any.sql b/tests/queries/0_stateless/01891_jit_aggregation_function_any.sql new file mode 100644 index 00000000000..28e81640993 --- /dev/null +++ b/tests/queries/0_stateless/01891_jit_aggregation_function_any.sql @@ -0,0 +1,119 @@ +SET compile_aggregate_expressions = 1; +SET min_count_to_compile_aggregate_expression = 0; + 
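+-- compile_aggregate_expressions enables the JIT path for aggregation, and
+-- setting min_count_to_compile_aggregate_expression to 0 drops the usual
+-- call-count threshold, so even a one-shot test query is executed through the
+-- compiled aggregate functions rather than the interpreted ones.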
+SELECT 'Test unsigned integer values'; + +DROP TABLE IF EXISTS test_table_unsigned_values; +CREATE TABLE test_table_unsigned_values +( + id UInt64, + + value1 UInt8, + value2 UInt16, + value3 UInt32, + value4 UInt64 +) ENGINE=TinyLog; + +INSERT INTO test_table_unsigned_values SELECT number % 3, number, number, number, number FROM system.numbers LIMIT 120; +SELECT id, any(value1), any(value2), any(value3), any(value4) FROM test_table_unsigned_values GROUP BY id ORDER BY id; +DROP TABLE test_table_unsigned_values; + +SELECT 'Test signed integer values'; + +DROP TABLE IF EXISTS test_table_signed_values; +CREATE TABLE test_table_signed_values +( + id UInt64, + + value1 Int8, + value2 Int16, + value3 Int32, + value4 Int64 +) ENGINE=TinyLog; + +INSERT INTO test_table_signed_values SELECT number % 3, number, number, number, number FROM system.numbers LIMIT 120; +SELECT id, any(value1), any(value2), any(value3), any(value4) FROM test_table_signed_values GROUP BY id ORDER BY id; +DROP TABLE test_table_signed_values; + +SELECT 'Test float values'; + +DROP TABLE IF EXISTS test_table_float_values; +CREATE TABLE test_table_float_values +( + id UInt64, + + value1 Float32, + value2 Float64 +) ENGINE=TinyLog; + +INSERT INTO test_table_float_values SELECT number % 3, number, number FROM system.numbers LIMIT 120; +SELECT id, any(value1), any(value2) FROM test_table_float_values GROUP BY id ORDER BY id; +DROP TABLE test_table_float_values; + +SELECT 'Test nullable unsigned integer values'; + +DROP TABLE IF EXISTS test_table_nullable_unsigned_values; +CREATE TABLE test_table_nullable_unsigned_values +( + id UInt64, + + value1 Nullable(UInt8), + value2 Nullable(UInt16), + value3 Nullable(UInt32), + value4 Nullable(UInt64) +) ENGINE=TinyLog; + +INSERT INTO test_table_nullable_unsigned_values SELECT number % 3, number, number, number, number FROM system.numbers LIMIT 120; +SELECT id, any(value1), any(value2), any(value3), any(value4) FROM test_table_nullable_unsigned_values GROUP BY id ORDER BY id; +DROP TABLE test_table_nullable_unsigned_values; + +SELECT 'Test nullable signed integer values'; + +DROP TABLE IF EXISTS test_table_nullable_signed_values; +CREATE TABLE test_table_nullable_signed_values +( + id UInt64, + + value1 Nullable(Int8), + value2 Nullable(Int16), + value3 Nullable(Int32), + value4 Nullable(Int64) +) ENGINE=TinyLog; + +INSERT INTO test_table_nullable_signed_values SELECT number % 3, number, number, number, number FROM system.numbers LIMIT 120; +SELECT id, any(value1), any(value2), any(value3), any(value4) FROM test_table_nullable_signed_values GROUP BY id ORDER BY id; +DROP TABLE test_table_nullable_signed_values; + +SELECT 'Test nullable float values'; + +DROP TABLE IF EXISTS test_table_nullable_float_values; +CREATE TABLE test_table_nullable_float_values +( + id UInt64, + + value1 Nullable(Float32), + value2 Nullable(Float64) +) ENGINE=TinyLog; + +INSERT INTO test_table_nullable_float_values SELECT number % 3, number, number FROM system.numbers LIMIT 120; +SELECT id, any(value1), any(value2) FROM test_table_nullable_float_values GROUP BY id ORDER BY id; +DROP TABLE test_table_nullable_float_values; + +SELECT 'Test null specifics'; + +DROP TABLE IF EXISTS test_table_null_specifics; +CREATE TABLE test_table_null_specifics +( + id UInt64, + + value1 Nullable(UInt64), + value2 Nullable(UInt64), + value3 Nullable(UInt64) +) ENGINE=TinyLog; + +INSERT INTO test_table_null_specifics VALUES (0, 1, 1, NULL); +INSERT INTO test_table_null_specifics VALUES (0, 2, NULL, NULL); +INSERT INTO 
test_table_null_specifics VALUES (0, 3, 3, NULL); + +SELECT id, any(value1), any(value2), any(value3) FROM test_table_null_specifics GROUP BY id ORDER BY id; +DROP TABLE IF EXISTS test_table_null_specifics; diff --git a/tests/queries/0_stateless/01892_jit_aggregation_function_any_last.reference b/tests/queries/0_stateless/01892_jit_aggregation_function_any_last.reference new file mode 100644 index 00000000000..bdf0499e1f3 --- /dev/null +++ b/tests/queries/0_stateless/01892_jit_aggregation_function_any_last.reference @@ -0,0 +1,26 @@ +Test unsigned integer values +0 117 117 117 117 +1 118 118 118 118 +2 119 119 119 119 +Test signed integer values +0 117 117 117 117 +1 118 118 118 118 +2 119 119 119 119 +Test float values +0 117 117 +1 118 118 +2 119 119 +Test nullable unsigned integer values +0 117 117 117 117 +1 118 118 118 118 +2 119 119 119 119 +Test nullable signed integer values +0 117 117 117 117 +1 118 118 118 118 +2 119 119 119 119 +Test nullable float values +0 117 117 +1 118 118 +2 119 119 +Test null specifics +0 3 3 \N diff --git a/tests/queries/0_stateless/01892_jit_aggregation_function_any_last.sql b/tests/queries/0_stateless/01892_jit_aggregation_function_any_last.sql new file mode 100644 index 00000000000..c02ed8f18ee --- /dev/null +++ b/tests/queries/0_stateless/01892_jit_aggregation_function_any_last.sql @@ -0,0 +1,119 @@ +SET compile_aggregate_expressions = 1; +SET min_count_to_compile_aggregate_expression = 0; + +SELECT 'Test unsigned integer values'; + +DROP TABLE IF EXISTS test_table_unsigned_values; +CREATE TABLE test_table_unsigned_values +( + id UInt64, + + value1 UInt8, + value2 UInt16, + value3 UInt32, + value4 UInt64 +) ENGINE=TinyLog; + +INSERT INTO test_table_unsigned_values SELECT number % 3, number, number, number, number FROM system.numbers LIMIT 120; +SELECT id, anyLast(value1), anyLast(value2), anyLast(value3), anyLast(value4) FROM test_table_unsigned_values GROUP BY id ORDER BY id; +DROP TABLE test_table_unsigned_values; + +SELECT 'Test signed integer values'; + +DROP TABLE IF EXISTS test_table_signed_values; +CREATE TABLE test_table_signed_values +( + id UInt64, + + value1 Int8, + value2 Int16, + value3 Int32, + value4 Int64 +) ENGINE=TinyLog; + +INSERT INTO test_table_signed_values SELECT number % 3, number, number, number, number FROM system.numbers LIMIT 120; +SELECT id, anyLast(value1), anyLast(value2), anyLast(value3), anyLast(value4) FROM test_table_signed_values GROUP BY id ORDER BY id; +DROP TABLE test_table_signed_values; + +SELECT 'Test float values'; + +DROP TABLE IF EXISTS test_table_float_values; +CREATE TABLE test_table_float_values +( + id UInt64, + + value1 Float32, + value2 Float64 +) ENGINE=TinyLog; + +INSERT INTO test_table_float_values SELECT number % 3, number, number FROM system.numbers LIMIT 120; +SELECT id, anyLast(value1), anyLast(value2) FROM test_table_float_values GROUP BY id ORDER BY id; +DROP TABLE test_table_float_values; + +SELECT 'Test nullable unsigned integer values'; + +DROP TABLE IF EXISTS test_table_nullable_unsigned_values; +CREATE TABLE test_table_nullable_unsigned_values +( + id UInt64, + + value1 Nullable(UInt8), + value2 Nullable(UInt16), + value3 Nullable(UInt32), + value4 Nullable(UInt64) +) ENGINE=TinyLog; + +INSERT INTO test_table_nullable_unsigned_values SELECT number % 3, number, number, number, number FROM system.numbers LIMIT 120; +SELECT id, anyLast(value1), anyLast(value2), anyLast(value3), anyLast(value4) FROM test_table_nullable_unsigned_values GROUP BY id ORDER BY id; +DROP TABLE 
test_table_nullable_unsigned_values; + +SELECT 'Test nullable signed integer values'; + +DROP TABLE IF EXISTS test_table_nullable_signed_values; +CREATE TABLE test_table_nullable_signed_values +( + id UInt64, + + value1 Nullable(Int8), + value2 Nullable(Int16), + value3 Nullable(Int32), + value4 Nullable(Int64) +) ENGINE=TinyLog; + +INSERT INTO test_table_nullable_signed_values SELECT number % 3, number, number, number, number FROM system.numbers LIMIT 120; +SELECT id, anyLast(value1), anyLast(value2), anyLast(value3), anyLast(value4) FROM test_table_nullable_signed_values GROUP BY id ORDER BY id; +DROP TABLE test_table_nullable_signed_values; + +SELECT 'Test nullable float values'; + +DROP TABLE IF EXISTS test_table_nullable_float_values; +CREATE TABLE test_table_nullable_float_values +( + id UInt64, + + value1 Nullable(Float32), + value2 Nullable(Float64) +) ENGINE=TinyLog; + +INSERT INTO test_table_nullable_float_values SELECT number % 3, number, number FROM system.numbers LIMIT 120; +SELECT id, anyLast(value1), anyLast(value2) FROM test_table_nullable_float_values GROUP BY id ORDER BY id; +DROP TABLE test_table_nullable_float_values; + +SELECT 'Test null specifics'; + +DROP TABLE IF EXISTS test_table_null_specifics; +CREATE TABLE test_table_null_specifics +( + id UInt64, + + value1 Nullable(UInt64), + value2 Nullable(UInt64), + value3 Nullable(UInt64) +) ENGINE=TinyLog; + +INSERT INTO test_table_null_specifics VALUES (0, 1, 1, NULL); +INSERT INTO test_table_null_specifics VALUES (0, 2, NULL, NULL); +INSERT INTO test_table_null_specifics VALUES (0, 3, 3, NULL); + +SELECT id, anyLast(value1), anyLast(value2), anyLast(value3) FROM test_table_null_specifics GROUP BY id ORDER BY id; +DROP TABLE IF EXISTS test_table_null_specifics; diff --git a/tests/queries/0_stateless/01893_jit_aggregation_function_min.reference b/tests/queries/0_stateless/01893_jit_aggregation_function_min.reference new file mode 100644 index 00000000000..d3ea3d46376 --- /dev/null +++ b/tests/queries/0_stateless/01893_jit_aggregation_function_min.reference @@ -0,0 +1,26 @@ +Test unsigned integer values +0 0 0 0 0 +1 1 1 1 1 +2 2 2 2 2 +Test signed integer values +0 0 0 0 0 +1 1 1 1 1 +2 2 2 2 2 +Test float values +0 0 0 +1 1 1 +2 2 2 +Test nullable unsigned integer values +0 0 0 0 0 +1 1 1 1 1 +2 2 2 2 2 +Test nullable signed integer values +0 0 0 0 0 +1 1 1 1 1 +2 2 2 2 2 +Test nullable float values +0 0 0 +1 1 1 +2 2 2 +Test null specifics +0 1 1 \N diff --git a/tests/queries/0_stateless/01893_jit_aggregation_function_min.sql b/tests/queries/0_stateless/01893_jit_aggregation_function_min.sql new file mode 100644 index 00000000000..5e700e537eb --- /dev/null +++ b/tests/queries/0_stateless/01893_jit_aggregation_function_min.sql @@ -0,0 +1,119 @@ +SET compile_aggregate_expressions = 1; +SET min_count_to_compile_aggregate_expression = 0; + +SELECT 'Test unsigned integer values'; + +DROP TABLE IF EXISTS test_table_unsigned_values; +CREATE TABLE test_table_unsigned_values +( + id UInt64, + + value1 UInt8, + value2 UInt16, + value3 UInt32, + value4 UInt64 +) ENGINE=TinyLog; + +INSERT INTO test_table_unsigned_values SELECT number % 3, number, number, number, number FROM system.numbers LIMIT 120; +SELECT id, min(value1), min(value2), min(value3), min(value4) FROM test_table_unsigned_values GROUP BY id ORDER BY id; +DROP TABLE test_table_unsigned_values; + +SELECT 'Test signed integer values'; + +DROP TABLE IF EXISTS test_table_signed_values; +CREATE TABLE test_table_signed_values +( + id UInt64, + + value1 Int8, + value2 Int16, 
+ value3 Int32, + value4 Int64 +) ENGINE=TinyLog; + +INSERT INTO test_table_signed_values SELECT number % 3, number, number, number, number FROM system.numbers LIMIT 120; +SELECT id, min(value1), min(value2), min(value3), min(value4) FROM test_table_signed_values GROUP BY id ORDER BY id; +DROP TABLE test_table_signed_values; + +SELECT 'Test float values'; + +DROP TABLE IF EXISTS test_table_float_values; +CREATE TABLE test_table_float_values +( + id UInt64, + + value1 Float32, + value2 Float64 +) ENGINE=TinyLog; + +INSERT INTO test_table_float_values SELECT number % 3, number, number FROM system.numbers LIMIT 120; +SELECT id, min(value1), min(value2) FROM test_table_float_values GROUP BY id ORDER BY id; +DROP TABLE test_table_float_values; + +SELECT 'Test nullable unsigned integer values'; + +DROP TABLE IF EXISTS test_table_nullable_unsigned_values; +CREATE TABLE test_table_nullable_unsigned_values +( + id UInt64, + + value1 Nullable(UInt8), + value2 Nullable(UInt16), + value3 Nullable(UInt32), + value4 Nullable(UInt64) +) ENGINE=TinyLog; + +INSERT INTO test_table_nullable_unsigned_values SELECT number % 3, number, number, number, number FROM system.numbers LIMIT 120; +SELECT id, min(value1), min(value2), min(value3), min(value4) FROM test_table_nullable_unsigned_values GROUP BY id ORDER BY id; +DROP TABLE test_table_nullable_unsigned_values; + +SELECT 'Test nullable signed integer values'; + +DROP TABLE IF EXISTS test_table_nullable_signed_values; +CREATE TABLE test_table_nullable_signed_values +( + id UInt64, + + value1 Nullable(Int8), + value2 Nullable(Int16), + value3 Nullable(Int32), + value4 Nullable(Int64) +) ENGINE=TinyLog; + +INSERT INTO test_table_nullable_signed_values SELECT number % 3, number, number, number, number FROM system.numbers LIMIT 120; +SELECT id, min(value1), min(value2), min(value3), min(value4) FROM test_table_nullable_signed_values GROUP BY id ORDER BY id; +DROP TABLE test_table_nullable_signed_values; + +SELECT 'Test nullable float values'; + +DROP TABLE IF EXISTS test_table_nullable_float_values; +CREATE TABLE test_table_nullable_float_values +( + id UInt64, + + value1 Nullable(Float32), + value2 Nullable(Float64) +) ENGINE=TinyLog; + +INSERT INTO test_table_nullable_float_values SELECT number % 3, number, number FROM system.numbers LIMIT 120; +SELECT id, min(value1), min(value2) FROM test_table_nullable_float_values GROUP BY id ORDER BY id; +DROP TABLE test_table_nullable_float_values; + +SELECT 'Test null specifics'; + +DROP TABLE IF EXISTS test_table_null_specifics; +CREATE TABLE test_table_null_specifics +( + id UInt64, + + value1 Nullable(UInt64), + value2 Nullable(UInt64), + value3 Nullable(UInt64) +) ENGINE=TinyLog; + +INSERT INTO test_table_null_specifics VALUES (0, 1, 1, NULL); +INSERT INTO test_table_null_specifics VALUES (0, 2, NULL, NULL); +INSERT INTO test_table_null_specifics VALUES (0, 3, 3, NULL); + +SELECT id, min(value1), min(value2), min(value3) FROM test_table_null_specifics GROUP BY id ORDER BY id; +DROP TABLE IF EXISTS test_table_null_specifics; diff --git a/tests/queries/0_stateless/01894_jit_aggregation_function_max.reference b/tests/queries/0_stateless/01894_jit_aggregation_function_max.reference new file mode 100644 index 00000000000..321d1fa7196 --- /dev/null +++ b/tests/queries/0_stateless/01894_jit_aggregation_function_max.reference @@ -0,0 +1,26 @@ +Test unsigned integer values +0 117 117 117 117 +1 118 118 118 118 +2 119 119 119 119 +Test signed integer values +0 117 117 117 117 +1 118 118 118 118 +2 119 119 119 119 +Test float 
values
+0 117 117
+1 118 118
+2 119 119
+Test nullable unsigned integer values
+0 117 117 117 117
+1 118 118 118 118
+2 119 119 119 119
+Test nullable signed integer values
+0 117 117 117 117
+1 118 118 118 118
+2 119 119 119 119
+Test nullable float values
+0 117 117
+1 118 118
+2 119 119
+Test null specifics
+0 3 3 \N
diff --git a/tests/queries/0_stateless/01894_jit_aggregation_function_max.sql b/tests/queries/0_stateless/01894_jit_aggregation_function_max.sql
new file mode 100644
index 00000000000..8ba11f4c643
--- /dev/null
+++ b/tests/queries/0_stateless/01894_jit_aggregation_function_max.sql
@@ -0,0 +1,119 @@
+SET compile_aggregate_expressions = 1;
+SET min_count_to_compile_aggregate_expression = 0;
+
+SELECT 'Test unsigned integer values';
+
+DROP TABLE IF EXISTS test_table_unsigned_values;
+CREATE TABLE test_table_unsigned_values
+(
+    id UInt64,
+
+    value1 UInt8,
+    value2 UInt16,
+    value3 UInt32,
+    value4 UInt64
+) ENGINE=TinyLog;
+
+INSERT INTO test_table_unsigned_values SELECT number % 3, number, number, number, number FROM system.numbers LIMIT 120;
+SELECT id, max(value1), max(value2), max(value3), max(value4) FROM test_table_unsigned_values GROUP BY id ORDER BY id;
+DROP TABLE test_table_unsigned_values;
+
+SELECT 'Test signed integer values';
+
+DROP TABLE IF EXISTS test_table_signed_values;
+CREATE TABLE test_table_signed_values
+(
+    id UInt64,
+
+    value1 Int8,
+    value2 Int16,
+    value3 Int32,
+    value4 Int64
+) ENGINE=TinyLog;
+
+INSERT INTO test_table_signed_values SELECT number % 3, number, number, number, number FROM system.numbers LIMIT 120;
+SELECT id, max(value1), max(value2), max(value3), max(value4) FROM test_table_signed_values GROUP BY id ORDER BY id;
+DROP TABLE test_table_signed_values;
+
+SELECT 'Test float values';
+
+DROP TABLE IF EXISTS test_table_float_values;
+CREATE TABLE test_table_float_values
+(
+    id UInt64,
+
+    value1 Float32,
+    value2 Float64
+) ENGINE=TinyLog;
+
+INSERT INTO test_table_float_values SELECT number % 3, number, number FROM system.numbers LIMIT 120;
+SELECT id, max(value1), max(value2) FROM test_table_float_values GROUP BY id ORDER BY id;
+DROP TABLE test_table_float_values;
+
+SELECT 'Test nullable unsigned integer values';
+
+DROP TABLE IF EXISTS test_table_nullable_unsigned_values;
+CREATE TABLE test_table_nullable_unsigned_values
+(
+    id UInt64,
+
+    value1 Nullable(UInt8),
+    value2 Nullable(UInt16),
+    value3 Nullable(UInt32),
+    value4 Nullable(UInt64)
+) ENGINE=TinyLog;
+
+INSERT INTO test_table_nullable_unsigned_values SELECT number % 3, number, number, number, number FROM system.numbers LIMIT 120;
+SELECT id, max(value1), max(value2), max(value3), max(value4) FROM test_table_nullable_unsigned_values GROUP BY id ORDER BY id;
+DROP TABLE test_table_nullable_unsigned_values;
+
+SELECT 'Test nullable signed integer values';
+
+DROP TABLE IF EXISTS test_table_nullable_signed_values;
+CREATE TABLE test_table_nullable_signed_values
+(
+    id UInt64,
+
+    value1 Nullable(Int8),
+    value2 Nullable(Int16),
+    value3 Nullable(Int32),
+    value4 Nullable(Int64)
+) ENGINE=TinyLog;
+
+INSERT INTO test_table_nullable_signed_values SELECT number % 3, number, number, number, number FROM system.numbers LIMIT 120;
+SELECT id, max(value1), max(value2), max(value3), max(value4) FROM test_table_nullable_signed_values GROUP BY id ORDER BY id;
+DROP TABLE test_table_nullable_signed_values;
+
+SELECT 'Test nullable float values';
+
+DROP TABLE IF EXISTS test_table_nullable_float_values;
+CREATE TABLE test_table_nullable_float_values
+(
+    id UInt64,
+
+    value1 Nullable(Float32),
+ value2 Nullable(Float64) +) ENGINE=TinyLog; + +INSERT INTO test_table_nullable_float_values SELECT number % 3, number, number FROM system.numbers LIMIT 120; +SELECT id, max(value1), max(value2) FROM test_table_nullable_float_values GROUP BY id ORDER BY id; +DROP TABLE test_table_nullable_float_values; + +SELECT 'Test null specifics'; + +DROP TABLE IF EXISTS test_table_null_specifics; +CREATE TABLE test_table_null_specifics +( + id UInt64, + + value1 Nullable(UInt64), + value2 Nullable(UInt64), + value3 Nullable(UInt64) +) ENGINE=TinyLog; + +INSERT INTO test_table_null_specifics VALUES (0, 1, 1, NULL); +INSERT INTO test_table_null_specifics VALUES (0, 2, NULL, NULL); +INSERT INTO test_table_null_specifics VALUES (0, 3, 3, NULL); + +SELECT id, max(value1), max(value2), max(value3) FROM test_table_null_specifics GROUP BY id ORDER BY id; +DROP TABLE IF EXISTS test_table_null_specifics; diff --git a/tests/queries/0_stateless/01895_jit_aggregation_function_avg.reference b/tests/queries/0_stateless/01895_jit_aggregation_function_avg.reference new file mode 100644 index 00000000000..e1eac2fe41b --- /dev/null +++ b/tests/queries/0_stateless/01895_jit_aggregation_function_avg.reference @@ -0,0 +1,26 @@ +Test unsigned integer values +0 58.5 58.5 58.5 58.5 +1 59.5 59.5 59.5 59.5 +2 60.5 60.5 60.5 60.5 +Test signed integer values +0 58.5 58.5 58.5 58.5 +1 59.5 59.5 59.5 59.5 +2 60.5 60.5 60.5 60.5 +Test float values +0 58.5 58.5 +1 59.5 59.5 +2 60.5 60.5 +Test nullable unsigned integer values +0 58.5 58.5 58.5 58.5 +1 59.5 59.5 59.5 59.5 +2 60.5 60.5 60.5 60.5 +Test nullable signed integer values +0 58.5 58.5 58.5 58.5 +1 59.5 59.5 59.5 59.5 +2 60.5 60.5 60.5 60.5 +Test nullable float values +0 58.5 58.5 +1 59.5 59.5 +2 60.5 60.5 +Test null specifics +0 2 2 \N diff --git a/tests/queries/0_stateless/01895_jit_aggregation_function_avg.sql b/tests/queries/0_stateless/01895_jit_aggregation_function_avg.sql new file mode 100644 index 00000000000..903a7c65f21 --- /dev/null +++ b/tests/queries/0_stateless/01895_jit_aggregation_function_avg.sql @@ -0,0 +1,119 @@ +SET compile_aggregate_expressions = 1; +SET min_count_to_compile_aggregate_expression = 0; + +SELECT 'Test unsigned integer values'; + +DROP TABLE IF EXISTS test_table_unsigned_values; +CREATE TABLE test_table_unsigned_values +( + id UInt64, + + value1 UInt8, + value2 UInt16, + value3 UInt32, + value4 UInt64 +) ENGINE=TinyLog; + +INSERT INTO test_table_unsigned_values SELECT number % 3, number, number, number, number FROM system.numbers LIMIT 120; +SELECT id, avg(value1), avg(value2), avg(value3), avg(value4) FROM test_table_unsigned_values GROUP BY id ORDER BY id; +DROP TABLE test_table_unsigned_values; + +SELECT 'Test signed integer values'; + +DROP TABLE IF EXISTS test_table_signed_values; +CREATE TABLE test_table_signed_values +( + id UInt64, + + value1 Int8, + value2 Int16, + value3 Int32, + value4 Int64 +) ENGINE=TinyLog; + +INSERT INTO test_table_signed_values SELECT number % 3, number, number, number, number FROM system.numbers LIMIT 120; +SELECT id, avg(value1), avg(value2), avg(value3), avg(value4) FROM test_table_signed_values GROUP BY id ORDER BY id; +DROP TABLE test_table_signed_values; + +SELECT 'Test float values'; + +DROP TABLE IF EXISTS test_table_float_values; +CREATE TABLE test_table_float_values +( + id UInt64, + + value1 Float32, + value2 Float64 +) ENGINE=TinyLog; + +INSERT INTO test_table_float_values SELECT number % 3, number, number FROM system.numbers LIMIT 120; +SELECT id, avg(value1), avg(value2) FROM 
test_table_float_values GROUP BY id ORDER BY id; +DROP TABLE test_table_float_values; + +SELECT 'Test nullable unsigned integer values'; + +DROP TABLE IF EXISTS test_table_nullable_unsigned_values; +CREATE TABLE test_table_nullable_unsigned_values +( + id UInt64, + + value1 Nullable(UInt8), + value2 Nullable(UInt16), + value3 Nullable(UInt32), + value4 Nullable(UInt64) +) ENGINE=TinyLog; + +INSERT INTO test_table_nullable_unsigned_values SELECT number % 3, number, number, number, number FROM system.numbers LIMIT 120; +SELECT id, avg(value1), avg(value2), avg(value3), avg(value4) FROM test_table_nullable_unsigned_values GROUP BY id ORDER BY id; +DROP TABLE test_table_nullable_unsigned_values; + +SELECT 'Test nullable signed integer values'; + +DROP TABLE IF EXISTS test_table_nullable_signed_values; +CREATE TABLE test_table_nullable_signed_values +( + id UInt64, + + value1 Nullable(Int8), + value2 Nullable(Int16), + value3 Nullable(Int32), + value4 Nullable(Int64) +) ENGINE=TinyLog; + +INSERT INTO test_table_nullable_signed_values SELECT number % 3, number, number, number, number FROM system.numbers LIMIT 120; +SELECT id, avg(value1), avg(value2), avg(value3), avg(value4) FROM test_table_nullable_signed_values GROUP BY id ORDER BY id; +DROP TABLE test_table_nullable_signed_values; + +SELECT 'Test nullable float values'; + +DROP TABLE IF EXISTS test_table_nullable_float_values; +CREATE TABLE test_table_nullable_float_values +( + id UInt64, + + value1 Nullable(Float32), + value2 Nullable(Float64) +) ENGINE=TinyLog; + +INSERT INTO test_table_nullable_float_values SELECT number % 3, number, number FROM system.numbers LIMIT 120; +SELECT id, avg(value1), avg(value2) FROM test_table_nullable_float_values GROUP BY id ORDER BY id; +DROP TABLE test_table_nullable_float_values; + +SELECT 'Test null specifics'; + +DROP TABLE IF EXISTS test_table_null_specifics; +CREATE TABLE test_table_null_specifics +( + id UInt64, + + value1 Nullable(UInt64), + value2 Nullable(UInt64), + value3 Nullable(UInt64) +) ENGINE=TinyLog; + +INSERT INTO test_table_null_specifics VALUES (0, 1, 1, NULL); +INSERT INTO test_table_null_specifics VALUES (0, 2, NULL, NULL); +INSERT INTO test_table_null_specifics VALUES (0, 3, 3, NULL); + +SELECT id, avg(value1), avg(value2), avg(value3) FROM test_table_null_specifics GROUP BY id ORDER BY id; +DROP TABLE IF EXISTS test_table_null_specifics; From 507d9405e2872cdcd79a3d4e4796598978ffa611 Mon Sep 17 00:00:00 2001 From: Maksim Kita Date: Sun, 6 Jun 2021 18:43:03 +0300 Subject: [PATCH 636/931] Compile AggregateFunctionAvgWeighted --- src/AggregateFunctions/AggregateFunctionAvg.h | 175 +++++++------ .../AggregateFunctionAvgWeighted.h | 53 +++- .../AggregateFunctionIf.cpp | 19 ++ .../AggregateFunctionNull.h | 236 ++++++++++++------ .../AggregateFunctionSumCount.h | 9 + src/AggregateFunctions/IAggregateFunction.h | 24 ++ src/DataTypes/Native.h | 50 ++++ src/Functions/FunctionsComparison.h | 18 +- src/Interpreters/Aggregator.cpp | 44 +--- ...1896_jit_aggregation_function_if.reference | 12 + .../01896_jit_aggregation_function_if.sql | 141 +++++++++++ ...ggregation_function_avg_weighted.reference | 26 ++ ..._jit_aggregation_function_avg_weighted.sql | 167 +++++++++++++ 13 files changed, 762 insertions(+), 212 deletions(-) create mode 100644 tests/queries/0_stateless/01896_jit_aggregation_function_if.reference create mode 100644 tests/queries/0_stateless/01896_jit_aggregation_function_if.sql create mode 100644 tests/queries/0_stateless/01897_jit_aggregation_function_avg_weighted.reference create 
mode 100644 tests/queries/0_stateless/01897_jit_aggregation_function_avg_weighted.sql
diff --git a/src/AggregateFunctions/AggregateFunctionAvg.h b/src/AggregateFunctions/AggregateFunctionAvg.h
index 2ac0a0935a8..3e2b29e3b93 100644
--- a/src/AggregateFunctions/AggregateFunctionAvg.h
+++ b/src/AggregateFunctions/AggregateFunctionAvg.h
@@ -93,11 +93,13 @@ struct AvgFraction
  * @tparam Derived When deriving from this class, use the child class name as in CRTP, e.g.
  * class Self : Agg<Self>.
  */
-template <typename Numerator, typename Denominator, typename Derived>
+template <typename TNumerator, typename TDenominator, typename Derived>
 class AggregateFunctionAvgBase : public
-    IAggregateFunctionDataHelper<AvgFraction<Numerator, Denominator>, Derived>
+    IAggregateFunctionDataHelper<AvgFraction<TNumerator, TDenominator>, Derived>
 {
 public:
+    using Numerator = TNumerator;
+    using Denominator = TDenominator;
     using Fraction = AvgFraction<Numerator, Denominator>;
     using Base = IAggregateFunctionDataHelper<Fraction, Derived>;
@@ -143,6 +145,89 @@ public:
         else
             assert_cast<ColumnVector<Float64> &>(to).getData().push_back(this->data(place).divide());
     }
+
+
+#if USE_EMBEDDED_COMPILER
+
+    bool isCompilable() const override
+    {
+        bool can_be_compiled = true;
+
+        for (const auto & argument : this->argument_types)
+            can_be_compiled &= canBeNativeType(*argument);
+
+        auto return_type = getReturnType();
+        can_be_compiled &= canBeNativeType(*return_type);
+
+        return can_be_compiled;
+    }
+
+    void compileCreate(llvm::IRBuilderBase & builder, llvm::Value * aggregate_data_ptr) const override
+    {
+        llvm::IRBuilder<> & b = static_cast<llvm::IRBuilder<> &>(builder);
+
+        auto * numerator_type = toNativeType<Numerator>(b);
+        auto * numerator_ptr = b.CreatePointerCast(aggregate_data_ptr, numerator_type->getPointerTo());
+
+        auto * denominator_type = toNativeType<Denominator>(b);
+        auto * denominator_offset_ptr = b.CreateConstGEP1_32(nullptr, aggregate_data_ptr, sizeof(Numerator));
+        auto * denominator_ptr = b.CreatePointerCast(denominator_offset_ptr, denominator_type->getPointerTo());
+
+        b.CreateStore(llvm::Constant::getNullValue(numerator_type), numerator_ptr);
+        b.CreateStore(llvm::Constant::getNullValue(denominator_type), denominator_ptr);
+    }
+
+    void compileMerge(llvm::IRBuilderBase & builder, llvm::Value * aggregate_data_dst_ptr, llvm::Value * aggregate_data_src_ptr) const override
+    {
+        llvm::IRBuilder<> & b = static_cast<llvm::IRBuilder<> &>(builder);
+
+        auto * numerator_type = toNativeType<Numerator>(b);
+
+        auto * numerator_dst_ptr = b.CreatePointerCast(aggregate_data_dst_ptr, numerator_type->getPointerTo());
+        auto * numerator_dst_value = b.CreateLoad(numerator_type, numerator_dst_ptr);
+
+        auto * numerator_src_ptr = b.CreatePointerCast(aggregate_data_src_ptr, numerator_type->getPointerTo());
+        auto * numerator_src_value = b.CreateLoad(numerator_type, numerator_src_ptr);
+
+        auto * numerator_result_value = numerator_type->isIntegerTy() ? b.CreateAdd(numerator_dst_value, numerator_src_value) : b.CreateFAdd(numerator_dst_value, numerator_src_value);
+        b.CreateStore(numerator_result_value, numerator_dst_ptr);
+
+        auto * denominator_type = toNativeType<Denominator>(b);
+
+        auto * denominator_dst_offset_ptr = b.CreateConstGEP1_32(nullptr, aggregate_data_dst_ptr, sizeof(Numerator));
+        auto * denominator_src_offset_ptr = b.CreateConstGEP1_32(nullptr, aggregate_data_src_ptr, sizeof(Numerator));
+
+        auto * denominator_dst_ptr = b.CreatePointerCast(denominator_dst_offset_ptr, denominator_type->getPointerTo());
+        auto * denominator_src_ptr = b.CreatePointerCast(denominator_src_offset_ptr, denominator_type->getPointerTo());
+
+        auto * denominator_dst_value = b.CreateLoad(denominator_type, denominator_dst_ptr);
+        auto * denominator_src_value = b.CreateLoad(denominator_type, denominator_src_ptr);
+
+        auto * denominator_result_value = denominator_type->isIntegerTy() ?
b.CreateAdd(denominator_src_value, denominator_dst_value) : b.CreateFAdd(denominator_src_value, denominator_dst_value); + b.CreateStore(denominator_result_value, denominator_dst_ptr); + } + + llvm::Value * compileGetResult(llvm::IRBuilderBase & builder, llvm::Value * aggregate_data_ptr) const override + { + llvm::IRBuilder<> & b = static_cast &>(builder); + + auto * numerator_type = toNativeType(b); + auto * numerator_ptr = b.CreatePointerCast(aggregate_data_ptr, numerator_type->getPointerTo()); + auto * numerator_value = b.CreateLoad(numerator_type, numerator_ptr); + + auto * denominator_type = toNativeType(b); + auto * denominator_offset_ptr = b.CreateConstGEP1_32(nullptr, aggregate_data_ptr, sizeof(Numerator)); + auto * denominator_ptr = b.CreatePointerCast(denominator_offset_ptr, denominator_type->getPointerTo()); + auto * denominator_value = b.CreateLoad(denominator_type, denominator_ptr); + + auto * double_numerator = nativeCast(b, numerator_value, b.getDoubleTy()); + auto * double_denominator = nativeCast(b, denominator_value, b.getDoubleTy()); + + return b.CreateFDiv(double_numerator, double_denominator); + } + +#endif + private: UInt32 num_scale; UInt32 denom_scale; @@ -157,7 +242,11 @@ template class AggregateFunctionAvg final : public AggregateFunctionAvgBase, UInt64, AggregateFunctionAvg> { public: - using AggregateFunctionAvgBase, UInt64, AggregateFunctionAvg>::AggregateFunctionAvgBase; + using Base = AggregateFunctionAvgBase, UInt64, AggregateFunctionAvg>; + using Base::Base; + + using Numerator = typename Base::Numerator; + using Denominator = typename Base::Denominator; void NO_SANITIZE_UNDEFINED add(AggregateDataPtr __restrict place, const IColumn ** columns, size_t row_num, Arena *) const final { @@ -169,36 +258,11 @@ public: #if USE_EMBEDDED_COMPILER - bool isCompilable() const override - { - bool can_be_compiled = true; - - for (const auto & argument : this->argument_types) - can_be_compiled &= canBeNativeType(*argument); - - return can_be_compiled; - } - - void compileCreate(llvm::IRBuilderBase & builder, llvm::Value * aggregate_data_ptr) const override - { - llvm::IRBuilder<> & b = static_cast &>(builder); - - auto * numerator_type = toNativeType>(b); - auto * numerator_ptr = b.CreatePointerCast(aggregate_data_ptr, numerator_type->getPointerTo()); - - auto * denominator_type = toNativeType(b); - auto * denominator_offset_ptr = b.CreateConstGEP1_32(nullptr, aggregate_data_ptr, sizeof(AvgFieldType)); - auto * denominator_ptr = b.CreatePointerCast(denominator_offset_ptr, denominator_type->getPointerTo()); - - b.CreateStore(llvm::Constant::getNullValue(numerator_type), numerator_ptr); - b.CreateStore(llvm::Constant::getNullValue(denominator_type), denominator_ptr); - } - void compileAdd(llvm::IRBuilderBase & builder, llvm::Value * aggregate_data_ptr, const DataTypes & arguments_types, const std::vector & argument_values) const override { llvm::IRBuilder<> & b = static_cast &>(builder); - auto * numerator_type = toNativeType>(b); + auto * numerator_type = toNativeType(b); auto * numerator_ptr = b.CreatePointerCast(aggregate_data_ptr, numerator_type->getPointerTo()); auto * numerator_value = b.CreateLoad(numerator_type, numerator_ptr); @@ -209,9 +273,9 @@ public: auto * numerator_result_value = numerator_type->isIntegerTy() ? 
b.CreateAdd(numerator_value, value_cast_to_numerator) : b.CreateFAdd(numerator_value, value_cast_to_numerator); b.CreateStore(numerator_result_value, numerator_ptr); - auto * denominator_type = toNativeType(b); + auto * denominator_type = toNativeType(b); - auto * denominator_offset_ptr = b.CreateConstGEP1_32(nullptr, aggregate_data_ptr, sizeof(AvgFieldType)); + auto * denominator_offset_ptr = b.CreateConstGEP1_32(nullptr, aggregate_data_ptr, sizeof(Numerator)); auto * denominator_ptr = b.CreatePointerCast(denominator_offset_ptr, denominator_type->getPointerTo()); auto * denominator_value = b.CreateLoad(denominator_type, denominator_ptr); @@ -220,55 +284,6 @@ public: b.CreateStore(denominator_value_updated, denominator_ptr); } - void compileMerge(llvm::IRBuilderBase & builder, llvm::Value * aggregate_data_dst_ptr, llvm::Value * aggregate_data_src_ptr) const override - { - llvm::IRBuilder<> & b = static_cast &>(builder); - - auto * numerator_type = toNativeType>(b); - - auto * numerator_dst_ptr = b.CreatePointerCast(aggregate_data_dst_ptr, numerator_type->getPointerTo()); - auto * numerator_dst_value = b.CreateLoad(numerator_type, numerator_dst_ptr); - - auto * numerator_src_ptr = b.CreatePointerCast(aggregate_data_dst_ptr, numerator_type->getPointerTo()); - auto * numerator_src_value = b.CreateLoad(numerator_type, numerator_src_ptr); - - auto * numerator_result_value = numerator_type->isIntegerTy() ? b.CreateAdd(numerator_dst_value, numerator_src_value) : b.CreateFAdd(numerator_dst_value, numerator_src_value); - b.CreateStore(numerator_result_value, numerator_dst_ptr); - - auto * denominator_type = toNativeType(b); - - auto * denominator_dst_offset_ptr = b.CreateConstGEP1_32(nullptr, aggregate_data_dst_ptr, sizeof(AvgFieldType)); - auto * denominator_src_offset_ptr = b.CreateConstGEP1_32(nullptr, aggregate_data_src_ptr, sizeof(AvgFieldType)); - - auto * denominator_dst_ptr = b.CreatePointerCast(denominator_dst_offset_ptr, denominator_type->getPointerTo()); - auto * denominator_src_ptr = b.CreatePointerCast(denominator_src_offset_ptr, denominator_type->getPointerTo()); - - auto * denominator_dst_value = b.CreateLoad(denominator_type, denominator_dst_ptr); - auto * denominator_src_value = b.CreateLoad(denominator_type, denominator_src_ptr); - - auto * denominator_result_value = b.CreateAdd(denominator_src_value, denominator_dst_value); - b.CreateStore(denominator_result_value, denominator_dst_ptr); - } - - llvm::Value * compileGetResult(llvm::IRBuilderBase & builder, llvm::Value * aggregate_data_ptr) const override - { - llvm::IRBuilder<> & b = static_cast &>(builder); - - auto * numerator_type = toNativeType>(b); - auto * numerator_ptr = b.CreatePointerCast(aggregate_data_ptr, numerator_type->getPointerTo()); - auto * numerator_value = b.CreateLoad(numerator_type, numerator_ptr); - - auto * denominator_type = toNativeType(b); - auto * denominator_offset_ptr = b.CreateConstGEP1_32(nullptr, aggregate_data_ptr, sizeof(AvgFieldType)); - auto * denominator_ptr = b.CreatePointerCast(denominator_offset_ptr, denominator_type->getPointerTo()); - auto * denominator_value = b.CreateLoad(denominator_type, denominator_ptr); - - auto * double_numerator = nativeCast>(b, numerator_value, b.getDoubleTy()); - auto * double_denominator = nativeCast(b, denominator_value, b.getDoubleTy()); - - return b.CreateFDiv(double_numerator, double_denominator); - } - #endif }; diff --git a/src/AggregateFunctions/AggregateFunctionAvgWeighted.h b/src/AggregateFunctions/AggregateFunctionAvgWeighted.h index 
5842e7311e9..2a8423cd998 100644
--- a/src/AggregateFunctions/AggregateFunctionAvgWeighted.h
+++ b/src/AggregateFunctions/AggregateFunctionAvgWeighted.h
@@ -28,19 +28,64 @@ public:
         MaxFieldType<Value, Weight>, AvgWeightedFieldType<Weight>, AggregateFunctionAvgWeighted<Value, Weight>>;
     using Base::Base;

-    using ValueT = MaxFieldType<Value, Weight>;
+    using Numerator = typename Base::Numerator;
+    using Denominator = typename Base::Denominator;

    void NO_SANITIZE_UNDEFINED add(AggregateDataPtr __restrict place, const IColumn ** columns, size_t row_num, Arena *) const override
    {
        const auto& weights = static_cast<const ColumnVector<Weight> &>(*columns[1]);

-        this->data(place).numerator += static_cast<ValueT>(
+        this->data(place).numerator += static_cast<Numerator>(
            static_cast<const ColumnVector<Value> &>(*columns[0]).getData()[row_num]) *
-            static_cast<ValueT>(weights.getData()[row_num]);
+            static_cast<Numerator>(weights.getData()[row_num]);

-        this->data(place).denominator += static_cast<AvgWeightedFieldType<Weight>>(weights.getData()[row_num]);
+        this->data(place).denominator += static_cast<Denominator>(weights.getData()[row_num]);
    }

    String getName() const override { return "avgWeighted"; }
+
+#if USE_EMBEDDED_COMPILER
+
+    bool isCompilable() const override
+    {
+        bool can_be_compiled = Base::isCompilable();
+        can_be_compiled &= canBeNativeType<Weight>();
+
+        return can_be_compiled;
+    }
+
+    void compileAdd(llvm::IRBuilderBase & builder, llvm::Value * aggregate_data_ptr, const DataTypes & arguments_types, const std::vector<llvm::Value *> & argument_values) const override
+    {
+        llvm::IRBuilder<> & b = static_cast<llvm::IRBuilder<> &>(builder);
+
+        auto * numerator_type = toNativeType<Numerator>(b);
+
+        auto * numerator_ptr = b.CreatePointerCast(aggregate_data_ptr, numerator_type->getPointerTo());
+        auto * numerator_value = b.CreateLoad(numerator_type, numerator_ptr);
+
+        const auto & argument = nativeCast(b, arguments_types[0], argument_values[0], numerator_type);
+        const auto & weight = nativeCast(b, arguments_types[1], argument_values[1], numerator_type);
+
+        llvm::Value * value_weight_multiplication = argument->getType()->isIntegerTy() ? b.CreateMul(argument, weight) : b.CreateFMul(argument, weight);
+
+        /// TODO: Fix accuracy
+        auto * numerator_result_value = numerator_type->isIntegerTy() ? b.CreateAdd(numerator_value, value_weight_multiplication) : b.CreateFAdd(numerator_value, value_weight_multiplication);
+        b.CreateStore(numerator_result_value, numerator_ptr);
+
+        auto * denominator_type = toNativeType<Denominator>(b);
+
+        auto * denominator_offset_ptr = b.CreateConstGEP1_32(nullptr, aggregate_data_ptr, sizeof(Numerator));
+        auto * denominator_ptr = b.CreatePointerCast(denominator_offset_ptr, denominator_type->getPointerTo());
+
+        auto * weight_cast_to_denominator = nativeCast(b, arguments_types[1], argument_values[1], denominator_type);
+
+        auto * denominator_value = b.CreateLoad(denominator_type, denominator_ptr);
+        auto * denominator_value_updated = denominator_type->isIntegerTy() ?
b.CreateAdd(denominator_value, weight_cast_to_denominator) : b.CreateFAdd(denominator_value, weight_cast_to_denominator); + + b.CreateStore(denominator_value_updated, denominator_ptr); + } + +#endif + }; } diff --git a/src/AggregateFunctions/AggregateFunctionIf.cpp b/src/AggregateFunctions/AggregateFunctionIf.cpp index 6d8a2f308c8..e99928e8f5b 100644 --- a/src/AggregateFunctions/AggregateFunctionIf.cpp +++ b/src/AggregateFunctions/AggregateFunctionIf.cpp @@ -106,6 +106,16 @@ public: this->nested_function->add(this->nestedPlace(place), &nested_column, row_num, arena); } } + +#if USE_EMBEDDED_COMPILER + + bool isCompilable() const override + { + return false; + } + +#endif + }; template @@ -168,6 +178,15 @@ public: } } +#if USE_EMBEDDED_COMPILER + + bool isCompilable() const override + { + return false; + } + +#endif + private: using Base = AggregateFunctionNullBase>; diff --git a/src/AggregateFunctions/AggregateFunctionNull.h b/src/AggregateFunctions/AggregateFunctionNull.h index 9cccb5b3ce9..32ff717dd86 100644 --- a/src/AggregateFunctions/AggregateFunctionNull.h +++ b/src/AggregateFunctions/AggregateFunctionNull.h @@ -192,50 +192,6 @@ public: } AggregateFunctionPtr getNestedFunction() const override { return nested_function; } -}; - - -/** There are two cases: for single argument and variadic. - * Code for single argument is much more efficient. - */ -template -class AggregateFunctionNullUnary final - : public AggregateFunctionNullBase> -{ -public: - AggregateFunctionNullUnary(AggregateFunctionPtr nested_function_, const DataTypes & arguments, const Array & params) - : AggregateFunctionNullBase>(std::move(nested_function_), arguments, params) - { - } - - void add(AggregateDataPtr __restrict place, const IColumn ** columns, size_t row_num, Arena * arena) const override - { - const ColumnNullable * column = assert_cast(columns[0]); - const IColumn * nested_column = &column->getNestedColumn(); - if (!column->isNullAt(row_num)) - { - this->setFlag(place); - this->nested_function->add(this->nestedPlace(place), &nested_column, row_num, arena); - } - } - - void addBatchSinglePlace( - size_t batch_size, AggregateDataPtr place, const IColumn ** columns, Arena * arena, ssize_t if_argument_pos = -1) const override - { - const ColumnNullable * column = assert_cast(columns[0]); - const IColumn * nested_column = &column->getNestedColumn(); - const UInt8 * null_map = column->getNullMapData().data(); - - this->nested_function->addBatchSinglePlaceNotNull( - batch_size, this->nestedPlace(place), &nested_column, null_map, arena, if_argument_pos); - - if constexpr (result_is_nullable) - if (!memoryIsByte(null_map, batch_size, 1)) - this->setFlag(place); - } - #if USE_EMBEDDED_COMPILER @@ -258,38 +214,6 @@ public: this->nested_function->compileCreate(b, aggregate_data_ptr_with_prefix_size_offset); } - void compileAdd(llvm::IRBuilderBase & builder, llvm::Value * aggregate_data_ptr, const DataTypes & arguments_types, const std::vector & argument_values) const override - { - - llvm::IRBuilder<> & b = static_cast &>(builder); - - const auto & nullable_type = arguments_types[0]; - const auto & nullable_value = argument_values[0]; - - auto * wrapped_value = b.CreateExtractValue(nullable_value, {0}); - auto * is_null_value = b.CreateExtractValue(nullable_value, {1}); - - auto * head = b.GetInsertBlock(); - - auto * join_block = llvm::BasicBlock::Create(head->getContext(), "join_block", head->getParent()); - auto * if_null = llvm::BasicBlock::Create(head->getContext(), "if_null", head->getParent()); - auto * 
if_not_null = llvm::BasicBlock::Create(head->getContext(), "if_not_null", head->getParent()); - - b.CreateCondBr(is_null_value, if_null, if_not_null); - - b.SetInsertPoint(if_null); - b.CreateBr(join_block); - - b.SetInsertPoint(if_not_null); - b.CreateStore(llvm::ConstantInt::get(b.getInt8Ty(), 1), aggregate_data_ptr); - auto * aggregate_data_ptr_with_prefix_size_offset = b.CreateConstGEP1_32(nullptr, aggregate_data_ptr, this->prefix_size); - this->nested_function->compileAdd(b, aggregate_data_ptr_with_prefix_size_offset, { removeNullable(nullable_type) }, { wrapped_value }); - b.CreateBr(join_block); - - b.SetInsertPoint(join_block); - - } - void compileMerge(llvm::IRBuilderBase & builder, llvm::Value * aggregate_data_dst_ptr, llvm::Value * aggregate_data_src_ptr) const override { llvm::IRBuilder<> & b = static_cast &>(builder); @@ -357,6 +281,85 @@ public: }; +/** There are two cases: for single argument and variadic. + * Code for single argument is much more efficient. + */ +template +class AggregateFunctionNullUnary final + : public AggregateFunctionNullBase> +{ +public: + AggregateFunctionNullUnary(AggregateFunctionPtr nested_function_, const DataTypes & arguments, const Array & params) + : AggregateFunctionNullBase>(std::move(nested_function_), arguments, params) + { + } + + void add(AggregateDataPtr __restrict place, const IColumn ** columns, size_t row_num, Arena * arena) const override + { + const ColumnNullable * column = assert_cast(columns[0]); + const IColumn * nested_column = &column->getNestedColumn(); + if (!column->isNullAt(row_num)) + { + this->setFlag(place); + this->nested_function->add(this->nestedPlace(place), &nested_column, row_num, arena); + } + } + + void addBatchSinglePlace( + size_t batch_size, AggregateDataPtr place, const IColumn ** columns, Arena * arena, ssize_t if_argument_pos = -1) const override + { + const ColumnNullable * column = assert_cast(columns[0]); + const IColumn * nested_column = &column->getNestedColumn(); + const UInt8 * null_map = column->getNullMapData().data(); + + this->nested_function->addBatchSinglePlaceNotNull( + batch_size, this->nestedPlace(place), &nested_column, null_map, arena, if_argument_pos); + + if constexpr (result_is_nullable) + if (!memoryIsByte(null_map, batch_size, 1)) + this->setFlag(place); + } + +#if USE_EMBEDDED_COMPILER + + void compileAdd(llvm::IRBuilderBase & builder, llvm::Value * aggregate_data_ptr, const DataTypes & arguments_types, const std::vector & argument_values) const override + { + llvm::IRBuilder<> & b = static_cast &>(builder); + + const auto & nullable_type = arguments_types[0]; + const auto & nullable_value = argument_values[0]; + + auto * wrapped_value = b.CreateExtractValue(nullable_value, {0}); + auto * is_null_value = b.CreateExtractValue(nullable_value, {1}); + + auto * head = b.GetInsertBlock(); + + auto * join_block = llvm::BasicBlock::Create(head->getContext(), "join_block", head->getParent()); + auto * if_null = llvm::BasicBlock::Create(head->getContext(), "if_null", head->getParent()); + auto * if_not_null = llvm::BasicBlock::Create(head->getContext(), "if_not_null", head->getParent()); + + b.CreateCondBr(is_null_value, if_null, if_not_null); + + b.SetInsertPoint(if_null); + b.CreateBr(join_block); + + b.SetInsertPoint(if_not_null); + b.CreateStore(llvm::ConstantInt::get(b.getInt8Ty(), 1), aggregate_data_ptr); + auto * aggregate_data_ptr_with_prefix_size_offset = b.CreateConstGEP1_32(nullptr, aggregate_data_ptr, this->prefix_size); + this->nested_function->compileAdd(b, 
aggregate_data_ptr_with_prefix_size_offset, { removeNullable(nullable_type) }, { wrapped_value });
+        b.CreateBr(join_block);
+
+        b.SetInsertPoint(join_block);
+
+    }
+
+#endif
+
+};
+
+
template <bool result_is_nullable, bool null_is_skipped>
class AggregateFunctionNullVariadic final : public AggregateFunctionNullBase<result_is_nullable,
    AggregateFunctionNullVariadic<result_is_nullable, null_is_skipped>>
        this->nested_function->add(this->nestedPlace(place), nested_columns, row_num, arena);
    }
+
+#if USE_EMBEDDED_COMPILER
+
+    void compileAdd(llvm::IRBuilderBase & builder, llvm::Value * aggregate_data_ptr, const DataTypes & arguments_types, const std::vector<llvm::Value *> & argument_values) const override
+    {
+        llvm::IRBuilder<> & b = static_cast<llvm::IRBuilder<> &>(builder);
+
+        size_t arguments_size = arguments_types.size();
+
+        DataTypes non_nullable_types;
+        std::vector<llvm::Value *> wrapped_values;
+        std::vector<llvm::Value *> is_null_values;
+
+        non_nullable_types.resize(arguments_size);
+        wrapped_values.resize(arguments_size);
+        is_null_values.resize(arguments_size);
+
+        for (size_t i = 0; i < arguments_size; ++i)
+        {
+            const auto & argument_value = argument_values[i];
+
+            if (is_nullable[i])
+            {
+                auto * wrapped_value = b.CreateExtractValue(argument_value, {0});
+
+                if constexpr (null_is_skipped)
+                    is_null_values[i] = b.CreateExtractValue(argument_value, {1});
+
+                wrapped_values[i] = wrapped_value;
+                non_nullable_types[i] = removeNullable(arguments_types[i]);
+            }
+            else
+            {
+                wrapped_values[i] = argument_value;
+                non_nullable_types[i] = arguments_types[i];
+            }
+        }
+
+        if constexpr (null_is_skipped)
+        {
+            auto * head = b.GetInsertBlock();
+
+            auto * join_block = llvm::BasicBlock::Create(head->getContext(), "join_block", head->getParent());
+            auto * if_null = llvm::BasicBlock::Create(head->getContext(), "if_null", head->getParent());
+            auto * if_not_null = llvm::BasicBlock::Create(head->getContext(), "if_not_null", head->getParent());
+
+            auto * values_have_null_ptr = b.CreateAlloca(b.getInt1Ty());
+            b.CreateStore(b.getInt1(false), values_have_null_ptr);
+
+            for (auto * is_null_value : is_null_values)
+            {
+                if (!is_null_value)
+                    continue;
+
+                auto * values_have_null = b.CreateLoad(b.getInt1Ty(), values_have_null_ptr);
+                b.CreateStore(b.CreateOr(values_have_null, is_null_value), values_have_null_ptr);
+            }
+
+            b.CreateCondBr(b.CreateLoad(b.getInt1Ty(), values_have_null_ptr), if_null, if_not_null);
+
+            b.SetInsertPoint(if_null);
+            b.CreateBr(join_block);
+
+            b.SetInsertPoint(if_not_null);
+            b.CreateStore(llvm::ConstantInt::get(b.getInt8Ty(), 1), aggregate_data_ptr);
+            auto * aggregate_data_ptr_with_prefix_size_offset = b.CreateConstGEP1_32(nullptr, aggregate_data_ptr, this->prefix_size);
+            this->nested_function->compileAdd(b, aggregate_data_ptr_with_prefix_size_offset, non_nullable_types, wrapped_values);
+            b.CreateBr(join_block);
+
+            b.SetInsertPoint(join_block);
+        }
+        else
+        {
+            b.CreateStore(llvm::ConstantInt::get(b.getInt8Ty(), 1), aggregate_data_ptr);
+            auto * aggregate_data_ptr_with_prefix_size_offset = b.CreateConstGEP1_32(nullptr, aggregate_data_ptr, this->prefix_size);
+            this->nested_function->compileAdd(b, aggregate_data_ptr_with_prefix_size_offset, non_nullable_types, wrapped_values);
+        }
+    }
+
+#endif
+
 private:
    enum { MAX_ARGS = 8 };
    size_t number_of_arguments = 0;
diff --git a/src/AggregateFunctions/AggregateFunctionSumCount.h b/src/AggregateFunctions/AggregateFunctionSumCount.h
index 1026b6272ba..4a913113ce2 100644
--- a/src/AggregateFunctions/AggregateFunctionSumCount.h
+++ b/src/AggregateFunctions/AggregateFunctionSumCount.h
@@ -48,6 +48,15 @@ public:

    String getName() const final { return "sumCount"; }

+#if USE_EMBEDDED_COMPILER
+
+    bool isCompilable() const override
+    {
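+        /// sumCount produces a tuple of (sum, count) rather than a single
+        /// native value, and the JIT path can only materialize scalar
+        /// results (see compileGetResult), so compilation is declined here.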
return false; + } + +#endif + private: UInt32 scale; }; diff --git a/src/AggregateFunctions/IAggregateFunction.h b/src/AggregateFunctions/IAggregateFunction.h index 726ab727a5d..188de6fb518 100644 --- a/src/AggregateFunctions/IAggregateFunction.h +++ b/src/AggregateFunctions/IAggregateFunction.h @@ -251,6 +251,30 @@ public: // of true window functions, so this hack-ish interface suffices. virtual bool isOnlyWindowFunction() const { return false; } + virtual String getDescription() const + { + String description; + + description += getName(); + description += '('; + + for (const auto & argument_type : argument_types) + { + description += argument_type->getName(); + description += ", "; + } + + if (!argument_types.empty()) + { + description.pop_back(); + description.pop_back(); + } + + description += ')'; + + return description; + } + #if USE_EMBEDDED_COMPILER virtual bool isCompilable() const { return false; } diff --git a/src/DataTypes/Native.h b/src/DataTypes/Native.h index a62c73fa352..88f99b60ed7 100644 --- a/src/DataTypes/Native.h +++ b/src/DataTypes/Native.h @@ -80,6 +80,25 @@ static inline llvm::Type * toNativeType(llvm::IRBuilderBase & builder) return nullptr; } +template +static inline bool canBeNativeType() +{ + if constexpr (std::is_same_v || std::is_same_v) + return true; + else if constexpr (std::is_same_v || std::is_same_v) + return true; + else if constexpr (std::is_same_v || std::is_same_v) + return true; + else if constexpr (std::is_same_v || std::is_same_v) + return true; + else if constexpr (std::is_same_v) + return true; + else if constexpr (std::is_same_v) + return true; + + return false; +} + static inline bool canBeNativeType(const IDataType & type) { WhichDataType data_type(type); @@ -180,6 +199,37 @@ static inline llvm::Value * nativeCast(llvm::IRBuilder<> & b, const DataTypePtr return nativeCast(b, from, value, n_to); } +static inline std::pair nativeCastToCommon(llvm::IRBuilder<> & b, const DataTypePtr & lhs_type, llvm::Value * lhs, const DataTypePtr & rhs_type, llvm::Value * rhs) +{ + llvm::Type * common; + + bool lhs_is_signed = typeIsSigned(*lhs_type); + bool rhs_is_signed = typeIsSigned(*rhs_type); + + if (lhs->getType()->isIntegerTy() && rhs->getType()->isIntegerTy()) + { + /// if one integer has a sign bit, make sure the other does as well. llvm generates optimal code + /// (e.g. uses overflow flag on x86) for (word size + 1)-bit integer operations. 
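+        /// Example: comparing UInt32 with Int32 gives bit widths 32 + 1 = 33
+        /// and 32, so both operands are extended (zero- or sign-extended
+        /// according to the source signedness) to i33, where every value of
+        /// either source type is representable and the comparison is exact.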
+ + size_t lhs_bit_width = lhs->getType()->getIntegerBitWidth() + (!lhs_is_signed && rhs_is_signed); + size_t rhs_bit_width = rhs->getType()->getIntegerBitWidth() + (!rhs_is_signed && lhs_is_signed); + + size_t max_bit_width = std::max(lhs_bit_width, rhs_bit_width); + common = b.getIntNTy(max_bit_width); + } + else + { + /// TODO: Check + /// (double, float) or (double, int_N where N <= double's mantissa width) -> double + common = b.getDoubleTy(); + } + + auto * cast_lhs_to_common = nativeCast(b, lhs_type, lhs, common); + auto * cast_rhs_to_common = nativeCast(b, rhs_type, rhs, common); + + return std::make_pair(cast_lhs_to_common, cast_rhs_to_common); +} + static inline llvm::Constant * getColumnNativeValue(llvm::IRBuilderBase & builder, const DataTypePtr & column_type, const IColumn & column, size_t index) { if (const auto * constant = typeid_cast(&column)) diff --git a/src/Functions/FunctionsComparison.h b/src/Functions/FunctionsComparison.h index ce0f580e6f1..239a0b30398 100644 --- a/src/Functions/FunctionsComparison.h +++ b/src/Functions/FunctionsComparison.h @@ -1265,23 +1265,7 @@ public: assert(2 == types.size() && 2 == values.size()); auto & b = static_cast &>(builder); - auto * x = values[0]; - auto * y = values[1]; - if (!types[0]->equals(*types[1])) - { - llvm::Type * common; - if (x->getType()->isIntegerTy() && y->getType()->isIntegerTy()) - common = b.getIntNTy(std::max( - /// if one integer has a sign bit, make sure the other does as well. llvm generates optimal code - /// (e.g. uses overflow flag on x86) for (word size + 1)-bit integer operations. - x->getType()->getIntegerBitWidth() + (!typeIsSigned(*types[0]) && typeIsSigned(*types[1])), - y->getType()->getIntegerBitWidth() + (!typeIsSigned(*types[1]) && typeIsSigned(*types[0])))); - else - /// (double, float) or (double, int_N where N <= double's mantissa width) -> double - common = b.getDoubleTy(); - x = nativeCast(b, types[0], x, common); - y = nativeCast(b, types[1], y, common); - } + auto [x, y] = nativeCastToCommon(b, types[0], values[0], types[1], values[1]); auto * result = CompileOp::compile(b, x, y, typeIsSigned(*types[0]) || typeIsSigned(*types[1])); return b.CreateSelect(result, b.getInt8(1), b.getInt8(0)); } diff --git a/src/Interpreters/Aggregator.cpp b/src/Interpreters/Aggregator.cpp index 676d9a984ab..27f2bca61ad 100644 --- a/src/Interpreters/Aggregator.cpp +++ b/src/Interpreters/Aggregator.cpp @@ -222,32 +222,6 @@ static CHJIT & getJITInstance() return jit; } -static std::string dumpAggregateFunction(const IAggregateFunction * function) -{ - std::string function_dump; - - auto return_type_name = function->getReturnType()->getName(); - - function_dump += return_type_name; - function_dump += ' '; - function_dump += function->getName(); - function_dump += '('; - - const auto & argument_types = function->getArgumentTypes(); - for (const auto & argument_type : argument_types) - { - function_dump += argument_type->getName(); - function_dump += ','; - } - - if (!argument_types.empty()) - function_dump.pop_back(); - - function_dump += ')'; - - return function_dump; -} - #endif Aggregator::Aggregator(const Params & params_) @@ -317,7 +291,7 @@ void Aggregator::compileAggregateFunctions() std::vector functions_to_compile; size_t aggregate_instructions_size = 0; - std::string functions_dump; + String functions_description; /// Add values to the aggregate functions. 
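    /// For each aggregate function that can be compiled, remember the function
    /// together with the offset of its state inside the aggregate data, and
    /// concatenate the getDescription() strings: the combined description is
    /// used below as the lookup key for already compiled modules.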
for (size_t i = 0; i < aggregate_functions.size(); ++i) @@ -333,11 +307,10 @@ void Aggregator::compileAggregateFunctions() .aggregate_data_offset = offset_of_aggregate_function }; - std::string function_dump = dumpAggregateFunction(function); - functions_dump += function_dump; - functions_dump += ' '; - functions_to_compile.emplace_back(std::move(function_to_compile)); + + functions_description += function->getDescription(); + functions_description += ' '; } ++aggregate_instructions_size; @@ -354,20 +327,21 @@ void Aggregator::compileAggregateFunctions() std::lock_guard lock(mtx); - auto it = aggregation_functions_dump_to_add_compiled.find(functions_dump); + auto it = aggregation_functions_dump_to_add_compiled.find(functions_description); if (it != aggregation_functions_dump_to_add_compiled.end()) { compiled_aggregate_functions = it->second; } else { - LOG_TRACE(log, "Compile expression {}", functions_dump); + LOG_TRACE(log, "Compile expression {}", functions_description); - compiled_aggregate_functions = compileAggregateFunctons(getJITInstance(), functions_to_compile, functions_dump); - aggregation_functions_dump_to_add_compiled[functions_dump] = compiled_aggregate_functions; + compiled_aggregate_functions = compileAggregateFunctons(getJITInstance(), functions_to_compile, functions_description); + aggregation_functions_dump_to_add_compiled[functions_description] = compiled_aggregate_functions; } } + LOG_TRACE(log, "Use compiled expression {}", functions_description); compiled_functions.emplace(std::move(compiled_aggregate_functions)); } diff --git a/tests/queries/0_stateless/01896_jit_aggregation_function_if.reference b/tests/queries/0_stateless/01896_jit_aggregation_function_if.reference new file mode 100644 index 00000000000..966723c90b9 --- /dev/null +++ b/tests/queries/0_stateless/01896_jit_aggregation_function_if.reference @@ -0,0 +1,12 @@ +Test unsigned integer values +0 1140 1140 1140 1140 +1 1220 1220 1220 1220 +2 1180 1180 1180 1180 +Test signed integer values +0 1140 1140 1140 1140 +1 1220 1220 1220 1220 +2 1180 1180 1180 1180 +Test float values +0 1140 1140 +1 1220 1220 +2 1180 1180 diff --git a/tests/queries/0_stateless/01896_jit_aggregation_function_if.sql b/tests/queries/0_stateless/01896_jit_aggregation_function_if.sql new file mode 100644 index 00000000000..7691d8f7d2b --- /dev/null +++ b/tests/queries/0_stateless/01896_jit_aggregation_function_if.sql @@ -0,0 +1,141 @@ +SET compile_aggregate_expressions = 1; +SET min_count_to_compile_aggregate_expression = 0; + +SELECT 'Test unsigned integer values'; + +DROP TABLE IF EXISTS test_table_unsigned_values; +CREATE TABLE test_table_unsigned_values +( + id UInt64, + + value1 UInt8, + value2 UInt16, + value3 UInt32, + value4 UInt64, + + predicate_value UInt8 +) ENGINE=TinyLog; + +INSERT INTO test_table_unsigned_values SELECT number % 3, number, number, number, number, if(number % 2 == 0, 1, 0) FROM system.numbers LIMIT 120; +SELECT + id, + sumIf(value1, predicate_value), + sumIf(value2, predicate_value), + sumIf(value3, predicate_value), + sumIf(value4, predicate_value) +FROM test_table_unsigned_values GROUP BY id ORDER BY id; +DROP TABLE test_table_unsigned_values; + +SELECT 'Test signed integer values'; + +DROP TABLE IF EXISTS test_table_signed_values; +CREATE TABLE test_table_signed_values +( + id UInt64, + + value1 Int8, + value2 Int16, + value3 Int32, + value4 Int64, + + predicate_value UInt8 +) ENGINE=TinyLog; + +INSERT INTO test_table_signed_values SELECT number % 3, number, number, number, number, if(number % 2 == 0, 1, 0) 
FROM system.numbers LIMIT 120; +SELECT + id, + sumIf(value1, predicate_value), + sumIf(value2, predicate_value), + sumIf(value3, predicate_value), + sumIf(value4, predicate_value) +FROM test_table_signed_values GROUP BY id ORDER BY id; +DROP TABLE test_table_signed_values; + +SELECT 'Test float values'; + +DROP TABLE IF EXISTS test_table_float_values; +CREATE TABLE test_table_float_values +( + id UInt64, + + value1 Float32, + value2 Float64, + + predicate_value UInt8 +) ENGINE=TinyLog; + +INSERT INTO test_table_float_values SELECT number % 3, number, number, if(number % 2 == 0, 1, 0) FROM system.numbers LIMIT 120; +SELECT + id, + sumIf(value1, predicate_value), + sumIf(value2, predicate_value) +FROM test_table_float_values GROUP BY id ORDER BY id; +DROP TABLE test_table_float_values; + +-- SELECT 'Test nullable unsigned integer values'; + +-- DROP TABLE IF EXISTS test_table_nullable_unsigned_values; +-- CREATE TABLE test_table_nullable_unsigned_values +-- ( +-- id UInt64, + +-- value1 Nullable(UInt8), +-- value2 Nullable(UInt16), +-- value3 Nullable(UInt32), +-- value4 Nullable(UInt64) +-- ) ENGINE=TinyLog; + +-- INSERT INTO test_table_nullable_unsigned_values SELECT number % 3, number, number, number, number FROM system.numbers LIMIT 120; +-- SELECT id, sum(value1), sum(value2), sum(value3), sum(value4) FROM test_table_nullable_unsigned_values GROUP BY id ORDER BY id; +-- DROP TABLE test_table_nullable_unsigned_values; + +-- SELECT 'Test nullable signed integer values'; + +-- DROP TABLE IF EXISTS test_table_nullable_signed_values; +-- CREATE TABLE test_table_nullable_signed_values +-- ( +-- id UInt64, + +-- value1 Nullable(Int8), +-- value2 Nullable(Int16), +-- value3 Nullable(Int32), +-- value4 Nullable(Int64) +-- ) ENGINE=TinyLog; + +-- INSERT INTO test_table_nullable_signed_values SELECT number % 3, number, number, number, number FROM system.numbers LIMIT 120; +-- SELECT id, sum(value1), sum(value2), sum(value3), sum(value4) FROM test_table_nullable_signed_values GROUP BY id ORDER BY id; +-- DROP TABLE test_table_nullable_signed_values; + +-- SELECT 'Test nullable float values'; + +-- DROP TABLE IF EXISTS test_table_nullable_float_values; +-- CREATE TABLE test_table_nullable_float_values +-- ( +-- id UInt64, + +-- value1 Nullable(Float32), +-- value2 Nullable(Float64) +-- ) ENGINE=TinyLog; + +-- INSERT INTO test_table_nullable_float_values SELECT number % 3, number, number FROM system.numbers LIMIT 120; +-- SELECT id, sum(value1), sum(value2) FROM test_table_nullable_float_values GROUP BY id ORDER BY id; +-- DROP TABLE test_table_nullable_float_values; + +-- SELECT 'Test null specifics'; + +-- DROP TABLE IF EXISTS test_table_null_specifics; +-- CREATE TABLE test_table_null_specifics +-- ( +-- id UInt64, + +-- value1 Nullable(UInt64), +-- value2 Nullable(UInt64), +-- value3 Nullable(UInt64) +-- ) ENGINE=TinyLog; + +-- INSERT INTO test_table_null_specifics VALUES (0, 1, 1, NULL); +-- INSERT INTO test_table_null_specifics VALUES (0, 2, NULL, NULL); +-- INSERT INTO test_table_null_specifics VALUES (0, 3, 3, NULL); + +-- SELECT id, sum(value1), sum(value2), sum(value3) FROM test_table_null_specifics GROUP BY id ORDER BY id; +-- DROP TABLE IF EXISTS test_table_null_specifics; diff --git a/tests/queries/0_stateless/01897_jit_aggregation_function_avg_weighted.reference b/tests/queries/0_stateless/01897_jit_aggregation_function_avg_weighted.reference new file mode 100644 index 00000000000..fec5cc09859 --- /dev/null +++ 
b/tests/queries/0_stateless/01897_jit_aggregation_function_avg_weighted.reference @@ -0,0 +1,26 @@ +Test unsigned integer values +0 nan nan nan nan +1 59.5 59.5 59.5 59.5 +2 60.5 60.5 60.5 60.5 +Test signed integer values +0 nan nan nan nan +1 59.5 59.5 59.5 59.5 +2 60.5 60.5 60.5 60.5 +Test float values +0 nan nan +1 59.5 59.5 +2 60.5 60.5 +Test nullable unsigned integer values +0 nan nan nan nan +1 59.5 59.5 59.5 59.5 +2 60.5 60.5 60.5 60.5 +Test nullable signed integer values +0 nan nan nan nan +1 59.5 59.5 59.5 59.5 +2 60.5 60.5 60.5 60.5 +Test nullable float values +0 nan nan +1 59.5 59.5 +2 60.5 60.5 +Test null specifics +0 2.3333333333333335 2.5 \N 2.5 2.5 \N diff --git a/tests/queries/0_stateless/01897_jit_aggregation_function_avg_weighted.sql b/tests/queries/0_stateless/01897_jit_aggregation_function_avg_weighted.sql new file mode 100644 index 00000000000..04b8a818382 --- /dev/null +++ b/tests/queries/0_stateless/01897_jit_aggregation_function_avg_weighted.sql @@ -0,0 +1,167 @@ +SET compile_aggregate_expressions = 1; +SET min_count_to_compile_aggregate_expression = 0; + +SELECT 'Test unsigned integer values'; + +DROP TABLE IF EXISTS test_table_unsigned_values; +CREATE TABLE test_table_unsigned_values +( + id UInt64, + + value1 UInt8, + value2 UInt16, + value3 UInt32, + value4 UInt64, + + weight UInt64 +) ENGINE=TinyLog; + +INSERT INTO test_table_unsigned_values SELECT number % 3, number, number, number, number, number % 3 FROM system.numbers LIMIT 120; +SELECT + id, + avgWeighted(value1, weight), + avgWeighted(value2, weight), + avgWeighted(value3, weight), + avgWeighted(value4, weight) +FROM test_table_unsigned_values GROUP BY id ORDER BY id; +DROP TABLE test_table_unsigned_values; + +SELECT 'Test signed integer values'; + +DROP TABLE IF EXISTS test_table_signed_values; +CREATE TABLE test_table_signed_values +( + id UInt64, + + value1 Int8, + value2 Int16, + value3 Int32, + value4 Int64, + + weight UInt64 +) ENGINE=TinyLog; + +INSERT INTO test_table_signed_values SELECT number % 3, number, number, number, number, number % 3 FROM system.numbers LIMIT 120; +SELECT + id, + avgWeighted(value1, weight), + avgWeighted(value2, weight), + avgWeighted(value3, weight), + avgWeighted(value4, weight) +FROM test_table_signed_values GROUP BY id ORDER BY id; +DROP TABLE test_table_signed_values; + +SELECT 'Test float values'; + +DROP TABLE IF EXISTS test_table_float_values; +CREATE TABLE test_table_float_values +( + id UInt64, + + value1 Float32, + value2 Float64, + + weight UInt64 +) ENGINE=TinyLog; + +INSERT INTO test_table_float_values SELECT number % 3, number, number, number % 3 FROM system.numbers LIMIT 120; +SELECT id, avgWeighted(value1, weight), avgWeighted(value2, weight) FROM test_table_float_values GROUP BY id ORDER BY id; +DROP TABLE test_table_float_values; + +SELECT 'Test nullable unsigned integer values'; + +DROP TABLE IF EXISTS test_table_nullable_unsigned_values; +CREATE TABLE test_table_nullable_unsigned_values +( + id UInt64, + + value1 Nullable(UInt8), + value2 Nullable(UInt16), + value3 Nullable(UInt32), + value4 Nullable(UInt64), + + weight UInt64 +) ENGINE=TinyLog; + +INSERT INTO test_table_nullable_unsigned_values SELECT number % 3, number, number, number, number, number % 3 FROM system.numbers LIMIT 120; +SELECT + id, + avgWeighted(value1, weight), + avgWeighted(value2, weight), + avgWeighted(value3, weight), + avgWeighted(value4, weight) +FROM test_table_nullable_unsigned_values GROUP BY id ORDER BY id; +DROP TABLE test_table_nullable_unsigned_values; + +SELECT 'Test 
nullable signed integer values'; + +DROP TABLE IF EXISTS test_table_nullable_signed_values; +CREATE TABLE test_table_nullable_signed_values +( + id UInt64, + + value1 Nullable(Int8), + value2 Nullable(Int16), + value3 Nullable(Int32), + value4 Nullable(Int64), + + weight UInt64 +) ENGINE=TinyLog; + + +INSERT INTO test_table_nullable_signed_values SELECT number % 3, number, number, number, number, number % 3 FROM system.numbers LIMIT 120; +SELECT + id, + avgWeighted(value1, weight), + avgWeighted(value2, weight), + avgWeighted(value3, weight), + avgWeighted(value4, weight) +FROM test_table_nullable_signed_values GROUP BY id ORDER BY id; +DROP TABLE test_table_nullable_signed_values; + +SELECT 'Test nullable float values'; + +DROP TABLE IF EXISTS test_table_nullable_float_values; +CREATE TABLE test_table_nullable_float_values +( + id UInt64, + + value1 Nullable(Float32), + value2 Nullable(Float64), + + weight UInt64 +) ENGINE=TinyLog; + +INSERT INTO test_table_nullable_float_values SELECT number % 3, number, number, number % 3 FROM system.numbers LIMIT 120; +SELECT id, avgWeighted(value1, weight), avgWeighted(value2, weight) FROM test_table_nullable_float_values GROUP BY id ORDER BY id; +DROP TABLE test_table_nullable_float_values; + +SELECT 'Test null specifics'; + +DROP TABLE IF EXISTS test_table_null_specifics; +CREATE TABLE test_table_null_specifics +( + id UInt64, + + value1 Nullable(UInt64), + value2 Nullable(UInt64), + value3 Nullable(UInt64), + + weight UInt64, + weight_nullable Nullable(UInt64) +) ENGINE=TinyLog; + +INSERT INTO test_table_null_specifics VALUES (0, 1, 1, NULL, 1, 1); +INSERT INTO test_table_null_specifics VALUES (0, 2, NULL, NULL, 2, NULL); +INSERT INTO test_table_null_specifics VALUES (0, 3, 3, NULL, 3, 3); + +SELECT + id, + avgWeighted(value1, weight), + avgWeighted(value2, weight), + avgWeighted(value3, weight), + avgWeighted(value1, weight_nullable), + avgWeighted(value2, weight_nullable), + avgWeighted(value3, weight_nullable) +FROM test_table_null_specifics GROUP BY id ORDER BY id; +DROP TABLE IF EXISTS test_table_null_specifics; From e67198c1446a41a95395446a4f8d8285400eb395 Mon Sep 17 00:00:00 2001 From: Maksim Kita Date: Sun, 6 Jun 2021 19:22:55 +0300 Subject: [PATCH 637/931] Compile AggregateFunctionIfNull --- .../AggregateFunctionIf.cpp | 114 ++++++++++++- ...1896_jit_aggregation_function_if.reference | 16 ++ .../01896_jit_aggregation_function_if.sql | 158 ++++++++++++------ 3 files changed, 233 insertions(+), 55 deletions(-) diff --git a/src/AggregateFunctions/AggregateFunctionIf.cpp b/src/AggregateFunctions/AggregateFunctionIf.cpp index e99928e8f5b..ee86a54739c 100644 --- a/src/AggregateFunctions/AggregateFunctionIf.cpp +++ b/src/AggregateFunctions/AggregateFunctionIf.cpp @@ -109,9 +109,38 @@ public: #if USE_EMBEDDED_COMPILER - bool isCompilable() const override + void compileAdd(llvm::IRBuilderBase & builder, llvm::Value * aggregate_data_ptr, const DataTypes & arguments_types, const std::vector & argument_values) const override { - return false; + llvm::IRBuilder<> & b = static_cast &>(builder); + + const auto & nullable_type = arguments_types[0]; + const auto & nullable_value = argument_values[0]; + + auto * wrapped_value = b.CreateExtractValue(nullable_value, {0}); + auto * is_null_value = b.CreateExtractValue(nullable_value, {1}); + + const auto & predicate_type = arguments_types[argument_values.size() - 1]; + auto * predicate_value = argument_values[argument_values.size() - 1]; + auto * is_predicate_true = nativeBoolCast(b, predicate_type, 
predicate_value);
+
+        auto * head = b.GetInsertBlock();
+
+        auto * join_block = llvm::BasicBlock::Create(head->getContext(), "join_block", head->getParent());
+        auto * if_null = llvm::BasicBlock::Create(head->getContext(), "if_null", head->getParent());
+        auto * if_not_null = llvm::BasicBlock::Create(head->getContext(), "if_not_null", head->getParent());
+
+        b.CreateCondBr(b.CreateAnd(b.CreateNot(is_null_value), is_predicate_true), if_not_null, if_null);
+
+        b.SetInsertPoint(if_null);
+        b.CreateBr(join_block);
+
+        b.SetInsertPoint(if_not_null);
+        b.CreateStore(llvm::ConstantInt::get(b.getInt8Ty(), 1), aggregate_data_ptr);
+        auto * aggregate_data_ptr_with_prefix_size_offset = b.CreateConstGEP1_32(nullptr, aggregate_data_ptr, this->prefix_size);
+        this->nested_function->compileAdd(b, aggregate_data_ptr_with_prefix_size_offset, { removeNullable(nullable_type) }, { wrapped_value });
+        b.CreateBr(join_block);
+
+        b.SetInsertPoint(join_block);
     }

 #endif

@@ -151,6 +180,6 @@ public:
     void add(AggregateDataPtr __restrict place, const IColumn ** columns, size_t row_num, Arena * arena) const override
     {
         /// This container stores the columns we really pass to the nested function.
         const IColumn * nested_columns[number_of_arguments];
@@ -180,9 +210,85 @@ public:

 #if USE_EMBEDDED_COMPILER

-    bool isCompilable() const override
+    void compileAdd(llvm::IRBuilderBase & builder, llvm::Value * aggregate_data_ptr, const DataTypes & arguments_types, const std::vector<llvm::Value *> & argument_values) const override
     {
-        return false;
+        llvm::IRBuilder<> & b = static_cast<llvm::IRBuilder<> &>(builder);
+
+        size_t arguments_size = arguments_types.size();
+
+        DataTypes non_nullable_types;
+        std::vector<llvm::Value *> wrapped_values;
+        std::vector<llvm::Value *> is_null_values;
+
+        non_nullable_types.resize(arguments_size);
+        wrapped_values.resize(arguments_size);
+        is_null_values.resize(arguments_size);
+
+        for (size_t i = 0; i < arguments_size; ++i)
+        {
+            const auto & argument_value = argument_values[i];
+
+            if (is_nullable[i])
+            {
+                auto * wrapped_value = b.CreateExtractValue(argument_value, {0});
+
+                if constexpr (null_is_skipped)
+                    is_null_values[i] = b.CreateExtractValue(argument_value, {1});
+
+                wrapped_values[i] = wrapped_value;
+                non_nullable_types[i] = removeNullable(arguments_types[i]);
+            }
+            else
+            {
+                wrapped_values[i] = argument_value;
+                non_nullable_types[i] = arguments_types[i];
+            }
+        }
+
+        auto * head = b.GetInsertBlock();
+
+        auto * join_block = llvm::BasicBlock::Create(head->getContext(), "join_block", head->getParent());
+        auto * join_block_after_null_checks = llvm::BasicBlock::Create(head->getContext(), "join_block_after_null_checks", head->getParent());
+
+        if constexpr (null_is_skipped)
+        {
+            auto * values_have_null_ptr = b.CreateAlloca(b.getInt1Ty());
+            b.CreateStore(b.getInt1(false), values_have_null_ptr);
+
+            for (auto * is_null_value : is_null_values)
+            {
+                if (!is_null_value)
+                    continue;
+
+                auto * values_have_null = b.CreateLoad(b.getInt1Ty(), values_have_null_ptr);
+                b.CreateStore(b.CreateOr(values_have_null, is_null_value), values_have_null_ptr);
+            }
+
+            b.CreateCondBr(b.CreateLoad(b.getInt1Ty(), values_have_null_ptr), join_block, join_block_after_null_checks);
+        }
+
+        b.SetInsertPoint(join_block_after_null_checks);
+
+        const auto & predicate_type = arguments_types[argument_values.size() - 1];
+        auto * predicate_value = argument_values[argument_values.size() - 1];
+        auto * is_predicate_true = nativeBoolCast(b, predicate_type, predicate_value);
+
+        auto * if_true =
llvm::BasicBlock::Create(head->getContext(), "if_true", head->getParent()); + auto * if_false = llvm::BasicBlock::Create(head->getContext(), "if_false", head->getParent()); + + b.CreateCondBr(is_predicate_true, if_true, if_false); + + b.SetInsertPoint(if_false); + b.CreateBr(join_block); + + b.SetInsertPoint(if_true); + + b.CreateStore(llvm::ConstantInt::get(b.getInt8Ty(), 1), aggregate_data_ptr); + auto * aggregate_data_ptr_with_prefix_size_offset = b.CreateConstGEP1_32(nullptr, aggregate_data_ptr, this->prefix_size); + this->nested_function->compileAdd(b, aggregate_data_ptr_with_prefix_size_offset, non_nullable_types, wrapped_values); + b.CreateBr(join_block); + + b.SetInsertPoint(join_block); } #endif diff --git a/tests/queries/0_stateless/01896_jit_aggregation_function_if.reference b/tests/queries/0_stateless/01896_jit_aggregation_function_if.reference index 966723c90b9..153adc0f998 100644 --- a/tests/queries/0_stateless/01896_jit_aggregation_function_if.reference +++ b/tests/queries/0_stateless/01896_jit_aggregation_function_if.reference @@ -10,3 +10,19 @@ Test float values 0 1140 1140 1 1220 1220 2 1180 1180 +Test nullable unsigned integer values +0 1140 1140 1140 1140 +1 1220 1220 1220 1220 +2 1180 1180 1180 1180 +Test nullable signed integer values +0 1140 1140 1140 1140 +1 1220 1220 1220 1220 +2 1180 1180 1180 1180 +Test nullable float values +0 1140 1140 +1 1220 1220 +2 1180 1180 +Test null specifics +0 6 4 \N +Test null variadic +0 2.3333333333333335 2.5 \N diff --git a/tests/queries/0_stateless/01896_jit_aggregation_function_if.sql b/tests/queries/0_stateless/01896_jit_aggregation_function_if.sql index 7691d8f7d2b..8b5618230f0 100644 --- a/tests/queries/0_stateless/01896_jit_aggregation_function_if.sql +++ b/tests/queries/0_stateless/01896_jit_aggregation_function_if.sql @@ -72,70 +72,126 @@ SELECT FROM test_table_float_values GROUP BY id ORDER BY id; DROP TABLE test_table_float_values; --- SELECT 'Test nullable unsigned integer values'; +SELECT 'Test nullable unsigned integer values'; --- DROP TABLE IF EXISTS test_table_nullable_unsigned_values; --- CREATE TABLE test_table_nullable_unsigned_values --- ( --- id UInt64, +DROP TABLE IF EXISTS test_table_nullable_unsigned_values; +CREATE TABLE test_table_nullable_unsigned_values +( + id UInt64, --- value1 Nullable(UInt8), --- value2 Nullable(UInt16), --- value3 Nullable(UInt32), --- value4 Nullable(UInt64) --- ) ENGINE=TinyLog; + value1 Nullable(UInt8), + value2 Nullable(UInt16), + value3 Nullable(UInt32), + value4 Nullable(UInt64), --- INSERT INTO test_table_nullable_unsigned_values SELECT number % 3, number, number, number, number FROM system.numbers LIMIT 120; --- SELECT id, sum(value1), sum(value2), sum(value3), sum(value4) FROM test_table_nullable_unsigned_values GROUP BY id ORDER BY id; --- DROP TABLE test_table_nullable_unsigned_values; + predicate_value UInt8 +) ENGINE=TinyLog; --- SELECT 'Test nullable signed integer values'; +INSERT INTO test_table_nullable_unsigned_values SELECT number % 3, number, number, number, number, if(number % 2 == 0, 1, 0) FROM system.numbers LIMIT 120; +SELECT + id, + sumIf(value1, predicate_value), + sumIf(value2, predicate_value), + sumIf(value3, predicate_value), + sumIf(value4, predicate_value) +FROM test_table_nullable_unsigned_values GROUP BY id ORDER BY id; +DROP TABLE test_table_nullable_unsigned_values; --- DROP TABLE IF EXISTS test_table_nullable_signed_values; --- CREATE TABLE test_table_nullable_signed_values --- ( --- id UInt64, +SELECT 'Test nullable signed integer values'; --- 
value1 Nullable(Int8), --- value2 Nullable(Int16), --- value3 Nullable(Int32), --- value4 Nullable(Int64) --- ) ENGINE=TinyLog; +DROP TABLE IF EXISTS test_table_nullable_signed_values; +CREATE TABLE test_table_nullable_signed_values +( + id UInt64, --- INSERT INTO test_table_nullable_signed_values SELECT number % 3, number, number, number, number FROM system.numbers LIMIT 120; --- SELECT id, sum(value1), sum(value2), sum(value3), sum(value4) FROM test_table_nullable_signed_values GROUP BY id ORDER BY id; --- DROP TABLE test_table_nullable_signed_values; + value1 Nullable(Int8), + value2 Nullable(Int16), + value3 Nullable(Int32), + value4 Nullable(Int64), --- SELECT 'Test nullable float values'; + predicate_value UInt8 +) ENGINE=TinyLog; --- DROP TABLE IF EXISTS test_table_nullable_float_values; --- CREATE TABLE test_table_nullable_float_values --- ( --- id UInt64, +INSERT INTO test_table_nullable_signed_values SELECT number % 3, number, number, number, number, if(number % 2 == 0, 1, 0) FROM system.numbers LIMIT 120; +SELECT + id, + sumIf(value1, predicate_value), + sumIf(value2, predicate_value), + sumIf(value3, predicate_value), + sumIf(value4, predicate_value) +FROM test_table_nullable_signed_values GROUP BY id ORDER BY id; +DROP TABLE test_table_nullable_signed_values; --- value1 Nullable(Float32), --- value2 Nullable(Float64) --- ) ENGINE=TinyLog; +SELECT 'Test nullable float values'; --- INSERT INTO test_table_nullable_float_values SELECT number % 3, number, number FROM system.numbers LIMIT 120; --- SELECT id, sum(value1), sum(value2) FROM test_table_nullable_float_values GROUP BY id ORDER BY id; --- DROP TABLE test_table_nullable_float_values; +DROP TABLE IF EXISTS test_table_nullable_float_values; +CREATE TABLE test_table_nullable_float_values +( + id UInt64, --- SELECT 'Test null specifics'; + value1 Nullable(Float32), + value2 Nullable(Float64), --- DROP TABLE IF EXISTS test_table_null_specifics; --- CREATE TABLE test_table_null_specifics --- ( --- id UInt64, + predicate_value UInt8 +) ENGINE=TinyLog; --- value1 Nullable(UInt64), --- value2 Nullable(UInt64), --- value3 Nullable(UInt64) --- ) ENGINE=TinyLog; +INSERT INTO test_table_nullable_float_values SELECT number % 3, number, number, if(number % 2 == 0, 1, 0) FROM system.numbers LIMIT 120; +SELECT + id, + sumIf(value1, predicate_value), + sumIf(value2, predicate_value) +FROM test_table_nullable_float_values GROUP BY id ORDER BY id; +DROP TABLE test_table_nullable_float_values; --- INSERT INTO test_table_null_specifics VALUES (0, 1, 1, NULL); --- INSERT INTO test_table_null_specifics VALUES (0, 2, NULL, NULL); --- INSERT INTO test_table_null_specifics VALUES (0, 3, 3, NULL); +SELECT 'Test null specifics'; --- SELECT id, sum(value1), sum(value2), sum(value3) FROM test_table_null_specifics GROUP BY id ORDER BY id; --- DROP TABLE IF EXISTS test_table_null_specifics; +DROP TABLE IF EXISTS test_table_null_specifics; +CREATE TABLE test_table_null_specifics +( + id UInt64, + + value1 Nullable(UInt64), + value2 Nullable(UInt64), + value3 Nullable(UInt64), + + predicate_value UInt8 +) ENGINE=TinyLog; + +INSERT INTO test_table_null_specifics VALUES (0, 1, 1, NULL, 1); +INSERT INTO test_table_null_specifics VALUES (0, 2, NULL, NULL, 1); +INSERT INTO test_table_null_specifics VALUES (0, 3, 3, NULL, 1); + +SELECT + id, + sumIf(value1, predicate_value), + sumIf(value2, predicate_value), + sumIf(value3, predicate_value) +FROM test_table_null_specifics GROUP BY id ORDER BY id; +DROP TABLE IF EXISTS test_table_null_specifics; + +SELECT 'Test 
null variadic'; + +DROP TABLE IF EXISTS test_table_null_specifics; +CREATE TABLE test_table_null_specifics +( + id UInt64, + + value1 Nullable(UInt64), + value2 Nullable(UInt64), + value3 Nullable(UInt64), + + predicate_value UInt8, + weight UInt64 +) ENGINE=TinyLog; + +INSERT INTO test_table_null_specifics VALUES (0, 1, 1, NULL, 1, 1); +INSERT INTO test_table_null_specifics VALUES (0, 2, NULL, NULL, 1, 2); +INSERT INTO test_table_null_specifics VALUES (0, 3, 3, NULL, 1, 3); + +SELECT + id, + avgWeightedIf(value1, weight, predicate_value), + avgWeightedIf(value2, weight, predicate_value), + avgWeightedIf(value3, weight, predicate_value) +FROM test_table_null_specifics GROUP BY id ORDER BY id; +DROP TABLE IF EXISTS test_table_null_specifics; From d4742f91e654efb35869f01a5173bb29ec7b8b47 Mon Sep 17 00:00:00 2001 From: Maksim Kita Date: Sun, 6 Jun 2021 19:40:32 +0300 Subject: [PATCH 638/931] Check min_count_to_compile_aggregate_expression setting before compilation --- src/Interpreters/Aggregator.cpp | 16 +++++++++++++--- src/Interpreters/ExpressionJIT.h | 3 --- 2 files changed, 13 insertions(+), 6 deletions(-) diff --git a/src/Interpreters/Aggregator.cpp b/src/Interpreters/Aggregator.cpp index 27f2bca61ad..c45b9c0c012 100644 --- a/src/Interpreters/Aggregator.cpp +++ b/src/Interpreters/Aggregator.cpp @@ -286,6 +286,10 @@ Aggregator::Aggregator(const Params & params_) void Aggregator::compileAggregateFunctions() { + static std::unordered_map aggregate_functions_description_to_count; + static std::unordered_map aggregation_functions_dump_to_add_compiled; + static std::mutex mtx; + if (!params.compile_aggregate_expressions || params.overflow_row) return; @@ -321,12 +325,18 @@ void Aggregator::compileAggregateFunctions() CompiledAggregateFunctions compiled_aggregate_functions; - { - static std::unordered_map aggregation_functions_dump_to_add_compiled; - static std::mutex mtx; + SipHash aggregate_function_description_hash; + aggregate_function_description_hash.update(functions_description); + UInt128 aggregate_function_description_hash_result; + aggregate_function_description_hash.get128(aggregate_function_description_hash_result); + + { std::lock_guard lock(mtx); + if (aggregate_functions_description_to_count[aggregate_function_description_hash_result]++ < params.min_count_to_compile_aggregate_expression) + return; + auto it = aggregation_functions_dump_to_add_compiled.find(functions_description); if (it != aggregation_functions_dump_to_add_compiled.end()) { diff --git a/src/Interpreters/ExpressionJIT.h b/src/Interpreters/ExpressionJIT.h index 4f724d2edf3..ab78346cf27 100644 --- a/src/Interpreters/ExpressionJIT.h +++ b/src/Interpreters/ExpressionJIT.h @@ -39,9 +39,6 @@ struct CompiledFunctionWeightFunction } }; -/** This child of LRUCache breaks one of it's invariants: total weight may be changed after insertion. - * We have to do so, because we don't known real memory consumption of generated LLVM code for every function. 
- */ class CompiledExpressionCache : public LRUCache { public: From eb29490102fe4b2f2eb945bd0f64a8527c7bf999 Mon Sep 17 00:00:00 2001 From: Maksim Kita Date: Sun, 6 Jun 2021 23:51:22 +0300 Subject: [PATCH 639/931] Fix AggregateFunctionAvg denominator type --- src/AggregateFunctions/AggregateFunctionAvg.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/AggregateFunctions/AggregateFunctionAvg.h b/src/AggregateFunctions/AggregateFunctionAvg.h index 3e2b29e3b93..61fbba56a25 100644 --- a/src/AggregateFunctions/AggregateFunctionAvg.h +++ b/src/AggregateFunctions/AggregateFunctionAvg.h @@ -203,7 +203,7 @@ public: auto * denominator_dst_value = b.CreateLoad(denominator_type, denominator_dst_ptr); auto * denominator_src_value = b.CreateLoad(denominator_type, denominator_src_ptr); - auto * denominator_result_value = numerator_type->isIntegerTy() ? b.CreateAdd(denominator_src_value, denominator_dst_value) : b.CreateFAdd(denominator_src_value, denominator_dst_value); + auto * denominator_result_value = denominator_type->isIntegerTy() ? b.CreateAdd(denominator_src_value, denominator_dst_value) : b.CreateFAdd(denominator_src_value, denominator_dst_value); b.CreateStore(denominator_result_value, denominator_dst_ptr); } From 1e2f22a18319e0d26b5bb4e88f36151c52844a4e Mon Sep 17 00:00:00 2001 From: Maksim Kita Date: Tue, 8 Jun 2021 13:32:32 +0300 Subject: [PATCH 640/931] Aggregator compile part of aggregate functions --- src/AggregateFunctions/AggregateFunctionAvg.h | 55 +++++---- .../AggregateFunctionMinMaxAny.h | 3 +- .../AggregateFunctionNull.h | 9 +- src/AggregateFunctions/IAggregateFunction.h | 50 ++++++++ src/Interpreters/Aggregator.cpp | 115 ++++++++++++++---- src/Interpreters/Aggregator.h | 20 +-- src/Interpreters/JIT/compileFunction.cpp | 20 +-- src/Interpreters/JIT/compileFunction.h | 3 +- 8 files changed, 190 insertions(+), 85 deletions(-) diff --git a/src/AggregateFunctions/AggregateFunctionAvg.h b/src/AggregateFunctions/AggregateFunctionAvg.h index 61fbba56a25..c028b610878 100644 --- a/src/AggregateFunctions/AggregateFunctionAvg.h +++ b/src/AggregateFunctions/AggregateFunctionAvg.h @@ -98,10 +98,10 @@ class AggregateFunctionAvgBase : public IAggregateFunctionDataHelper, Derived> { public: + using Base = IAggregateFunctionDataHelper, Derived>; using Numerator = TNumerator; using Denominator = TDenominator; using Fraction = AvgFraction; - using Base = IAggregateFunctionDataHelper; explicit AggregateFunctionAvgBase(const DataTypes & argument_types_, UInt32 num_scale_ = 0, UInt32 denom_scale_ = 0) @@ -163,18 +163,28 @@ public: } void compileCreate(llvm::IRBuilderBase & builder, llvm::Value * aggregate_data_ptr) const override + { + llvm::IRBuilder<> & b = static_cast &>(builder); + b.CreateMemSet(aggregate_data_ptr, llvm::ConstantInt::get(b.getInt8Ty(), 0), sizeof(Fraction), llvm::assumeAligned(this->alignOfData())); + } + + void compileAdd(llvm::IRBuilderBase & builder, llvm::Value * aggregate_data_ptr, const DataTypes & arguments_types, const std::vector & argument_values) const override { llvm::IRBuilder<> & b = static_cast &>(builder); auto * numerator_type = toNativeType(b); + auto * numerator_ptr = b.CreatePointerCast(aggregate_data_ptr, numerator_type->getPointerTo()); + auto * numerator_value = b.CreateLoad(numerator_type, numerator_ptr); + auto * value_cast_to_numerator = nativeCast(b, arguments_types[0], argument_values[0], numerator_type); + auto * numerator_result_value = numerator_type->isIntegerTy() ? 
b.CreateAdd(numerator_value, value_cast_to_numerator) : b.CreateFAdd(numerator_value, value_cast_to_numerator); + b.CreateStore(numerator_result_value, numerator_ptr); auto * denominator_type = toNativeType(b); - auto * denominator_offset_ptr = b.CreateConstGEP1_32(nullptr, aggregate_data_ptr, sizeof(Numerator)); - auto * denominator_ptr = b.CreatePointerCast(denominator_offset_ptr, denominator_type->getPointerTo()); - - b.CreateStore(llvm::Constant::getNullValue(numerator_type), numerator_ptr); - b.CreateStore(llvm::Constant::getNullValue(denominator_type), denominator_ptr); + static constexpr size_t denominator_offset = offsetof(Fraction, denominator); + auto * denominator_ptr = b.CreatePointerCast(b.CreateConstGEP1_32(nullptr, aggregate_data_ptr, denominator_offset), denominator_type->getPointerTo()); + auto * denominator_value_updated = b.CreateAdd(b.CreateLoad(denominator_type, denominator_ptr), llvm::ConstantInt::get(denominator_type, 1)); + b.CreateStore(denominator_value_updated, denominator_ptr); } void compileMerge(llvm::IRBuilderBase & builder, llvm::Value * aggregate_data_dst_ptr, llvm::Value * aggregate_data_src_ptr) const override @@ -186,24 +196,21 @@ public: auto * numerator_dst_ptr = b.CreatePointerCast(aggregate_data_dst_ptr, numerator_type->getPointerTo()); auto * numerator_dst_value = b.CreateLoad(numerator_type, numerator_dst_ptr); - auto * numerator_src_ptr = b.CreatePointerCast(aggregate_data_dst_ptr, numerator_type->getPointerTo()); + auto * numerator_src_ptr = b.CreatePointerCast(aggregate_data_src_ptr, numerator_type->getPointerTo()); auto * numerator_src_value = b.CreateLoad(numerator_type, numerator_src_ptr); auto * numerator_result_value = numerator_type->isIntegerTy() ? b.CreateAdd(numerator_dst_value, numerator_src_value) : b.CreateFAdd(numerator_dst_value, numerator_src_value); b.CreateStore(numerator_result_value, numerator_dst_ptr); auto * denominator_type = toNativeType(b); - - auto * denominator_dst_offset_ptr = b.CreateConstGEP1_32(nullptr, aggregate_data_dst_ptr, sizeof(Numerator)); - auto * denominator_src_offset_ptr = b.CreateConstGEP1_32(nullptr, aggregate_data_src_ptr, sizeof(Numerator)); - - auto * denominator_dst_ptr = b.CreatePointerCast(denominator_dst_offset_ptr, denominator_type->getPointerTo()); - auto * denominator_src_ptr = b.CreatePointerCast(denominator_src_offset_ptr, denominator_type->getPointerTo()); + static constexpr size_t denominator_offset = offsetof(Fraction, denominator); + auto * denominator_dst_ptr = b.CreatePointerCast(b.CreateConstGEP1_32(nullptr, aggregate_data_dst_ptr, denominator_offset), denominator_type->getPointerTo()); + auto * denominator_src_ptr = b.CreatePointerCast(b.CreateConstGEP1_32(nullptr, aggregate_data_src_ptr, denominator_offset), denominator_type->getPointerTo()); auto * denominator_dst_value = b.CreateLoad(denominator_type, denominator_dst_ptr); auto * denominator_src_value = b.CreateLoad(denominator_type, denominator_src_ptr); - auto * denominator_result_value = denominator_type->isIntegerTy() ? 
b.CreateAdd(denominator_src_value, denominator_dst_value) : b.CreateFAdd(denominator_src_value, denominator_dst_value); + auto * denominator_result_value = b.CreateAdd(denominator_src_value, denominator_dst_value); b.CreateStore(denominator_result_value, denominator_dst_ptr); } @@ -216,8 +223,8 @@ public: auto * numerator_value = b.CreateLoad(numerator_type, numerator_ptr); auto * denominator_type = toNativeType(b); - auto * denominator_offset_ptr = b.CreateConstGEP1_32(nullptr, aggregate_data_ptr, sizeof(Numerator)); - auto * denominator_ptr = b.CreatePointerCast(denominator_offset_ptr, denominator_type->getPointerTo()); + static constexpr size_t denominator_offset = offsetof(Fraction, denominator); + auto * denominator_ptr = b.CreatePointerCast(b.CreateConstGEP1_32(nullptr, aggregate_data_ptr, denominator_offset), denominator_type->getPointerTo()); auto * denominator_value = b.CreateLoad(denominator_type, denominator_ptr); auto * double_numerator = nativeCast(b, numerator_value, b.getDoubleTy()); @@ -247,6 +254,7 @@ public: using Numerator = typename Base::Numerator; using Denominator = typename Base::Denominator; + using Fraction = typename Base::Fraction; void NO_SANITIZE_UNDEFINED add(AggregateDataPtr __restrict place, const IColumn ** columns, size_t row_num, Arena *) const final { @@ -266,21 +274,14 @@ public: auto * numerator_ptr = b.CreatePointerCast(aggregate_data_ptr, numerator_type->getPointerTo()); auto * numerator_value = b.CreateLoad(numerator_type, numerator_ptr); - - const auto & argument_type = arguments_types[0]; - const auto & argument_value = argument_values[0]; - auto * value_cast_to_numerator = nativeCast(b, argument_type, argument_value, numerator_type); + auto * value_cast_to_numerator = nativeCast(b, arguments_types[0], argument_values[0], numerator_type); auto * numerator_result_value = numerator_type->isIntegerTy() ? 
b.CreateAdd(numerator_value, value_cast_to_numerator) : b.CreateFAdd(numerator_value, value_cast_to_numerator); b.CreateStore(numerator_result_value, numerator_ptr); auto * denominator_type = toNativeType(b); - - auto * denominator_offset_ptr = b.CreateConstGEP1_32(nullptr, aggregate_data_ptr, sizeof(Numerator)); - auto * denominator_ptr = b.CreatePointerCast(denominator_offset_ptr, denominator_type->getPointerTo()); - - auto * denominator_value = b.CreateLoad(denominator_type, denominator_ptr); - auto * denominator_value_updated = b.CreateAdd(denominator_value, llvm::ConstantInt::get(denominator_type, 1)); - + static constexpr size_t denominator_offset = offsetof(Fraction, denominator); + auto * denominator_ptr = b.CreatePointerCast(b.CreateConstGEP1_32(nullptr, aggregate_data_ptr, denominator_offset), denominator_type->getPointerTo()); + auto * denominator_value_updated = b.CreateAdd(b.CreateLoad(denominator_type, denominator_ptr), llvm::ConstantInt::get(denominator_type, 1)); b.CreateStore(denominator_value_updated, denominator_ptr); } diff --git a/src/AggregateFunctions/AggregateFunctionMinMaxAny.h b/src/AggregateFunctions/AggregateFunctionMinMaxAny.h index d2a91d6f086..147961b0be3 100644 --- a/src/AggregateFunctions/AggregateFunctionMinMaxAny.h +++ b/src/AggregateFunctions/AggregateFunctionMinMaxAny.h @@ -1122,8 +1122,7 @@ public: { llvm::IRBuilder<> & b = static_cast &>(builder); - auto alignment = llvm::assumeAligned(this->alignOfData()); - b.CreateMemSet(aggregate_data_ptr, llvm::ConstantInt::get(b.getInt8Ty(), 0), this->sizeOfData(), alignment); + b.CreateMemSet(aggregate_data_ptr, llvm::ConstantInt::get(b.getInt8Ty(), 0), this->sizeOfData(), llvm::assumeAligned(this->alignOfData())); } void compileAdd(llvm::IRBuilderBase & builder, llvm::Value * aggregate_data_ptr, const DataTypes &, const std::vector & argument_values) const override diff --git a/src/AggregateFunctions/AggregateFunctionNull.h b/src/AggregateFunctions/AggregateFunctionNull.h index 32ff717dd86..443c2c2968d 100644 --- a/src/AggregateFunctions/AggregateFunctionNull.h +++ b/src/AggregateFunctions/AggregateFunctionNull.h @@ -205,10 +205,7 @@ public: llvm::IRBuilder<> & b = static_cast &>(builder); if constexpr (result_is_nullable) - { - auto alignment = llvm::assumeAligned(this->alignOfData()); - b.CreateMemSet(aggregate_data_ptr, llvm::ConstantInt::get(b.getInt8Ty(), 0), this->prefix_size, alignment); - } + b.CreateMemSet(aggregate_data_ptr, llvm::ConstantInt::get(b.getInt8Ty(), 0), this->prefix_size, llvm::assumeAligned(this->alignOfData())); auto * aggregate_data_ptr_with_prefix_size_offset = b.CreateConstGEP1_32(nullptr, aggregate_data_ptr, this->prefix_size); this->nested_function->compileCreate(b, aggregate_data_ptr_with_prefix_size_offset); @@ -220,8 +217,8 @@ public: if constexpr (result_is_nullable) { - auto alignment = llvm::assumeAligned(this->alignOfData()); - b.CreateMemCpy(aggregate_data_dst_ptr, alignment, aggregate_data_src_ptr, alignment, this->prefix_size); + auto align_of_data = llvm::assumeAligned(this->alignOfData()); + b.CreateMemCpy(aggregate_data_dst_ptr, align_of_data, aggregate_data_src_ptr, align_of_data, this->prefix_size); } auto * aggregate_data_dst_ptr_with_prefix_size_offset = b.CreateConstGEP1_32(nullptr, aggregate_data_dst_ptr, this->prefix_size); diff --git a/src/AggregateFunctions/IAggregateFunction.h b/src/AggregateFunctions/IAggregateFunction.h index 188de6fb518..446850e83c3 100644 --- a/src/AggregateFunctions/IAggregateFunction.h +++ b/src/AggregateFunctions/IAggregateFunction.h 
@@ -218,6 +218,25 @@ public:
         const IColumn ** columns,
         Arena * arena) const = 0;

+    /** Insert result of aggregate function into places with batch size.
+      * All places must also be destroyed if there was an exception during insert.
+      * If destroy_place_after_insert == true, the client must not destroy the aggregate place if insert does not throw an exception.
+      */
+    virtual void insertResultIntoAndDestroyBatch(
+        size_t batch_size,
+        AggregateDataPtr * places,
+        size_t place_offset,
+        IColumn & to,
+        Arena * arena,
+        bool destroy_place_after_insert) const = 0;
+
+    /** Destroy batch of aggregate places.
+      */
+    virtual void destroyBatch(
+        size_t batch_size,
+        AggregateDataPtr * places,
+        size_t place_offset) const noexcept = 0;
+
     /** By default all NULLs are skipped during aggregation.
       * If it returns nullptr, the default one will be used.
       * If an aggregate function wants to use something instead of the default one, it overrides this function and returns its own null adapter.
@@ -475,6 +494,37 @@ public:
             static_cast<const Derived *>(this)->add(place + place_offset, columns, i, arena);
         }
     }
+
+    void insertResultIntoAndDestroyBatch(size_t batch_size, AggregateDataPtr * places, size_t place_offset, IColumn & to, Arena * arena, bool destroy_place_after_insert) const override
+    {
+        size_t batch_index = 0;
+
+        try
+        {
+            for (; batch_index < batch_size; ++batch_index)
+            {
+                static_cast<const Derived *>(this)->insertResultInto(places[batch_index] + place_offset, to, arena);
+
+                if (destroy_place_after_insert)
+                    static_cast<const Derived *>(this)->destroy(places[batch_index] + place_offset);
+            }
+        }
+        catch (...)
+        {
+            for (; batch_index < batch_size; ++batch_index)
+                static_cast<const Derived *>(this)->destroy(places[batch_index] + place_offset);
+
+            throw;
+        }
+    }
+
+    void destroyBatch(size_t batch_size, AggregateDataPtr * places, size_t place_offset) const noexcept override
+    {
+        for (size_t i = 0; i < batch_size; ++i)
+        {
+            static_cast<const Derived *>(this)->destroy(places[i] + place_offset);
+        }
+    }
 };
 diff --git a/src/Interpreters/Aggregator.cpp b/src/Interpreters/Aggregator.cpp
index c45b9c0c012..13ffd2e0f14 100644
--- a/src/Interpreters/Aggregator.cpp
+++ b/src/Interpreters/Aggregator.cpp
@@ -320,7 +320,8 @@ void Aggregator::compileAggregateFunctions()
         ++aggregate_instructions_size;
     }

-    if (functions_to_compile.empty() || functions_to_compile.size() != aggregate_instructions_size)
+    /// TODO: Probably better to compile more than 2 functions
+    if (functions_to_compile.empty())
         return;

     CompiledAggregateFunctions compiled_aggregate_functions;
@@ -523,10 +524,9 @@ AggregatedDataVariants::Type Aggregator::chooseAggregationMethod()
     return AggregatedDataVariants::Type::serialized;
 }

-
-void Aggregator::createAggregateStates(AggregateDataPtr & aggregate_data) const
+void Aggregator::createAggregateStates(size_t aggregate_function_start_index, AggregateDataPtr & aggregate_data) const
 {
-    for (size_t j = 0; j < params.aggregates_size; ++j)
+    for (size_t j = aggregate_function_start_index; j < params.aggregates_size; ++j)
     {
         try
         {
@@ -546,6 +546,10 @@ void Aggregator::createAggregateStates(AggregateDataPtr & aggregate_data) const
     }
 }

+void Aggregator::createAggregateStates(AggregateDataPtr & aggregate_data) const
+{
+    createAggregateStates(0, aggregate_data);
+}

 /** It's interesting - if you remove `noinline`, then gcc for some reason will inline this function, and the performance decreases (~ 10%).
   * (Probably because after the inline of this function, more internal functions no longer be inlined.
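// Editor's note: a hedged standalone sketch of the insertResultIntoAndDestroyBatch
// contract introduced above. State, insertResultInto and destroy are simplified
// stand-ins, not the real interfaces, and this version always destroys after a
// successful insert (i.e. destroy_place_after_insert == true). On an exception,
// the failing state and every remaining one are still destroyed before the
// rethrow, so no aggregate state leaks:
//
//     #include <cstddef>
//     #include <vector>
//
//     struct State { /* opaque aggregate state */ };
//
//     void insertResultInto(State * state, std::vector<double> & to); /// may throw
//     void destroy(State * state) noexcept;
//
//     void insertAndDestroyAll(std::vector<State *> & states, std::vector<double> & to)
//     {
//         std::size_t i = 0;
//         try
//         {
//             for (; i < states.size(); ++i)
//             {
//                 insertResultInto(states[i], to); /// may throw mid-batch
//                 destroy(states[i]);              /// reached only on success
//             }
//         }
//         catch (...)
//         {
//             for (; i < states.size(); ++i)       /// destroy the failing state and the rest
//                 destroy(states[i]);
//             throw;
//         }
//     }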
@@ -593,13 +597,16 @@ void NO_INLINE Aggregator::handleAggregationJIT( auto add_into_aggregate_states_function = compiled_functions->add_into_aggregate_states_function; auto create_aggregate_states_function = compiled_functions->create_aggregate_states_function; - auto get_aggregate_data = [&](size_t row) -> AggregateDataPtr + std::unique_ptr places(new AggregateDataPtr[rows]); + + /// For all rows. + for (size_t i = 0; i < rows; ++i) { - AggregateDataPtr aggregate_data; + AggregateDataPtr aggregate_data = nullptr; if constexpr (!no_more_keys) { - auto emplace_result = state.emplaceKey(method.data, row, *aggregates_pool); + auto emplace_result = state.emplaceKey(method.data, i, *aggregates_pool); /// If a new key is inserted, initialize the states of the aggregate functions, and possibly something related to the key. if (emplace_result.isInserted()) @@ -608,8 +615,11 @@ void NO_INLINE Aggregator::handleAggregationJIT( emplace_result.setMapped(nullptr); aggregate_data = aggregates_pool->alignedAlloc(total_size_of_aggregate_states, align_aggregate_states); + create_aggregate_states_function(aggregate_data); + createAggregateStates(compiled_functions->functions_count, aggregate_data); + emplace_result.setMapped(aggregate_data); } else @@ -620,20 +630,25 @@ void NO_INLINE Aggregator::handleAggregationJIT( else { /// Add only if the key already exists. - /// Overflow row is disabled for JIT. - auto find_result = state.findKey(method.data, row, *aggregates_pool); - assert(find_result.getMapped() != nullptr); - - aggregate_data = find_result.getMapped(); + auto find_result = state.findKey(method.data, i, *aggregates_pool); + if (find_result.isFound()) + aggregate_data = find_result.getMapped(); } - return aggregate_data; - }; + places[i] = aggregate_data; + } - GetAggregateDataFunction get_aggregate_data_function = FunctorToStaticMethodAdaptor::unsafeCall; - GetAggregateDataContext get_aggregate_data_context = reinterpret_cast(&get_aggregate_data); + add_into_aggregate_states_function(rows, columns_data.data(), places.get()); - add_into_aggregate_states_function(rows, columns_data.data(), get_aggregate_data_function, get_aggregate_data_context); + /// Add values to the aggregate functions. + AggregateFunctionInstruction * inst = aggregate_instructions + compiled_functions->functions_count; + for (; inst->that; ++inst) + { + if (inst->offsets) + inst->batch_that->addBatchArray(rows, places.get(), inst->state_offset, inst->batch_arguments, inst->offsets, aggregates_pool); + else + inst->batch_that->addBatch(rows, places.get(), inst->state_offset, inst->batch_arguments, aggregates_pool); + } } #endif @@ -1367,19 +1382,65 @@ void NO_INLINE Aggregator::convertToBlockImplFinal( method.insertKeyIntoColumns(key, key_columns, key_sizes_ref); places[place_index] = mapped; ++place_index; + + /// Mark the cell as destroyed so it will not be destroyed in destructor. + mapped = nullptr; }); - std::vector columns_data; - columns_data.reserve(final_aggregate_columns.size()); + std::exception_ptr exception; + size_t aggregate_functions_destroy_index = 0; - for (auto & final_aggregate_column : final_aggregate_columns) + try { - final_aggregate_column = final_aggregate_column->cloneResized(data.size()); - columns_data.emplace_back(getColumnData(final_aggregate_column.get())); + /** For JIT compiled functions we need to resize columns before pass them into compiled code. + * insert_aggregates_into_columns_function function does not throw exception. 
+ */ + std::vector columns_data; + columns_data.reserve(final_aggregate_columns.size()); + + for (size_t i = 0; i < compiled_functions->functions_count; ++i) + { + auto & final_aggregate_column = final_aggregate_columns[i]; + final_aggregate_column = final_aggregate_column->cloneResized(data.size()); + columns_data.emplace_back(getColumnData(final_aggregate_column.get())); + } + + auto insert_aggregate_states_function = compiled_functions->insert_aggregates_into_columns_function; + insert_aggregate_states_function(data.size(), columns_data.data(), places.get()); + + aggregate_functions_destroy_index = compiled_functions->functions_count; + + for (; aggregate_functions_destroy_index < params.aggregates_size;) + { + auto & final_aggregate_column = final_aggregate_columns[aggregate_functions_destroy_index]; + size_t offset = offsets_of_aggregate_states[aggregate_functions_destroy_index]; + + /** We increase aggregate_functions_destroy_index because by function contract if insertResultIntoAndDestroyBatch + * throws exception, it also must destroy all necessary states. + * Then code need to continue to destroy other aggregate function states with next function index. + */ + size_t destroy_index = aggregate_functions_destroy_index; + ++aggregate_functions_destroy_index; + + bool is_state = aggregate_functions[destroy_index]->isState(); + bool destroy_place_after_insert = !is_state; + + aggregate_functions[destroy_index]->insertResultIntoAndDestroyBatch(data.size(), places.get(), offset, *final_aggregate_column, arena, destroy_place_after_insert); + } + } + catch (...) + { + exception = std::current_exception(); } - auto insert_aggregate_states_function = compiled_functions->insert_aggregates_into_columns_function; - insert_aggregate_states_function(data.size(), columns_data.data(), places.get()); + for (; aggregate_functions_destroy_index < params.aggregates_size; ++aggregate_functions_destroy_index) + { + size_t offset = offsets_of_aggregate_states[aggregate_functions_destroy_index]; + aggregate_functions[aggregate_functions_destroy_index]->destroyBatch(data.size(), places.get(), offset); + } + + if (exception) + std::rethrow_exception(exception); } else #endif @@ -1828,6 +1889,12 @@ void NO_INLINE Aggregator::mergeDataImpl( if (!inserted) { merge_aggregate_states_function_typed(dst, src); + + for (size_t i = compiled_functions->functions_count; i < params.aggregates_size; ++i) + aggregate_functions[i]->merge(dst + offsets_of_aggregate_states[i], src + offsets_of_aggregate_states[i], arena); + + for (size_t i = compiled_functions->functions_count; i < params.aggregates_size; ++i) + aggregate_functions[i]->destroy(src + offsets_of_aggregate_states[i]); } else { diff --git a/src/Interpreters/Aggregator.h b/src/Interpreters/Aggregator.h index 3a67f9fd9a1..17d0b5b752a 100644 --- a/src/Interpreters/Aggregator.h +++ b/src/Interpreters/Aggregator.h @@ -1096,6 +1096,8 @@ private: /** Create states of aggregate functions for one key. */ + void createAggregateStates(size_t aggregate_function_start_index, AggregateDataPtr & aggregate_data) const; + void createAggregateStates(AggregateDataPtr & aggregate_data) const; /** Call `destroy` methods for states of aggregate functions. 
@@ -1133,24 +1135,6 @@ private: size_t rows, AggregateFunctionInstruction * aggregate_instructions) const; - // template - // void handleAggregationJITV2( - // Method & method, - // typename Method::State & state, - // Arena * aggregates_pool, - // size_t rows, - // AggregateFunctionInstruction * aggregate_instructions, - // AggregateDataPtr overflow_row) const; - - // template - // void handleAggregationJITV3( - // Method & method, - // typename Method::State & state, - // Arena * aggregates_pool, - // size_t rows, - // AggregateFunctionInstruction * aggregate_instructions, - // AggregateDataPtr overflow_row) const; - template void handleAggregationDefault( Method & method, diff --git a/src/Interpreters/JIT/compileFunction.cpp b/src/Interpreters/JIT/compileFunction.cpp index 8c033c8a5e6..25198bebca6 100644 --- a/src/Interpreters/JIT/compileFunction.cpp +++ b/src/Interpreters/JIT/compileFunction.cpp @@ -308,18 +308,16 @@ static void compileAddIntoAggregateStatesFunctions(llvm::Module & module, const llvm::IRBuilder<> b(context); auto * size_type = b.getIntNTy(sizeof(size_t) * 8); - + auto * places_type = b.getInt8Ty()->getPointerTo()->getPointerTo(); auto * column_data_type = llvm::StructType::get(b.getInt8PtrTy(), b.getInt8PtrTy()); - auto * get_place_func_declaration = llvm::FunctionType::get(b.getInt8Ty()->getPointerTo(), { b.getInt8Ty()->getPointerTo(), size_type }, /*isVarArg=*/false); - auto * aggregate_loop_func_declaration = llvm::FunctionType::get(b.getVoidTy(), { size_type, column_data_type->getPointerTo(), get_place_func_declaration->getPointerTo(), b.getInt8Ty()->getPointerTo() }, false); + auto * aggregate_loop_func_declaration = llvm::FunctionType::get(b.getVoidTy(), { size_type, column_data_type->getPointerTo(), places_type }, false); auto * aggregate_loop_func_definition = llvm::Function::Create(aggregate_loop_func_declaration, llvm::Function::ExternalLinkage, name, module); auto * arguments = aggregate_loop_func_definition->args().begin(); llvm::Value * rows_count_arg = arguments++; llvm::Value * columns_arg = arguments++; - llvm::Value * get_place_function_arg = arguments++; - llvm::Value * get_place_function_context_arg = arguments++; + llvm::Value * places_arg = arguments++; /// Initialize ColumnDataPlaceholder llvm representation of ColumnData @@ -361,6 +359,9 @@ static void compileAddIntoAggregateStatesFunctions(llvm::Module & module, const auto * counter_phi = b.CreatePHI(rows_count_arg->getType(), 2); counter_phi->addIncoming(llvm::ConstantInt::get(size_type, 0), entry); + auto * places_phi = b.CreatePHI(places_arg->getType(), 2); + places_phi->addIncoming(places_arg, entry); + for (auto & col : columns) { col.data = b.CreatePHI(col.data_init->getType(), 2); @@ -373,7 +374,7 @@ static void compileAddIntoAggregateStatesFunctions(llvm::Module & module, const } } - auto * aggregation_place = b.CreateCall(get_place_func_declaration, get_place_function_arg, { get_place_function_context_arg, counter_phi }); + auto * aggregation_place = b.CreateLoad(b.getInt8Ty()->getPointerTo(), places_phi); previous_columns_size = 0; for (const auto & function : functions) @@ -424,6 +425,8 @@ static void compileAddIntoAggregateStatesFunctions(llvm::Module & module, const col.null->addIncoming(b.CreateConstInBoundsGEP1_32(nullptr, col.null, 1), cur_block); } + places_phi->addIncoming(b.CreateConstInBoundsGEP1_32(nullptr, places_phi, 1), cur_block); + auto * value = b.CreateAdd(counter_phi, llvm::ConstantInt::get(size_type, 1)); counter_phi->addIncoming(value, cur_block); @@ -591,7 +594,10 
@@ CompiledAggregateFunctions compileAggregateFunctons(CHJIT & jit, const std::vect .create_aggregate_states_function = create_aggregate_states_function, .add_into_aggregate_states_function = add_into_aggregate_states_function, .merge_aggregate_states_function = merge_aggregate_states_function, - .insert_aggregates_into_columns_function = insert_aggregate_states_function + .insert_aggregates_into_columns_function = insert_aggregate_states_function, + + .functions_count = functions.size(), + .compiled_module = std::move(compiled_module) }; return compiled_aggregate_functions; diff --git a/src/Interpreters/JIT/compileFunction.h b/src/Interpreters/JIT/compileFunction.h index 788b614e551..0520029990e 100644 --- a/src/Interpreters/JIT/compileFunction.h +++ b/src/Interpreters/JIT/compileFunction.h @@ -61,7 +61,7 @@ using GetAggregateDataContext = char *; using GetAggregateDataFunction = AggregateDataPtr (*)(GetAggregateDataContext, size_t); using JITCreateAggregateStatesFunction = void (*)(AggregateDataPtr); -using JITAddIntoAggregateStatesFunction = void (*)(ColumnDataRowsSize, ColumnData *, GetAggregateDataFunction, GetAggregateDataContext); +using JITAddIntoAggregateStatesFunction = void (*)(ColumnDataRowsSize, ColumnData *, AggregateDataPtr *); using JITMergeAggregateStatesFunction = void (*)(AggregateDataPtr, AggregateDataPtr); using JITInsertAggregatesIntoColumnsFunction = void (*)(ColumnDataRowsSize, ColumnData *, AggregateDataPtr *); @@ -72,6 +72,7 @@ struct CompiledAggregateFunctions JITMergeAggregateStatesFunction merge_aggregate_states_function; JITInsertAggregatesIntoColumnsFunction insert_aggregates_into_columns_function; + size_t functions_count; CHJIT::CompiledModule compiled_module; }; From d24d3ae992abe2afe1108397194d41a5fa3a6a9f Mon Sep 17 00:00:00 2001 From: Maksim Kita Date: Wed, 9 Jun 2021 10:40:39 +0300 Subject: [PATCH 641/931] Added second variant of compilation --- src/Core/Settings.h | 1 + src/DataStreams/TTLAggregationAlgorithm.cpp | 2 +- src/Interpreters/Aggregator.cpp | 91 +++++++++++- src/Interpreters/Aggregator.h | 15 +- src/Interpreters/InterpreterSelectQuery.cpp | 6 +- src/Interpreters/JIT/compileFunction.cpp | 136 ++++++++++++++++++ src/Interpreters/JIT/compileFunction.h | 4 +- .../MergeTree/MergeTreeDataSelectExecutor.cpp | 4 +- 8 files changed, 250 insertions(+), 9 deletions(-) diff --git a/src/Core/Settings.h b/src/Core/Settings.h index c260aa41230..feebaad3ce7 100644 --- a/src/Core/Settings.h +++ b/src/Core/Settings.h @@ -108,6 +108,7 @@ class IColumn; M(UInt64, min_count_to_compile_expression, 3, "The number of identical expressions before they are JIT-compiled", 0) \ M(Bool, compile_aggregate_expressions, true, "Compile aggregate functions to native code.", 0) \ M(UInt64, min_count_to_compile_aggregate_expression, 0, "The number of identical aggreagte expressions before they are JIT-compiled", 0) \ + M(UInt64, aggregation_method, 0, "Aggregation method", 0) \ M(UInt64, group_by_two_level_threshold, 100000, "From what number of keys, a two-level aggregation starts. 0 - the threshold is not set.", 0) \ M(UInt64, group_by_two_level_threshold_bytes, 50000000, "From what size of the aggregation state in bytes, a two-level aggregation begins to be used. 0 - the threshold is not set. 
Two-level aggregation is used when at least one of the thresholds is triggered.", 0) \ M(Bool, distributed_aggregation_memory_efficient, true, "Is the memory-saving mode of distributed aggregation enabled.", 0) \ diff --git a/src/DataStreams/TTLAggregationAlgorithm.cpp b/src/DataStreams/TTLAggregationAlgorithm.cpp index 66792dcfdb2..ceebae1ab1c 100644 --- a/src/DataStreams/TTLAggregationAlgorithm.cpp +++ b/src/DataStreams/TTLAggregationAlgorithm.cpp @@ -34,7 +34,7 @@ TTLAggregationAlgorithm::TTLAggregationAlgorithm( false, settings.max_rows_to_group_by, settings.group_by_overflow_mode, 0, 0, settings.max_bytes_before_external_group_by, settings.empty_result_for_aggregation_by_empty_set, storage_.getContext()->getTemporaryVolume(), settings.max_threads, settings.min_free_disk_space_for_temporary_data, - settings.compile_aggregate_expressions, settings.min_count_to_compile_aggregate_expression); + settings.compile_aggregate_expressions, settings.min_count_to_compile_aggregate_expression, settings.aggregation_method); aggregator = std::make_unique(params); } diff --git a/src/Interpreters/Aggregator.cpp b/src/Interpreters/Aggregator.cpp index 13ffd2e0f14..30f3b96dc97 100644 --- a/src/Interpreters/Aggregator.cpp +++ b/src/Interpreters/Aggregator.cpp @@ -321,7 +321,7 @@ void Aggregator::compileAggregateFunctions() } /// TODO: Probably better to compile more than 2 functions - if (functions_to_compile.empty()) + if (functions_to_compile.empty() || functions_to_compile.size() != aggregate_functions.size()) return; CompiledAggregateFunctions compiled_aggregate_functions; @@ -597,6 +597,90 @@ void NO_INLINE Aggregator::handleAggregationJIT( auto add_into_aggregate_states_function = compiled_functions->add_into_aggregate_states_function; auto create_aggregate_states_function = compiled_functions->create_aggregate_states_function; + std::unique_ptr places; + + bool not_all_functions_compiled = compiled_functions->functions_count != offsets_of_aggregate_states.size(); + if (not_all_functions_compiled) + places.reset(new AggregateDataPtr[rows]); + + auto get_aggregate_data = [&](size_t row) -> AggregateDataPtr + { + AggregateDataPtr aggregate_data; + + if constexpr (!no_more_keys) + { + auto emplace_result = state.emplaceKey(method.data, row, *aggregates_pool); + + /// If a new key is inserted, initialize the states of the aggregate functions, and possibly something related to the key. + if (emplace_result.isInserted()) + { + /// exception-safety - if you can not allocate memory or create states, then destructors will not be called. + emplace_result.setMapped(nullptr); + + aggregate_data = aggregates_pool->alignedAlloc(total_size_of_aggregate_states, align_aggregate_states); + create_aggregate_states_function(aggregate_data); + + emplace_result.setMapped(aggregate_data); + } + else + aggregate_data = emplace_result.getMapped(); + + assert(aggregate_data != nullptr); + } + else + { + /// Add only if the key already exists. + /// Overflow row is disabled for JIT. 
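// Editor's note: a hedged sketch of the exception-safe "create on first sight"
// pattern used in the emplace branch above. The map, allocator and helpers are
// illustrative stand-ins for the real HashMap/Arena types. The mapped value is
// set to nullptr before any allocation, so a throwing alloc or state constructor
// leaves a well-defined empty cell (later lookups must treat nullptr as absent)
// rather than a dangling pointer:
//
//     #include <cstddef>
//     #include <cstdint>
//     #include <unordered_map>
//
//     char * allocState(std::size_t size);      /// may throw
//     void createAggregateStates(char * state); /// may throw
//
//     char * getOrCreateState(std::unordered_map<std::uint64_t, char *> & map, std::uint64_t key, std::size_t state_size)
//     {
//         auto [it, inserted] = map.try_emplace(key, nullptr); /// nullptr marks "creation pending"
//         if (inserted)
//         {
//             char * state = allocState(state_size); /// may throw; the cell stays nullptr
//             createAggregateStates(state);          /// may throw as well
//             it->second = state;                    /// publish only on success
//         }
//         return it->second;
//     }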
+ auto find_result = state.findKey(method.data, row, *aggregates_pool); + assert(find_result.getMapped() != nullptr); + + aggregate_data = find_result.getMapped(); + } + + if (not_all_functions_compiled) + places[row] = aggregate_data; + + return aggregate_data; + }; + + GetAggregateDataFunction get_aggregate_data_function = FunctorToStaticMethodAdaptor::unsafeCall; + GetAggregateDataContext get_aggregate_data_context = reinterpret_cast(&get_aggregate_data); + + add_into_aggregate_states_function(rows, columns_data.data(), get_aggregate_data_function, get_aggregate_data_context); + + /// Add values to the aggregate functions. + AggregateFunctionInstruction * inst = aggregate_instructions + compiled_functions->functions_count; + for (; inst->that; ++inst) + { + if (inst->offsets) + inst->batch_that->addBatchArray(rows, places.get(), inst->state_offset, inst->batch_arguments, inst->offsets, aggregates_pool); + else + inst->batch_that->addBatch(rows, places.get(), inst->state_offset, inst->batch_arguments, aggregates_pool); + } +} + +template +void NO_INLINE Aggregator::handleAggregationJITV2( + Method & method, + typename Method::State & state, + Arena * aggregates_pool, + size_t rows, + AggregateFunctionInstruction * aggregate_instructions) const +{ + std::vector columns_data; + columns_data.reserve(aggregate_functions.size()); + + /// Add values to the aggregate functions. + for (AggregateFunctionInstruction * inst = aggregate_instructions; inst->that; ++inst) + { + size_t arguments_size = inst->that->getArgumentTypes().size(); + for (size_t i = 0; i < arguments_size; ++i) + columns_data.emplace_back(getColumnData(inst->batch_arguments[i])); + } + + auto add_into_aggregate_states_function = compiled_functions->add_into_aggregate_states_function_v2; + auto create_aggregate_states_function = compiled_functions->create_aggregate_states_function; + std::unique_ptr places(new AggregateDataPtr[rows]); /// For all rows. @@ -772,7 +856,10 @@ void NO_INLINE Aggregator::executeImplBatch( #if USE_EMBEDDED_COMPILER if (compiled_functions) { - handleAggregationJIT(method, state, aggregates_pool, rows, aggregate_instructions); + if (params.aggregation_method == 0) + handleAggregationJIT(method, state, aggregates_pool, rows, aggregate_instructions); + else + handleAggregationJITV2(method, state, aggregates_pool, rows, aggregate_instructions); } else #endif diff --git a/src/Interpreters/Aggregator.h b/src/Interpreters/Aggregator.h index 17d0b5b752a..dd9a11cf4ae 100644 --- a/src/Interpreters/Aggregator.h +++ b/src/Interpreters/Aggregator.h @@ -911,6 +911,7 @@ public: bool compile_aggregate_expressions; size_t min_count_to_compile_aggregate_expression; + size_t aggregation_method; Params( const Block & src_header_, @@ -923,6 +924,7 @@ public: size_t min_free_disk_space_, bool compile_aggregate_expressions_, size_t min_count_to_compile_aggregate_expression_, + size_t aggregation_method_, const Block & intermediate_header_ = {}) : src_header(src_header_), intermediate_header(intermediate_header_), @@ -934,14 +936,15 @@ public: tmp_volume(tmp_volume_), max_threads(max_threads_), min_free_disk_space(min_free_disk_space_), compile_aggregate_expressions(compile_aggregate_expressions_), - min_count_to_compile_aggregate_expression(min_count_to_compile_aggregate_expression_) + min_count_to_compile_aggregate_expression(min_count_to_compile_aggregate_expression_), + aggregation_method(aggregation_method_) { } /// Only parameters that matter during merge. 
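    /// The delegating constructor below now passes a trailing 0 for the new
    /// aggregation_method parameter: 0 keeps the callback-based JIT loop
    /// (handleAggregationJIT), any other value selects the places-array variant
    /// (handleAggregationJITV2) inside executeImplBatch.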
Params(const Block & intermediate_header_, const ColumnNumbers & keys_, const AggregateDescriptions & aggregates_, bool overflow_row_, size_t max_threads_) - : Params(Block(), keys_, aggregates_, overflow_row_, 0, OverflowMode::THROW, 0, 0, 0, false, nullptr, max_threads_, 0, false, 0) + : Params(Block(), keys_, aggregates_, overflow_row_, 0, OverflowMode::THROW, 0, 0, 0, false, nullptr, max_threads_, 0, false, 0, 0) { intermediate_header = intermediate_header_; } @@ -1135,6 +1138,14 @@ private: size_t rows, AggregateFunctionInstruction * aggregate_instructions) const; + template + void handleAggregationJITV2( + Method & method, + typename Method::State & state, + Arena * aggregates_pool, + size_t rows, + AggregateFunctionInstruction * aggregate_instructions) const; + template void handleAggregationDefault( Method & method, diff --git a/src/Interpreters/InterpreterSelectQuery.cpp b/src/Interpreters/InterpreterSelectQuery.cpp index 900820fb209..aae69426391 100644 --- a/src/Interpreters/InterpreterSelectQuery.cpp +++ b/src/Interpreters/InterpreterSelectQuery.cpp @@ -2040,7 +2040,8 @@ void InterpreterSelectQuery::executeAggregation(QueryPlan & query_plan, const Ac settings.max_threads, settings.min_free_disk_space_for_temporary_data, settings.compile_aggregate_expressions, - settings.min_count_to_compile_aggregate_expression); + settings.min_count_to_compile_aggregate_expression, + settings.aggregation_method); SortDescription group_by_sort_description; @@ -2144,7 +2145,8 @@ void InterpreterSelectQuery::executeRollupOrCube(QueryPlan & query_plan, Modific settings.max_threads, settings.min_free_disk_space_for_temporary_data, settings.compile_aggregate_expressions, - settings.min_count_to_compile_aggregate_expression); + settings.min_count_to_compile_aggregate_expression, + settings.aggregation_method); auto transform_params = std::make_shared(params, true); diff --git a/src/Interpreters/JIT/compileFunction.cpp b/src/Interpreters/JIT/compileFunction.cpp index 25198bebca6..3e326e82246 100644 --- a/src/Interpreters/JIT/compileFunction.cpp +++ b/src/Interpreters/JIT/compileFunction.cpp @@ -307,6 +307,137 @@ static void compileAddIntoAggregateStatesFunctions(llvm::Module & module, const auto & context = module.getContext(); llvm::IRBuilder<> b(context); + auto * size_type = b.getIntNTy(sizeof(size_t) * 8); + + auto * column_data_type = llvm::StructType::get(b.getInt8PtrTy(), b.getInt8PtrTy()); + auto * get_place_func_declaration = llvm::FunctionType::get(b.getInt8Ty()->getPointerTo(), { b.getInt8Ty()->getPointerTo(), size_type }, /*isVarArg=*/false); + auto * aggregate_loop_func_declaration = llvm::FunctionType::get(b.getVoidTy(), { size_type, column_data_type->getPointerTo(), get_place_func_declaration->getPointerTo(), b.getInt8Ty()->getPointerTo() }, false); + + auto * aggregate_loop_func_definition = llvm::Function::Create(aggregate_loop_func_declaration, llvm::Function::ExternalLinkage, name, module); + + auto * arguments = aggregate_loop_func_definition->args().begin(); + llvm::Value * rows_count_arg = arguments++; + llvm::Value * columns_arg = arguments++; + llvm::Value * get_place_function_arg = arguments++; + llvm::Value * get_place_function_context_arg = arguments++; + + /// Initialize ColumnDataPlaceholder llvm representation of ColumnData + + auto * entry = llvm::BasicBlock::Create(b.getContext(), "entry", aggregate_loop_func_definition); + b.SetInsertPoint(entry); + + std::vector columns; + size_t previous_columns_size = 0; + + for (const auto & function : functions) + { + auto 
argument_types = function.function->getArgumentTypes(); + + ColumnDataPlaceholder data_placeholder; + + size_t function_arguments_size = argument_types.size(); + + for (size_t column_argument_index = 0; column_argument_index < function_arguments_size; ++column_argument_index) + { + const auto & argument_type = argument_types[column_argument_index]; + auto * data = b.CreateLoad(column_data_type, b.CreateConstInBoundsGEP1_32(column_data_type, columns_arg, previous_columns_size + column_argument_index)); + data_placeholder.data_init = b.CreatePointerCast(b.CreateExtractValue(data, {0}), toNativeType(b, removeNullable(argument_type))->getPointerTo()); + data_placeholder.null_init = argument_type->isNullable() ? b.CreateExtractValue(data, {1}) : nullptr; + columns.emplace_back(data_placeholder); + } + + previous_columns_size += function_arguments_size; + } + + /// Initialize loop + + auto * end = llvm::BasicBlock::Create(b.getContext(), "end", aggregate_loop_func_definition); + auto * loop = llvm::BasicBlock::Create(b.getContext(), "loop", aggregate_loop_func_definition); + + b.CreateCondBr(b.CreateICmpEQ(rows_count_arg, llvm::ConstantInt::get(size_type, 0)), end, loop); + + b.SetInsertPoint(loop); + + auto * counter_phi = b.CreatePHI(rows_count_arg->getType(), 2); + counter_phi->addIncoming(llvm::ConstantInt::get(size_type, 0), entry); + + for (auto & col : columns) + { + col.data = b.CreatePHI(col.data_init->getType(), 2); + col.data->addIncoming(col.data_init, entry); + + if (col.null_init) + { + col.null = b.CreatePHI(col.null_init->getType(), 2); + col.null->addIncoming(col.null_init, entry); + } + } + + auto * aggregation_place = b.CreateCall(get_place_func_declaration, get_place_function_arg, { get_place_function_context_arg, counter_phi }); + + previous_columns_size = 0; + for (const auto & function : functions) + { + size_t aggregate_function_offset = function.aggregate_data_offset; + const auto * aggregate_function_ptr = function.function; + + auto arguments_types = function.function->getArgumentTypes(); + std::vector arguments_values; + + size_t function_arguments_size = arguments_types.size(); + arguments_values.resize(function_arguments_size); + + for (size_t column_argument_index = 0; column_argument_index < function_arguments_size; ++column_argument_index) + { + auto * column_argument_data = columns[previous_columns_size + column_argument_index].data; + auto * column_argument_null_data = columns[previous_columns_size + column_argument_index].null; + + auto & argument_type = arguments_types[column_argument_index]; + + auto * value = b.CreateLoad(toNativeType(b, removeNullable(argument_type)), column_argument_data); + if (!argument_type->isNullable()) + { + arguments_values[column_argument_index] = value; + continue; + } + + auto * is_null = b.CreateICmpNE(b.CreateLoad(b.getInt8Ty(), column_argument_null_data), b.getInt8(0)); + auto * nullable_unitilized = llvm::Constant::getNullValue(toNativeType(b, argument_type)); + auto * nullable_value = b.CreateInsertValue(b.CreateInsertValue(nullable_unitilized, value, {0}), is_null, {1}); + arguments_values[column_argument_index] = nullable_value; + } + + auto * aggregation_place_with_offset = b.CreateConstInBoundsGEP1_32(nullptr, aggregation_place, aggregate_function_offset); + aggregate_function_ptr->compileAdd(b, aggregation_place_with_offset, arguments_types, arguments_values); + + previous_columns_size += function_arguments_size; + } + + /// End of loop + + auto * cur_block = b.GetInsertBlock(); + for (auto & col : columns) + { + 
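        /// col.data (and col.null, when present) are PHI nodes: the incoming
        /// value registered on this back edge is the pointer advanced by one
        /// element, so each iteration of the generated loop reads the next row
        /// of every argument column.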
col.data->addIncoming(b.CreateConstInBoundsGEP1_32(nullptr, col.data, 1), cur_block); + + if (col.null) + col.null->addIncoming(b.CreateConstInBoundsGEP1_32(nullptr, col.null, 1), cur_block); + } + + auto * value = b.CreateAdd(counter_phi, llvm::ConstantInt::get(size_type, 1)); + counter_phi->addIncoming(value, cur_block); + + b.CreateCondBr(b.CreateICmpEQ(value, rows_count_arg), end, loop); + + b.SetInsertPoint(end); + b.CreateRetVoid(); +} + +static void compileAddIntoAggregateStatesFunctionsV2(llvm::Module & module, const std::vector & functions, const std::string & name) +{ + auto & context = module.getContext(); + llvm::IRBuilder<> b(context); + auto * size_type = b.getIntNTy(sizeof(size_t) * 8); auto * places_type = b.getInt8Ty()->getPointerTo()->getPointerTo(); auto * column_data_type = llvm::StructType::get(b.getInt8PtrTy(), b.getInt8PtrTy()); @@ -568,6 +699,7 @@ CompiledAggregateFunctions compileAggregateFunctons(CHJIT & jit, const std::vect { std::string create_aggregate_states_functions_name = functions_dump_name + "_create"; std::string add_aggregate_states_functions_name = functions_dump_name + "_add"; + std::string add_aggregate_states_functions_name_v2 = functions_dump_name + "_add_v2"; std::string merge_aggregate_states_functions_name = functions_dump_name + "_merge"; std::string insert_aggregate_states_functions_name = functions_dump_name + "_insert"; @@ -575,17 +707,20 @@ CompiledAggregateFunctions compileAggregateFunctons(CHJIT & jit, const std::vect { compileCreateAggregateStatesFunctions(module, functions, create_aggregate_states_functions_name); compileAddIntoAggregateStatesFunctions(module, functions, add_aggregate_states_functions_name); + compileAddIntoAggregateStatesFunctionsV2(module, functions, add_aggregate_states_functions_name_v2); compileMergeAggregatesStates(module, functions, merge_aggregate_states_functions_name); compileInsertAggregatesIntoResultColumns(module, functions, insert_aggregate_states_functions_name); }); auto create_aggregate_states_function = reinterpret_cast(compiled_module.function_name_to_symbol[create_aggregate_states_functions_name]); auto add_into_aggregate_states_function = reinterpret_cast(compiled_module.function_name_to_symbol[add_aggregate_states_functions_name]); + auto add_into_aggregate_states_function_v2 = reinterpret_cast(compiled_module.function_name_to_symbol[add_aggregate_states_functions_name_v2]); auto merge_aggregate_states_function = reinterpret_cast(compiled_module.function_name_to_symbol[merge_aggregate_states_functions_name]); auto insert_aggregate_states_function = reinterpret_cast(compiled_module.function_name_to_symbol[insert_aggregate_states_functions_name]); assert(create_aggregate_states_function); assert(add_into_aggregate_states_function); + assert(add_into_aggregate_states_function_v2); assert(merge_aggregate_states_function); assert(insert_aggregate_states_function); @@ -593,6 +728,7 @@ CompiledAggregateFunctions compileAggregateFunctons(CHJIT & jit, const std::vect { .create_aggregate_states_function = create_aggregate_states_function, .add_into_aggregate_states_function = add_into_aggregate_states_function, + .add_into_aggregate_states_function_v2 = add_into_aggregate_states_function_v2, .merge_aggregate_states_function = merge_aggregate_states_function, .insert_aggregates_into_columns_function = insert_aggregate_states_function, diff --git a/src/Interpreters/JIT/compileFunction.h b/src/Interpreters/JIT/compileFunction.h index 0520029990e..3a0435b098c 100644 --- a/src/Interpreters/JIT/compileFunction.h 
+++ b/src/Interpreters/JIT/compileFunction.h @@ -61,7 +61,8 @@ using GetAggregateDataContext = char *; using GetAggregateDataFunction = AggregateDataPtr (*)(GetAggregateDataContext, size_t); using JITCreateAggregateStatesFunction = void (*)(AggregateDataPtr); -using JITAddIntoAggregateStatesFunction = void (*)(ColumnDataRowsSize, ColumnData *, AggregateDataPtr *); +using JITAddIntoAggregateStatesFunction = void (*)(ColumnDataRowsSize, ColumnData *, GetAggregateDataFunction, GetAggregateDataContext); +using JITAddIntoAggregateStatesFunctionV2 = void (*)(ColumnDataRowsSize, ColumnData *, AggregateDataPtr *); using JITMergeAggregateStatesFunction = void (*)(AggregateDataPtr, AggregateDataPtr); using JITInsertAggregatesIntoColumnsFunction = void (*)(ColumnDataRowsSize, ColumnData *, AggregateDataPtr *); @@ -69,6 +70,7 @@ struct CompiledAggregateFunctions { JITCreateAggregateStatesFunction create_aggregate_states_function; JITAddIntoAggregateStatesFunction add_into_aggregate_states_function; + JITAddIntoAggregateStatesFunctionV2 add_into_aggregate_states_function_v2; JITMergeAggregateStatesFunction merge_aggregate_states_function; JITInsertAggregatesIntoColumnsFunction insert_aggregates_into_columns_function; diff --git a/src/Storages/MergeTree/MergeTreeDataSelectExecutor.cpp b/src/Storages/MergeTree/MergeTreeDataSelectExecutor.cpp index cffedf44823..1d781dbc08e 100644 --- a/src/Storages/MergeTree/MergeTreeDataSelectExecutor.cpp +++ b/src/Storages/MergeTree/MergeTreeDataSelectExecutor.cpp @@ -303,6 +303,7 @@ QueryPlanPtr MergeTreeDataSelectExecutor::read( settings.min_free_disk_space_for_temporary_data, settings.compile_expressions, settings.min_count_to_compile_aggregate_expression, + settings.aggregation_method, header_before_aggregation); // The source header is also an intermediate header transform_params = std::make_shared(std::move(params), query_info.projection->aggregate_final); @@ -333,7 +334,8 @@ QueryPlanPtr MergeTreeDataSelectExecutor::read( settings.max_threads, settings.min_free_disk_space_for_temporary_data, settings.compile_aggregate_expressions, - settings.min_count_to_compile_aggregate_expression); + settings.min_count_to_compile_aggregate_expression, + settings.aggregation_method); transform_params = std::make_shared(std::move(params), query_info.projection->aggregate_final); } From 0fb8ea530f6de8b50beec6c9f04b1ff5203f7c18 Mon Sep 17 00:00:00 2001 From: Maksim Kita Date: Thu, 10 Jun 2021 09:08:38 +0300 Subject: [PATCH 642/931] Avoid using exception unsafe method --- src/Interpreters/Aggregator.cpp | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/Interpreters/Aggregator.cpp b/src/Interpreters/Aggregator.cpp index 30f3b96dc97..3f706bf1ea6 100644 --- a/src/Interpreters/Aggregator.cpp +++ b/src/Interpreters/Aggregator.cpp @@ -856,9 +856,9 @@ void NO_INLINE Aggregator::executeImplBatch( #if USE_EMBEDDED_COMPILER if (compiled_functions) { - if (params.aggregation_method == 0) - handleAggregationJIT(method, state, aggregates_pool, rows, aggregate_instructions); - else + // if (params.aggregation_method == 0) + // handleAggregationJIT(method, state, aggregates_pool, rows, aggregate_instructions); + // else handleAggregationJITV2(method, state, aggregates_pool, rows, aggregate_instructions); } else From 7b3952adda75a797fdcd2392c7889a89e4880c11 Mon Sep 17 00:00:00 2001 From: Maksim Kita Date: Thu, 10 Jun 2021 22:53:51 +0300 Subject: [PATCH 643/931] Fixed tests --- src/AggregateFunctions/AggregateFunctionAvg.h | 2 +- .../AggregateFunctionAvgWeighted.h | 2 +- 
src/Interpreters/Aggregator.cpp | 73 ++----------------- 3 files changed, 9 insertions(+), 68 deletions(-) diff --git a/src/AggregateFunctions/AggregateFunctionAvg.h b/src/AggregateFunctions/AggregateFunctionAvg.h index c028b610878..e8f29a88af9 100644 --- a/src/AggregateFunctions/AggregateFunctionAvg.h +++ b/src/AggregateFunctions/AggregateFunctionAvg.h @@ -210,7 +210,7 @@ public: auto * denominator_dst_value = b.CreateLoad(denominator_type, denominator_dst_ptr); auto * denominator_src_value = b.CreateLoad(denominator_type, denominator_src_ptr); - auto * denominator_result_value = b.CreateAdd(denominator_src_value, denominator_dst_value); + auto * denominator_result_value = denominator_type->isIntegerTy() ? b.CreateAdd(denominator_src_value, denominator_dst_value) : b.CreateFAdd(denominator_src_value, denominator_dst_value); b.CreateStore(denominator_result_value, denominator_dst_ptr); } diff --git a/src/AggregateFunctions/AggregateFunctionAvgWeighted.h b/src/AggregateFunctions/AggregateFunctionAvgWeighted.h index 2a8423cd998..d9df661ab18 100644 --- a/src/AggregateFunctions/AggregateFunctionAvgWeighted.h +++ b/src/AggregateFunctions/AggregateFunctionAvgWeighted.h @@ -80,7 +80,7 @@ public: auto * weight_cast_to_denominator = nativeCast(b, arguments_types[1], argument_values[1], numerator_type); auto * denominator_value = b.CreateLoad(denominator_type, denominator_ptr); - auto * denominator_value_updated = numerator_type->isIntegerTy() ? b.CreateAdd(denominator_value, weight_cast_to_denominator) : b.CreateFAdd(denominator_value, weight_cast_to_denominator); + auto * denominator_value_updated = denominator_type->isIntegerTy() ? b.CreateAdd(denominator_value, weight_cast_to_denominator) : b.CreateFAdd(denominator_value, weight_cast_to_denominator); b.CreateStore(denominator_value_updated, denominator_ptr); } diff --git a/src/Interpreters/Aggregator.cpp b/src/Interpreters/Aggregator.cpp index 3f706bf1ea6..673b9d600f4 100644 --- a/src/Interpreters/Aggregator.cpp +++ b/src/Interpreters/Aggregator.cpp @@ -699,11 +699,8 @@ void NO_INLINE Aggregator::handleAggregationJITV2( emplace_result.setMapped(nullptr); aggregate_data = aggregates_pool->alignedAlloc(total_size_of_aggregate_states, align_aggregate_states); - create_aggregate_states_function(aggregate_data); - createAggregateStates(compiled_functions->functions_count, aggregate_data); - emplace_result.setMapped(aggregate_data); } else @@ -968,39 +965,6 @@ bool Aggregator::executeOnBlock(const Block & block, AggregatedDataVariants & re bool Aggregator::executeOnBlock(Columns columns, UInt64 num_rows, AggregatedDataVariants & result, ColumnRawPtrs & key_columns, AggregateColumns & aggregate_columns, bool & no_more_keys) { - // std::cerr << "Aggregator::executeOnBlock" << std::endl; - // std::cerr << "Columns " << columns.size() << std::endl; - - // for (const auto & column : columns) - // { - // if (column) - // std::cerr << column->dumpStructure() << "\n"; - // } - - // std::cerr << "Num rows " << num_rows << std::endl; - // std::cerr << "Key columns before " << key_columns.size() << std::endl; - // for (const auto & column : key_columns) - // { - // if (column) - // std::cerr << column->dumpStructure() << "\n"; - // } - - // std::cerr << "Aggregate columns before " << aggregate_columns.size() << std::endl; - // for (size_t i = 0; i < aggregate_columns.size(); ++i) - // { - // const auto & aggregate_function_columns = aggregate_columns[i]; - - // for (const auto & aggregate_function_column : aggregate_function_columns) - // { - // if 
(aggregate_function_column) - // { - // std::cerr << "Aggregate function column " << static_cast(aggregate_function_column) << std::endl; - // std::cerr << aggregate_function_column->dumpStructure() << "\n"; - // } - // } - // } - // std::cerr << "No more keys " << no_more_keys << std::endl; - /// `result` will destroy the states of aggregate functions in the destructor result.aggregator = this; @@ -1046,28 +1010,6 @@ bool Aggregator::executeOnBlock(Columns columns, UInt64 num_rows, AggregatedData result.without_key = place; } - // std::cerr << "Key columns after " << key_columns.size() << std::endl; - // for (const auto & column : key_columns) - // { - // if (column) - // std::cerr << column->dumpStructure() << "\n"; - // } - - // std::cerr << "Aggregate columns after " << aggregate_columns.size() << std::endl; - // for (size_t i = 0; i < aggregate_columns.size(); ++i) - // { - // const auto & aggregate_function_columns = aggregate_columns[i]; - - // for (const auto & aggregate_function_column : aggregate_function_columns) - // { - // if (aggregate_function_column) - // { - // std::cerr << "Aggregate function column " << static_cast(aggregate_function_column) << std::endl; - // std::cerr << aggregate_function_column->dumpStructure() << "\n"; - // } - // } - // } - /// We select one of the aggregation methods and call it. /// For the case when there are no keys (all aggregate into one row). @@ -1461,14 +1403,13 @@ void NO_INLINE Aggregator::convertToBlockImplFinal( #if USE_EMBEDDED_COMPILER if (compiled_functions) { - std::unique_ptr places(new AggregateDataPtr[data.size()]); - size_t place_index = 0; + PaddedPODArray places; + places.reserve(data.size()); data.forEachValue([&](const auto & key, auto & mapped) { method.insertKeyIntoColumns(key, key_columns, key_sizes_ref); - places[place_index] = mapped; - ++place_index; + places.emplace_back(mapped); /// Mark the cell as destroyed so it will not be destroyed in destructor. mapped = nullptr; @@ -1488,12 +1429,12 @@ void NO_INLINE Aggregator::convertToBlockImplFinal( for (size_t i = 0; i < compiled_functions->functions_count; ++i) { auto & final_aggregate_column = final_aggregate_columns[i]; - final_aggregate_column = final_aggregate_column->cloneResized(data.size()); + final_aggregate_column = final_aggregate_column->cloneResized(places.size()); columns_data.emplace_back(getColumnData(final_aggregate_column.get())); } auto insert_aggregate_states_function = compiled_functions->insert_aggregates_into_columns_function; - insert_aggregate_states_function(data.size(), columns_data.data(), places.get()); + insert_aggregate_states_function(places.size(), columns_data.data(), places.data()); aggregate_functions_destroy_index = compiled_functions->functions_count; @@ -1512,7 +1453,7 @@ void NO_INLINE Aggregator::convertToBlockImplFinal( bool is_state = aggregate_functions[destroy_index]->isState(); bool destroy_place_after_insert = !is_state; - aggregate_functions[destroy_index]->insertResultIntoAndDestroyBatch(data.size(), places.get(), offset, *final_aggregate_column, arena, destroy_place_after_insert); + aggregate_functions[destroy_index]->insertResultIntoAndDestroyBatch(places.size(), places.data(), offset, *final_aggregate_column, arena, destroy_place_after_insert); } } catch (...) 
@@ -1523,7 +1464,7 @@ void NO_INLINE Aggregator::convertToBlockImplFinal( for (; aggregate_functions_destroy_index < params.aggregates_size; ++aggregate_functions_destroy_index) { size_t offset = offsets_of_aggregate_states[aggregate_functions_destroy_index]; - aggregate_functions[aggregate_functions_destroy_index]->destroyBatch(data.size(), places.get(), offset); + aggregate_functions[aggregate_functions_destroy_index]->destroyBatch(places.size(), places.data(), offset); } if (exception) From 0c395bc1efc06172734f01d05a2ebad691a5f915 Mon Sep 17 00:00:00 2001 From: Maksim Kita Date: Fri, 11 Jun 2021 14:06:33 +0300 Subject: [PATCH 644/931] Added performance tests --- tests/performance/jit_aggregate_functions.xml | 136 ++++++++++++++++++ 1 file changed, 136 insertions(+) create mode 100644 tests/performance/jit_aggregate_functions.xml diff --git a/tests/performance/jit_aggregate_functions.xml b/tests/performance/jit_aggregate_functions.xml new file mode 100644 index 00000000000..460b36288d0 --- /dev/null +++ b/tests/performance/jit_aggregate_functions.xml @@ -0,0 +1,136 @@ + + + CREATE TABLE jit_test_memory ( + key UInt64, + value_1 UInt64, + value_2 UInt64, + value_3 UInt64, + value_4 UInt64, + value_5 UInt64, + predicate UInt8 + ) Engine = Memory + + + + CREATE TABLE jit_test_merge_tree ( + key UInt64, + value_1 UInt64, + value_2 UInt64, + value_3 UInt64, + value_4 UInt64, + value_5 UInt64, + predicate UInt8 + ) Engine = MergeTree + ORDER BY key + + + + CREATE TABLE jit_test_merge_tree_nullable ( + key UInt64, + value_1 Nullable(UInt64), + value_2 Nullable(UInt64), + value_3 Nullable(UInt64), + value_4 Nullable(UInt64), + value_5 Nullable(UInt64), + predicate UInt8 + ) Engine = Memory + + + + CREATE TABLE jit_test_memory_nullable ( + key UInt64, + value_1 Nullable(UInt64), + value_2 Nullable(UInt64), + value_3 Nullable(UInt64), + value_4 Nullable(UInt64), + value_5 Nullable(UInt64), + predicate UInt8 + ) Engine = MergeTree + ORDER BY key + + + + + function + + sum + min + max + avg + any + anyLast + + + + + table + + jit_test_memory + jit_test_merge_tree + jit_test_memory_nullable + jit_test_merge_tree_nullable + + + + + + INSERT INTO {table} + SELECT + number % 1000000, + number, + number, + number, + number, + number, + if (number % 2 == 0, 1, 0) + FROM + system.numbers_mt + LIMIT 10000000 + + + + SELECT + {function}(value_1), + {function}(value_2), + {function}(value_3) + FROM {table} + GROUP BY key + FORMAT Null + + + + SELECT + {function}If(value_1, predicate), + {function}If(value_2, predicate), + {function}If(value_3, predicate) + FROM {table} + GROUP BY key + FORMAT Null + + + + SELECT + {function}(value_1), + {function}(value_2), + {function}(value_3), + {function}(value_4), + {function}(value_5) + FROM {table} + GROUP BY key + FORMAT Null + + + + SELECT + {function}If(value_1), + {function}If(value_2), + {function}If(value_3), + {function}If(value_4), + {function}If(value_5) + FROM {table} + GROUP BY key + FORMAT Null + + + DROP TABLE IF EXISTS {table} + \ No newline at end of file From f17e212a7245e6e0e6e47c14dd09faa17e3eacd0 Mon Sep 17 00:00:00 2001 From: Maksim Kita Date: Fri, 11 Jun 2021 16:54:30 +0300 Subject: [PATCH 645/931] Fixed build --- src/AggregateFunctions/AggregateFunctionMinMaxAny.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/AggregateFunctions/AggregateFunctionMinMaxAny.h b/src/AggregateFunctions/AggregateFunctionMinMaxAny.h index 147961b0be3..d9093a83211 100644 --- a/src/AggregateFunctions/AggregateFunctionMinMaxAny.h +++ 
b/src/AggregateFunctions/AggregateFunctionMinMaxAny.h @@ -1114,7 +1114,7 @@ public: if constexpr (!Data::is_compilable) return false; - return canBeNativeType(*type); + return canBeNativeType(*this->argument_types[0]); } From da8c95716781f82129dbe857b539694e589f30ab Mon Sep 17 00:00:00 2001 From: Maksim Kita Date: Sun, 13 Jun 2021 15:38:57 +0300 Subject: [PATCH 646/931] Aggregator added CompiledExpressionCache --- programs/server/Server.cpp | 2 +- src/Core/Settings.h | 1 - src/DataStreams/TTLAggregationAlgorithm.cpp | 2 +- src/Interpreters/ActionsDAG.cpp | 1 - src/Interpreters/Aggregator.cpp | 599 ++++++++---------- src/Interpreters/Aggregator.h | 42 +- src/Interpreters/AsynchronousMetrics.cpp | 2 +- src/Interpreters/Context.cpp | 1 - src/Interpreters/ExpressionJIT.cpp | 42 +- src/Interpreters/ExpressionJIT.h | 63 -- src/Interpreters/InterpreterSelectQuery.cpp | 6 +- src/Interpreters/InterpreterSystemQuery.cpp | 2 +- .../JIT/CompiledExpressionCache.cpp | 34 + .../JIT/CompiledExpressionCache.h | 61 ++ src/Interpreters/JIT/compileFunction.cpp | 136 ---- src/Interpreters/JIT/compileFunction.h | 16 +- .../MergeTree/MergeTreeDataSelectExecutor.cpp | 4 +- 17 files changed, 384 insertions(+), 630 deletions(-) delete mode 100644 src/Interpreters/ExpressionJIT.h create mode 100644 src/Interpreters/JIT/CompiledExpressionCache.cpp create mode 100644 src/Interpreters/JIT/CompiledExpressionCache.h diff --git a/programs/server/Server.cpp b/programs/server/Server.cpp index 88f7564a7f2..9d7a4275dc1 100644 --- a/programs/server/Server.cpp +++ b/programs/server/Server.cpp @@ -50,7 +50,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/src/Core/Settings.h b/src/Core/Settings.h index feebaad3ce7..c260aa41230 100644 --- a/src/Core/Settings.h +++ b/src/Core/Settings.h @@ -108,7 +108,6 @@ class IColumn; M(UInt64, min_count_to_compile_expression, 3, "The number of identical expressions before they are JIT-compiled", 0) \ M(Bool, compile_aggregate_expressions, true, "Compile aggregate functions to native code.", 0) \ M(UInt64, min_count_to_compile_aggregate_expression, 0, "The number of identical aggreagte expressions before they are JIT-compiled", 0) \ - M(UInt64, aggregation_method, 0, "Aggregation method", 0) \ M(UInt64, group_by_two_level_threshold, 100000, "From what number of keys, a two-level aggregation starts. 0 - the threshold is not set.", 0) \ M(UInt64, group_by_two_level_threshold_bytes, 50000000, "From what size of the aggregation state in bytes, a two-level aggregation begins to be used. 0 - the threshold is not set. 
Two-level aggregation is used when at least one of the thresholds is triggered.", 0) \ M(Bool, distributed_aggregation_memory_efficient, true, "Is the memory-saving mode of distributed aggregation enabled.", 0) \ diff --git a/src/DataStreams/TTLAggregationAlgorithm.cpp b/src/DataStreams/TTLAggregationAlgorithm.cpp index ceebae1ab1c..66792dcfdb2 100644 --- a/src/DataStreams/TTLAggregationAlgorithm.cpp +++ b/src/DataStreams/TTLAggregationAlgorithm.cpp @@ -34,7 +34,7 @@ TTLAggregationAlgorithm::TTLAggregationAlgorithm( false, settings.max_rows_to_group_by, settings.group_by_overflow_mode, 0, 0, settings.max_bytes_before_external_group_by, settings.empty_result_for_aggregation_by_empty_set, storage_.getContext()->getTemporaryVolume(), settings.max_threads, settings.min_free_disk_space_for_temporary_data, - settings.compile_aggregate_expressions, settings.min_count_to_compile_aggregate_expression, settings.aggregation_method); + settings.compile_aggregate_expressions, settings.min_count_to_compile_aggregate_expression); aggregator = std::make_unique(params); } diff --git a/src/Interpreters/ActionsDAG.cpp b/src/Interpreters/ActionsDAG.cpp index 9fa48f6ceab..1518706f0a6 100644 --- a/src/Interpreters/ActionsDAG.cpp +++ b/src/Interpreters/ActionsDAG.cpp @@ -8,7 +8,6 @@ #include #include #include -#include #include #include diff --git a/src/Interpreters/Aggregator.cpp b/src/Interpreters/Aggregator.cpp index 673b9d600f4..3543783494b 100644 --- a/src/Interpreters/Aggregator.cpp +++ b/src/Interpreters/Aggregator.cpp @@ -24,6 +24,7 @@ #include #include #include +#include namespace ProfileEvents @@ -222,6 +223,30 @@ static CHJIT & getJITInstance() return jit; } +class CompiledAggregateFunctionsHolder final : public CompiledExpressionCacheEntry +{ +public: + explicit CompiledAggregateFunctionsHolder(CompiledAggregateFunctions compiled_function_) + : CompiledExpressionCacheEntry(compiled_function_.compiled_module.size) + , compiled_aggregate_functions(compiled_function_) + {} + + ~CompiledAggregateFunctionsHolder() override + { + std::string symbol_names; + for (const auto & [name, _] : compiled_aggregate_functions.compiled_module.function_name_to_symbol) + { + symbol_names += name; + symbol_names += ' '; + } + + std::cerr << "CompiledAggregateFunctionsHolder::~CompiledAggregateFunctionsHolder " << symbol_names << std::endl; + getJITInstance().deleteCompiledModule(compiled_aggregate_functions.compiled_module); + } + + CompiledAggregateFunctions compiled_aggregate_functions; +}; + #endif Aggregator::Aggregator(const Params & params_) @@ -287,7 +312,6 @@ Aggregator::Aggregator(const Params & params_) void Aggregator::compileAggregateFunctions() { static std::unordered_map aggregate_functions_description_to_count; - static std::unordered_map aggregation_functions_dump_to_add_compiled; static std::mutex mtx; if (!params.compile_aggregate_expressions || params.overflow_row) @@ -324,36 +348,37 @@ void Aggregator::compileAggregateFunctions() if (functions_to_compile.empty() || functions_to_compile.size() != aggregate_functions.size()) return; - CompiledAggregateFunctions compiled_aggregate_functions; + SipHash aggregate_functions_description_hash; + aggregate_functions_description_hash.update(functions_description); - SipHash aggregate_function_description_hash; - aggregate_function_description_hash.update(functions_description); - - UInt128 aggregate_function_description_hash_result; - aggregate_function_description_hash.get128(aggregate_function_description_hash_result); + UInt128 
aggregate_functions_description_hash_key; + aggregate_functions_description_hash.get128(aggregate_functions_description_hash_key); { std::lock_guard lock(mtx); - if (aggregate_functions_description_to_count[aggregate_function_description_hash_result]++ < params.min_count_to_compile_aggregate_expression) + if (aggregate_functions_description_to_count[aggregate_functions_description_hash_key]++ < params.min_count_to_compile_aggregate_expression) return; - auto it = aggregation_functions_dump_to_add_compiled.find(functions_description); - if (it != aggregation_functions_dump_to_add_compiled.end()) + if (auto * compilation_cache = CompiledExpressionCacheFactory::instance().tryGetCache()) { - compiled_aggregate_functions = it->second; + auto [compiled_function_cache_entry, _] = compilation_cache->getOrSet(aggregate_functions_description_hash_key, [&] () + { + LOG_TRACE(log, "Compile expression {}", functions_description); + + auto compiled_aggregate_functions = compileAggregateFunctons(getJITInstance(), functions_to_compile, functions_description); + return std::make_shared(std::move(compiled_aggregate_functions)); + }); + + compiled_aggregate_functions_holder = std::static_pointer_cast(compiled_function_cache_entry); } else { LOG_TRACE(log, "Compile expression {}", functions_description); - - compiled_aggregate_functions = compileAggregateFunctons(getJITInstance(), functions_to_compile, functions_description); - aggregation_functions_dump_to_add_compiled[functions_description] = compiled_aggregate_functions; + auto compiled_aggregate_functions = compileAggregateFunctons(getJITInstance(), functions_to_compile, functions_description); + compiled_aggregate_functions_holder = std::make_shared(std::move(compiled_aggregate_functions)); } } - - LOG_TRACE(log, "Use compiled expression {}", functions_description); - compiled_functions.emplace(std::move(compiled_aggregate_functions)); } #endif @@ -568,232 +593,34 @@ void NO_INLINE Aggregator::executeImpl( typename Method::State state(key_columns, key_sizes, aggregation_state_cache); if (!no_more_keys) - executeImplBatch(method, state, aggregates_pool, rows, aggregate_instructions, overflow_row); - else - executeImplBatch(method, state, aggregates_pool, rows, aggregate_instructions, overflow_row); -} - + { #if USE_EMBEDDED_COMPILER - -template -void NO_INLINE Aggregator::handleAggregationJIT( - Method & method, - typename Method::State & state, - Arena * aggregates_pool, - size_t rows, - AggregateFunctionInstruction * aggregate_instructions) const -{ - std::vector columns_data; - columns_data.reserve(aggregate_functions.size()); - - /// Add values to the aggregate functions. 
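/// The block being removed below is the V1 callback-based JIT path; this commit
/// deletes both hand-written JIT handlers and folds their work into a single
/// templated executeImplBatch further down.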
- for (AggregateFunctionInstruction * inst = aggregate_instructions; inst->that; ++inst) - { - size_t arguments_size = inst->that->getArgumentTypes().size(); - for (size_t i = 0; i < arguments_size; ++i) - columns_data.emplace_back(getColumnData(inst->batch_arguments[i])); - } - - auto add_into_aggregate_states_function = compiled_functions->add_into_aggregate_states_function; - auto create_aggregate_states_function = compiled_functions->create_aggregate_states_function; - - std::unique_ptr places; - - bool not_all_functions_compiled = compiled_functions->functions_count != offsets_of_aggregate_states.size(); - if (not_all_functions_compiled) - places.reset(new AggregateDataPtr[rows]); - - auto get_aggregate_data = [&](size_t row) -> AggregateDataPtr - { - AggregateDataPtr aggregate_data; - - if constexpr (!no_more_keys) + if (compiled_aggregate_functions_holder) { - auto emplace_result = state.emplaceKey(method.data, row, *aggregates_pool); - - /// If a new key is inserted, initialize the states of the aggregate functions, and possibly something related to the key. - if (emplace_result.isInserted()) - { - /// exception-safety - if you can not allocate memory or create states, then destructors will not be called. - emplace_result.setMapped(nullptr); - - aggregate_data = aggregates_pool->alignedAlloc(total_size_of_aggregate_states, align_aggregate_states); - create_aggregate_states_function(aggregate_data); - - emplace_result.setMapped(aggregate_data); - } - else - aggregate_data = emplace_result.getMapped(); - - assert(aggregate_data != nullptr); + executeImplBatch(method, state, aggregates_pool, rows, aggregate_instructions, overflow_row); } else - { - /// Add only if the key already exists. - /// Overflow row is disabled for JIT. - auto find_result = state.findKey(method.data, row, *aggregates_pool); - assert(find_result.getMapped() != nullptr); - - aggregate_data = find_result.getMapped(); - } - - if (not_all_functions_compiled) - places[row] = aggregate_data; - - return aggregate_data; - }; - - GetAggregateDataFunction get_aggregate_data_function = FunctorToStaticMethodAdaptor::unsafeCall; - GetAggregateDataContext get_aggregate_data_context = reinterpret_cast(&get_aggregate_data); - - add_into_aggregate_states_function(rows, columns_data.data(), get_aggregate_data_function, get_aggregate_data_context); - - /// Add values to the aggregate functions. - AggregateFunctionInstruction * inst = aggregate_instructions + compiled_functions->functions_count; - for (; inst->that; ++inst) - { - if (inst->offsets) - inst->batch_that->addBatchArray(rows, places.get(), inst->state_offset, inst->batch_arguments, inst->offsets, aggregates_pool); - else - inst->batch_that->addBatch(rows, places.get(), inst->state_offset, inst->batch_arguments, aggregates_pool); - } -} - -template -void NO_INLINE Aggregator::handleAggregationJITV2( - Method & method, - typename Method::State & state, - Arena * aggregates_pool, - size_t rows, - AggregateFunctionInstruction * aggregate_instructions) const -{ - std::vector columns_data; - columns_data.reserve(aggregate_functions.size()); - - /// Add values to the aggregate functions. 
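/// handleAggregationJITV2 is removed as well; its places-array body survives
/// nearly verbatim as the use_compiled_functions == true instantiation of the
/// new executeImplBatch.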
- for (AggregateFunctionInstruction * inst = aggregate_instructions; inst->that; ++inst) - { - size_t arguments_size = inst->that->getArgumentTypes().size(); - for (size_t i = 0; i < arguments_size; ++i) - columns_data.emplace_back(getColumnData(inst->batch_arguments[i])); - } - - auto add_into_aggregate_states_function = compiled_functions->add_into_aggregate_states_function_v2; - auto create_aggregate_states_function = compiled_functions->create_aggregate_states_function; - - std::unique_ptr places(new AggregateDataPtr[rows]); - - /// For all rows. - for (size_t i = 0; i < rows; ++i) - { - AggregateDataPtr aggregate_data = nullptr; - - if constexpr (!no_more_keys) - { - auto emplace_result = state.emplaceKey(method.data, i, *aggregates_pool); - - /// If a new key is inserted, initialize the states of the aggregate functions, and possibly something related to the key. - if (emplace_result.isInserted()) - { - /// exception-safety - if you can not allocate memory or create states, then destructors will not be called. - emplace_result.setMapped(nullptr); - - aggregate_data = aggregates_pool->alignedAlloc(total_size_of_aggregate_states, align_aggregate_states); - create_aggregate_states_function(aggregate_data); - createAggregateStates(compiled_functions->functions_count, aggregate_data); - emplace_result.setMapped(aggregate_data); - } - else - aggregate_data = emplace_result.getMapped(); - - assert(aggregate_data != nullptr); - } - else - { - /// Add only if the key already exists. - auto find_result = state.findKey(method.data, i, *aggregates_pool); - if (find_result.isFound()) - aggregate_data = find_result.getMapped(); - } - - places[i] = aggregate_data; - } - - add_into_aggregate_states_function(rows, columns_data.data(), places.get()); - - /// Add values to the aggregate functions. - AggregateFunctionInstruction * inst = aggregate_instructions + compiled_functions->functions_count; - for (; inst->that; ++inst) - { - if (inst->offsets) - inst->batch_that->addBatchArray(rows, places.get(), inst->state_offset, inst->batch_arguments, inst->offsets, aggregates_pool); - else - inst->batch_that->addBatch(rows, places.get(), inst->state_offset, inst->batch_arguments, aggregates_pool); - } -} - #endif - -template -void NO_INLINE Aggregator::handleAggregationDefault( - Method & method, - typename Method::State & state, - Arena * aggregates_pool, - size_t rows, - AggregateFunctionInstruction * aggregate_instructions, - AggregateDataPtr overflow_row) const -{ - std::unique_ptr places(new AggregateDataPtr[rows]); - - /// For all rows. - for (size_t i = 0; i < rows; ++i) - { - AggregateDataPtr aggregate_data; - - if constexpr (!no_more_keys) { - auto emplace_result = state.emplaceKey(method.data, i, *aggregates_pool); - - /// If a new key is inserted, initialize the states of the aggregate functions, and possibly something related to the key. - if (emplace_result.isInserted()) - { - /// exception-safety - if you can not allocate memory or create states, then destructors will not be called. - emplace_result.setMapped(nullptr); - - aggregate_data = aggregates_pool->alignedAlloc(total_size_of_aggregate_states, align_aggregate_states); - createAggregateStates(aggregate_data); - - emplace_result.setMapped(aggregate_data); - } - else - aggregate_data = emplace_result.getMapped(); - - assert(aggregate_data != nullptr); + executeImplBatch(method, state, aggregates_pool, rows, aggregate_instructions, overflow_row); } - else - { - /// Add only if the key already exists. 
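/// handleAggregationDefault goes the same way: the interpreted per-row
/// emplace/find loop disappearing here reappears as the
/// use_compiled_functions == false instantiation of executeImplBatch, so the
/// compiled and interpreted paths now share one body.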
- auto find_result = state.findKey(method.data, i, *aggregates_pool); - if (find_result.isFound()) - aggregate_data = find_result.getMapped(); - else - aggregate_data = overflow_row; - } - - places[i] = aggregate_data; } - - /// Add values to the aggregate functions. - for (AggregateFunctionInstruction * inst = aggregate_instructions; inst->that; ++inst) + else { - if (inst->offsets) - inst->batch_that->addBatchArray(rows, places.get(), inst->state_offset, inst->batch_arguments, inst->offsets, aggregates_pool); +#if USE_EMBEDDED_COMPILER + if (compiled_aggregate_functions_holder) + { + executeImplBatch(method, state, aggregates_pool, rows, aggregate_instructions, overflow_row); + } else - inst->batch_that->addBatch(rows, places.get(), inst->state_offset, inst->batch_arguments, aggregates_pool); +#endif + { + executeImplBatch(method, state, aggregates_pool, rows, aggregate_instructions, overflow_row); + } } } -template +template void NO_INLINE Aggregator::executeImplBatch( Method & method, typename Method::State & state, @@ -850,18 +677,86 @@ void NO_INLINE Aggregator::executeImplBatch( } } + size_t compiled_functions_count = 0; + #if USE_EMBEDDED_COMPILER - if (compiled_functions) - { - // if (params.aggregation_method == 0) - // handleAggregationJIT(method, state, aggregates_pool, rows, aggregate_instructions); - // else - handleAggregationJITV2(method, state, aggregates_pool, rows, aggregate_instructions); - } - else + if constexpr (use_compiled_functions) + compiled_functions_count = compiled_aggregate_functions_holder->compiled_aggregate_functions.functions_count; #endif + + std::unique_ptr places(new AggregateDataPtr[rows]); + + /// For all rows. + for (size_t i = 0; i < rows; ++i) { - handleAggregationDefault(method, state, aggregates_pool, rows, aggregate_instructions, overflow_row); + AggregateDataPtr aggregate_data = nullptr; + + if constexpr (!no_more_keys) + { + auto emplace_result = state.emplaceKey(method.data, i, *aggregates_pool); + + /// If a new key is inserted, initialize the states of the aggregate functions, and possibly something related to the key. + if (emplace_result.isInserted()) + { + /// exception-safety - if you can not allocate memory or create states, then destructors will not be called. + emplace_result.setMapped(nullptr); + + aggregate_data = aggregates_pool->alignedAlloc(total_size_of_aggregate_states, align_aggregate_states); + +#if USE_EMBEDDED_COMPILER + if constexpr (use_compiled_functions) + compiled_aggregate_functions_holder->compiled_aggregate_functions.create_aggregate_states_function(aggregate_data); +#endif + createAggregateStates(compiled_functions_count, aggregate_data); + + emplace_result.setMapped(aggregate_data); + } + else + aggregate_data = emplace_result.getMapped(); + + assert(aggregate_data != nullptr); + } + else + { + /// Add only if the key already exists. 
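        /// Unlike the deleted JIT handlers, this unified loop honours the
        /// overflow row again: a missed key falls back to overflow_row just
        /// below. The compiled instantiation can never actually take that
        /// fallback with a non-null pointer, since compileAggregateFunctions()
        /// refuses to compile when params.overflow_row is set.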
+ auto find_result = state.findKey(method.data, i, *aggregates_pool); + if (find_result.isFound()) + aggregate_data = find_result.getMapped(); + else + aggregate_data = overflow_row; + } + + places[i] = aggregate_data; + } + +#if USE_EMBEDDED_COMPILER + if constexpr (use_compiled_functions) + { + std::vector columns_data; + columns_data.reserve(aggregate_functions.size()); + + for (size_t compiled_function_index = 0; compiled_function_index < compiled_functions_count; ++compiled_function_index) + { + AggregateFunctionInstruction * inst = aggregate_instructions + compiled_function_index; + size_t arguments_size = inst->that->getArgumentTypes().size(); + + for (size_t i = 0; i < arguments_size; ++i) + columns_data.emplace_back(getColumnData(inst->batch_arguments[i])); + } + + auto add_into_aggregate_states_function = compiled_aggregate_functions_holder->compiled_aggregate_functions.add_into_aggregate_states_function; + add_into_aggregate_states_function(rows, columns_data.data(), places.get()); + } +#endif + + /// Add values to the aggregate functions. + AggregateFunctionInstruction * inst = aggregate_instructions + compiled_functions_count; + for (; inst->that; ++inst) + { + if (inst->offsets) + inst->batch_that->addBatchArray(rows, places.get(), inst->state_offset, inst->batch_arguments, inst->offsets, aggregates_pool); + else + inst->batch_that->addBatch(rows, places.get(), inst->state_offset, inst->batch_arguments, aggregates_pool); } } @@ -1304,9 +1199,22 @@ void Aggregator::convertToBlockImpl( raw_key_columns.push_back(column.get()); if (final) - convertToBlockImplFinal(method, data, std::move(raw_key_columns), final_aggregate_columns, arena); + { +#if USE_EMBEDDED_COMPILER + if (compiled_aggregate_functions_holder) + { + convertToBlockImplFinal(method, data, std::move(raw_key_columns), final_aggregate_columns, arena); + } + else +#endif + { + convertToBlockImplFinal(method, data, std::move(raw_key_columns), final_aggregate_columns, arena); + } + } else + { convertToBlockImplNotFinal(method, data, std::move(raw_key_columns), aggregate_columns); + } /// In order to release memory early. data.clearAndShrink(); } @@ -1380,7 +1288,7 @@ inline void Aggregator::insertAggregatesIntoColumns( } -template +template void NO_INLINE Aggregator::convertToBlockImplFinal( Method & method, Table & data, @@ -1400,85 +1308,81 @@ void NO_INLINE Aggregator::convertToBlockImplFinal( auto shuffled_key_sizes = method.shuffleKeyColumns(key_columns, key_sizes); const auto & key_sizes_ref = shuffled_key_sizes ? *shuffled_key_sizes : key_sizes; -#if USE_EMBEDDED_COMPILER - if (compiled_functions) + PaddedPODArray places; + places.reserve(data.size()); + + data.forEachValue([&](const auto & key, auto & mapped) { - PaddedPODArray places; - places.reserve(data.size()); + method.insertKeyIntoColumns(key, key_columns, key_sizes_ref); + places.emplace_back(mapped); - data.forEachValue([&](const auto & key, auto & mapped) - { - method.insertKeyIntoColumns(key, key_columns, key_sizes_ref); - places.emplace_back(mapped); + /// Mark the cell as destroyed so it will not be destroyed in destructor. + mapped = nullptr; + }); - /// Mark the cell as destroyed so it will not be destroyed in destructor. 
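        /// Nulling the mapped cell transfers ownership of each aggregate state
        /// to the places array: from this point the insert/destroy loops below
        /// must destroy every state exactly once, including the catch path when
        /// insertResultIntoAndDestroyBatch throws part-way through.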
- mapped = nullptr; - }); + std::exception_ptr exception; + size_t aggregate_functions_destroy_index = 0; - std::exception_ptr exception; - size_t aggregate_functions_destroy_index = 0; - - try + try + { +#if USE_EMBEDDED_COMPILER + if constexpr (use_compiled_functions) { /** For JIT compiled functions we need to resize columns before pass them into compiled code. * insert_aggregates_into_columns_function function does not throw exception. */ std::vector columns_data; + + auto compiled_functions = compiled_aggregate_functions_holder->compiled_aggregate_functions; + columns_data.reserve(final_aggregate_columns.size()); - for (size_t i = 0; i < compiled_functions->functions_count; ++i) + for (size_t i = 0; i < compiled_functions.functions_count; ++i) { auto & final_aggregate_column = final_aggregate_columns[i]; final_aggregate_column = final_aggregate_column->cloneResized(places.size()); columns_data.emplace_back(getColumnData(final_aggregate_column.get())); } - auto insert_aggregate_states_function = compiled_functions->insert_aggregates_into_columns_function; - insert_aggregate_states_function(places.size(), columns_data.data(), places.data()); + auto insert_aggregates_into_columns_function = compiled_functions.insert_aggregates_into_columns_function; + insert_aggregates_into_columns_function(places.size(), columns_data.data(), places.data()); - aggregate_functions_destroy_index = compiled_functions->functions_count; + aggregate_functions_destroy_index = compiled_functions.functions_count; + } +#endif - for (; aggregate_functions_destroy_index < params.aggregates_size;) - { - auto & final_aggregate_column = final_aggregate_columns[aggregate_functions_destroy_index]; - size_t offset = offsets_of_aggregate_states[aggregate_functions_destroy_index]; + for (; aggregate_functions_destroy_index < params.aggregates_size;) + { + auto & final_aggregate_column = final_aggregate_columns[aggregate_functions_destroy_index]; + size_t offset = offsets_of_aggregate_states[aggregate_functions_destroy_index]; - /** We increase aggregate_functions_destroy_index because by function contract if insertResultIntoAndDestroyBatch + /** We increase aggregate_functions_destroy_index because by function contract if insertResultIntoAndDestroyBatch * throws exception, it also must destroy all necessary states. * Then code need to continue to destroy other aggregate function states with next function index. */ - size_t destroy_index = aggregate_functions_destroy_index; - ++aggregate_functions_destroy_index; + size_t destroy_index = aggregate_functions_destroy_index; + ++aggregate_functions_destroy_index; - bool is_state = aggregate_functions[destroy_index]->isState(); - bool destroy_place_after_insert = !is_state; + bool is_state = aggregate_functions[destroy_index]->isState(); + bool destroy_place_after_insert = !is_state; - aggregate_functions[destroy_index]->insertResultIntoAndDestroyBatch(places.size(), places.data(), offset, *final_aggregate_column, arena, destroy_place_after_insert); - } + aggregate_functions[destroy_index]->insertResultIntoAndDestroyBatch( + places.size(), places.data(), offset, *final_aggregate_column, arena, destroy_place_after_insert); } - catch (...) 
- { - exception = std::current_exception(); - } - - for (; aggregate_functions_destroy_index < params.aggregates_size; ++aggregate_functions_destroy_index) - { - size_t offset = offsets_of_aggregate_states[aggregate_functions_destroy_index]; - aggregate_functions[aggregate_functions_destroy_index]->destroyBatch(places.size(), places.data(), offset); - } - - if (exception) - std::rethrow_exception(exception); } - else -#endif + catch (...) { - data.forEachValue([&](const auto & key, auto & mapped) - { - method.insertKeyIntoColumns(key, key_columns, key_sizes_ref); - insertAggregatesIntoColumns(mapped, final_aggregate_columns, arena); - }); + exception = std::current_exception(); } + + for (; aggregate_functions_destroy_index < params.aggregates_size; ++aggregate_functions_destroy_index) + { + size_t offset = offsets_of_aggregate_states[aggregate_functions_destroy_index]; + aggregate_functions[aggregate_functions_destroy_index]->destroyBatch(places.size(), places.data(), offset); + } + + if (exception) + std::rethrow_exception(exception); } template @@ -1898,7 +1802,7 @@ void NO_INLINE Aggregator::mergeDataNullKey( } -template +template void NO_INLINE Aggregator::mergeDataImpl( Table & table_dst, Table & table_src, @@ -1907,52 +1811,34 @@ void NO_INLINE Aggregator::mergeDataImpl( if constexpr (Method::low_cardinality_optimization) mergeDataNullKey(table_dst, table_src, arena); + table_src.mergeToViaEmplace(table_dst, [&](AggregateDataPtr & __restrict dst, AggregateDataPtr & __restrict src, bool inserted) + { + if (!inserted) + { + size_t compiled_functions_count = 0; + #if USE_EMBEDDED_COMPILER - if (compiled_functions) - { - auto merge_aggregate_states_function_typed = compiled_functions->merge_aggregate_states_function; - - table_src.mergeToViaEmplace(table_dst, [&](AggregateDataPtr & __restrict dst, AggregateDataPtr & __restrict src, bool inserted) - { - if (!inserted) + if constexpr (use_compiled_functions) { - merge_aggregate_states_function_typed(dst, src); - - for (size_t i = compiled_functions->functions_count; i < params.aggregates_size; ++i) - aggregate_functions[i]->merge(dst + offsets_of_aggregate_states[i], src + offsets_of_aggregate_states[i], arena); - - for (size_t i = compiled_functions->functions_count; i < params.aggregates_size; ++i) - aggregate_functions[i]->destroy(src + offsets_of_aggregate_states[i]); + const auto & compiled_functions = compiled_aggregate_functions_holder->compiled_aggregate_functions; + compiled_functions.merge_aggregate_states_function(dst, src); + compiled_functions_count = compiled_aggregate_functions_holder->compiled_aggregate_functions.functions_count; } - else - { - dst = src; - } - - src = nullptr; - }); - } - else #endif - { - table_src.mergeToViaEmplace(table_dst, [&](AggregateDataPtr & __restrict dst, AggregateDataPtr & __restrict src, bool inserted) + + for (size_t i = compiled_functions_count; i < params.aggregates_size; ++i) + aggregate_functions[i]->merge(dst + offsets_of_aggregate_states[i], src + offsets_of_aggregate_states[i], arena); + + for (size_t i = compiled_functions_count; i < params.aggregates_size; ++i) + aggregate_functions[i]->destroy(src + offsets_of_aggregate_states[i]); + } + else { - if (!inserted) - { - for (size_t i = 0; i < params.aggregates_size; ++i) - aggregate_functions[i]->merge(dst + offsets_of_aggregate_states[i], src + offsets_of_aggregate_states[i], arena); + dst = src; + } - for (size_t i = 0; i < params.aggregates_size; ++i) - aggregate_functions[i]->destroy(src + offsets_of_aggregate_states[i]); - } - 
else - { - dst = src; - } - - src = nullptr; - }); - } + src = nullptr; + }); table_src.clearAndShrink(); } @@ -2056,21 +1942,41 @@ void NO_INLINE Aggregator::mergeSingleLevelDataImpl( AggregatedDataVariants & current = *non_empty_data[result_num]; if (!no_more_keys) - mergeDataImpl( - getDataVariant(*res).data, - getDataVariant(current).data, - res->aggregates_pool); + { +#if USE_EMBEDDED_COMPILER + if (compiled_aggregate_functions_holder) + { + mergeDataImpl( + getDataVariant(*res).data, + getDataVariant(current).data, + res->aggregates_pool); + } + else +#endif + { + mergeDataImpl( + getDataVariant(*res).data, + getDataVariant(current).data, + res->aggregates_pool); + } + } else if (res->without_key) + { + /// TODO: Use compile function mergeDataNoMoreKeysImpl( getDataVariant(*res).data, res->without_key, getDataVariant(current).data, res->aggregates_pool); + } else + { + /// TODO: Use compile function mergeDataOnlyExistingKeysImpl( getDataVariant(*res).data, getDataVariant(current).data, res->aggregates_pool); + } /// `current` will not destroy the states of aggregate functions in the destructor current.aggregator = nullptr; @@ -2095,11 +2001,22 @@ void NO_INLINE Aggregator::mergeBucketImpl( return; AggregatedDataVariants & current = *data[result_num]; - - mergeDataImpl( - getDataVariant(*res).data.impls[bucket], - getDataVariant(current).data.impls[bucket], - arena); +#if USE_EMBEDDED_COMPILER + if (compiled_aggregate_functions_holder) + { + mergeDataImpl( + getDataVariant(*res).data.impls[bucket], + getDataVariant(current).data.impls[bucket], + arena); + } + else +#endif + { + mergeDataImpl( + getDataVariant(*res).data.impls[bucket], + getDataVariant(current).data.impls[bucket], + arena); + } } } diff --git a/src/Interpreters/Aggregator.h b/src/Interpreters/Aggregator.h index dd9a11cf4ae..19600d6aeb9 100644 --- a/src/Interpreters/Aggregator.h +++ b/src/Interpreters/Aggregator.h @@ -852,6 +852,8 @@ using AggregatedDataVariantsPtr = std::shared_ptr; using ManyAggregatedDataVariants = std::vector; using ManyAggregatedDataVariantsPtr = std::shared_ptr; +class CompiledAggregateFunctionsHolder; + /** How are "total" values calculated with WITH TOTALS? * (For more details, see TotalsHavingTransform.) * @@ -911,7 +913,6 @@ public: bool compile_aggregate_expressions; size_t min_count_to_compile_aggregate_expression; - size_t aggregation_method; Params( const Block & src_header_, @@ -924,7 +925,6 @@ public: size_t min_free_disk_space_, bool compile_aggregate_expressions_, size_t min_count_to_compile_aggregate_expression_, - size_t aggregation_method_, const Block & intermediate_header_ = {}) : src_header(src_header_), intermediate_header(intermediate_header_), @@ -936,15 +936,14 @@ public: tmp_volume(tmp_volume_), max_threads(max_threads_), min_free_disk_space(min_free_disk_space_), compile_aggregate_expressions(compile_aggregate_expressions_), - min_count_to_compile_aggregate_expression(min_count_to_compile_aggregate_expression_), - aggregation_method(aggregation_method_) + min_count_to_compile_aggregate_expression(min_count_to_compile_aggregate_expression_) { } /// Only parameters that matter during merge. 
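    /// With aggregation_method removed again, the delegating constructor below
    /// drops back to a single trailing 0, and every call site the earlier
    /// commit touched (TTLAggregationAlgorithm, InterpreterSelectQuery,
    /// MergeTreeDataSelectExecutor) is reverted in this same commit.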
         Params(const Block & intermediate_header_, const ColumnNumbers & keys_, const AggregateDescriptions & aggregates_, bool overflow_row_, size_t max_threads_)
-            : Params(Block(), keys_, aggregates_, overflow_row_, 0, OverflowMode::THROW, 0, 0, 0, false, nullptr, max_threads_, 0, false, 0, 0)
+            : Params(Block(), keys_, aggregates_, overflow_row_, 0, OverflowMode::THROW, 0, 0, 0, false, nullptr, max_threads_, 0, false, 0)
         {
             intermediate_header = intermediate_header_;
         }
@@ -1087,7 +1086,7 @@ private:
     TemporaryFiles temporary_files;

 #if USE_EMBEDDED_COMPILER
-    std::optional<CompiledAggregateFunctions> compiled_functions;
+    std::shared_ptr<CompiledAggregateFunctionsHolder> compiled_aggregate_functions_holder;
 #endif

     /** Try to compile aggregate functions.
@@ -1121,7 +1120,7 @@
         AggregateDataPtr overflow_row) const;

     /// Specialization for a particular value no_more_keys.
-    template <bool no_more_keys, typename Method>
+    template <bool no_more_keys, bool use_compiled_functions, typename Method>
     void executeImplBatch(
         Method & method,
         typename Method::State & state,
@@ -1130,31 +1129,6 @@
         AggregateFunctionInstruction * aggregate_instructions,
         AggregateDataPtr overflow_row) const;

-    template <typename Method>
-    void handleAggregationJIT(
-        Method & method,
-        typename Method::State & state,
-        Arena * aggregates_pool,
-        size_t rows,
-        AggregateFunctionInstruction * aggregate_instructions) const;
-
-    template <typename Method>
-    void handleAggregationJITV2(
-        Method & method,
-        typename Method::State & state,
-        Arena * aggregates_pool,
-        size_t rows,
-        AggregateFunctionInstruction * aggregate_instructions) const;
-
-    template <typename Method>
-    void handleAggregationDefault(
-        Method & method,
-        typename Method::State & state,
-        Arena * aggregates_pool,
-        size_t rows,
-        AggregateFunctionInstruction * aggregate_instructions,
-        AggregateDataPtr overflow_row) const;
-
     /// For case when there are no keys (all aggregate into one row).
     static void executeWithoutKeyImpl(
         AggregatedDataWithoutKey & res,
@@ -1183,7 +1157,7 @@
         Arena * arena) const;

     /// Merge data from hash table `src` into `dst`.
- template + template void mergeDataImpl( Table & table_dst, Table & table_src, @@ -1227,7 +1201,7 @@ private: MutableColumns & final_aggregate_columns, Arena * arena) const; - template + template void convertToBlockImplFinal( Method & method, Table & data, diff --git a/src/Interpreters/AsynchronousMetrics.cpp b/src/Interpreters/AsynchronousMetrics.cpp index 2216be4390c..9803a2d8e9b 100644 --- a/src/Interpreters/AsynchronousMetrics.cpp +++ b/src/Interpreters/AsynchronousMetrics.cpp @@ -1,6 +1,6 @@ #include #include -#include +#include #include #include #include diff --git a/src/Interpreters/Context.cpp b/src/Interpreters/Context.cpp index a0fce58b472..2992bc010ab 100644 --- a/src/Interpreters/Context.cpp +++ b/src/Interpreters/Context.cpp @@ -45,7 +45,6 @@ #include #include #include -#include #include #include #include diff --git a/src/Interpreters/ExpressionJIT.cpp b/src/Interpreters/ExpressionJIT.cpp index d30bab0e6df..497aa56ab13 100644 --- a/src/Interpreters/ExpressionJIT.cpp +++ b/src/Interpreters/ExpressionJIT.cpp @@ -1,4 +1,6 @@ -#include +#if !defined(ARCADIA_BUILD) +# include "config_core.h" +#endif #if USE_EMBEDDED_COMPILER @@ -20,6 +22,7 @@ #include #include #include +#include #include namespace DB @@ -42,15 +45,16 @@ static Poco::Logger * getLogger() return &logger; } -class CompiledFunctionHolder +class CompiledFunctionHolder : public CompiledExpressionCacheEntry { public: explicit CompiledFunctionHolder(CompiledFunction compiled_function_) - : compiled_function(compiled_function_) + : CompiledExpressionCacheEntry(compiled_function_.compiled_module.size) + , compiled_function(compiled_function_) {} - ~CompiledFunctionHolder() + ~CompiledFunctionHolder() override { getJITInstance().deleteCompiledModule(compiled_function.compiled_module); } @@ -287,19 +291,18 @@ static FunctionBasePtr compile( { LOG_TRACE(getLogger(), "Compile expression {}", llvm_function->getName()); auto compiled_function = compileFunction(getJITInstance(), *llvm_function); - auto compiled_function_holder = std::make_shared(compiled_function); - - return std::make_shared(std::move(compiled_function_holder), compiled_function.compiled_module.size); + return std::make_shared(compiled_function); }); - llvm_function->setCompiledFunction(compiled_function_cache_entry->getCompiledFunctionHolder()); + std::shared_ptr compiled_function_holder = std::static_pointer_cast(compiled_function_cache_entry); + llvm_function->setCompiledFunction(std::move(compiled_function_holder)); } else { auto compiled_function = compileFunction(getJITInstance(), *llvm_function); - auto compiled_function_ptr = std::make_shared(compiled_function); + auto compiled_function_holder = std::make_shared(compiled_function); - llvm_function->setCompiledFunction(compiled_function_ptr); + llvm_function->setCompiledFunction(std::move(compiled_function_holder)); } return llvm_function; @@ -568,25 +571,6 @@ void ActionsDAG::compileFunctions(size_t min_count_to_compile_expression) } } -CompiledExpressionCacheFactory & CompiledExpressionCacheFactory::instance() -{ - static CompiledExpressionCacheFactory factory; - return factory; -} - -void CompiledExpressionCacheFactory::init(size_t cache_size) -{ - if (cache) - throw Exception(ErrorCodes::LOGICAL_ERROR, "CompiledExpressionCache was already initialized"); - - cache = std::make_unique(cache_size); -} - -CompiledExpressionCache * CompiledExpressionCacheFactory::tryGetCache() -{ - return cache.get(); -} - } #endif diff --git a/src/Interpreters/ExpressionJIT.h b/src/Interpreters/ExpressionJIT.h deleted file 
mode 100644 index ab78346cf27..00000000000 --- a/src/Interpreters/ExpressionJIT.h +++ /dev/null @@ -1,63 +0,0 @@ -#pragma once - -#if !defined(ARCADIA_BUILD) -# include "config_core.h" -#endif - -#if USE_EMBEDDED_COMPILER -# include -# include - -namespace DB -{ - -class CompiledFunctionHolder; - -class CompiledFunctionCacheEntry -{ -public: - CompiledFunctionCacheEntry(std::shared_ptr compiled_function_holder_, size_t compiled_function_size_) - : compiled_function_holder(std::move(compiled_function_holder_)) - , compiled_function_size(compiled_function_size_) - {} - - std::shared_ptr getCompiledFunctionHolder() const { return compiled_function_holder; } - - size_t getCompiledFunctionSize() const { return compiled_function_size; } - -private: - std::shared_ptr compiled_function_holder; - - size_t compiled_function_size; -}; - -struct CompiledFunctionWeightFunction -{ - size_t operator()(const CompiledFunctionCacheEntry & compiled_function) const - { - return compiled_function.getCompiledFunctionSize(); - } -}; - -class CompiledExpressionCache : public LRUCache -{ -public: - using Base = LRUCache; - using Base::Base; -}; - -class CompiledExpressionCacheFactory -{ -private: - std::unique_ptr cache; - -public: - static CompiledExpressionCacheFactory & instance(); - - void init(size_t cache_size); - CompiledExpressionCache * tryGetCache(); -}; - -} - -#endif diff --git a/src/Interpreters/InterpreterSelectQuery.cpp b/src/Interpreters/InterpreterSelectQuery.cpp index aae69426391..900820fb209 100644 --- a/src/Interpreters/InterpreterSelectQuery.cpp +++ b/src/Interpreters/InterpreterSelectQuery.cpp @@ -2040,8 +2040,7 @@ void InterpreterSelectQuery::executeAggregation(QueryPlan & query_plan, const Ac settings.max_threads, settings.min_free_disk_space_for_temporary_data, settings.compile_aggregate_expressions, - settings.min_count_to_compile_aggregate_expression, - settings.aggregation_method); + settings.min_count_to_compile_aggregate_expression); SortDescription group_by_sort_description; @@ -2145,8 +2144,7 @@ void InterpreterSelectQuery::executeRollupOrCube(QueryPlan & query_plan, Modific settings.max_threads, settings.min_free_disk_space_for_temporary_data, settings.compile_aggregate_expressions, - settings.min_count_to_compile_aggregate_expression, - settings.aggregation_method); + settings.min_count_to_compile_aggregate_expression); auto transform_params = std::make_shared(params, true); diff --git a/src/Interpreters/InterpreterSystemQuery.cpp b/src/Interpreters/InterpreterSystemQuery.cpp index f76d51e765b..bdeb4a30e9e 100644 --- a/src/Interpreters/InterpreterSystemQuery.cpp +++ b/src/Interpreters/InterpreterSystemQuery.cpp @@ -25,7 +25,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/src/Interpreters/JIT/CompiledExpressionCache.cpp b/src/Interpreters/JIT/CompiledExpressionCache.cpp new file mode 100644 index 00000000000..98f4eec982d --- /dev/null +++ b/src/Interpreters/JIT/CompiledExpressionCache.cpp @@ -0,0 +1,34 @@ +#include "CompiledExpressionCache.h" + +#if USE_EMBEDDED_COMPILER + +namespace DB +{ + +namespace ErrorCodes +{ + extern const int LOGICAL_ERROR; +} + +CompiledExpressionCacheFactory & CompiledExpressionCacheFactory::instance() +{ + static CompiledExpressionCacheFactory factory; + return factory; +} + +void CompiledExpressionCacheFactory::init(size_t cache_size) +{ + if (cache) + throw Exception(ErrorCodes::LOGICAL_ERROR, "CompiledExpressionCache was already initialized"); + + cache = std::make_unique(cache_size); +} + 
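The pattern this new file relies on is set up a few hunks earlier: a cache entry now describes its own footprint (CompiledExpressionCacheEntry carries the compiled size), so a single weight function can drive LRU eviction for any entry type, and CompiledFunctionHolder becomes cacheable simply by deriving from the entry base. A minimal self-contained sketch of such a size-weighted LRU cache, assuming a simplified stand-in for the real LRUCache template; the CacheEntry and SimpleWeightedLRUCache names and interfaces below are illustrative, not part of this patch:

#include <cstddef>
#include <cstdint>
#include <list>
#include <memory>
#include <unordered_map>

/// Base entry that knows its own weight, mirroring CompiledExpressionCacheEntry.
struct CacheEntry
{
    explicit CacheEntry(size_t size_) : size(size_) {}
    virtual ~CacheEntry() = default;
    size_t size; /// e.g. the compiled module size in bytes
};

class SimpleWeightedLRUCache
{
public:
    explicit SimpleWeightedLRUCache(size_t max_weight_) : max_weight(max_weight_) {}

    void set(uint64_t key, std::shared_ptr<CacheEntry> entry)
    {
        /// Drop any previous entry for this key so the weight stays consistent.
        if (auto it = cells.find(key); it != cells.end())
        {
            current_weight -= it->second->size;
            lru.remove(key);
            cells.erase(it);
        }

        current_weight += entry->size;
        cells.emplace(key, std::move(entry));
        lru.push_front(key);

        /// Evict least recently used entries until the weight budget fits.
        while (current_weight > max_weight && !lru.empty())
        {
            uint64_t victim = lru.back();
            lru.pop_back();
            current_weight -= cells[victim]->size;
            cells.erase(victim);
        }
    }

    std::shared_ptr<CacheEntry> get(uint64_t key)
    {
        auto it = cells.find(key);
        if (it == cells.end())
            return nullptr;
        /// Refresh recency on hit.
        lru.remove(key);
        lru.push_front(key);
        return it->second;
    }

private:
    size_t max_weight = 0;
    size_t current_weight = 0;
    std::list<uint64_t> lru; /// most recently used at the front
    std::unordered_map<uint64_t, std::shared_ptr<CacheEntry>> cells;
};

Deriving the holder from the entry base is what lets this patch delete the old CompiledFunctionCacheEntry wrapper together with its separate size field.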
+CompiledExpressionCache * CompiledExpressionCacheFactory::tryGetCache() +{ + return cache.get(); +} + +} + +#endif diff --git a/src/Interpreters/JIT/CompiledExpressionCache.h b/src/Interpreters/JIT/CompiledExpressionCache.h new file mode 100644 index 00000000000..5182a77d77a --- /dev/null +++ b/src/Interpreters/JIT/CompiledExpressionCache.h @@ -0,0 +1,61 @@ +#pragma once + +#if !defined(ARCADIA_BUILD) +# include "config_core.h" +#endif + +#if USE_EMBEDDED_COMPILER +# include +# include +# include + +namespace DB +{ + +class CompiledExpressionCacheEntry +{ +public: + explicit CompiledExpressionCacheEntry(size_t compiled_expression_size_) + : compiled_expression_size(compiled_expression_size_) + {} + + size_t getCompiledExpressionSize() const { return compiled_expression_size; } + + virtual ~CompiledExpressionCacheEntry() {} + +private: + + size_t compiled_expression_size = 0; + +}; + +struct CompiledFunctionWeightFunction +{ + size_t operator()(const CompiledExpressionCacheEntry & compiled_function) const + { + return compiled_function.getCompiledExpressionSize(); + } +}; + +class CompiledExpressionCache : public LRUCache +{ +public: + using Base = LRUCache; + using Base::Base; +}; + +class CompiledExpressionCacheFactory +{ +private: + std::unique_ptr cache; + +public: + static CompiledExpressionCacheFactory & instance(); + + void init(size_t cache_size); + CompiledExpressionCache * tryGetCache(); +}; + +} + +#endif diff --git a/src/Interpreters/JIT/compileFunction.cpp b/src/Interpreters/JIT/compileFunction.cpp index 3e326e82246..25198bebca6 100644 --- a/src/Interpreters/JIT/compileFunction.cpp +++ b/src/Interpreters/JIT/compileFunction.cpp @@ -307,137 +307,6 @@ static void compileAddIntoAggregateStatesFunctions(llvm::Module & module, const auto & context = module.getContext(); llvm::IRBuilder<> b(context); - auto * size_type = b.getIntNTy(sizeof(size_t) * 8); - - auto * column_data_type = llvm::StructType::get(b.getInt8PtrTy(), b.getInt8PtrTy()); - auto * get_place_func_declaration = llvm::FunctionType::get(b.getInt8Ty()->getPointerTo(), { b.getInt8Ty()->getPointerTo(), size_type }, /*isVarArg=*/false); - auto * aggregate_loop_func_declaration = llvm::FunctionType::get(b.getVoidTy(), { size_type, column_data_type->getPointerTo(), get_place_func_declaration->getPointerTo(), b.getInt8Ty()->getPointerTo() }, false); - - auto * aggregate_loop_func_definition = llvm::Function::Create(aggregate_loop_func_declaration, llvm::Function::ExternalLinkage, name, module); - - auto * arguments = aggregate_loop_func_definition->args().begin(); - llvm::Value * rows_count_arg = arguments++; - llvm::Value * columns_arg = arguments++; - llvm::Value * get_place_function_arg = arguments++; - llvm::Value * get_place_function_context_arg = arguments++; - - /// Initialize ColumnDataPlaceholder llvm representation of ColumnData - - auto * entry = llvm::BasicBlock::Create(b.getContext(), "entry", aggregate_loop_func_definition); - b.SetInsertPoint(entry); - - std::vector columns; - size_t previous_columns_size = 0; - - for (const auto & function : functions) - { - auto argument_types = function.function->getArgumentTypes(); - - ColumnDataPlaceholder data_placeholder; - - size_t function_arguments_size = argument_types.size(); - - for (size_t column_argument_index = 0; column_argument_index < function_arguments_size; ++column_argument_index) - { - const auto & argument_type = argument_types[column_argument_index]; - auto * data = b.CreateLoad(column_data_type, b.CreateConstInBoundsGEP1_32(column_data_type, 
columns_arg, previous_columns_size + column_argument_index)); - data_placeholder.data_init = b.CreatePointerCast(b.CreateExtractValue(data, {0}), toNativeType(b, removeNullable(argument_type))->getPointerTo()); - data_placeholder.null_init = argument_type->isNullable() ? b.CreateExtractValue(data, {1}) : nullptr; - columns.emplace_back(data_placeholder); - } - - previous_columns_size += function_arguments_size; - } - - /// Initialize loop - - auto * end = llvm::BasicBlock::Create(b.getContext(), "end", aggregate_loop_func_definition); - auto * loop = llvm::BasicBlock::Create(b.getContext(), "loop", aggregate_loop_func_definition); - - b.CreateCondBr(b.CreateICmpEQ(rows_count_arg, llvm::ConstantInt::get(size_type, 0)), end, loop); - - b.SetInsertPoint(loop); - - auto * counter_phi = b.CreatePHI(rows_count_arg->getType(), 2); - counter_phi->addIncoming(llvm::ConstantInt::get(size_type, 0), entry); - - for (auto & col : columns) - { - col.data = b.CreatePHI(col.data_init->getType(), 2); - col.data->addIncoming(col.data_init, entry); - - if (col.null_init) - { - col.null = b.CreatePHI(col.null_init->getType(), 2); - col.null->addIncoming(col.null_init, entry); - } - } - - auto * aggregation_place = b.CreateCall(get_place_func_declaration, get_place_function_arg, { get_place_function_context_arg, counter_phi }); - - previous_columns_size = 0; - for (const auto & function : functions) - { - size_t aggregate_function_offset = function.aggregate_data_offset; - const auto * aggregate_function_ptr = function.function; - - auto arguments_types = function.function->getArgumentTypes(); - std::vector arguments_values; - - size_t function_arguments_size = arguments_types.size(); - arguments_values.resize(function_arguments_size); - - for (size_t column_argument_index = 0; column_argument_index < function_arguments_size; ++column_argument_index) - { - auto * column_argument_data = columns[previous_columns_size + column_argument_index].data; - auto * column_argument_null_data = columns[previous_columns_size + column_argument_index].null; - - auto & argument_type = arguments_types[column_argument_index]; - - auto * value = b.CreateLoad(toNativeType(b, removeNullable(argument_type)), column_argument_data); - if (!argument_type->isNullable()) - { - arguments_values[column_argument_index] = value; - continue; - } - - auto * is_null = b.CreateICmpNE(b.CreateLoad(b.getInt8Ty(), column_argument_null_data), b.getInt8(0)); - auto * nullable_unitilized = llvm::Constant::getNullValue(toNativeType(b, argument_type)); - auto * nullable_value = b.CreateInsertValue(b.CreateInsertValue(nullable_unitilized, value, {0}), is_null, {1}); - arguments_values[column_argument_index] = nullable_value; - } - - auto * aggregation_place_with_offset = b.CreateConstInBoundsGEP1_32(nullptr, aggregation_place, aggregate_function_offset); - aggregate_function_ptr->compileAdd(b, aggregation_place_with_offset, arguments_types, arguments_values); - - previous_columns_size += function_arguments_size; - } - - /// End of loop - - auto * cur_block = b.GetInsertBlock(); - for (auto & col : columns) - { - col.data->addIncoming(b.CreateConstInBoundsGEP1_32(nullptr, col.data, 1), cur_block); - - if (col.null) - col.null->addIncoming(b.CreateConstInBoundsGEP1_32(nullptr, col.null, 1), cur_block); - } - - auto * value = b.CreateAdd(counter_phi, llvm::ConstantInt::get(size_type, 1)); - counter_phi->addIncoming(value, cur_block); - - b.CreateCondBr(b.CreateICmpEQ(value, rows_count_arg), end, loop); - - b.SetInsertPoint(end); - b.CreateRetVoid(); -} - 
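For context on the two deletions here: the body removed above generated the add-into-states loop around a per-row callback (a GetAggregateDataFunction plus its context pointer), while the V2 body removed next lives on as the only implementation, taking the aggregate state pointers already resolved into an array. A hand-written sketch of the two calling conventions, with simplified types, illustrative function names, and the per-row aggregate updates elided:

#include <cstddef>

using AggregateDataPtr = char *;

/// Simplified column view; the real ColumnData also carries a null map pointer.
struct ColumnData
{
    const char * data = nullptr;
    const char * null = nullptr;
};

using GetAggregateDataContext = char *;
using GetAggregateDataFunction = AggregateDataPtr (*)(GetAggregateDataContext, size_t);

/// Removed convention: the generated loop called back into the engine per row.
void addIntoStatesViaCallback(size_t rows, ColumnData * /*columns*/, GetAggregateDataFunction get_place, GetAggregateDataContext context)
{
    for (size_t row = 0; row < rows; ++row)
    {
        AggregateDataPtr place = get_place(context, row);
        (void)place; /// ... update every aggregate state at its offset from place ...
    }
}

/// Surviving convention: the caller resolves places[row] from the hash table
/// first, so the compiled loop only indexes an array, with no indirect call per row.
void addIntoStatesViaPlaces(size_t rows, ColumnData * /*columns*/, AggregateDataPtr * places)
{
    for (size_t row = 0; row < rows; ++row)
    {
        AggregateDataPtr place = places[row];
        (void)place; /// ... update every aggregate state at its offset from place ...
    }
}

This mirrors the typedef change later in the patch: JITAddIntoAggregateStatesFunction becomes void (*)(ColumnDataRowsSize, ColumnData *, AggregateDataPtr *), GetAggregateDataFunction disappears, and the V2 alias is folded into the original name.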
-static void compileAddIntoAggregateStatesFunctionsV2(llvm::Module & module, const std::vector & functions, const std::string & name) -{ - auto & context = module.getContext(); - llvm::IRBuilder<> b(context); - auto * size_type = b.getIntNTy(sizeof(size_t) * 8); auto * places_type = b.getInt8Ty()->getPointerTo()->getPointerTo(); auto * column_data_type = llvm::StructType::get(b.getInt8PtrTy(), b.getInt8PtrTy()); @@ -699,7 +568,6 @@ CompiledAggregateFunctions compileAggregateFunctons(CHJIT & jit, const std::vect { std::string create_aggregate_states_functions_name = functions_dump_name + "_create"; std::string add_aggregate_states_functions_name = functions_dump_name + "_add"; - std::string add_aggregate_states_functions_name_v2 = functions_dump_name + "_add_v2"; std::string merge_aggregate_states_functions_name = functions_dump_name + "_merge"; std::string insert_aggregate_states_functions_name = functions_dump_name + "_insert"; @@ -707,20 +575,17 @@ CompiledAggregateFunctions compileAggregateFunctons(CHJIT & jit, const std::vect { compileCreateAggregateStatesFunctions(module, functions, create_aggregate_states_functions_name); compileAddIntoAggregateStatesFunctions(module, functions, add_aggregate_states_functions_name); - compileAddIntoAggregateStatesFunctionsV2(module, functions, add_aggregate_states_functions_name_v2); compileMergeAggregatesStates(module, functions, merge_aggregate_states_functions_name); compileInsertAggregatesIntoResultColumns(module, functions, insert_aggregate_states_functions_name); }); auto create_aggregate_states_function = reinterpret_cast(compiled_module.function_name_to_symbol[create_aggregate_states_functions_name]); auto add_into_aggregate_states_function = reinterpret_cast(compiled_module.function_name_to_symbol[add_aggregate_states_functions_name]); - auto add_into_aggregate_states_function_v2 = reinterpret_cast(compiled_module.function_name_to_symbol[add_aggregate_states_functions_name_v2]); auto merge_aggregate_states_function = reinterpret_cast(compiled_module.function_name_to_symbol[merge_aggregate_states_functions_name]); auto insert_aggregate_states_function = reinterpret_cast(compiled_module.function_name_to_symbol[insert_aggregate_states_functions_name]); assert(create_aggregate_states_function); assert(add_into_aggregate_states_function); - assert(add_into_aggregate_states_function_v2); assert(merge_aggregate_states_function); assert(insert_aggregate_states_function); @@ -728,7 +593,6 @@ CompiledAggregateFunctions compileAggregateFunctons(CHJIT & jit, const std::vect { .create_aggregate_states_function = create_aggregate_states_function, .add_into_aggregate_states_function = add_into_aggregate_states_function, - .add_into_aggregate_states_function_v2 = add_into_aggregate_states_function_v2, .merge_aggregate_states_function = merge_aggregate_states_function, .insert_aggregates_into_columns_function = insert_aggregate_states_function, diff --git a/src/Interpreters/JIT/compileFunction.h b/src/Interpreters/JIT/compileFunction.h index 3a0435b098c..2a2f2a0d20a 100644 --- a/src/Interpreters/JIT/compileFunction.h +++ b/src/Interpreters/JIT/compileFunction.h @@ -41,13 +41,8 @@ struct CompiledFunction }; /** Compile function to native jit code using CHJIT instance. - * Function is compiled as single module. - * After this function execution, code for function will be compiled and can be queried using - * findCompiledFunction with function name. 
- * Compiled function can be safely casted to JITCompiledFunction type and must be called with - * valid ColumnData and ColumnDataRowsSize. - * It is important that ColumnData parameter of JITCompiledFunction is result column, - * and will be filled by compiled function. + * It is client responsibility to match ColumnData arguments size with + * function arguments size and additional ColumnData for result. */ CompiledFunction compileFunction(CHJIT & jit, const IFunctionBase & function); @@ -57,12 +52,8 @@ struct AggregateFunctionWithOffset size_t aggregate_data_offset; }; -using GetAggregateDataContext = char *; -using GetAggregateDataFunction = AggregateDataPtr (*)(GetAggregateDataContext, size_t); - using JITCreateAggregateStatesFunction = void (*)(AggregateDataPtr); -using JITAddIntoAggregateStatesFunction = void (*)(ColumnDataRowsSize, ColumnData *, GetAggregateDataFunction, GetAggregateDataContext); -using JITAddIntoAggregateStatesFunctionV2 = void (*)(ColumnDataRowsSize, ColumnData *, AggregateDataPtr *); +using JITAddIntoAggregateStatesFunction = void (*)(ColumnDataRowsSize, ColumnData *, AggregateDataPtr *); using JITMergeAggregateStatesFunction = void (*)(AggregateDataPtr, AggregateDataPtr); using JITInsertAggregatesIntoColumnsFunction = void (*)(ColumnDataRowsSize, ColumnData *, AggregateDataPtr *); @@ -70,7 +61,6 @@ struct CompiledAggregateFunctions { JITCreateAggregateStatesFunction create_aggregate_states_function; JITAddIntoAggregateStatesFunction add_into_aggregate_states_function; - JITAddIntoAggregateStatesFunctionV2 add_into_aggregate_states_function_v2; JITMergeAggregateStatesFunction merge_aggregate_states_function; JITInsertAggregatesIntoColumnsFunction insert_aggregates_into_columns_function; diff --git a/src/Storages/MergeTree/MergeTreeDataSelectExecutor.cpp b/src/Storages/MergeTree/MergeTreeDataSelectExecutor.cpp index 1d781dbc08e..cffedf44823 100644 --- a/src/Storages/MergeTree/MergeTreeDataSelectExecutor.cpp +++ b/src/Storages/MergeTree/MergeTreeDataSelectExecutor.cpp @@ -303,7 +303,6 @@ QueryPlanPtr MergeTreeDataSelectExecutor::read( settings.min_free_disk_space_for_temporary_data, settings.compile_expressions, settings.min_count_to_compile_aggregate_expression, - settings.aggregation_method, header_before_aggregation); // The source header is also an intermediate header transform_params = std::make_shared(std::move(params), query_info.projection->aggregate_final); @@ -334,8 +333,7 @@ QueryPlanPtr MergeTreeDataSelectExecutor::read( settings.max_threads, settings.min_free_disk_space_for_temporary_data, settings.compile_aggregate_expressions, - settings.min_count_to_compile_aggregate_expression, - settings.aggregation_method); + settings.min_count_to_compile_aggregate_expression); transform_params = std::make_shared(std::move(params), query_info.projection->aggregate_final); } From 30021f0335d80352db978e97fe328b5aab283d34 Mon Sep 17 00:00:00 2001 From: Maksim Kita Date: Sun, 13 Jun 2021 17:49:56 +0300 Subject: [PATCH 647/931] Fixed tests --- src/Interpreters/Aggregator.cpp | 1 - 1 file changed, 1 deletion(-) diff --git a/src/Interpreters/Aggregator.cpp b/src/Interpreters/Aggregator.cpp index 3543783494b..d0d073f9905 100644 --- a/src/Interpreters/Aggregator.cpp +++ b/src/Interpreters/Aggregator.cpp @@ -240,7 +240,6 @@ public: symbol_names += ' '; } - std::cerr << "CompiledAggregateFunctionsHolder::~CompiledAggregateFunctionsHolder " << symbol_names << std::endl; getJITInstance().deleteCompiledModule(compiled_aggregate_functions.compiled_module); } From 
61a5c4f49361b5e73ec029480100afc42d8b2f5f Mon Sep 17 00:00:00 2001 From: Maksim Kita Date: Tue, 15 Jun 2021 13:35:23 +0300 Subject: [PATCH 648/931] Fix min, max aggregate functions merge --- .../AggregateFunctionAvgWeighted.h | 48 ++++++----- .../AggregateFunctionMinMaxAny.h | 8 +- .../00165_jit_aggregate_functions.reference | 86 +++++++++++++++++++ .../00165_jit_aggregate_functions.sql | 29 +++++++ 4 files changed, 144 insertions(+), 27 deletions(-) create mode 100644 tests/queries/1_stateful/00165_jit_aggregate_functions.reference create mode 100644 tests/queries/1_stateful/00165_jit_aggregate_functions.sql diff --git a/src/AggregateFunctions/AggregateFunctionAvgWeighted.h b/src/AggregateFunctions/AggregateFunctionAvgWeighted.h index d9df661ab18..71b0cb6a735 100644 --- a/src/AggregateFunctions/AggregateFunctionAvgWeighted.h +++ b/src/AggregateFunctions/AggregateFunctionAvgWeighted.h @@ -48,42 +48,44 @@ public: bool isCompilable() const override { - bool can_be_compiled = Base::isCompilable(); - can_be_compiled &= canBeNativeType(); + /// TODO: FIX + // bool can_be_compiled = Base::isCompilable(); + // can_be_compiled &= canBeNativeType(); - return can_be_compiled; + return false; } - void compileAdd(llvm::IRBuilderBase & builder, llvm::Value * aggregate_data_ptr, const DataTypes & arguments_types, const std::vector & argument_values) const override - { - llvm::IRBuilder<> & b = static_cast &>(builder); + // void compileAdd(llvm::IRBuilderBase & builder, llvm::Value * aggregate_data_ptr, const DataTypes & arguments_types, const std::vector & argument_values) const override + // { + /// TODO: FIX + // llvm::IRBuilder<> & b = static_cast &>(builder); - auto * numerator_type = toNativeType(b); + // auto * numerator_type = toNativeType(b); - auto * numerator_ptr = b.CreatePointerCast(aggregate_data_ptr, numerator_type->getPointerTo()); - auto * numerator_value = b.CreateLoad(numerator_type, numerator_ptr); + // auto * numerator_ptr = b.CreatePointerCast(aggregate_data_ptr, numerator_type->getPointerTo()); + // auto * numerator_value = b.CreateLoad(numerator_type, numerator_ptr); - const auto & argument = nativeCast(b, arguments_types[0], argument_values[0], numerator_type); - const auto & weight = nativeCast(b, arguments_types[1], argument_values[1], numerator_type); + // const auto & argument = nativeCast(b, arguments_types[0], argument_values[0], numerator_type); + // const auto & weight = nativeCast(b, arguments_types[1], argument_values[1], numerator_type); - llvm::Value * value_weight_multiplication = argument->getType()->isIntegerTy() ? b.CreateMul(argument, weight) : b.CreateFMul(argument, weight); + // llvm::Value * value_weight_multiplication = argument->getType()->isIntegerTy() ? b.CreateMul(argument, weight) : b.CreateFMul(argument, weight); - /// TODO: Fix accuracy - auto * numerator_result_value = numerator_type->isIntegerTy() ? b.CreateAdd(numerator_value, value_weight_multiplication) : b.CreateFAdd(numerator_value, value_weight_multiplication); - b.CreateStore(numerator_result_value, numerator_ptr); + // /// TODO: Fix accuracy + // auto * numerator_result_value = numerator_type->isIntegerTy() ? 
b.CreateAdd(numerator_value, value_weight_multiplication) : b.CreateFAdd(numerator_value, value_weight_multiplication); + // b.CreateStore(numerator_result_value, numerator_ptr); - auto * denominator_type = toNativeType(b); + // auto * denominator_type = toNativeType(b); - auto * denominator_offset_ptr = b.CreateConstGEP1_32(nullptr, aggregate_data_ptr, sizeof(Numerator)); - auto * denominator_ptr = b.CreatePointerCast(denominator_offset_ptr, denominator_type->getPointerTo()); + // auto * denominator_offset_ptr = b.CreateConstGEP1_32(nullptr, aggregate_data_ptr, sizeof(Numerator)); + // auto * denominator_ptr = b.CreatePointerCast(denominator_offset_ptr, denominator_type->getPointerTo()); - auto * weight_cast_to_denominator = nativeCast(b, arguments_types[1], argument_values[1], numerator_type); + // auto * weight_cast_to_denominator = nativeCast(b, arguments_types[1], argument_values[1], numerator_type); - auto * denominator_value = b.CreateLoad(denominator_type, denominator_ptr); - auto * denominator_value_updated = denominator_type->isIntegerTy() ? b.CreateAdd(denominator_value, weight_cast_to_denominator) : b.CreateFAdd(denominator_value, weight_cast_to_denominator); + // auto * denominator_value = b.CreateLoad(denominator_type, denominator_ptr); + // auto * denominator_value_updated = denominator_type->isIntegerTy() ? b.CreateAdd(denominator_value, weight_cast_to_denominator) : b.CreateFAdd(denominator_value, weight_cast_to_denominator); - b.CreateStore(denominator_value_updated, denominator_ptr); - } + // b.CreateStore(denominator_value_updated, denominator_ptr); + // } #endif diff --git a/src/AggregateFunctions/AggregateFunctionMinMaxAny.h b/src/AggregateFunctions/AggregateFunctionMinMaxAny.h index d9093a83211..23dad0c097c 100644 --- a/src/AggregateFunctions/AggregateFunctionMinMaxAny.h +++ b/src/AggregateFunctions/AggregateFunctionMinMaxAny.h @@ -391,16 +391,16 @@ public: if constexpr (is_less) { if (value_src->getType()->isIntegerTy()) - should_change_after_comparison = is_signed ? b.CreateICmpSLT(value_dst, value_src) : b.CreateICmpULT(value_dst, value_src); + should_change_after_comparison = is_signed ? b.CreateICmpSLT(value_src, value_dst) : b.CreateICmpULT(value_src, value_dst); else - should_change_after_comparison = b.CreateFCmpOLT(value_dst, value_src); + should_change_after_comparison = b.CreateFCmpOLT(value_src, value_dst); } else { if (value_src->getType()->isIntegerTy()) - should_change_after_comparison = is_signed ? b.CreateICmpSGT(value_dst, value_src) : b.CreateICmpUGT(value_dst, value_src); + should_change_after_comparison = is_signed ? 
b.CreateICmpSGT(value_src, value_dst) : b.CreateICmpUGT(value_src, value_dst); else - should_change_after_comparison = b.CreateFCmpOGT(value_dst, value_src); + should_change_after_comparison = b.CreateFCmpOGT(value_src, value_dst); } b.CreateCondBr(b.CreateAnd(has_value_src, b.CreateOr(b.CreateNot(has_value_dst), should_change_after_comparison)), if_should_change, if_should_not_change); diff --git a/tests/queries/1_stateful/00165_jit_aggregate_functions.reference b/tests/queries/1_stateful/00165_jit_aggregate_functions.reference new file mode 100644 index 00000000000..f6dea736ccd --- /dev/null +++ b/tests/queries/1_stateful/00165_jit_aggregate_functions.reference @@ -0,0 +1,86 @@ +Aggregation using JIT compilation +Simple functions +1704509 4611700827100483880 9223360787015464643 10441337359398154812 19954243669348.844 +732797 4611701940806302259 9223355550934604746 977192643464016658 2054229034942.3723 +598875 4611701407242345792 9223362250391155632 9312163881623734456 27615161624211.875 +792887 4611699550286611812 9223290551912005343 6930300520201292824 27479710385933.586 +3807842 4611710821592843606 9223326163906184987 16710274896338005145 85240848090850.69 +25703952 4611709443519524003 9223353913449113943 9946868158853570839 67568783303242.086 +716829 4611852156092872082 9223361623076951140 15381015774917924786 170693446547158.72 +59183 4611730685242027332 9223354909338698162 8078812522502896568 94622946187035.42 +33010362 4611704682869732882 9223268545373999677 2064452191838585926 26532987929602.555 +800784 4611752907938305166 9223340418389788041 18082918611792817587 233352070043266.62 +20810645 4611712185532639162 9223218900001937412 4996531385439292694 68246505203164.63 +25843850 4611690025407720929 9223346023778617822 12755881190906812868 185015319325648.16 +23447120 4611796031755620254 9223329309291309758 17231649548755339966 255019232629204.38 +14739804 4611692230555590277 9223313509005166531 2458378896777063244 38308020331864.36 +32077710 4611884228437061959 9223352444952988904 12965822147651192908 214467085941034.7 +22446879 4611846229717089436 9223124373140579096 13530160492087688838 231724477077663.4 +170282 4611833225706935900 9223371583739401906 8076893424988479310 141657635880324.8 +11482817 4611708000353743073 9223337838355779113 14841435427430843458 283531099960470.8 +63469 4611695097019173921 9223353530156141191 6296784708578574520 120762239817777.88 +29103473 4611744585914335132 9223333530281362537 5908285283932344933 123712996438970.34 +Simple functions if combinator +1704509 4611700827100483880 9223310246721229500 16398241567152875142 62618822667209.71 +732797 4611721382223060002 9223355550934604746 16281585268876620522 68472164943295.68 +598875 4611701407242345792 9223362250391155632 3577699408183553052 21300140553347.42 +792887 4611699550286611812 9223164887726235740 7088177025760385824 56461952267903.89 +3807842 4611710821592843606 9223283397553859544 5756765290752687660 58835559208469.4 +25703952 4611784761593342388 9223241341744449690 4782279928971192568 65182094768443.91 +716829 4611852156092872082 9223361623076951140 8613712481895484190 191445613359755.62 +59183 4611730685242027332 9223354909338698162 18369075291092794110 429013599530392 +33010362 4611704682869732882 9223092117352620518 9991152681891671022 257099731913529.5 +800784 4611752907938305166 9223309994342931384 5251877538869750510 135472890315726.03 +20810645 4611712185532639162 9223218900001937412 11803718472901310700 323593455407553 +25843850 4611744529689964352 9223346023778617822 
127137885677350808 3700925266420.715 +23447120 4611796031755620254 9223329309291309758 1841522159325376278 54534534450526.42 +14739804 4611762063154116632 9223007205463222212 16302703534054321116 506987919332451.8 +32077710 4612033458080771112 9223352444952988904 421072759851674408 13955745719596.793 +22446879 4611846229717089436 9223124373140579096 6577134317587565298 224866980668999.47 +170282 4611833225706935900 9223371583739401906 15764226366913732386 551447384017691 +11482817 4611990575414646848 9223302669582414438 9828522700609834800 378121905921203.2 +63469 4612175339998036670 9222961628400798084 17239621485933250238 663164390134376.5 +29103473 4611744585914335132 9223035551850347954 12590190375872647672 525927999326314.7 +Aggregation without JIT compilation +Simple functions +1704509 4611700827100483880 9223360787015464643 10441337359398154812 19954243669348.844 +732797 4611701940806302259 9223355550934604746 977192643464016658 2054229034942.3723 +598875 4611701407242345792 9223362250391155632 9312163881623734456 27615161624211.875 +792887 4611699550286611812 9223290551912005343 6930300520201292824 27479710385933.586 +3807842 4611710821592843606 9223326163906184987 16710274896338005145 85240848090850.69 +25703952 4611709443519524003 9223353913449113943 9946868158853570839 67568783303242.086 +716829 4611852156092872082 9223361623076951140 15381015774917924786 170693446547158.72 +59183 4611730685242027332 9223354909338698162 8078812522502896568 94622946187035.42 +33010362 4611704682869732882 9223268545373999677 2064452191838585926 26532987929602.555 +800784 4611752907938305166 9223340418389788041 18082918611792817587 233352070043266.62 +20810645 4611712185532639162 9223218900001937412 4996531385439292694 68246505203164.63 +25843850 4611690025407720929 9223346023778617822 12755881190906812868 185015319325648.16 +23447120 4611796031755620254 9223329309291309758 17231649548755339966 255019232629204.38 +14739804 4611692230555590277 9223313509005166531 2458378896777063244 38308020331864.36 +32077710 4611884228437061959 9223352444952988904 12965822147651192908 214467085941034.7 +22446879 4611846229717089436 9223124373140579096 13530160492087688838 231724477077663.4 +170282 4611833225706935900 9223371583739401906 8076893424988479310 141657635880324.8 +11482817 4611708000353743073 9223337838355779113 14841435427430843458 283531099960470.8 +63469 4611695097019173921 9223353530156141191 6296784708578574520 120762239817777.88 +29103473 4611744585914335132 9223333530281362537 5908285283932344933 123712996438970.34 +Simple functions if combinator +1704509 4611700827100483880 9223310246721229500 16398241567152875142 62618822667209.71 +732797 4611721382223060002 9223355550934604746 16281585268876620522 68472164943295.68 +598875 4611701407242345792 9223362250391155632 3577699408183553052 21300140553347.42 +792887 4611699550286611812 9223164887726235740 7088177025760385824 56461952267903.89 +3807842 4611710821592843606 9223283397553859544 5756765290752687660 58835559208469.4 +25703952 4611784761593342388 9223241341744449690 4782279928971192568 65182094768443.91 +716829 4611852156092872082 9223361623076951140 8613712481895484190 191445613359755.62 +59183 4611730685242027332 9223354909338698162 18369075291092794110 429013599530392 +33010362 4611704682869732882 9223092117352620518 9991152681891671022 257099731913529.5 +800784 4611752907938305166 9223309994342931384 5251877538869750510 135472890315726.03 +20810645 4611712185532639162 9223218900001937412 11803718472901310700 323593455407553 +25843850 
4611744529689964352 9223346023778617822 127137885677350808 3700925266420.715 +23447120 4611796031755620254 9223329309291309758 1841522159325376278 54534534450526.42 +14739804 4611762063154116632 9223007205463222212 16302703534054321116 506987919332451.8 +32077710 4612033458080771112 9223352444952988904 421072759851674408 13955745719596.793 +22446879 4611846229717089436 9223124373140579096 6577134317587565298 224866980668999.47 +170282 4611833225706935900 9223371583739401906 15764226366913732386 551447384017691 +11482817 4611990575414646848 9223302669582414438 9828522700609834800 378121905921203.2 +63469 4612175339998036670 9222961628400798084 17239621485933250238 663164390134376.5 +29103473 4611744585914335132 9223035551850347954 12590190375872647672 525927999326314.7 diff --git a/tests/queries/1_stateful/00165_jit_aggregate_functions.sql b/tests/queries/1_stateful/00165_jit_aggregate_functions.sql new file mode 100644 index 00000000000..a0523186850 --- /dev/null +++ b/tests/queries/1_stateful/00165_jit_aggregate_functions.sql @@ -0,0 +1,29 @@ +SET compile_aggregate_expressions = 1; +SET min_count_to_compile_aggregate_expression = 0; + +SELECT 'Aggregation using JIT compilation'; +SELECT 'Simple functions'; + +SELECT CounterID, min(WatchID), max(WatchID), sum(WatchID), avg(WatchID) FROM test.hits +GROUP BY CounterID ORDER BY count() DESC LIMIT 20; + +SELECT 'Simple functions if combinator'; + +WITH (WatchID % 2 == 0) AS predicate +SELECT CounterID, minIf(WatchID,predicate), maxIf(WatchID, predicate), sumIf(WatchID, predicate), avgIf(WatchID, predicate) FROM test.hits +GROUP BY CounterID ORDER BY count() DESC LIMIT 20; + +SET compile_aggregate_expressions = 0; + +SELECT 'Aggregation without JIT compilation'; + +SELECT 'Simple functions'; + +SELECT CounterID, min(WatchID), max(WatchID), sum(WatchID), avg(WatchID) FROM test.hits +GROUP BY CounterID ORDER BY count() DESC LIMIT 20; + +SELECT 'Simple functions if combinator'; + +WITH (WatchID % 2 == 0) AS predicate +SELECT CounterID, minIf(WatchID,predicate), maxIf(WatchID, predicate), sumIf(WatchID, predicate), avgIf(WatchID, predicate) FROM test.hits +GROUP BY CounterID ORDER BY count() DESC LIMIT 20; From 246ba45082e7ecea51e2afed57346a5dcc3bd4ad Mon Sep 17 00:00:00 2001 From: Maksim Kita Date: Tue, 15 Jun 2021 13:59:24 +0300 Subject: [PATCH 649/931] Added hits_100m_single benchmark --- tests/performance/jit_aggregate_functions.xml | 69 +++++++++++++++++-- 1 file changed, 63 insertions(+), 6 deletions(-) diff --git a/tests/performance/jit_aggregate_functions.xml b/tests/performance/jit_aggregate_functions.xml index 460b36288d0..ac623656f26 100644 --- a/tests/performance/jit_aggregate_functions.xml +++ b/tests/performance/jit_aggregate_functions.xml @@ -1,4 +1,8 @@ + + hits_100m_single + + CREATE TABLE jit_test_memory ( key UInt64, @@ -71,6 +75,13 @@ jit_test_merge_tree_nullable + + + group_scale + + 1000000 + + @@ -122,15 +133,61 @@ SELECT - {function}If(value_1), - {function}If(value_2), - {function}If(value_3), - {function}If(value_4), - {function}If(value_5) + {function}If(value_1, predicate), + {function}If(value_2, predicate), + {function}If(value_3, predicate), + {function}If(value_4, predicate), + {function}If(value_5, predicate) FROM {table} GROUP BY key FORMAT Null + + SELECT + {function}(WatchID), + {function}(CounterID), + {function}(ClientIP) + FROM hits_100m_single + GROUP BY intHash32(UserID) % {group_scale} + FORMAT Null + + + + SELECT + {function}(WatchID), + {function}(CounterID), + {function}(ClientIP), + 
{function}(GoodEvent), + {function}(CounterClass) + FROM hits_100m_single + GROUP BY intHash32(UserID) % {group_scale} + FORMAT Null + + + + WITH (WatchID % 2 == 0) AS predicate + SELECT + {function}If(WatchID, predicate), + {function}If(CounterID, predicate), + {function}If(ClientIP, predicate) + FROM hits_100m_single + GROUP BY intHash32(UserID) % {group_scale} + FORMAT Null + + + + WITH (WatchID % 2 == 0) AS predicate + SELECT + {function}If(WatchID, predicate), + {function}If(CounterID, predicate), + {function}If(ClientIP, predicate), + {function}If(GoodEvent, predicate), + {function}If(CounterClass, predicate) + FROM hits_100m_single + GROUP BY intHash32(UserID) % {group_scale} + FORMAT Null + + DROP TABLE IF EXISTS {table} - \ No newline at end of file + From fdf172a61eab53057723eddd16f75f2473592b7a Mon Sep 17 00:00:00 2001 From: Maksim Kita Date: Wed, 16 Jun 2021 12:12:23 +0300 Subject: [PATCH 650/931] Fixed tests --- src/Interpreters/Aggregator.cpp | 26 +++++++++++++++++++ ...t_aggregation_function_sum_long.reference} | 0 ...890_jit_aggregation_function_sum_long.sql} | 0 ...t_aggregation_function_any_long.reference} | 0 ...891_jit_aggregation_function_any_long.sql} | 0 ...regation_function_any_last_long.reference} | 0 ...it_aggregation_function_any_last_long.sql} | 0 ...t_aggregation_function_min_long.reference} | 0 ...893_jit_aggregation_function_min_long.sql} | 0 ...t_aggregation_function_max_long.reference} | 0 ...894_jit_aggregation_function_max_long.sql} | 0 ...t_aggregation_function_avg_long.reference} | 0 ...895_jit_aggregation_function_avg_long.sql} | 0 ...it_aggregation_function_if_long.reference} | 0 ...1896_jit_aggregation_function_if_long.sql} | 0 ...tion_function_avg_weighted_long.reference} | 0 ...ggregation_function_avg_weighted_long.sql} | 0 17 files changed, 26 insertions(+) rename tests/queries/0_stateless/{01890_jit_aggregation_function_sum.reference => 01890_jit_aggregation_function_sum_long.reference} (100%) rename tests/queries/0_stateless/{01890_jit_aggregation_function_sum.sql => 01890_jit_aggregation_function_sum_long.sql} (100%) rename tests/queries/0_stateless/{01891_jit_aggregation_function_any.reference => 01891_jit_aggregation_function_any_long.reference} (100%) rename tests/queries/0_stateless/{01891_jit_aggregation_function_any.sql => 01891_jit_aggregation_function_any_long.sql} (100%) rename tests/queries/0_stateless/{01892_jit_aggregation_function_any_last.reference => 01892_jit_aggregation_function_any_last_long.reference} (100%) rename tests/queries/0_stateless/{01892_jit_aggregation_function_any_last.sql => 01892_jit_aggregation_function_any_last_long.sql} (100%) rename tests/queries/0_stateless/{01893_jit_aggregation_function_min.reference => 01893_jit_aggregation_function_min_long.reference} (100%) rename tests/queries/0_stateless/{01893_jit_aggregation_function_min.sql => 01893_jit_aggregation_function_min_long.sql} (100%) rename tests/queries/0_stateless/{01894_jit_aggregation_function_max.reference => 01894_jit_aggregation_function_max_long.reference} (100%) rename tests/queries/0_stateless/{01894_jit_aggregation_function_max.sql => 01894_jit_aggregation_function_max_long.sql} (100%) rename tests/queries/0_stateless/{01895_jit_aggregation_function_avg.reference => 01895_jit_aggregation_function_avg_long.reference} (100%) rename tests/queries/0_stateless/{01895_jit_aggregation_function_avg.sql => 01895_jit_aggregation_function_avg_long.sql} (100%) rename tests/queries/0_stateless/{01896_jit_aggregation_function_if.reference => 
01896_jit_aggregation_function_if_long.reference} (100%) rename tests/queries/0_stateless/{01896_jit_aggregation_function_if.sql => 01896_jit_aggregation_function_if_long.sql} (100%) rename tests/queries/0_stateless/{01897_jit_aggregation_function_avg_weighted.reference => 01897_jit_aggregation_function_avg_weighted_long.reference} (100%) rename tests/queries/0_stateless/{01897_jit_aggregation_function_avg_weighted.sql => 01897_jit_aggregation_function_avg_weighted_long.sql} (100%) diff --git a/src/Interpreters/Aggregator.cpp b/src/Interpreters/Aggregator.cpp index d0d073f9905..bb69504b9f7 100644 --- a/src/Interpreters/Aggregator.cpp +++ b/src/Interpreters/Aggregator.cpp @@ -679,8 +679,27 @@ void NO_INLINE Aggregator::executeImplBatch( size_t compiled_functions_count = 0; #if USE_EMBEDDED_COMPILER + +#if defined(MEMORY_SANITIZER) + size_t compiled_functions_places_size = 0; +#endif + if constexpr (use_compiled_functions) + { compiled_functions_count = compiled_aggregate_functions_holder->compiled_aggregate_functions.functions_count; + +#if defined(MEMORY_SANITIZER) + + if (compiled_functions_count < offsets_of_aggregate_states.size()) + { + compiled_functions_places_size = offsets_of_aggregate_states[compiled_functions_count]; + } + else + { + compiled_functions_places_size = total_size_of_aggregate_states; + } +#endif + } #endif std::unique_ptr places(new AggregateDataPtr[rows]); @@ -704,8 +723,15 @@ void NO_INLINE Aggregator::executeImplBatch( #if USE_EMBEDDED_COMPILER if constexpr (use_compiled_functions) + { compiled_aggregate_functions_holder->compiled_aggregate_functions.create_aggregate_states_function(aggregate_data); + +#if defined(MEMORY_SANITIZER) + __msan_unpoison(aggregate_data, compiled_functions_places_size); #endif + } +#endif + createAggregateStates(compiled_functions_count, aggregate_data); emplace_result.setMapped(aggregate_data); diff --git a/tests/queries/0_stateless/01890_jit_aggregation_function_sum.reference b/tests/queries/0_stateless/01890_jit_aggregation_function_sum_long.reference similarity index 100% rename from tests/queries/0_stateless/01890_jit_aggregation_function_sum.reference rename to tests/queries/0_stateless/01890_jit_aggregation_function_sum_long.reference diff --git a/tests/queries/0_stateless/01890_jit_aggregation_function_sum.sql b/tests/queries/0_stateless/01890_jit_aggregation_function_sum_long.sql similarity index 100% rename from tests/queries/0_stateless/01890_jit_aggregation_function_sum.sql rename to tests/queries/0_stateless/01890_jit_aggregation_function_sum_long.sql diff --git a/tests/queries/0_stateless/01891_jit_aggregation_function_any.reference b/tests/queries/0_stateless/01891_jit_aggregation_function_any_long.reference similarity index 100% rename from tests/queries/0_stateless/01891_jit_aggregation_function_any.reference rename to tests/queries/0_stateless/01891_jit_aggregation_function_any_long.reference diff --git a/tests/queries/0_stateless/01891_jit_aggregation_function_any.sql b/tests/queries/0_stateless/01891_jit_aggregation_function_any_long.sql similarity index 100% rename from tests/queries/0_stateless/01891_jit_aggregation_function_any.sql rename to tests/queries/0_stateless/01891_jit_aggregation_function_any_long.sql diff --git a/tests/queries/0_stateless/01892_jit_aggregation_function_any_last.reference b/tests/queries/0_stateless/01892_jit_aggregation_function_any_last_long.reference similarity index 100% rename from tests/queries/0_stateless/01892_jit_aggregation_function_any_last.reference rename to 
tests/queries/0_stateless/01892_jit_aggregation_function_any_last_long.reference diff --git a/tests/queries/0_stateless/01892_jit_aggregation_function_any_last.sql b/tests/queries/0_stateless/01892_jit_aggregation_function_any_last_long.sql similarity index 100% rename from tests/queries/0_stateless/01892_jit_aggregation_function_any_last.sql rename to tests/queries/0_stateless/01892_jit_aggregation_function_any_last_long.sql diff --git a/tests/queries/0_stateless/01893_jit_aggregation_function_min.reference b/tests/queries/0_stateless/01893_jit_aggregation_function_min_long.reference similarity index 100% rename from tests/queries/0_stateless/01893_jit_aggregation_function_min.reference rename to tests/queries/0_stateless/01893_jit_aggregation_function_min_long.reference diff --git a/tests/queries/0_stateless/01893_jit_aggregation_function_min.sql b/tests/queries/0_stateless/01893_jit_aggregation_function_min_long.sql similarity index 100% rename from tests/queries/0_stateless/01893_jit_aggregation_function_min.sql rename to tests/queries/0_stateless/01893_jit_aggregation_function_min_long.sql diff --git a/tests/queries/0_stateless/01894_jit_aggregation_function_max.reference b/tests/queries/0_stateless/01894_jit_aggregation_function_max_long.reference similarity index 100% rename from tests/queries/0_stateless/01894_jit_aggregation_function_max.reference rename to tests/queries/0_stateless/01894_jit_aggregation_function_max_long.reference diff --git a/tests/queries/0_stateless/01894_jit_aggregation_function_max.sql b/tests/queries/0_stateless/01894_jit_aggregation_function_max_long.sql similarity index 100% rename from tests/queries/0_stateless/01894_jit_aggregation_function_max.sql rename to tests/queries/0_stateless/01894_jit_aggregation_function_max_long.sql diff --git a/tests/queries/0_stateless/01895_jit_aggregation_function_avg.reference b/tests/queries/0_stateless/01895_jit_aggregation_function_avg_long.reference similarity index 100% rename from tests/queries/0_stateless/01895_jit_aggregation_function_avg.reference rename to tests/queries/0_stateless/01895_jit_aggregation_function_avg_long.reference diff --git a/tests/queries/0_stateless/01895_jit_aggregation_function_avg.sql b/tests/queries/0_stateless/01895_jit_aggregation_function_avg_long.sql similarity index 100% rename from tests/queries/0_stateless/01895_jit_aggregation_function_avg.sql rename to tests/queries/0_stateless/01895_jit_aggregation_function_avg_long.sql diff --git a/tests/queries/0_stateless/01896_jit_aggregation_function_if.reference b/tests/queries/0_stateless/01896_jit_aggregation_function_if_long.reference similarity index 100% rename from tests/queries/0_stateless/01896_jit_aggregation_function_if.reference rename to tests/queries/0_stateless/01896_jit_aggregation_function_if_long.reference diff --git a/tests/queries/0_stateless/01896_jit_aggregation_function_if.sql b/tests/queries/0_stateless/01896_jit_aggregation_function_if_long.sql similarity index 100% rename from tests/queries/0_stateless/01896_jit_aggregation_function_if.sql rename to tests/queries/0_stateless/01896_jit_aggregation_function_if_long.sql diff --git a/tests/queries/0_stateless/01897_jit_aggregation_function_avg_weighted.reference b/tests/queries/0_stateless/01897_jit_aggregation_function_avg_weighted_long.reference similarity index 100% rename from tests/queries/0_stateless/01897_jit_aggregation_function_avg_weighted.reference rename to tests/queries/0_stateless/01897_jit_aggregation_function_avg_weighted_long.reference diff --git 
a/tests/queries/0_stateless/01897_jit_aggregation_function_avg_weighted.sql b/tests/queries/0_stateless/01897_jit_aggregation_function_avg_weighted_long.sql
similarity index 100%
rename from tests/queries/0_stateless/01897_jit_aggregation_function_avg_weighted.sql
rename to tests/queries/0_stateless/01897_jit_aggregation_function_avg_weighted_long.sql

From 22491c8adc167c03337fd24389bae5f9b3cd5fa6 Mon Sep 17 00:00:00 2001
From: Maksim Kita
Date: Sat, 26 Jun 2021 18:12:33 +0300
Subject: [PATCH 651/931] Compile AggregateFunctionCount

---
 docker/test/fasttest/run.sh                    |   6 -
 .../AggregateFunctionCount.h                   | 125 ++++++++++++++
 src/Interpreters/JIT/compileFunction.cpp       |   1 -
 tests/performance/jit_aggregate_functions.xml  |   1 +
 ..._aggregation_function_count_long.reference  |   3 +
 ...25_jit_aggregation_function_count_long.sql  |  15 ++
 .../00165_jit_aggregate_functions.reference    | 160 +++++++++---------
 .../00165_jit_aggregate_functions.sql          |   8 +-
 8 files changed, 228 insertions(+), 91 deletions(-)
 create mode 100644 tests/queries/0_stateless/01925_jit_aggregation_function_count_long.reference
 create mode 100644 tests/queries/0_stateless/01925_jit_aggregation_function_count_long.sql

diff --git a/docker/test/fasttest/run.sh b/docker/test/fasttest/run.sh
index 9fd2212e2dc..688dc0b455d 100755
--- a/docker/test/fasttest/run.sh
+++ b/docker/test/fasttest/run.sh
@@ -374,12 +374,6 @@ function run_tests
         # Depends on AWS
         01801_s3_cluster
 
-        # Depends on LLVM JIT
-        01072_nullable_jit
-        01852_jit_if
-        01865_jit_comparison_constant_result
-        01871_merge_tree_compile_expressions
-
         # needs psql
         01889_postgresql_protocol_null_fields
     )

diff --git a/src/AggregateFunctions/AggregateFunctionCount.h b/src/AggregateFunctions/AggregateFunctionCount.h
index 9886b4e6828..b5d5b69a1f4 100644
--- a/src/AggregateFunctions/AggregateFunctionCount.h
+++ b/src/AggregateFunctions/AggregateFunctionCount.h
@@ -10,6 +10,15 @@
 #include
 #include
 
+#if !defined(ARCADIA_BUILD)
+#    include "config_core.h"
+#endif
+
+#if USE_EMBEDDED_COMPILER
+#    include <llvm/IR/IRBuilder.h>
+#    include <DataTypes/Native.h>
+#endif
+
 namespace DB
 {
 
@@ -107,6 +116,62 @@ public:
     AggregateFunctionPtr getOwnNullAdapter(
         const AggregateFunctionPtr &, const DataTypes & types, const Array & params,
         const AggregateFunctionProperties & /*properties*/) const override;
+
+#if USE_EMBEDDED_COMPILER
+
+    bool isCompilable() const override
+    {
+        return true;
+    }
+
+    void compileCreate(llvm::IRBuilderBase & builder, llvm::Value * aggregate_data_ptr) const override
+    {
+        llvm::IRBuilder<> & b = static_cast<llvm::IRBuilder<> &>(builder);
+        b.CreateMemSet(aggregate_data_ptr, llvm::ConstantInt::get(b.getInt8Ty(), 0), sizeof(AggregateFunctionCountData), llvm::assumeAligned(this->alignOfData()));
+    }
+
+    void compileAdd(llvm::IRBuilderBase & builder, llvm::Value * aggregate_data_ptr, const DataTypes &, const std::vector<llvm::Value *> &) const override
+    {
+        llvm::IRBuilder<> & b = static_cast<llvm::IRBuilder<> &>(builder);
+
+        auto * return_type = toNativeType(b, getReturnType());
+
+        auto * count_value_ptr = b.CreatePointerCast(aggregate_data_ptr, return_type->getPointerTo());
+        auto * count_value = b.CreateLoad(return_type, count_value_ptr);
+        auto * updated_count_value = b.CreateAdd(count_value, llvm::ConstantInt::get(return_type, 1));
+
+        b.CreateStore(updated_count_value, count_value_ptr);
+    }
+
+    void compileMerge(llvm::IRBuilderBase & builder, llvm::Value * aggregate_data_dst_ptr, llvm::Value * aggregate_data_src_ptr) const override
+    {
+        llvm::IRBuilder<> & b = static_cast<llvm::IRBuilder<> &>(builder);
+
+        auto * return_type = toNativeType(b, getReturnType());
+
+        auto * count_value_dst_ptr = b.CreatePointerCast(aggregate_data_dst_ptr, return_type->getPointerTo());
+        auto * count_value_dst = b.CreateLoad(return_type, count_value_dst_ptr);
+
+        auto * count_value_src_ptr = b.CreatePointerCast(aggregate_data_src_ptr, return_type->getPointerTo());
+        auto * count_value_src = b.CreateLoad(return_type, count_value_src_ptr);
+
+        auto * count_value_dst_updated = b.CreateAdd(count_value_dst, count_value_src);
+
+        b.CreateStore(count_value_dst_updated, count_value_dst_ptr);
+    }
+
+    llvm::Value * compileGetResult(llvm::IRBuilderBase & builder, llvm::Value * aggregate_data_ptr) const override
+    {
+        llvm::IRBuilder<> & b = static_cast<llvm::IRBuilder<> &>(builder);
+
+        auto * return_type = toNativeType(b, getReturnType());
+        auto * count_value_ptr = b.CreatePointerCast(aggregate_data_ptr, return_type->getPointerTo());
+
+        return b.CreateLoad(return_type, count_value_ptr);
+    }
+
+#endif
+
 };
 
 
@@ -155,6 +220,66 @@ public:
     {
         assert_cast<ColumnUInt64 &>(to).getData().push_back(data(place).count);
     }
+
+
+#if USE_EMBEDDED_COMPILER
+
+    bool isCompilable() const override
+    {
+        return true;
+    }
+
+    void compileCreate(llvm::IRBuilderBase & builder, llvm::Value * aggregate_data_ptr) const override
+    {
+        llvm::IRBuilder<> & b = static_cast<llvm::IRBuilder<> &>(builder);
+        b.CreateMemSet(aggregate_data_ptr, llvm::ConstantInt::get(b.getInt8Ty(), 0), sizeof(AggregateFunctionCountData), llvm::assumeAligned(this->alignOfData()));
+    }
+
+    void compileAdd(llvm::IRBuilderBase & builder, llvm::Value * aggregate_data_ptr, const DataTypes &, const std::vector<llvm::Value *> & values) const override
+    {
+        llvm::IRBuilder<> & b = static_cast<llvm::IRBuilder<> &>(builder);
+
+        auto * return_type = toNativeType(b, getReturnType());
+
+        auto * is_null_value = b.CreateExtractValue(values[0], {1});
+
+        auto * count_value_ptr = b.CreatePointerCast(aggregate_data_ptr, return_type->getPointerTo());
+        auto * count_value = b.CreateLoad(return_type, count_value_ptr);
+        auto * increment_value = b.CreateSelect(is_null_value, llvm::ConstantInt::get(return_type, 0), llvm::ConstantInt::get(return_type, 1));
+        auto * updated_count_value = b.CreateAdd(count_value, increment_value);
+
+        b.CreateStore(updated_count_value, count_value_ptr);
+    }
+
+    void compileMerge(llvm::IRBuilderBase & builder, llvm::Value * aggregate_data_dst_ptr, llvm::Value * aggregate_data_src_ptr) const override
+    {
+        llvm::IRBuilder<> & b = static_cast<llvm::IRBuilder<> &>(builder);
+
+        auto * return_type = toNativeType(b, getReturnType());
+
+        auto * count_value_dst_ptr = b.CreatePointerCast(aggregate_data_dst_ptr, return_type->getPointerTo());
+        auto * count_value_dst = b.CreateLoad(return_type, count_value_dst_ptr);
+
+        auto * count_value_src_ptr = b.CreatePointerCast(aggregate_data_src_ptr, return_type->getPointerTo());
+        auto * count_value_src = b.CreateLoad(return_type, count_value_src_ptr);
+
+        auto * count_value_dst_updated = b.CreateAdd(count_value_dst, count_value_src);
+
+        b.CreateStore(count_value_dst_updated, count_value_dst_ptr);
+    }
+
+    llvm::Value * compileGetResult(llvm::IRBuilderBase & builder, llvm::Value * aggregate_data_ptr) const override
+    {
+        llvm::IRBuilder<> & b = static_cast<llvm::IRBuilder<> &>(builder);
+
+        auto * return_type = toNativeType(b, getReturnType());
+        auto * count_value_ptr = b.CreatePointerCast(aggregate_data_ptr, return_type->getPointerTo());
+
+        return b.CreateLoad(return_type, count_value_ptr);
+    }
+
+#endif
+
 };
 
 }

diff --git a/src/Interpreters/JIT/compileFunction.cpp b/src/Interpreters/JIT/compileFunction.cpp
index 25198bebca6..f2e017a0b43 100644
--- a/src/Interpreters/JIT/compileFunction.cpp
+++ b/src/Interpreters/JIT/compileFunction.cpp
@@ -528,7 +528,6 @@ static void compileInsertAggregatesIntoResultColumns(llvm::Module & module, cons
         auto * aggregate_data_place = b.CreateLoad(b.getInt8Ty()->getPointerTo(), aggregate_data_place_phi);
         auto * aggregation_place_with_offset = b.CreateConstInBoundsGEP1_32(nullptr, aggregate_data_place, aggregate_function_offset);
 
-        auto column_type = functions[i].function->getArgumentTypes()[0];
         auto * final_value = aggregate_function_ptr->compileGetResult(b, aggregation_place_with_offset);
 
         if (columns[i].null_init)

diff --git a/tests/performance/jit_aggregate_functions.xml b/tests/performance/jit_aggregate_functions.xml
index ac623656f26..cf64ee62fd1 100644
--- a/tests/performance/jit_aggregate_functions.xml
+++ b/tests/performance/jit_aggregate_functions.xml
@@ -63,6 +63,7 @@
         avg
         any
         anyLast
+        count

diff --git a/tests/queries/0_stateless/01925_jit_aggregation_function_count_long.reference b/tests/queries/0_stateless/01925_jit_aggregation_function_count_long.reference
new file mode 100644
index 00000000000..f3b78aeb71e
--- /dev/null
+++ b/tests/queries/0_stateless/01925_jit_aggregation_function_count_long.reference
@@ -0,0 +1,3 @@
+0 40 20
+1 40 20
+2 40 20

diff --git a/tests/queries/0_stateless/01925_jit_aggregation_function_count_long.sql b/tests/queries/0_stateless/01925_jit_aggregation_function_count_long.sql
new file mode 100644
index 00000000000..03a3c4f8b65
--- /dev/null
+++ b/tests/queries/0_stateless/01925_jit_aggregation_function_count_long.sql
@@ -0,0 +1,15 @@
+SET compile_aggregate_expressions = 1;
+SET min_count_to_compile_aggregate_expression = 0;
+
+DROP TABLE IF EXISTS test_table;
+CREATE TABLE test_table
+(
+    id UInt64,
+
+    value UInt8,
+    value_nullable Nullable(UInt8)
+) ENGINE=TinyLog;
+
+INSERT INTO test_table SELECT number % 3, number, if (number % 2 == 0, number, NULL) FROM system.numbers LIMIT 120;
+SELECT id, count(value), count(value_nullable) FROM test_table GROUP BY id ORDER BY id;
+DROP TABLE test_table;

diff --git a/tests/queries/1_stateful/00165_jit_aggregate_functions.reference b/tests/queries/1_stateful/00165_jit_aggregate_functions.reference
index f6dea736ccd..18894481e86 100644
--- a/tests/queries/1_stateful/00165_jit_aggregate_functions.reference
+++ b/tests/queries/1_stateful/00165_jit_aggregate_functions.reference
@@ -1,86 +1,86 @@
 Aggregation using JIT compilation
 Simple functions
-1704509 4611700827100483880 9223360787015464643 10441337359398154812 19954243669348.844
-732797 4611701940806302259 9223355550934604746 977192643464016658 2054229034942.3723
-598875 4611701407242345792 9223362250391155632 9312163881623734456 27615161624211.875
-792887 4611699550286611812 9223290551912005343 6930300520201292824 27479710385933.586
-3807842 4611710821592843606 9223326163906184987 16710274896338005145 85240848090850.69
-25703952 4611709443519524003 9223353913449113943 9946868158853570839 67568783303242.086
-716829 4611852156092872082 9223361623076951140 15381015774917924786 170693446547158.72
-59183 4611730685242027332 9223354909338698162 8078812522502896568 94622946187035.42
-33010362 4611704682869732882 9223268545373999677 2064452191838585926 26532987929602.555
-800784 4611752907938305166 9223340418389788041 18082918611792817587 233352070043266.62
-20810645 4611712185532639162 9223218900001937412 4996531385439292694 68246505203164.63
-25843850 4611690025407720929 9223346023778617822 12755881190906812868 185015319325648.16
-23447120 4611796031755620254 9223329309291309758 17231649548755339966 255019232629204.38
-14739804
4611692230555590277 9223313509005166531 2458378896777063244 38308020331864.36 -32077710 4611884228437061959 9223352444952988904 12965822147651192908 214467085941034.7 -22446879 4611846229717089436 9223124373140579096 13530160492087688838 231724477077663.4 -170282 4611833225706935900 9223371583739401906 8076893424988479310 141657635880324.8 -11482817 4611708000353743073 9223337838355779113 14841435427430843458 283531099960470.8 -63469 4611695097019173921 9223353530156141191 6296784708578574520 120762239817777.88 -29103473 4611744585914335132 9223333530281362537 5908285283932344933 123712996438970.34 +1704509 4611700827100483880 9223360787015464643 10441337359398154812 19954243669348.844 523264 +732797 4611701940806302259 9223355550934604746 977192643464016658 2054229034942.3723 475698 +598875 4611701407242345792 9223362250391155632 9312163881623734456 27615161624211.875 337212 +792887 4611699550286611812 9223290551912005343 6930300520201292824 27479710385933.586 252197 +3807842 4611710821592843606 9223326163906184987 16710274896338005145 85240848090850.69 196036 +25703952 4611709443519524003 9223353913449113943 9946868158853570839 67568783303242.086 147211 +716829 4611852156092872082 9223361623076951140 15381015774917924786 170693446547158.72 90109 +59183 4611730685242027332 9223354909338698162 8078812522502896568 94622946187035.42 85379 +33010362 4611704682869732882 9223268545373999677 2064452191838585926 26532987929602.555 77807 +800784 4611752907938305166 9223340418389788041 18082918611792817587 233352070043266.62 77492 +20810645 4611712185532639162 9223218900001937412 4996531385439292694 68246505203164.63 73213 +25843850 4611690025407720929 9223346023778617822 12755881190906812868 185015319325648.16 68945 +23447120 4611796031755620254 9223329309291309758 17231649548755339966 255019232629204.38 67570 +14739804 4611692230555590277 9223313509005166531 2458378896777063244 38308020331864.36 64174 +32077710 4611884228437061959 9223352444952988904 12965822147651192908 214467085941034.7 60456 +22446879 4611846229717089436 9223124373140579096 13530160492087688838 231724477077663.4 58389 +170282 4611833225706935900 9223371583739401906 8076893424988479310 141657635880324.8 57017 +11482817 4611708000353743073 9223337838355779113 14841435427430843458 283531099960470.8 52345 +63469 4611695097019173921 9223353530156141191 6296784708578574520 120762239817777.88 52142 +29103473 4611744585914335132 9223333530281362537 5908285283932344933 123712996438970.34 47758 Simple functions if combinator -1704509 4611700827100483880 9223310246721229500 16398241567152875142 62618822667209.71 -732797 4611721382223060002 9223355550934604746 16281585268876620522 68472164943295.68 -598875 4611701407242345792 9223362250391155632 3577699408183553052 21300140553347.42 -792887 4611699550286611812 9223164887726235740 7088177025760385824 56461952267903.89 -3807842 4611710821592843606 9223283397553859544 5756765290752687660 58835559208469.4 -25703952 4611784761593342388 9223241341744449690 4782279928971192568 65182094768443.91 -716829 4611852156092872082 9223361623076951140 8613712481895484190 191445613359755.62 -59183 4611730685242027332 9223354909338698162 18369075291092794110 429013599530392 -33010362 4611704682869732882 9223092117352620518 9991152681891671022 257099731913529.5 -800784 4611752907938305166 9223309994342931384 5251877538869750510 135472890315726.03 -20810645 4611712185532639162 9223218900001937412 11803718472901310700 323593455407553 -25843850 4611744529689964352 9223346023778617822 127137885677350808 
3700925266420.715 -23447120 4611796031755620254 9223329309291309758 1841522159325376278 54534534450526.42 -14739804 4611762063154116632 9223007205463222212 16302703534054321116 506987919332451.8 -32077710 4612033458080771112 9223352444952988904 421072759851674408 13955745719596.793 -22446879 4611846229717089436 9223124373140579096 6577134317587565298 224866980668999.47 -170282 4611833225706935900 9223371583739401906 15764226366913732386 551447384017691 -11482817 4611990575414646848 9223302669582414438 9828522700609834800 378121905921203.2 -63469 4612175339998036670 9222961628400798084 17239621485933250238 663164390134376.5 -29103473 4611744585914335132 9223035551850347954 12590190375872647672 525927999326314.7 +1704509 4611700827100483880 9223310246721229500 16398241567152875142 62618822667209.71 261874 +732797 4611721382223060002 9223355550934604746 16281585268876620522 68472164943295.68 237784 +598875 4611701407242345792 9223362250391155632 3577699408183553052 21300140553347.42 167966 +792887 4611699550286611812 9223164887726235740 7088177025760385824 56461952267903.89 125539 +3807842 4611710821592843606 9223283397553859544 5756765290752687660 58835559208469.4 97845 +25703952 4611784761593342388 9223241341744449690 4782279928971192568 65182094768443.91 73368 +716829 4611852156092872082 9223361623076951140 8613712481895484190 191445613359755.62 44993 +59183 4611730685242027332 9223354909338698162 18369075291092794110 429013599530392 42817 +33010362 4611704682869732882 9223092117352620518 9991152681891671022 257099731913529.5 38861 +800784 4611752907938305166 9223309994342931384 5251877538869750510 135472890315726.03 38767 +20810645 4611712185532639162 9223218900001937412 11803718472901310700 323593455407553 36477 +25843850 4611744529689964352 9223346023778617822 127137885677350808 3700925266420.715 34353 +23447120 4611796031755620254 9223329309291309758 1841522159325376278 54534534450526.42 33768 +14739804 4611762063154116632 9223007205463222212 16302703534054321116 506987919332451.8 32156 +32077710 4612033458080771112 9223352444952988904 421072759851674408 13955745719596.793 30172 +22446879 4611846229717089436 9223124373140579096 6577134317587565298 224866980668999.47 29249 +170282 4611833225706935900 9223371583739401906 15764226366913732386 551447384017691 28587 +11482817 4611990575414646848 9223302669582414438 9828522700609834800 378121905921203.2 25993 +63469 4612175339998036670 9222961628400798084 17239621485933250238 663164390134376.5 25996 +29103473 4611744585914335132 9223035551850347954 12590190375872647672 525927999326314.7 23939 Aggregation without JIT compilation Simple functions -1704509 4611700827100483880 9223360787015464643 10441337359398154812 19954243669348.844 -732797 4611701940806302259 9223355550934604746 977192643464016658 2054229034942.3723 -598875 4611701407242345792 9223362250391155632 9312163881623734456 27615161624211.875 -792887 4611699550286611812 9223290551912005343 6930300520201292824 27479710385933.586 -3807842 4611710821592843606 9223326163906184987 16710274896338005145 85240848090850.69 -25703952 4611709443519524003 9223353913449113943 9946868158853570839 67568783303242.086 -716829 4611852156092872082 9223361623076951140 15381015774917924786 170693446547158.72 -59183 4611730685242027332 9223354909338698162 8078812522502896568 94622946187035.42 -33010362 4611704682869732882 9223268545373999677 2064452191838585926 26532987929602.555 -800784 4611752907938305166 9223340418389788041 18082918611792817587 233352070043266.62 -20810645 4611712185532639162 
9223218900001937412 4996531385439292694 68246505203164.63 -25843850 4611690025407720929 9223346023778617822 12755881190906812868 185015319325648.16 -23447120 4611796031755620254 9223329309291309758 17231649548755339966 255019232629204.38 -14739804 4611692230555590277 9223313509005166531 2458378896777063244 38308020331864.36 -32077710 4611884228437061959 9223352444952988904 12965822147651192908 214467085941034.7 -22446879 4611846229717089436 9223124373140579096 13530160492087688838 231724477077663.4 -170282 4611833225706935900 9223371583739401906 8076893424988479310 141657635880324.8 -11482817 4611708000353743073 9223337838355779113 14841435427430843458 283531099960470.8 -63469 4611695097019173921 9223353530156141191 6296784708578574520 120762239817777.88 -29103473 4611744585914335132 9223333530281362537 5908285283932344933 123712996438970.34 +1704509 4611700827100483880 9223360787015464643 10441337359398154812 19954243669348.844 523264 +732797 4611701940806302259 9223355550934604746 977192643464016658 2054229034942.3723 475698 +598875 4611701407242345792 9223362250391155632 9312163881623734456 27615161624211.875 337212 +792887 4611699550286611812 9223290551912005343 6930300520201292824 27479710385933.586 252197 +3807842 4611710821592843606 9223326163906184987 16710274896338005145 85240848090850.69 196036 +25703952 4611709443519524003 9223353913449113943 9946868158853570839 67568783303242.086 147211 +716829 4611852156092872082 9223361623076951140 15381015774917924786 170693446547158.72 90109 +59183 4611730685242027332 9223354909338698162 8078812522502896568 94622946187035.42 85379 +33010362 4611704682869732882 9223268545373999677 2064452191838585926 26532987929602.555 77807 +800784 4611752907938305166 9223340418389788041 18082918611792817587 233352070043266.62 77492 +20810645 4611712185532639162 9223218900001937412 4996531385439292694 68246505203164.63 73213 +25843850 4611690025407720929 9223346023778617822 12755881190906812868 185015319325648.16 68945 +23447120 4611796031755620254 9223329309291309758 17231649548755339966 255019232629204.38 67570 +14739804 4611692230555590277 9223313509005166531 2458378896777063244 38308020331864.36 64174 +32077710 4611884228437061959 9223352444952988904 12965822147651192908 214467085941034.7 60456 +22446879 4611846229717089436 9223124373140579096 13530160492087688838 231724477077663.4 58389 +170282 4611833225706935900 9223371583739401906 8076893424988479310 141657635880324.8 57017 +11482817 4611708000353743073 9223337838355779113 14841435427430843458 283531099960470.8 52345 +63469 4611695097019173921 9223353530156141191 6296784708578574520 120762239817777.88 52142 +29103473 4611744585914335132 9223333530281362537 5908285283932344933 123712996438970.34 47758 Simple functions if combinator -1704509 4611700827100483880 9223310246721229500 16398241567152875142 62618822667209.71 -732797 4611721382223060002 9223355550934604746 16281585268876620522 68472164943295.68 -598875 4611701407242345792 9223362250391155632 3577699408183553052 21300140553347.42 -792887 4611699550286611812 9223164887726235740 7088177025760385824 56461952267903.89 -3807842 4611710821592843606 9223283397553859544 5756765290752687660 58835559208469.4 -25703952 4611784761593342388 9223241341744449690 4782279928971192568 65182094768443.91 -716829 4611852156092872082 9223361623076951140 8613712481895484190 191445613359755.62 -59183 4611730685242027332 9223354909338698162 18369075291092794110 429013599530392 -33010362 4611704682869732882 9223092117352620518 9991152681891671022 257099731913529.5 
-800784 4611752907938305166 9223309994342931384 5251877538869750510 135472890315726.03 -20810645 4611712185532639162 9223218900001937412 11803718472901310700 323593455407553 -25843850 4611744529689964352 9223346023778617822 127137885677350808 3700925266420.715 -23447120 4611796031755620254 9223329309291309758 1841522159325376278 54534534450526.42 -14739804 4611762063154116632 9223007205463222212 16302703534054321116 506987919332451.8 -32077710 4612033458080771112 9223352444952988904 421072759851674408 13955745719596.793 -22446879 4611846229717089436 9223124373140579096 6577134317587565298 224866980668999.47 -170282 4611833225706935900 9223371583739401906 15764226366913732386 551447384017691 -11482817 4611990575414646848 9223302669582414438 9828522700609834800 378121905921203.2 -63469 4612175339998036670 9222961628400798084 17239621485933250238 663164390134376.5 -29103473 4611744585914335132 9223035551850347954 12590190375872647672 525927999326314.7 +1704509 4611700827100483880 9223310246721229500 16398241567152875142 62618822667209.71 261874 +732797 4611721382223060002 9223355550934604746 16281585268876620522 68472164943295.68 237784 +598875 4611701407242345792 9223362250391155632 3577699408183553052 21300140553347.42 167966 +792887 4611699550286611812 9223164887726235740 7088177025760385824 56461952267903.89 125539 +3807842 4611710821592843606 9223283397553859544 5756765290752687660 58835559208469.4 97845 +25703952 4611784761593342388 9223241341744449690 4782279928971192568 65182094768443.91 73368 +716829 4611852156092872082 9223361623076951140 8613712481895484190 191445613359755.62 44993 +59183 4611730685242027332 9223354909338698162 18369075291092794110 429013599530392 42817 +33010362 4611704682869732882 9223092117352620518 9991152681891671022 257099731913529.5 38861 +800784 4611752907938305166 9223309994342931384 5251877538869750510 135472890315726.03 38767 +20810645 4611712185532639162 9223218900001937412 11803718472901310700 323593455407553 36477 +25843850 4611744529689964352 9223346023778617822 127137885677350808 3700925266420.715 34353 +23447120 4611796031755620254 9223329309291309758 1841522159325376278 54534534450526.42 33768 +14739804 4611762063154116632 9223007205463222212 16302703534054321116 506987919332451.8 32156 +32077710 4612033458080771112 9223352444952988904 421072759851674408 13955745719596.793 30172 +22446879 4611846229717089436 9223124373140579096 6577134317587565298 224866980668999.47 29249 +170282 4611833225706935900 9223371583739401906 15764226366913732386 551447384017691 28587 +11482817 4611990575414646848 9223302669582414438 9828522700609834800 378121905921203.2 25993 +63469 4612175339998036670 9222961628400798084 17239621485933250238 663164390134376.5 25996 +29103473 4611744585914335132 9223035551850347954 12590190375872647672 525927999326314.7 23939 diff --git a/tests/queries/1_stateful/00165_jit_aggregate_functions.sql b/tests/queries/1_stateful/00165_jit_aggregate_functions.sql index a0523186850..406e9857311 100644 --- a/tests/queries/1_stateful/00165_jit_aggregate_functions.sql +++ b/tests/queries/1_stateful/00165_jit_aggregate_functions.sql @@ -4,13 +4,13 @@ SET min_count_to_compile_aggregate_expression = 0; SELECT 'Aggregation using JIT compilation'; SELECT 'Simple functions'; -SELECT CounterID, min(WatchID), max(WatchID), sum(WatchID), avg(WatchID) FROM test.hits +SELECT CounterID, min(WatchID), max(WatchID), sum(WatchID), avg(WatchID), count(WatchID) FROM test.hits GROUP BY CounterID ORDER BY count() DESC LIMIT 20; SELECT 'Simple functions if 
combinator';

WITH (WatchID % 2 == 0) AS predicate
-SELECT CounterID, minIf(WatchID,predicate), maxIf(WatchID, predicate), sumIf(WatchID, predicate), avgIf(WatchID, predicate) FROM test.hits
+SELECT CounterID, minIf(WatchID,predicate), maxIf(WatchID, predicate), sumIf(WatchID, predicate), avgIf(WatchID, predicate), countIf(WatchID, predicate) FROM test.hits
 GROUP BY CounterID ORDER BY count() DESC LIMIT 20;
 
 SET compile_aggregate_expressions = 0;
 
@@ -19,11 +19,11 @@ SELECT 'Aggregation without JIT compilation';
 
 SELECT 'Simple functions';
 
-SELECT CounterID, min(WatchID), max(WatchID), sum(WatchID), avg(WatchID) FROM test.hits
+SELECT CounterID, min(WatchID), max(WatchID), sum(WatchID), avg(WatchID), count(WatchID) FROM test.hits
 GROUP BY CounterID ORDER BY count() DESC LIMIT 20;
 
 SELECT 'Simple functions if combinator';
 
 WITH (WatchID % 2 == 0) AS predicate
-SELECT CounterID, minIf(WatchID,predicate), maxIf(WatchID, predicate), sumIf(WatchID, predicate), avgIf(WatchID, predicate) FROM test.hits
+SELECT CounterID, minIf(WatchID,predicate), maxIf(WatchID, predicate), sumIf(WatchID, predicate), avgIf(WatchID, predicate), countIf(WatchID, predicate) FROM test.hits
 GROUP BY CounterID ORDER BY count() DESC LIMIT 20;

From 92a36e38f15ee2ce705ec8f35d769b65f84368d3 Mon Sep 17 00:00:00 2001
From: Maksim Kita
Date: Sat, 26 Jun 2021 19:26:32 +0300
Subject: [PATCH 652/931] AggregateFunctionAvgWeighted fix accuracy

---
 .../AggregateFunctionAvgWeighted.h          |  48 +++---
 .../00165_jit_aggregate_functions.reference | 160 +++++++++---------
 .../00165_jit_aggregate_functions.sql       |   8 +-
 3 files changed, 106 insertions(+), 110 deletions(-)

diff --git a/src/AggregateFunctions/AggregateFunctionAvgWeighted.h b/src/AggregateFunctions/AggregateFunctionAvgWeighted.h
index 71b0cb6a735..762aa23c21f 100644
--- a/src/AggregateFunctions/AggregateFunctionAvgWeighted.h
+++ b/src/AggregateFunctions/AggregateFunctionAvgWeighted.h
@@ -48,44 +48,40 @@ public:
 
     bool isCompilable() const override
     {
-        /// TODO: FIX
-        // bool can_be_compiled = Base::isCompilable();
-        // can_be_compiled &= canBeNativeType<Weight>();
+        bool can_be_compiled = Base::isCompilable();
+        can_be_compiled &= canBeNativeType<Weight>();
 
-        return false;
+        return can_be_compiled;
     }
 
-    // void compileAdd(llvm::IRBuilderBase & builder, llvm::Value * aggregate_data_ptr, const DataTypes & arguments_types, const std::vector<llvm::Value *> & argument_values) const override
-    // {
-        /// TODO: FIX
-        // llvm::IRBuilder<> & b = static_cast<llvm::IRBuilder<> &>(builder);
+    void compileAdd(llvm::IRBuilderBase & builder, llvm::Value * aggregate_data_ptr, const DataTypes & arguments_types, const std::vector<llvm::Value *> & argument_values) const override
+    {
+        llvm::IRBuilder<> & b = static_cast<llvm::IRBuilder<> &>(builder);
 
-        // auto * numerator_type = toNativeType<Numerator>(b);
+        auto * numerator_type = toNativeType<Numerator>(b);
 
-        // auto * numerator_ptr = b.CreatePointerCast(aggregate_data_ptr, numerator_type->getPointerTo());
-        // auto * numerator_value = b.CreateLoad(numerator_type, numerator_ptr);
+        auto * numerator_ptr = b.CreatePointerCast(aggregate_data_ptr, numerator_type->getPointerTo());
+        auto * numerator_value = b.CreateLoad(numerator_type, numerator_ptr);
 
-        // const auto & argument = nativeCast(b, arguments_types[0], argument_values[0], numerator_type);
-        // const auto & weight = nativeCast(b, arguments_types[1], argument_values[1], numerator_type);
+        const auto & argument = nativeCast(b, arguments_types[0], argument_values[0], numerator_type);
+        const auto & weight = nativeCast(b, arguments_types[1], argument_values[1], numerator_type);
 
-        // llvm::Value * value_weight_multiplication = argument->getType()->isIntegerTy() ? b.CreateMul(argument, weight) : b.CreateFMul(argument, weight);
+        llvm::Value * value_weight_multiplication = argument->getType()->isIntegerTy() ? b.CreateMul(argument, weight) : b.CreateFMul(argument, weight);
+        auto * numerator_result_value = numerator_type->isIntegerTy() ? b.CreateAdd(numerator_value, value_weight_multiplication) : b.CreateFAdd(numerator_value, value_weight_multiplication);
+        b.CreateStore(numerator_result_value, numerator_ptr);
 
-        /// TODO: Fix accuracy
-        // auto * numerator_result_value = numerator_type->isIntegerTy() ? b.CreateAdd(numerator_value, value_weight_multiplication) : b.CreateFAdd(numerator_value, value_weight_multiplication);
-        // b.CreateStore(numerator_result_value, numerator_ptr);
+        auto * denominator_type = toNativeType<Denominator>(b);
 
-        // auto * denominator_type = toNativeType<Denominator>(b);
+        auto * denominator_offset_ptr = b.CreateConstGEP1_32(nullptr, aggregate_data_ptr, sizeof(Numerator));
+        auto * denominator_ptr = b.CreatePointerCast(denominator_offset_ptr, denominator_type->getPointerTo());
 
-        // auto * denominator_offset_ptr = b.CreateConstGEP1_32(nullptr, aggregate_data_ptr, sizeof(Numerator));
-        // auto * denominator_ptr = b.CreatePointerCast(denominator_offset_ptr, denominator_type->getPointerTo());
+        auto * weight_cast_to_denominator = nativeCast(b, arguments_types[1], argument_values[1], denominator_type);
 
-        // auto * weight_cast_to_denominator = nativeCast(b, arguments_types[1], argument_values[1], numerator_type);
+        auto * denominator_value = b.CreateLoad(denominator_type, denominator_ptr);
+        auto * denominator_value_updated = denominator_type->isIntegerTy() ?
b.CreateAdd(denominator_value, weight_cast_to_denominator) : b.CreateFAdd(denominator_value, weight_cast_to_denominator); - - // b.CreateStore(denominator_value_updated, denominator_ptr); - // } + b.CreateStore(denominator_value_updated, denominator_ptr); + } #endif diff --git a/tests/queries/1_stateful/00165_jit_aggregate_functions.reference b/tests/queries/1_stateful/00165_jit_aggregate_functions.reference index 18894481e86..dab1b1d10a8 100644 --- a/tests/queries/1_stateful/00165_jit_aggregate_functions.reference +++ b/tests/queries/1_stateful/00165_jit_aggregate_functions.reference @@ -1,86 +1,86 @@ Aggregation using JIT compilation Simple functions -1704509 4611700827100483880 9223360787015464643 10441337359398154812 19954243669348.844 523264 -732797 4611701940806302259 9223355550934604746 977192643464016658 2054229034942.3723 475698 -598875 4611701407242345792 9223362250391155632 9312163881623734456 27615161624211.875 337212 -792887 4611699550286611812 9223290551912005343 6930300520201292824 27479710385933.586 252197 -3807842 4611710821592843606 9223326163906184987 16710274896338005145 85240848090850.69 196036 -25703952 4611709443519524003 9223353913449113943 9946868158853570839 67568783303242.086 147211 -716829 4611852156092872082 9223361623076951140 15381015774917924786 170693446547158.72 90109 -59183 4611730685242027332 9223354909338698162 8078812522502896568 94622946187035.42 85379 -33010362 4611704682869732882 9223268545373999677 2064452191838585926 26532987929602.555 77807 -800784 4611752907938305166 9223340418389788041 18082918611792817587 233352070043266.62 77492 -20810645 4611712185532639162 9223218900001937412 4996531385439292694 68246505203164.63 73213 -25843850 4611690025407720929 9223346023778617822 12755881190906812868 185015319325648.16 68945 -23447120 4611796031755620254 9223329309291309758 17231649548755339966 255019232629204.38 67570 -14739804 4611692230555590277 9223313509005166531 2458378896777063244 38308020331864.36 64174 -32077710 4611884228437061959 9223352444952988904 12965822147651192908 214467085941034.7 60456 -22446879 4611846229717089436 9223124373140579096 13530160492087688838 231724477077663.4 58389 -170282 4611833225706935900 9223371583739401906 8076893424988479310 141657635880324.8 57017 -11482817 4611708000353743073 9223337838355779113 14841435427430843458 283531099960470.8 52345 -63469 4611695097019173921 9223353530156141191 6296784708578574520 120762239817777.88 52142 -29103473 4611744585914335132 9223333530281362537 5908285283932344933 123712996438970.34 47758 +1704509 4611700827100483880 9223360787015464643 10441337359398154812 19954243669348.844 47782393610906.42 523264 +732797 4611701940806302259 9223355550934604746 977192643464016658 2054229034942.3723 13207929959132.072 475698 +598875 4611701407242345792 9223362250391155632 9312163881623734456 27615161624211.875 33882422470487.816 337212 +792887 4611699550286611812 9223290551912005343 6930300520201292824 27479710385933.586 144395676294829.06 252197 +3807842 4611710821592843606 9223326163906184987 16710274896338005145 85240848090850.69 111553091480739.77 196036 +25703952 4611709443519524003 9223353913449113943 9946868158853570839 67568783303242.086 69940119305585.88 147211 +716829 4611852156092872082 9223361623076951140 15381015774917924786 170693446547158.72 149997856481568.4 90109 +59183 4611730685242027332 9223354909338698162 8078812522502896568 94622946187035.42 191637641678484.4 85379 +33010362 4611704682869732882 9223268545373999677 2064452191838585926 26532987929602.555 
270118717805588.94 77807 +800784 4611752907938305166 9223340418389788041 18082918611792817587 233352070043266.62 331337406660376.1 77492 +20810645 4611712185532639162 9223218900001937412 4996531385439292694 68246505203164.63 316843341306825.3 73213 +25843850 4611690025407720929 9223346023778617822 12755881190906812868 185015319325648.16 365076991941184.7 68945 +23447120 4611796031755620254 9223329309291309758 17231649548755339966 255019232629204.38 455302271742203.5 67570 +14739804 4611692230555590277 9223313509005166531 2458378896777063244 38308020331864.36 143744750966090.75 64174 +32077710 4611884228437061959 9223352444952988904 12965822147651192908 214467085941034.7 414236870552090.9 60456 +22446879 4611846229717089436 9223124373140579096 13530160492087688838 231724477077663.4 238607624382296.62 58389 +170282 4611833225706935900 9223371583739401906 8076893424988479310 141657635880324.8 378452730629064.3 57017 +11482817 4611708000353743073 9223337838355779113 14841435427430843458 283531099960470.8 190228928613426.25 52345 +63469 4611695097019173921 9223353530156141191 6296784708578574520 120762239817777.88 287000202568456.94 52142 +29103473 4611744585914335132 9223333530281362537 5908285283932344933 123712996438970.34 493926654425846.94 47758 Simple functions if combinator -1704509 4611700827100483880 9223310246721229500 16398241567152875142 62618822667209.71 261874 -732797 4611721382223060002 9223355550934604746 16281585268876620522 68472164943295.68 237784 -598875 4611701407242345792 9223362250391155632 3577699408183553052 21300140553347.42 167966 -792887 4611699550286611812 9223164887726235740 7088177025760385824 56461952267903.89 125539 -3807842 4611710821592843606 9223283397553859544 5756765290752687660 58835559208469.4 97845 -25703952 4611784761593342388 9223241341744449690 4782279928971192568 65182094768443.91 73368 -716829 4611852156092872082 9223361623076951140 8613712481895484190 191445613359755.62 44993 -59183 4611730685242027332 9223354909338698162 18369075291092794110 429013599530392 42817 -33010362 4611704682869732882 9223092117352620518 9991152681891671022 257099731913529.5 38861 -800784 4611752907938305166 9223309994342931384 5251877538869750510 135472890315726.03 38767 -20810645 4611712185532639162 9223218900001937412 11803718472901310700 323593455407553 36477 -25843850 4611744529689964352 9223346023778617822 127137885677350808 3700925266420.715 34353 -23447120 4611796031755620254 9223329309291309758 1841522159325376278 54534534450526.42 33768 -14739804 4611762063154116632 9223007205463222212 16302703534054321116 506987919332451.8 32156 -32077710 4612033458080771112 9223352444952988904 421072759851674408 13955745719596.793 30172 -22446879 4611846229717089436 9223124373140579096 6577134317587565298 224866980668999.47 29249 -170282 4611833225706935900 9223371583739401906 15764226366913732386 551447384017691 28587 -11482817 4611990575414646848 9223302669582414438 9828522700609834800 378121905921203.2 25993 -63469 4612175339998036670 9222961628400798084 17239621485933250238 663164390134376.5 25996 -29103473 4611744585914335132 9223035551850347954 12590190375872647672 525927999326314.7 23939 +1704509 4611700827100483880 9223310246721229500 16398241567152875142 62618822667209.71 nan 261874 +732797 4611721382223060002 9223355550934604746 16281585268876620522 68472164943295.68 nan 237784 +598875 4611701407242345792 9223362250391155632 3577699408183553052 21300140553347.42 nan 167966 +792887 4611699550286611812 9223164887726235740 7088177025760385824 56461952267903.89 nan 125539 
+3807842 4611710821592843606 9223283397553859544 5756765290752687660 58835559208469.4 nan 97845 +25703952 4611784761593342388 9223241341744449690 4782279928971192568 65182094768443.91 nan 73368 +716829 4611852156092872082 9223361623076951140 8613712481895484190 191445613359755.62 nan 44993 +59183 4611730685242027332 9223354909338698162 18369075291092794110 429013599530392 nan 42817 +33010362 4611704682869732882 9223092117352620518 9991152681891671022 257099731913529.5 nan 38861 +800784 4611752907938305166 9223309994342931384 5251877538869750510 135472890315726.03 nan 38767 +20810645 4611712185532639162 9223218900001937412 11803718472901310700 323593455407553 nan 36477 +25843850 4611744529689964352 9223346023778617822 127137885677350808 3700925266420.715 nan 34353 +23447120 4611796031755620254 9223329309291309758 1841522159325376278 54534534450526.42 nan 33768 +14739804 4611762063154116632 9223007205463222212 16302703534054321116 506987919332451.8 nan 32156 +32077710 4612033458080771112 9223352444952988904 421072759851674408 13955745719596.793 nan 30172 +22446879 4611846229717089436 9223124373140579096 6577134317587565298 224866980668999.47 nan 29249 +170282 4611833225706935900 9223371583739401906 15764226366913732386 551447384017691 nan 28587 +11482817 4611990575414646848 9223302669582414438 9828522700609834800 378121905921203.2 nan 25993 +63469 4612175339998036670 9222961628400798084 17239621485933250238 663164390134376.5 nan 25996 +29103473 4611744585914335132 9223035551850347954 12590190375872647672 525927999326314.7 nan 23939 Aggregation without JIT compilation Simple functions -1704509 4611700827100483880 9223360787015464643 10441337359398154812 19954243669348.844 523264 -732797 4611701940806302259 9223355550934604746 977192643464016658 2054229034942.3723 475698 -598875 4611701407242345792 9223362250391155632 9312163881623734456 27615161624211.875 337212 -792887 4611699550286611812 9223290551912005343 6930300520201292824 27479710385933.586 252197 -3807842 4611710821592843606 9223326163906184987 16710274896338005145 85240848090850.69 196036 -25703952 4611709443519524003 9223353913449113943 9946868158853570839 67568783303242.086 147211 -716829 4611852156092872082 9223361623076951140 15381015774917924786 170693446547158.72 90109 -59183 4611730685242027332 9223354909338698162 8078812522502896568 94622946187035.42 85379 -33010362 4611704682869732882 9223268545373999677 2064452191838585926 26532987929602.555 77807 -800784 4611752907938305166 9223340418389788041 18082918611792817587 233352070043266.62 77492 -20810645 4611712185532639162 9223218900001937412 4996531385439292694 68246505203164.63 73213 -25843850 4611690025407720929 9223346023778617822 12755881190906812868 185015319325648.16 68945 -23447120 4611796031755620254 9223329309291309758 17231649548755339966 255019232629204.38 67570 -14739804 4611692230555590277 9223313509005166531 2458378896777063244 38308020331864.36 64174 -32077710 4611884228437061959 9223352444952988904 12965822147651192908 214467085941034.7 60456 -22446879 4611846229717089436 9223124373140579096 13530160492087688838 231724477077663.4 58389 -170282 4611833225706935900 9223371583739401906 8076893424988479310 141657635880324.8 57017 -11482817 4611708000353743073 9223337838355779113 14841435427430843458 283531099960470.8 52345 -63469 4611695097019173921 9223353530156141191 6296784708578574520 120762239817777.88 52142 -29103473 4611744585914335132 9223333530281362537 5908285283932344933 123712996438970.34 47758 +1704509 4611700827100483880 9223360787015464643 
10441337359398154812 19954243669348.844 47782393610906.42 523264 +732797 4611701940806302259 9223355550934604746 977192643464016658 2054229034942.3723 13207929959132.072 475698 +598875 4611701407242345792 9223362250391155632 9312163881623734456 27615161624211.875 33882422470487.816 337212 +792887 4611699550286611812 9223290551912005343 6930300520201292824 27479710385933.586 144395676294829.06 252197 +3807842 4611710821592843606 9223326163906184987 16710274896338005145 85240848090850.69 111553091480739.77 196036 +25703952 4611709443519524003 9223353913449113943 9946868158853570839 67568783303242.086 69940119305585.88 147211 +716829 4611852156092872082 9223361623076951140 15381015774917924786 170693446547158.72 149997856481568.4 90109 +59183 4611730685242027332 9223354909338698162 8078812522502896568 94622946187035.42 191637641678484.4 85379 +33010362 4611704682869732882 9223268545373999677 2064452191838585926 26532987929602.555 270118717805588.94 77807 +800784 4611752907938305166 9223340418389788041 18082918611792817587 233352070043266.62 331337406660376.1 77492 +20810645 4611712185532639162 9223218900001937412 4996531385439292694 68246505203164.63 316843341306825.3 73213 +25843850 4611690025407720929 9223346023778617822 12755881190906812868 185015319325648.16 365076991941184.7 68945 +23447120 4611796031755620254 9223329309291309758 17231649548755339966 255019232629204.38 455302271742203.5 67570 +14739804 4611692230555590277 9223313509005166531 2458378896777063244 38308020331864.36 143744750966090.75 64174 +32077710 4611884228437061959 9223352444952988904 12965822147651192908 214467085941034.7 414236870552090.9 60456 +22446879 4611846229717089436 9223124373140579096 13530160492087688838 231724477077663.4 238607624382296.62 58389 +170282 4611833225706935900 9223371583739401906 8076893424988479310 141657635880324.8 378452730629064.3 57017 +11482817 4611708000353743073 9223337838355779113 14841435427430843458 283531099960470.8 190228928613426.25 52345 +63469 4611695097019173921 9223353530156141191 6296784708578574520 120762239817777.88 287000202568456.94 52142 +29103473 4611744585914335132 9223333530281362537 5908285283932344933 123712996438970.34 493926654425846.94 47758 Simple functions if combinator -1704509 4611700827100483880 9223310246721229500 16398241567152875142 62618822667209.71 261874 -732797 4611721382223060002 9223355550934604746 16281585268876620522 68472164943295.68 237784 -598875 4611701407242345792 9223362250391155632 3577699408183553052 21300140553347.42 167966 -792887 4611699550286611812 9223164887726235740 7088177025760385824 56461952267903.89 125539 -3807842 4611710821592843606 9223283397553859544 5756765290752687660 58835559208469.4 97845 -25703952 4611784761593342388 9223241341744449690 4782279928971192568 65182094768443.91 73368 -716829 4611852156092872082 9223361623076951140 8613712481895484190 191445613359755.62 44993 -59183 4611730685242027332 9223354909338698162 18369075291092794110 429013599530392 42817 -33010362 4611704682869732882 9223092117352620518 9991152681891671022 257099731913529.5 38861 -800784 4611752907938305166 9223309994342931384 5251877538869750510 135472890315726.03 38767 -20810645 4611712185532639162 9223218900001937412 11803718472901310700 323593455407553 36477 -25843850 4611744529689964352 9223346023778617822 127137885677350808 3700925266420.715 34353 -23447120 4611796031755620254 9223329309291309758 1841522159325376278 54534534450526.42 33768 -14739804 4611762063154116632 9223007205463222212 16302703534054321116 506987919332451.8 32156 -32077710 
4612033458080771112 9223352444952988904 421072759851674408 13955745719596.793 30172 -22446879 4611846229717089436 9223124373140579096 6577134317587565298 224866980668999.47 29249 -170282 4611833225706935900 9223371583739401906 15764226366913732386 551447384017691 28587 -11482817 4611990575414646848 9223302669582414438 9828522700609834800 378121905921203.2 25993 -63469 4612175339998036670 9222961628400798084 17239621485933250238 663164390134376.5 25996 -29103473 4611744585914335132 9223035551850347954 12590190375872647672 525927999326314.7 23939 +1704509 4611700827100483880 9223310246721229500 16398241567152875142 nan 261874 +732797 4611721382223060002 9223355550934604746 16281585268876620522 nan 237784 +598875 4611701407242345792 9223362250391155632 3577699408183553052 nan 167966 +792887 4611699550286611812 9223164887726235740 7088177025760385824 nan 125539 +3807842 4611710821592843606 9223283397553859544 5756765290752687660 nan 97845 +25703952 4611784761593342388 9223241341744449690 4782279928971192568 nan 73368 +716829 4611852156092872082 9223361623076951140 8613712481895484190 nan 44993 +59183 4611730685242027332 9223354909338698162 18369075291092794110 nan 42817 +33010362 4611704682869732882 9223092117352620518 9991152681891671022 nan 38861 +800784 4611752907938305166 9223309994342931384 5251877538869750510 nan 38767 +20810645 4611712185532639162 9223218900001937412 11803718472901310700 nan 36477 +25843850 4611744529689964352 9223346023778617822 127137885677350808 nan 34353 +23447120 4611796031755620254 9223329309291309758 1841522159325376278 nan 33768 +14739804 4611762063154116632 9223007205463222212 16302703534054321116 nan 32156 +32077710 4612033458080771112 9223352444952988904 421072759851674408 nan 30172 +22446879 4611846229717089436 9223124373140579096 6577134317587565298 nan 29249 +170282 4611833225706935900 9223371583739401906 15764226366913732386 nan 28587 +11482817 4611990575414646848 9223302669582414438 9828522700609834800 nan 25993 +63469 4612175339998036670 9222961628400798084 17239621485933250238 nan 25996 +29103473 4611744585914335132 9223035551850347954 12590190375872647672 nan 23939 diff --git a/tests/queries/1_stateful/00165_jit_aggregate_functions.sql b/tests/queries/1_stateful/00165_jit_aggregate_functions.sql index 406e9857311..6994b7bdaa6 100644 --- a/tests/queries/1_stateful/00165_jit_aggregate_functions.sql +++ b/tests/queries/1_stateful/00165_jit_aggregate_functions.sql @@ -4,13 +4,13 @@ SET min_count_to_compile_aggregate_expression = 0; SELECT 'Aggregation using JIT compilation'; SELECT 'Simple functions'; -SELECT CounterID, min(WatchID), max(WatchID), sum(WatchID), avg(WatchID), count(WatchID) FROM test.hits +SELECT CounterID, min(WatchID), max(WatchID), sum(WatchID), avg(WatchID), avgWeighted(WatchID, if(WatchID % 2 == 0, 0, 1)), count(WatchID) FROM test.hits GROUP BY CounterID ORDER BY count() DESC LIMIT 20; SELECT 'Simple functions if combinator'; WITH (WatchID % 2 == 0) AS predicate -SELECT CounterID, minIf(WatchID,predicate), maxIf(WatchID, predicate), sumIf(WatchID, predicate), avgIf(WatchID, predicate), countIf(WatchID, predicate) FROM test.hits +SELECT CounterID, minIf(WatchID,predicate), maxIf(WatchID, predicate), sumIf(WatchID, predicate), avgIf(WatchID, predicate), avgWeightedIf(WatchID, if(WatchID % 2 == 0, 0, 1), predicate), countIf(WatchID, predicate) FROM test.hits GROUP BY CounterID ORDER BY count() DESC LIMIT 20; SET compile_aggregate_expressions = 0; @@ -19,11 +19,11 @@ SELECT 'Aggregation without JIT compilation'; SELECT 'Simple functions'; 
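-- Why the avgWeightedIf column in the reference hunks above reads `nan`: the
-- weight expression if(WatchID % 2 == 0, 0, 1) is zero on exactly the rows
-- that the predicate (WatchID % 2 == 0) selects, so the accumulated weight
-- denominator is 0 and the final Float64 division yields nan. A minimal
-- standalone sketch of the same effect (a hypothetical illustrative query,
-- not part of this patch):
--
--   SELECT avgWeightedIf(number, 0, number % 2 == 0) FROM numbers(100); -- nan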
-SELECT CounterID, min(WatchID), max(WatchID), sum(WatchID), avg(WatchID), count(WatchID) FROM test.hits
+SELECT CounterID, min(WatchID), max(WatchID), sum(WatchID), avg(WatchID), avgWeighted(WatchID, if(WatchID % 2 == 0, 0, 1)), count(WatchID) FROM test.hits
 GROUP BY CounterID ORDER BY count() DESC LIMIT 20;
 
 SELECT 'Simple functions if combinator';
 
 WITH (WatchID % 2 == 0) AS predicate
-SELECT CounterID, minIf(WatchID,predicate), maxIf(WatchID, predicate), sumIf(WatchID, predicate), avgIf(WatchID, predicate), countIf(WatchID, predicate) FROM test.hits
+SELECT CounterID, minIf(WatchID,predicate), maxIf(WatchID, predicate), sumIf(WatchID, predicate), avgWeightedIf(WatchID, if(WatchID % 2 == 0, 0, 1), predicate), countIf(WatchID, predicate) FROM test.hits
 GROUP BY CounterID ORDER BY count() DESC LIMIT 20;

From 325b54f623fb551f1798184d19626be99ab3a0a0 Mon Sep 17 00:00:00 2001
From: Maksim Kita
Date: Sun, 27 Jun 2021 20:43:47 +0300
Subject: [PATCH 653/931] Aggregator compile only part of aggregate functions

---
 src/Interpreters/Aggregator.cpp             | 164 ++++++++------
 src/Interpreters/Aggregator.h               |   5 +-
 .../00165_jit_aggregate_functions.reference | 202 +++++++++++-------
 .../00165_jit_aggregate_functions.sql       |  18 +-
 4 files changed, 241 insertions(+), 148 deletions(-)

diff --git a/src/Interpreters/Aggregator.cpp b/src/Interpreters/Aggregator.cpp
index bb69504b9f7..73f6ed87a55 100644
--- a/src/Interpreters/Aggregator.cpp
+++ b/src/Interpreters/Aggregator.cpp
@@ -320,13 +320,15 @@ void Aggregator::compileAggregateFunctions()
     size_t aggregate_instructions_size = 0;
     String functions_description;
 
+    is_aggregate_function_compiled.resize(aggregate_functions.size());
+
     /// Add values to the aggregate functions.
     for (size_t i = 0; i < aggregate_functions.size(); ++i)
     {
         const auto * function = aggregate_functions[i];
         size_t offset_of_aggregate_function = offsets_of_aggregate_states[i];
 
-        if (function && function->isCompilable())
+        if (function->isCompilable())
         {
             AggregateFunctionWithOffset function_to_compile
             {
@@ -338,13 +340,17 @@ void Aggregator::compileAggregateFunctions()
 
             functions_description += function->getDescription();
             functions_description += ' ';
+
+            functions_description += std::to_string(offset_of_aggregate_function);
+            functions_description += ' ';
         }
 
         ++aggregate_instructions_size;
+        is_aggregate_function_compiled[i] = function->isCompilable();
     }
 
     /// TODO: Probably better to compile more than 2 functions
-    if (functions_to_compile.empty() || functions_to_compile.size() != aggregate_functions.size())
+    if (functions_to_compile.empty())
         return;
 
     SipHash aggregate_functions_description_hash;
@@ -548,10 +554,15 @@ AggregatedDataVariants::Type Aggregator::chooseAggregationMethod()
     return AggregatedDataVariants::Type::serialized;
 }
 
-void Aggregator::createAggregateStates(size_t aggregate_function_start_index, AggregateDataPtr & aggregate_data) const
+template <bool skip_compiled_aggregate_functions>
+void Aggregator::createAggregateStates(AggregateDataPtr & aggregate_data) const
 {
-    for (size_t j = aggregate_function_start_index; j < params.aggregates_size; ++j)
+    for (size_t j = 0; j < params.aggregates_size; ++j)
    {
+        if constexpr (skip_compiled_aggregate_functions)
+            if (is_aggregate_function_compiled[j])
+                continue;
+
         try
         {
             /** An exception may occur if there is a shortage of memory.
              */
             aggregate_functions[j]->create(aggregate_data + offsets_of_aggregate_states[j]);
         }
         catch (...)
         {
             for (size_t rollback_j = 0; rollback_j < j; ++rollback_j)
+            {
+                if constexpr (skip_compiled_aggregate_functions)
+                    if (is_aggregate_function_compiled[rollback_j])
+                        continue;
+
                 aggregate_functions[rollback_j]->destroy(aggregate_data + offsets_of_aggregate_states[rollback_j]);
+            }
 
             throw;
         }
     }
 }
 
-void Aggregator::createAggregateStates(AggregateDataPtr & aggregate_data) const
-{
-    createAggregateStates(0, aggregate_data);
-}
-
 /** It's interesting - if you remove `noinline`, then gcc for some reason will inline this function, and the performance decreases (~ 10%).
   * (Probably because after the inline of this function, more internal functions no longer be inlined.)
   * Inline does not make sense, since the inner loop is entirely inside this function.
@@ -676,32 +688,6 @@ void NO_INLINE Aggregator::executeImplBatch(
         }
     }
 
-    size_t compiled_functions_count = 0;
-
-#if USE_EMBEDDED_COMPILER
-
-#if defined(MEMORY_SANITIZER)
-    size_t compiled_functions_places_size = 0;
-#endif
-
-    if constexpr (use_compiled_functions)
-    {
-        compiled_functions_count = compiled_aggregate_functions_holder->compiled_aggregate_functions.functions_count;
-
-#if defined(MEMORY_SANITIZER)
-
-        if (compiled_functions_count < offsets_of_aggregate_states.size())
-        {
-            compiled_functions_places_size = offsets_of_aggregate_states[compiled_functions_count];
-        }
-        else
-        {
-            compiled_functions_places_size = total_size_of_aggregate_states;
-        }
-#endif
-    }
-#endif
-
     std::unique_ptr<AggregateDataPtr[]> places(new AggregateDataPtr[rows]);
 
     /// For all rows.
@@ -724,15 +710,29 @@ void NO_INLINE Aggregator::executeImplBatch(
 #if USE_EMBEDDED_COMPILER
             if constexpr (use_compiled_functions)
             {
-                compiled_aggregate_functions_holder->compiled_aggregate_functions.create_aggregate_states_function(aggregate_data);
+                const auto & compiled_aggregate_functions = compiled_aggregate_functions_holder->compiled_aggregate_functions;
+                compiled_aggregate_functions.create_aggregate_states_function(aggregate_data);
+                if (compiled_aggregate_functions.functions_count != aggregate_functions.size())
+                {
+                    static constexpr bool skip_compiled_aggregate_functions = true;
+                    createAggregateStates<skip_compiled_aggregate_functions>(aggregate_data);
+                }
 
 #if defined(MEMORY_SANITIZER)
-                __msan_unpoison(aggregate_data, compiled_functions_places_size);
+                for (size_t i = 0; i < aggregate_functions.size(); ++i)
+                {
+                    if (!is_aggregate_function_compiled[i])
+                        continue;
+
+                    __msan_unpoison(aggregate_data + offsets_of_aggregate_states[i], params.aggregates[i].function->sizeOfData());
+                }
 #endif
             }
+            else
 #endif
-
-            createAggregateStates(compiled_functions_count, aggregate_data);
+            {
+                createAggregateStates<false>(aggregate_data);
+            }
 
             emplace_result.setMapped(aggregate_data);
         }
@@ -758,15 +758,17 @@ void NO_INLINE Aggregator::executeImplBatch(
     if constexpr (use_compiled_functions)
     {
         std::vector<ColumnData> columns_data;
-        columns_data.reserve(aggregate_functions.size());
 
-        for (size_t compiled_function_index = 0; compiled_function_index < compiled_functions_count; ++compiled_function_index)
+        for (size_t i = 0; i < aggregate_functions.size(); ++i)
         {
-            AggregateFunctionInstruction * inst = aggregate_instructions + compiled_function_index;
+            if (!is_aggregate_function_compiled[i])
+                continue;
+
+            AggregateFunctionInstruction * inst = aggregate_instructions + i;
             size_t arguments_size = inst->that->getArgumentTypes().size();
 
-            for (size_t i = 0; i < arguments_size; ++i)
-                columns_data.emplace_back(getColumnData(inst->batch_arguments[i]));
+            for (size_t argument_index = 0; argument_index < arguments_size; ++argument_index)
+                columns_data.emplace_back(getColumnData(inst->batch_arguments[argument_index]));
         }
 
         auto add_into_aggregate_states_function = compiled_aggregate_functions_holder->compiled_aggregate_functions.add_into_aggregate_states_function;
@@ -775,9 +777,16 @@ void NO_INLINE Aggregator::executeImplBatch(
 #endif
 
     /// Add values to the aggregate functions.
-    AggregateFunctionInstruction * inst = aggregate_instructions + compiled_functions_count;
-    for (; inst->that; ++inst)
+    for (size_t i = 0; i < aggregate_functions.size(); ++i)
     {
+#if USE_EMBEDDED_COMPILER
+        if constexpr (use_compiled_functions)
+            if (is_aggregate_function_compiled[i])
+                continue;
+#endif
+
+        AggregateFunctionInstruction * inst = aggregate_instructions + i;
+
         if (inst->offsets)
             inst->batch_that->addBatchArray(rows, places.get(), inst->state_offset, inst->batch_arguments, inst->offsets, aggregates_pool);
         else
@@ -1360,10 +1369,11 @@ void NO_INLINE Aggregator::convertToBlockImplFinal(
 
         auto compiled_functions = compiled_aggregate_functions_holder->compiled_aggregate_functions;
 
-        columns_data.reserve(final_aggregate_columns.size());
-
-        for (size_t i = 0; i < compiled_functions.functions_count; ++i)
+        for (size_t i = 0; i < params.aggregates_size; ++i)
         {
+            if (!is_aggregate_function_compiled[i])
+                continue;
+
             auto & final_aggregate_column = final_aggregate_columns[i];
             final_aggregate_column = final_aggregate_column->cloneResized(places.size());
             columns_data.emplace_back(getColumnData(final_aggregate_column.get()));
@@ -1371,20 +1381,27 @@ void NO_INLINE Aggregator::convertToBlockImplFinal(
 
         auto insert_aggregates_into_columns_function = compiled_functions.insert_aggregates_into_columns_function;
         insert_aggregates_into_columns_function(places.size(), columns_data.data(), places.data());
-
-        aggregate_functions_destroy_index = compiled_functions.functions_count;
     }
 #endif
 
     for (; aggregate_functions_destroy_index < params.aggregates_size;)
     {
+        if constexpr (use_compiled_functions)
+        {
+            if (is_aggregate_function_compiled[aggregate_functions_destroy_index])
+            {
+                ++aggregate_functions_destroy_index;
+                continue;
+            }
+        }
+
         auto & final_aggregate_column = final_aggregate_columns[aggregate_functions_destroy_index];
         size_t offset = offsets_of_aggregate_states[aggregate_functions_destroy_index];
 
         /** We increase aggregate_functions_destroy_index because by function contract if insertResultIntoAndDestroyBatch
-          * throws exception, it also must destroy all necessary states.
-          * Then code need to continue to destroy other aggregate function states with next function index.
-          */
+         * throws exception, it also must destroy all necessary states.
+         * Then code need to continue to destroy other aggregate function states with next function index.
+         */
         size_t destroy_index = aggregate_functions_destroy_index;
         ++aggregate_functions_destroy_index;
@@ -1402,6 +1419,15 @@ void NO_INLINE Aggregator::convertToBlockImplFinal(
 
     for (; aggregate_functions_destroy_index < params.aggregates_size; ++aggregate_functions_destroy_index)
    {
+        if constexpr (use_compiled_functions)
+        {
+            if (is_aggregate_function_compiled[aggregate_functions_destroy_index])
+                continue;
+        }
+
         size_t offset = offsets_of_aggregate_states[aggregate_functions_destroy_index];
         aggregate_functions[aggregate_functions_destroy_index]->destroyBatch(places.size(), places.data(), offset);
     }
@@ -1840,22 +1866,36 @@ void NO_INLINE Aggregator::mergeDataImpl(
     {
         if (!inserted)
         {
-            size_t compiled_functions_count = 0;
-
 #if USE_EMBEDDED_COMPILER
             if constexpr (use_compiled_functions)
             {
                 const auto & compiled_functions = compiled_aggregate_functions_holder->compiled_aggregate_functions;
                 compiled_functions.merge_aggregate_states_function(dst, src);
-                compiled_functions_count = compiled_aggregate_functions_holder->compiled_aggregate_functions.functions_count;
+
+                if (compiled_aggregate_functions_holder->compiled_aggregate_functions.functions_count != params.aggregates_size)
+                {
+                    for (size_t i = 0; i < params.aggregates_size; ++i)
+                    {
+                        if (!is_aggregate_function_compiled[i])
+                            aggregate_functions[i]->merge(dst + offsets_of_aggregate_states[i], src + offsets_of_aggregate_states[i], arena);
+                    }
+
+                    for (size_t i = 0; i < params.aggregates_size; ++i)
+                    {
+                        if (!is_aggregate_function_compiled[i])
+                            aggregate_functions[i]->destroy(src + offsets_of_aggregate_states[i]);
+                    }
+                }
             }
+            else
 #endif
+            {
+                for (size_t i = 0; i < params.aggregates_size; ++i)
+                    aggregate_functions[i]->merge(dst + offsets_of_aggregate_states[i], src + offsets_of_aggregate_states[i], arena);
 
-            for (size_t i = compiled_functions_count; i < params.aggregates_size; ++i)
-                aggregate_functions[i]->merge(dst + offsets_of_aggregate_states[i], src + offsets_of_aggregate_states[i], arena);
-
-            for (size_t i = compiled_functions_count; i < params.aggregates_size; ++i)
-                aggregate_functions[i]->destroy(src + offsets_of_aggregate_states[i]);
+                for (size_t i = 0; i < params.aggregates_size; ++i)
+                    aggregate_functions[i]->destroy(src + offsets_of_aggregate_states[i]);
+            }
         }
         else
         {

diff --git a/src/Interpreters/Aggregator.h b/src/Interpreters/Aggregator.h
index 19600d6aeb9..bb36ae54a5d 100644
--- a/src/Interpreters/Aggregator.h
+++ b/src/Interpreters/Aggregator.h
@@ -1089,6 +1089,8 @@ private:
     std::shared_ptr<CompiledAggregateFunctionsHolder> compiled_aggregate_functions_holder;
 #endif
 
+    std::vector<bool> is_aggregate_function_compiled;
+
     /** Try to compile aggregate functions.
       */
     void compileAggregateFunctions();
@@ -1098,8 +1100,7 @@ private:
 
     /** Create states of aggregate functions for one key.
       */
-    void createAggregateStates(size_t aggregate_function_start_index, AggregateDataPtr & aggregate_data) const;
-
+    template <bool skip_compiled_aggregate_functions>
+    void createAggregateStates(AggregateDataPtr & aggregate_data) const;
 
     /** Call `destroy` methods for states of aggregate functions.
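The Aggregator hunks above replace the earlier all-or-nothing scheme, in which JIT compilation was used only when every aggregate function in the query was compilable, with a per-function is_aggregate_function_compiled mask: one compiled call covers the compilable subset, and the remaining functions keep the interpreted per-function path. A minimal self-contained sketch of that dispatch pattern follows; it is an illustration under assumptions, not the actual ClickHouse API (AggregateFunctionSketch, add_batch, and compiled_add_batch are hypothetical stand-ins).

#include <cstddef>
#include <functional>
#include <vector>

/// Hypothetical stand-in for one interpreted aggregate function.
struct AggregateFunctionSketch
{
    std::function<void(size_t)> add_batch;
};

/// Mirrors the mixed dispatch in executeImplBatch above: the compiled blob is
/// invoked once and internally updates every compiled function, then the mask
/// routes the remaining functions through the interpreted path.
void addBatchMixed(
    size_t rows,
    const std::vector<AggregateFunctionSketch> & functions,
    const std::vector<bool> & is_compiled,
    const std::function<void(size_t)> & compiled_add_batch)
{
    /// One call handles the whole compilable subset for the whole batch.
    compiled_add_batch(rows);

    /// Everything else falls back to per-function interpreted execution.
    for (size_t i = 0; i < functions.size(); ++i)
    {
        if (is_compiled[i])
            continue;

        functions[i].add_batch(rows);
    }
}

int main()
{
    size_t compiled_calls = 0;
    size_t interpreted_calls = 0;

    std::vector<AggregateFunctionSketch> functions(3);
    for (auto & function : functions)
        function.add_batch = [&](size_t) { ++interpreted_calls; };

    /// Pretend functions 0 and 2 were JIT-compiled.
    std::vector<bool> is_compiled{true, false, true};

    addBatchMixed(128, functions, is_compiled, [&](size_t) { ++compiled_calls; });

    /// Expect one compiled call and one interpreted call (function 1 only).
    return (compiled_calls == 1 && interpreted_calls == 1) ? 0 : 1;
}

The same mask discipline has to be applied wherever the function list is walked, which is why the hunks above also touch state creation, merging, and destruction.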
diff --git a/tests/queries/1_stateful/00165_jit_aggregate_functions.reference b/tests/queries/1_stateful/00165_jit_aggregate_functions.reference index dab1b1d10a8..2d94ad190ca 100644 --- a/tests/queries/1_stateful/00165_jit_aggregate_functions.reference +++ b/tests/queries/1_stateful/00165_jit_aggregate_functions.reference @@ -1,86 +1,128 @@ Aggregation using JIT compilation Simple functions -1704509 4611700827100483880 9223360787015464643 10441337359398154812 19954243669348.844 47782393610906.42 523264 -732797 4611701940806302259 9223355550934604746 977192643464016658 2054229034942.3723 13207929959132.072 475698 -598875 4611701407242345792 9223362250391155632 9312163881623734456 27615161624211.875 33882422470487.816 337212 -792887 4611699550286611812 9223290551912005343 6930300520201292824 27479710385933.586 144395676294829.06 252197 -3807842 4611710821592843606 9223326163906184987 16710274896338005145 85240848090850.69 111553091480739.77 196036 -25703952 4611709443519524003 9223353913449113943 9946868158853570839 67568783303242.086 69940119305585.88 147211 -716829 4611852156092872082 9223361623076951140 15381015774917924786 170693446547158.72 149997856481568.4 90109 -59183 4611730685242027332 9223354909338698162 8078812522502896568 94622946187035.42 191637641678484.4 85379 -33010362 4611704682869732882 9223268545373999677 2064452191838585926 26532987929602.555 270118717805588.94 77807 -800784 4611752907938305166 9223340418389788041 18082918611792817587 233352070043266.62 331337406660376.1 77492 -20810645 4611712185532639162 9223218900001937412 4996531385439292694 68246505203164.63 316843341306825.3 73213 -25843850 4611690025407720929 9223346023778617822 12755881190906812868 185015319325648.16 365076991941184.7 68945 -23447120 4611796031755620254 9223329309291309758 17231649548755339966 255019232629204.38 455302271742203.5 67570 -14739804 4611692230555590277 9223313509005166531 2458378896777063244 38308020331864.36 143744750966090.75 64174 -32077710 4611884228437061959 9223352444952988904 12965822147651192908 214467085941034.7 414236870552090.9 60456 -22446879 4611846229717089436 9223124373140579096 13530160492087688838 231724477077663.4 238607624382296.62 58389 -170282 4611833225706935900 9223371583739401906 8076893424988479310 141657635880324.8 378452730629064.3 57017 -11482817 4611708000353743073 9223337838355779113 14841435427430843458 283531099960470.8 190228928613426.25 52345 -63469 4611695097019173921 9223353530156141191 6296784708578574520 120762239817777.88 287000202568456.94 52142 -29103473 4611744585914335132 9223333530281362537 5908285283932344933 123712996438970.34 493926654425846.94 47758 +1704509 4611700827100483880 9223360787015464643 10441337359398154812 19954243669348.844 9648741.579254271 523264 +732797 4611701940806302259 9223355550934604746 977192643464016658 2054229034942.3723 51998323.94457991 475698 +598875 4611701407242345792 9223362250391155632 9312163881623734456 27615161624211.875 12261797.824844675 337212 +792887 4611699550286611812 9223290551912005343 6930300520201292824 27479710385933.586 53095331.60360441 252197 +3807842 4611710821592843606 9223326163906184987 16710274896338005145 85240848090850.69 22373416.533275086 196036 +25703952 4611709443519524003 9223353913449113943 9946868158853570839 67568783303242.086 3154349.826950714 147211 +716829 4611852156092872082 9223361623076951140 15381015774917924786 170693446547158.72 201431892.4773785 90109 +59183 4611730685242027332 9223354909338698162 8078812522502896568 94622946187035.42 1425270865.0901496 85379 
+33010362 4611704682869732882 9223268545373999677 2064452191838585926 26532987929602.555 3695122.4062526934 77807 +800784 4611752907938305166 9223340418389788041 18082918611792817587 233352070043266.62 36535786.81446395 77492 +20810645 4611712185532639162 9223218900001937412 4996531385439292694 68246505203164.63 6316535.831023813 73213 +25843850 4611690025407720929 9223346023778617822 12755881190906812868 185015319325648.16 9962165.34831339 68945 +23447120 4611796031755620254 9223329309291309758 17231649548755339966 255019232629204.38 7937191.271698021 67570 +14739804 4611692230555590277 9223313509005166531 2458378896777063244 38308020331864.36 14590240.469105456 64174 +32077710 4611884228437061959 9223352444952988904 12965822147651192908 214467085941034.7 7257521.096258734 60456 +22446879 4611846229717089436 9223124373140579096 13530160492087688838 231724477077663.4 4737362.521046629 58389 +170282 4611833225706935900 9223371583739401906 8076893424988479310 141657635880324.8 1613795518.1065989 57017 +11482817 4611708000353743073 9223337838355779113 14841435427430843458 283531099960470.8 9938452.835998287 52345 +63469 4611695097019173921 9223353530156141191 6296784708578574520 120762239817777.88 579655378.4603049 52142 +29103473 4611744585914335132 9223333530281362537 5908285283932344933 123712996438970.34 867841.595541967 47758 +Simple functions with non compilable function +1704509 4611700827100483880 9223360787015464643 10441337359398154812 4611686018427387904 19954243669348.844 9648741.579254271 523264 +732797 4611701940806302259 9223355550934604746 977192643464016658 4611686018427387904 2054229034942.3723 51998323.94457991 475698 +598875 4611701407242345792 9223362250391155632 9312163881623734456 4611686018427387904 27615161624211.875 12261797.824844675 337212 +792887 4611699550286611812 9223290551912005343 6930300520201292824 4611686018427387904 27479710385933.586 53095331.60360441 252197 +3807842 4611710821592843606 9223326163906184987 16710274896338005145 4611686018427387904 85240848090850.69 22373416.533275086 196036 +25703952 4611709443519524003 9223353913449113943 9946868158853570839 4611686018427387904 67568783303242.086 3154349.826950714 147211 +716829 4611852156092872082 9223361623076951140 15381015774917924786 4611686018427387904 170693446547158.72 201431892.4773785 90109 +59183 4611730685242027332 9223354909338698162 8078812522502896568 4611686018427387904 94622946187035.42 1425270865.0901496 85379 +33010362 4611704682869732882 9223268545373999677 2064452191838585926 4611686018427387904 26532987929602.555 3695122.4062526934 77807 +800784 4611752907938305166 9223340418389788041 18082918611792817587 4611686018427387904 233352070043266.62 36535786.81446395 77492 +20810645 4611712185532639162 9223218900001937412 4996531385439292694 4611686018427387904 68246505203164.63 6316535.831023813 73213 +25843850 4611690025407720929 9223346023778617822 12755881190906812868 4611686018427387904 185015319325648.16 9962165.34831339 68945 +23447120 4611796031755620254 9223329309291309758 17231649548755339966 4611686018427387904 255019232629204.38 7937191.271698021 67570 +14739804 4611692230555590277 9223313509005166531 2458378896777063244 4611686018427387904 38308020331864.36 14590240.469105456 64174 +32077710 4611884228437061959 9223352444952988904 12965822147651192908 4611686018427387904 214467085941034.7 7257521.096258734 60456 +22446879 4611846229717089436 9223124373140579096 13530160492087688838 4611686018427387904 231724477077663.4 4737362.521046629 58389 +170282 4611833225706935900 
9223371583739401906 8076893424988479310 4611686018427387904 141657635880324.8 1613795518.1065989 57017 +11482817 4611708000353743073 9223337838355779113 14841435427430843458 4611686018427387904 283531099960470.8 9938452.835998287 52345 +63469 4611695097019173921 9223353530156141191 6296784708578574520 4611686018427387904 120762239817777.88 579655378.4603049 52142 +29103473 4611744585914335132 9223333530281362537 5908285283932344933 4611686018427387904 123712996438970.34 867841.595541967 47758 Simple functions if combinator -1704509 4611700827100483880 9223310246721229500 16398241567152875142 62618822667209.71 nan 261874 -732797 4611721382223060002 9223355550934604746 16281585268876620522 68472164943295.68 nan 237784 -598875 4611701407242345792 9223362250391155632 3577699408183553052 21300140553347.42 nan 167966 -792887 4611699550286611812 9223164887726235740 7088177025760385824 56461952267903.89 nan 125539 -3807842 4611710821592843606 9223283397553859544 5756765290752687660 58835559208469.4 nan 97845 -25703952 4611784761593342388 9223241341744449690 4782279928971192568 65182094768443.91 nan 73368 -716829 4611852156092872082 9223361623076951140 8613712481895484190 191445613359755.62 nan 44993 -59183 4611730685242027332 9223354909338698162 18369075291092794110 429013599530392 nan 42817 -33010362 4611704682869732882 9223092117352620518 9991152681891671022 257099731913529.5 nan 38861 -800784 4611752907938305166 9223309994342931384 5251877538869750510 135472890315726.03 nan 38767 -20810645 4611712185532639162 9223218900001937412 11803718472901310700 323593455407553 nan 36477 -25843850 4611744529689964352 9223346023778617822 127137885677350808 3700925266420.715 nan 34353 -23447120 4611796031755620254 9223329309291309758 1841522159325376278 54534534450526.42 nan 33768 -14739804 4611762063154116632 9223007205463222212 16302703534054321116 506987919332451.8 nan 32156 -32077710 4612033458080771112 9223352444952988904 421072759851674408 13955745719596.793 nan 30172 -22446879 4611846229717089436 9223124373140579096 6577134317587565298 224866980668999.47 nan 29249 -170282 4611833225706935900 9223371583739401906 15764226366913732386 551447384017691 nan 28587 -11482817 4611990575414646848 9223302669582414438 9828522700609834800 378121905921203.2 nan 25993 -63469 4612175339998036670 9222961628400798084 17239621485933250238 663164390134376.5 nan 25996 -29103473 4611744585914335132 9223035551850347954 12590190375872647672 525927999326314.7 nan 23939 +1704509 4611700827100483880 9223310246721229500 16398241567152875142 62618822667209.71 2224726.7626273884 261874 +732797 4611721382223060002 9223355550934604746 16281585268876620522 68472164943295.68 5898616.931652982 237784 +598875 4611701407242345792 9223362250391155632 3577699408183553052 21300140553347.42 53771550.26565126 167966 +792887 4611699550286611812 9223164887726235740 7088177025760385824 56461952267903.89 92835869.96920013 125539 +3807842 4611710821592843606 9223283397553859544 5756765290752687660 58835559208469.4 39794091.419183925 97845 +25703952 4611784761593342388 9223241341744449690 4782279928971192568 65182094768443.91 9276773.708181158 73368 +716829 4611852156092872082 9223361623076951140 8613712481895484190 191445613359755.62 291083243.75407773 44993 +59183 4611730685242027332 9223354909338698162 18369075291092794110 429013599530392 5925109959.715378 42817 +33010362 4611704682869732882 9223092117352620518 9991152681891671022 257099731913529.5 12412830.045471078 38861 +800784 4611752907938305166 9223309994342931384 5251877538869750510 
135472890315726.03 53535427.52018088 38767 +20810645 4611712185532639162 9223218900001937412 11803718472901310700 323593455407553 10496765.20741332 36477 +25843850 4611744529689964352 9223346023778617822 127137885677350808 3700925266420.715 18966925.191309396 34353 +23447120 4611796031755620254 9223329309291309758 1841522159325376278 54534534450526.42 6271211.193812284 33768 +14739804 4611762063154116632 9223007205463222212 16302703534054321116 506987919332451.8 6885575.861759452 32156 +32077710 4612033458080771112 9223352444952988904 421072759851674408 13955745719596.793 12220152.393889504 30172 +22446879 4611846229717089436 9223124373140579096 6577134317587565298 224866980668999.47 2482202.163802278 29249 +170282 4611833225706935900 9223371583739401906 15764226366913732386 551447384017691 2515144222.953728 28587 +11482817 4611990575414646848 9223302669582414438 9828522700609834800 378121905921203.2 34845264.2080656 25993 +63469 4612175339998036670 9222961628400798084 17239621485933250238 663164390134376.5 7825349797.6059 25996 +29103473 4611744585914335132 9223035551850347954 12590190375872647672 525927999326314.7 26049107.15514301 23939 Aggregation without JIT compilation Simple functions -1704509 4611700827100483880 9223360787015464643 10441337359398154812 19954243669348.844 47782393610906.42 523264 -732797 4611701940806302259 9223355550934604746 977192643464016658 2054229034942.3723 13207929959132.072 475698 -598875 4611701407242345792 9223362250391155632 9312163881623734456 27615161624211.875 33882422470487.816 337212 -792887 4611699550286611812 9223290551912005343 6930300520201292824 27479710385933.586 144395676294829.06 252197 -3807842 4611710821592843606 9223326163906184987 16710274896338005145 85240848090850.69 111553091480739.77 196036 -25703952 4611709443519524003 9223353913449113943 9946868158853570839 67568783303242.086 69940119305585.88 147211 -716829 4611852156092872082 9223361623076951140 15381015774917924786 170693446547158.72 149997856481568.4 90109 -59183 4611730685242027332 9223354909338698162 8078812522502896568 94622946187035.42 191637641678484.4 85379 -33010362 4611704682869732882 9223268545373999677 2064452191838585926 26532987929602.555 270118717805588.94 77807 -800784 4611752907938305166 9223340418389788041 18082918611792817587 233352070043266.62 331337406660376.1 77492 -20810645 4611712185532639162 9223218900001937412 4996531385439292694 68246505203164.63 316843341306825.3 73213 -25843850 4611690025407720929 9223346023778617822 12755881190906812868 185015319325648.16 365076991941184.7 68945 -23447120 4611796031755620254 9223329309291309758 17231649548755339966 255019232629204.38 455302271742203.5 67570 -14739804 4611692230555590277 9223313509005166531 2458378896777063244 38308020331864.36 143744750966090.75 64174 -32077710 4611884228437061959 9223352444952988904 12965822147651192908 214467085941034.7 414236870552090.9 60456 -22446879 4611846229717089436 9223124373140579096 13530160492087688838 231724477077663.4 238607624382296.62 58389 -170282 4611833225706935900 9223371583739401906 8076893424988479310 141657635880324.8 378452730629064.3 57017 -11482817 4611708000353743073 9223337838355779113 14841435427430843458 283531099960470.8 190228928613426.25 52345 -63469 4611695097019173921 9223353530156141191 6296784708578574520 120762239817777.88 287000202568456.94 52142 -29103473 4611744585914335132 9223333530281362537 5908285283932344933 123712996438970.34 493926654425846.94 47758 +1704509 4611700827100483880 9223360787015464643 10441337359398154812 
19954243669348.844 9648741.579254271 523264 +732797 4611701940806302259 9223355550934604746 977192643464016658 2054229034942.3723 51998323.94457991 475698 +598875 4611701407242345792 9223362250391155632 9312163881623734456 27615161624211.875 12261797.824844675 337212 +792887 4611699550286611812 9223290551912005343 6930300520201292824 27479710385933.586 53095331.60360441 252197 +3807842 4611710821592843606 9223326163906184987 16710274896338005145 85240848090850.69 22373416.533275086 196036 +25703952 4611709443519524003 9223353913449113943 9946868158853570839 67568783303242.086 3154349.826950714 147211 +716829 4611852156092872082 9223361623076951140 15381015774917924786 170693446547158.72 201431892.4773785 90109 +59183 4611730685242027332 9223354909338698162 8078812522502896568 94622946187035.42 1425270865.0901496 85379 +33010362 4611704682869732882 9223268545373999677 2064452191838585926 26532987929602.555 3695122.4062526934 77807 +800784 4611752907938305166 9223340418389788041 18082918611792817587 233352070043266.62 36535786.81446395 77492 +20810645 4611712185532639162 9223218900001937412 4996531385439292694 68246505203164.63 6316535.831023813 73213 +25843850 4611690025407720929 9223346023778617822 12755881190906812868 185015319325648.16 9962165.34831339 68945 +23447120 4611796031755620254 9223329309291309758 17231649548755339966 255019232629204.38 7937191.271698021 67570 +14739804 4611692230555590277 9223313509005166531 2458378896777063244 38308020331864.36 14590240.469105456 64174 +32077710 4611884228437061959 9223352444952988904 12965822147651192908 214467085941034.7 7257521.096258734 60456 +22446879 4611846229717089436 9223124373140579096 13530160492087688838 231724477077663.4 4737362.521046629 58389 +170282 4611833225706935900 9223371583739401906 8076893424988479310 141657635880324.8 1613795518.1065989 57017 +11482817 4611708000353743073 9223337838355779113 14841435427430843458 283531099960470.8 9938452.835998287 52345 +63469 4611695097019173921 9223353530156141191 6296784708578574520 120762239817777.88 579655378.4603049 52142 +29103473 4611744585914335132 9223333530281362537 5908285283932344933 123712996438970.34 867841.595541967 47758 +Simple functions with non compilable function +1704509 4611700827100483880 9223360787015464643 10441337359398154812 4611686018427387904 19954243669348.844 9648741.579254271 523264 +732797 4611701940806302259 9223355550934604746 977192643464016658 4611686018427387904 2054229034942.3723 51998323.94457991 475698 +598875 4611701407242345792 9223362250391155632 9312163881623734456 4611686018427387904 27615161624211.875 12261797.824844675 337212 +792887 4611699550286611812 9223290551912005343 6930300520201292824 4611686018427387904 27479710385933.586 53095331.60360441 252197 +3807842 4611710821592843606 9223326163906184987 16710274896338005145 4611686018427387904 85240848090850.69 22373416.533275086 196036 +25703952 4611709443519524003 9223353913449113943 9946868158853570839 4611686018427387904 67568783303242.086 3154349.826950714 147211 +716829 4611852156092872082 9223361623076951140 15381015774917924786 4611686018427387904 170693446547158.72 201431892.4773785 90109 +59183 4611730685242027332 9223354909338698162 8078812522502896568 4611686018427387904 94622946187035.42 1425270865.0901496 85379 +33010362 4611704682869732882 9223268545373999677 2064452191838585926 4611686018427387904 26532987929602.555 3695122.4062526934 77807 +800784 4611752907938305166 9223340418389788041 18082918611792817587 4611686018427387904 233352070043266.62 36535786.81446395 77492 
+20810645 4611712185532639162 9223218900001937412 4996531385439292694 4611686018427387904 68246505203164.63 6316535.831023813 73213 +25843850 4611690025407720929 9223346023778617822 12755881190906812868 4611686018427387904 185015319325648.16 9962165.34831339 68945 +23447120 4611796031755620254 9223329309291309758 17231649548755339966 4611686018427387904 255019232629204.38 7937191.271698021 67570 +14739804 4611692230555590277 9223313509005166531 2458378896777063244 4611686018427387904 38308020331864.36 14590240.469105456 64174 +32077710 4611884228437061959 9223352444952988904 12965822147651192908 4611686018427387904 214467085941034.7 7257521.096258734 60456 +22446879 4611846229717089436 9223124373140579096 13530160492087688838 4611686018427387904 231724477077663.4 4737362.521046629 58389 +170282 4611833225706935900 9223371583739401906 8076893424988479310 4611686018427387904 141657635880324.8 1613795518.1065989 57017 +11482817 4611708000353743073 9223337838355779113 14841435427430843458 4611686018427387904 283531099960470.8 9938452.835998287 52345 +63469 4611695097019173921 9223353530156141191 6296784708578574520 4611686018427387904 120762239817777.88 579655378.4603049 52142 +29103473 4611744585914335132 9223333530281362537 5908285283932344933 4611686018427387904 123712996438970.34 867841.595541967 47758 Simple functions if combinator -1704509 4611700827100483880 9223310246721229500 16398241567152875142 nan 261874 -732797 4611721382223060002 9223355550934604746 16281585268876620522 nan 237784 -598875 4611701407242345792 9223362250391155632 3577699408183553052 nan 167966 -792887 4611699550286611812 9223164887726235740 7088177025760385824 nan 125539 -3807842 4611710821592843606 9223283397553859544 5756765290752687660 nan 97845 -25703952 4611784761593342388 9223241341744449690 4782279928971192568 nan 73368 -716829 4611852156092872082 9223361623076951140 8613712481895484190 nan 44993 -59183 4611730685242027332 9223354909338698162 18369075291092794110 nan 42817 -33010362 4611704682869732882 9223092117352620518 9991152681891671022 nan 38861 -800784 4611752907938305166 9223309994342931384 5251877538869750510 nan 38767 -20810645 4611712185532639162 9223218900001937412 11803718472901310700 nan 36477 -25843850 4611744529689964352 9223346023778617822 127137885677350808 nan 34353 -23447120 4611796031755620254 9223329309291309758 1841522159325376278 nan 33768 -14739804 4611762063154116632 9223007205463222212 16302703534054321116 nan 32156 -32077710 4612033458080771112 9223352444952988904 421072759851674408 nan 30172 -22446879 4611846229717089436 9223124373140579096 6577134317587565298 nan 29249 -170282 4611833225706935900 9223371583739401906 15764226366913732386 nan 28587 -11482817 4611990575414646848 9223302669582414438 9828522700609834800 nan 25993 -63469 4612175339998036670 9222961628400798084 17239621485933250238 nan 25996 -29103473 4611744585914335132 9223035551850347954 12590190375872647672 nan 23939 +1704509 4611700827100483880 9223310246721229500 16398241567152875142 2224726.7626273884 261874 +732797 4611721382223060002 9223355550934604746 16281585268876620522 5898616.931652982 237784 +598875 4611701407242345792 9223362250391155632 3577699408183553052 53771550.26565126 167966 +792887 4611699550286611812 9223164887726235740 7088177025760385824 92835869.96920013 125539 +3807842 4611710821592843606 9223283397553859544 5756765290752687660 39794091.419183925 97845 +25703952 4611784761593342388 9223241341744449690 4782279928971192568 9276773.708181158 73368 +716829 4611852156092872082 
9223361623076951140 8613712481895484190 291083243.75407773 44993 +59183 4611730685242027332 9223354909338698162 18369075291092794110 5925109959.715378 42817 +33010362 4611704682869732882 9223092117352620518 9991152681891671022 12412830.045471078 38861 +800784 4611752907938305166 9223309994342931384 5251877538869750510 53535427.52018088 38767 +20810645 4611712185532639162 9223218900001937412 11803718472901310700 10496765.20741332 36477 +25843850 4611744529689964352 9223346023778617822 127137885677350808 18966925.191309396 34353 +23447120 4611796031755620254 9223329309291309758 1841522159325376278 6271211.193812284 33768 +14739804 4611762063154116632 9223007205463222212 16302703534054321116 6885575.861759452 32156 +32077710 4612033458080771112 9223352444952988904 421072759851674408 12220152.393889504 30172 +22446879 4611846229717089436 9223124373140579096 6577134317587565298 2482202.163802278 29249 +170282 4611833225706935900 9223371583739401906 15764226366913732386 2515144222.953728 28587 +11482817 4611990575414646848 9223302669582414438 9828522700609834800 34845264.2080656 25993 +63469 4612175339998036670 9222961628400798084 17239621485933250238 7825349797.6059 25996 +29103473 4611744585914335132 9223035551850347954 12590190375872647672 26049107.15514301 23939 diff --git a/tests/queries/1_stateful/00165_jit_aggregate_functions.sql b/tests/queries/1_stateful/00165_jit_aggregate_functions.sql index 6994b7bdaa6..90917209d1b 100644 --- a/tests/queries/1_stateful/00165_jit_aggregate_functions.sql +++ b/tests/queries/1_stateful/00165_jit_aggregate_functions.sql @@ -2,15 +2,21 @@ SET compile_aggregate_expressions = 1; SET min_count_to_compile_aggregate_expression = 0; SELECT 'Aggregation using JIT compilation'; + SELECT 'Simple functions'; -SELECT CounterID, min(WatchID), max(WatchID), sum(WatchID), avg(WatchID), avgWeighted(WatchID, if(WatchID % 2 == 0, 0, 1)), count(WatchID) FROM test.hits +SELECT CounterID, min(WatchID), max(WatchID), sum(WatchID), avg(WatchID), avgWeighted(WatchID, CounterID), count(WatchID) FROM test.hits +GROUP BY CounterID ORDER BY count() DESC LIMIT 20; + +SELECT 'Simple functions with non compilable function'; + +SELECT CounterID, min(WatchID), max(WatchID), sum(WatchID), groupBitAnd(WatchID), avg(WatchID), avgWeighted(WatchID, CounterID), count(WatchID) FROM test.hits GROUP BY CounterID ORDER BY count() DESC LIMIT 20; SELECT 'Simple functions if combinator'; WITH (WatchID % 2 == 0) AS predicate -SELECT CounterID, minIf(WatchID,predicate), maxIf(WatchID, predicate), sumIf(WatchID, predicate), avgIf(WatchID, predicate), avgWeightedIf(WatchID, if(WatchID % 2 == 0, 0, 1), predicate), countIf(WatchID, predicate) FROM test.hits +SELECT CounterID, minIf(WatchID,predicate), maxIf(WatchID, predicate), sumIf(WatchID, predicate), avgIf(WatchID, predicate), avgWeightedIf(WatchID, CounterID, predicate), countIf(WatchID, predicate) FROM test.hits GROUP BY CounterID ORDER BY count() DESC LIMIT 20; SET compile_aggregate_expressions = 0; @@ -19,11 +25,15 @@ SELECT 'Aggregation without JIT compilation'; SELECT 'Simple functions'; -SELECT CounterID, min(WatchID), max(WatchID), sum(WatchID), avg(WatchID), avgWeighted(WatchID, if(WatchID % 2 == 0, 0, 1)), count(WatchID) FROM test.hits +SELECT CounterID, min(WatchID), max(WatchID), sum(WatchID), avg(WatchID), avgWeighted(WatchID, CounterID), count(WatchID) FROM test.hits +GROUP BY CounterID ORDER BY count() DESC LIMIT 20; + +SELECT 'Simple functions with non compilable function'; +SELECT CounterID, min(WatchID), max(WatchID), sum(WatchID), 
groupBitAnd(WatchID), avg(WatchID), avgWeighted(WatchID, CounterID), count(WatchID) FROM test.hits GROUP BY CounterID ORDER BY count() DESC LIMIT 20; SELECT 'Simple functions if combinator'; WITH (WatchID % 2 == 0) AS predicate -SELECT CounterID, minIf(WatchID,predicate), maxIf(WatchID, predicate), sumIf(WatchID, predicate), avgWeightedIf(WatchID, if(WatchID % 2 == 0, 0, 1), predicate), countIf(WatchID, predicate) FROM test.hits +SELECT CounterID, minIf(WatchID,predicate), maxIf(WatchID, predicate), sumIf(WatchID, predicate), avgWeightedIf(WatchID, CounterID, predicate), countIf(WatchID, predicate) FROM test.hits GROUP BY CounterID ORDER BY count() DESC LIMIT 20; From f33a38381d48673860307cc7a96adf09dd871941 Mon Sep 17 00:00:00 2001 From: Maksim Kita Date: Sun, 27 Jun 2021 23:48:57 +0300 Subject: [PATCH 654/931] Fixed tests --- src/AggregateFunctions/AggregateFunctionCount.h | 13 +++++++++++-- src/Interpreters/Aggregator.cpp | 8 +++++--- 2 files changed, 16 insertions(+), 5 deletions(-) diff --git a/src/AggregateFunctions/AggregateFunctionCount.h b/src/AggregateFunctions/AggregateFunctionCount.h index b5d5b69a1f4..1055036bf52 100644 --- a/src/AggregateFunctions/AggregateFunctionCount.h +++ b/src/AggregateFunctions/AggregateFunctionCount.h @@ -121,7 +121,11 @@ public: bool isCompilable() const override { - return true; + bool is_compilable = true; + for (const auto & argument_type : argument_types) + is_compilable &= canBeNativeType(*argument_type); + + return is_compilable; } void compileCreate(llvm::IRBuilderBase & builder, llvm::Value * aggregate_data_ptr) const override @@ -226,7 +230,12 @@ public: bool isCompilable() const override { - return true; + bool is_compilable = true; + for (const auto & argument_type : argument_types) + is_compilable &= canBeNativeType(*argument_type); + + + return is_compilable; } void compileCreate(llvm::IRBuilderBase & builder, llvm::Value * aggregate_data_ptr) const override diff --git a/src/Interpreters/Aggregator.cpp b/src/Interpreters/Aggregator.cpp index 73f6ed87a55..1445c2e7c9e 100644 --- a/src/Interpreters/Aggregator.cpp +++ b/src/Interpreters/Aggregator.cpp @@ -719,12 +719,14 @@ void NO_INLINE Aggregator::executeImplBatch( } #if defined(MEMORY_SANITIZER) - for (size_t i = 0; i < aggregate_functions.size(); ++i) + for (size_t aggregate_function_index = 0; aggregate_function_index < aggregate_functions.size(); ++aggregate_function_index) { - if (!is_aggregate_function_compiled[i]) + if (!is_aggregate_function_compiled[aggregate_function_index]) continue; - __msan_unpoison(aggregate_data + offsets_of_aggregate_states[i], params.aggregates[i].function->sizeOfData()); + auto aggregate_data_with_offset = aggregate_data + offsets_of_aggregate_states[aggregate_function_index]; + auto data_size = params.aggregates[aggregate_function_index].function->sizeOfData(); + __msan_unpoison(aggregate_data_with_offset, data_size); } #endif } From 31fe3dad7ac2a70506dd579dbfd4f120f398c8ee Mon Sep 17 00:00:00 2001 From: Maksim Kita Date: Mon, 28 Jun 2021 11:25:26 +0300 Subject: [PATCH 655/931] Fixed tests --- src/AggregateFunctions/AggregateFunctionSum.h | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/src/AggregateFunctions/AggregateFunctionSum.h b/src/AggregateFunctions/AggregateFunctionSum.h index 49f95781994..4be2455d71e 100644 --- a/src/AggregateFunctions/AggregateFunctionSum.h +++ b/src/AggregateFunctions/AggregateFunctionSum.h @@ -400,9 +400,15 @@ public: if constexpr (Type == AggregateFunctionTypeSumKahan) return false; - auto 
return_type = getReturnType(); + bool can_be_compiled = true; - return canBeNativeType(*return_type); + for (const auto & argument_type : this->argument_types) + can_be_compiled &= canBeNativeType(*argument_type); + + auto return_type = getReturnType(); + can_be_compiled &= canBeNativeType(*return_type); + + return can_be_compiled; } void compileCreate(llvm::IRBuilderBase & builder, llvm::Value * aggregate_data_ptr) const override From 39ca0ceb81000efba0bfdb7f01137312435cdddf Mon Sep 17 00:00:00 2001 From: Maksim Kita Date: Tue, 29 Jun 2021 01:55:12 +0300 Subject: [PATCH 656/931] Fixed tests --- .../AggregateFunctionIf.cpp | 9 +++++++-- .../AggregateFunctionNull.h | 19 ++++++++++++++----- 2 files changed, 21 insertions(+), 7 deletions(-) diff --git a/src/AggregateFunctions/AggregateFunctionIf.cpp b/src/AggregateFunctions/AggregateFunctionIf.cpp index ee86a54739c..a4108199b16 100644 --- a/src/AggregateFunctions/AggregateFunctionIf.cpp +++ b/src/AggregateFunctions/AggregateFunctionIf.cpp @@ -135,7 +135,10 @@ public: b.CreateBr(join_block); b.SetInsertPoint(if_not_null); - b.CreateStore(llvm::ConstantInt::get(b.getInt8Ty(), 1), aggregate_data_ptr); + + if constexpr (result_is_nullable) + b.CreateStore(llvm::ConstantInt::get(b.getInt8Ty(), 1), aggregate_data_ptr); + auto * aggregate_data_ptr_with_prefix_size_offset = b.CreateConstGEP1_32(nullptr, aggregate_data_ptr, this->prefix_size); this->nested_function->compileAdd(b, aggregate_data_ptr_with_prefix_size_offset, { removeNullable(nullable_type) }, { wrapped_value }); b.CreateBr(join_block); @@ -283,7 +286,9 @@ public: b.SetInsertPoint(if_true); - b.CreateStore(llvm::ConstantInt::get(b.getInt8Ty(), 1), aggregate_data_ptr); + if constexpr (result_is_nullable) + b.CreateStore(llvm::ConstantInt::get(b.getInt8Ty(), 1), aggregate_data_ptr); + auto * aggregate_data_ptr_with_prefix_size_offset = b.CreateConstGEP1_32(nullptr, aggregate_data_ptr, this->prefix_size); this->nested_function->compileAdd(b, aggregate_data_ptr_with_prefix_size_offset, non_nullable_types, wrapped_values); b.CreateBr(join_block); diff --git a/src/AggregateFunctions/AggregateFunctionNull.h b/src/AggregateFunctions/AggregateFunctionNull.h index 443c2c2968d..b7a67f2cc1b 100644 --- a/src/AggregateFunctions/AggregateFunctionNull.h +++ b/src/AggregateFunctions/AggregateFunctionNull.h @@ -217,8 +217,12 @@ public: if constexpr (result_is_nullable) { - auto align_of_data = llvm::assumeAligned(this->alignOfData()); - b.CreateMemCpy(aggregate_data_dst_ptr, align_of_data, aggregate_data_src_ptr, align_of_data, this->prefix_size); + auto * aggregate_data_is_null_dst_value = b.CreateLoad(aggregate_data_dst_ptr); + auto * aggregate_data_is_null_src_value = b.CreateLoad(aggregate_data_src_ptr); + + auto * is_src_null = nativeBoolCast(b, std::make_shared(), aggregate_data_is_null_src_value); + auto * is_null_result_value = b.CreateSelect(is_src_null, llvm::ConstantInt::get(b.getInt8Ty(), 1), aggregate_data_is_null_dst_value); + b.CreateStore(is_null_result_value, aggregate_data_dst_ptr); } auto * aggregate_data_dst_ptr_with_prefix_size_offset = b.CreateConstGEP1_32(nullptr, aggregate_data_dst_ptr, this->prefix_size); @@ -343,13 +347,15 @@ public: b.CreateBr(join_block); b.SetInsertPoint(if_not_null); - b.CreateStore(llvm::ConstantInt::get(b.getInt8Ty(), 1), aggregate_data_ptr); + + if constexpr (result_is_nullable) + b.CreateStore(llvm::ConstantInt::get(b.getInt8Ty(), 1), aggregate_data_ptr); + auto * aggregate_data_ptr_with_prefix_size_offset = b.CreateConstGEP1_32(nullptr, 
aggregate_data_ptr, this->prefix_size); this->nested_function->compileAdd(b, aggregate_data_ptr_with_prefix_size_offset, { removeNullable(nullable_type) }, { wrapped_value }); b.CreateBr(join_block); b.SetInsertPoint(join_block); - } #endif @@ -469,7 +475,10 @@ public: b.CreateBr(join_block); b.SetInsertPoint(if_not_null); - b.CreateStore(llvm::ConstantInt::get(b.getInt8Ty(), 1), aggregate_data_ptr); + + if constexpr (result_is_nullable) + b.CreateStore(llvm::ConstantInt::get(b.getInt8Ty(), 1), aggregate_data_ptr); + auto * aggregate_data_ptr_with_prefix_size_offset = b.CreateConstGEP1_32(nullptr, aggregate_data_ptr, this->prefix_size); this->nested_function->compileAdd(b, aggregate_data_ptr_with_prefix_size_offset, arguments_types, wrapped_values); b.CreateBr(join_block); From 4bd398c0e5211e334255ac73325e19e030bfb68b Mon Sep 17 00:00:00 2001 From: Maksim Kita Date: Tue, 29 Jun 2021 13:04:14 +0300 Subject: [PATCH 657/931] Fixed tests --- src/Interpreters/Aggregator.cpp | 16 ++++------------ 1 file changed, 4 insertions(+), 12 deletions(-) diff --git a/src/Interpreters/Aggregator.cpp b/src/Interpreters/Aggregator.cpp index 1445c2e7c9e..9afcc5c1d82 100644 --- a/src/Interpreters/Aggregator.cpp +++ b/src/Interpreters/Aggregator.cpp @@ -313,7 +313,7 @@ void Aggregator::compileAggregateFunctions() static std::unordered_map aggregate_functions_description_to_count; static std::mutex mtx; - if (!params.compile_aggregate_expressions || params.overflow_row) + if (!params.compile_aggregate_expressions) return; std::vector functions_to_compile; @@ -618,16 +618,7 @@ void NO_INLINE Aggregator::executeImpl( } else { -#if USE_EMBEDDED_COMPILER - if (compiled_aggregate_functions_holder) - { - executeImplBatch(method, state, aggregates_pool, rows, aggregate_instructions, overflow_row); - } - else -#endif - { - executeImplBatch(method, state, aggregates_pool, rows, aggregate_instructions, overflow_row); - } + executeImplBatch(method, state, aggregates_pool, rows, aggregate_instructions, overflow_row); } } @@ -1239,7 +1230,8 @@ void Aggregator::convertToBlockImpl( #if USE_EMBEDDED_COMPILER if (compiled_aggregate_functions_holder) { - convertToBlockImplFinal(method, data, std::move(raw_key_columns), final_aggregate_columns, arena); + static constexpr bool use_compiled_functions = !Method::low_cardinality_optimization; + convertToBlockImplFinal(method, data, std::move(raw_key_columns), final_aggregate_columns, arena); } else #endif From 97e1ddbe947ac45993c2a416a8f79a929c686a11 Mon Sep 17 00:00:00 2001 From: Maksim Kita Date: Wed, 30 Jun 2021 14:44:45 +0300 Subject: [PATCH 658/931] Fix before merge --- src/AggregateFunctions/AggregateFunctionAvg.h | 19 ---- .../AggregateFunctionAvgWeighted.h | 6 +- .../AggregateFunctionCount.h | 2 +- .../AggregateFunctionIf.cpp | 3 +- src/AggregateFunctions/IAggregateFunction.h | 41 ++++++-- src/Interpreters/Aggregator.cpp | 18 ++-- src/Interpreters/JIT/compileFunction.cpp | 2 +- src/Interpreters/JIT/compileFunction.h | 11 ++- tests/performance/jit_aggregate_functions.xml | 99 +++++++++++++++++++ 9 files changed, 154 insertions(+), 47 deletions(-) diff --git a/src/AggregateFunctions/AggregateFunctionAvg.h b/src/AggregateFunctions/AggregateFunctionAvg.h index e8f29a88af9..3835fd58c77 100644 --- a/src/AggregateFunctions/AggregateFunctionAvg.h +++ b/src/AggregateFunctions/AggregateFunctionAvg.h @@ -168,25 +168,6 @@ public: b.CreateMemSet(aggregate_data_ptr, llvm::ConstantInt::get(b.getInt8Ty(), 0), sizeof(Fraction), llvm::assumeAligned(this->alignOfData())); } - void 
compileAdd(llvm::IRBuilderBase & builder, llvm::Value * aggregate_data_ptr, const DataTypes & arguments_types, const std::vector & argument_values) const override - { - llvm::IRBuilder<> & b = static_cast &>(builder); - - auto * numerator_type = toNativeType(b); - - auto * numerator_ptr = b.CreatePointerCast(aggregate_data_ptr, numerator_type->getPointerTo()); - auto * numerator_value = b.CreateLoad(numerator_type, numerator_ptr); - auto * value_cast_to_numerator = nativeCast(b, arguments_types[0], argument_values[0], numerator_type); - auto * numerator_result_value = numerator_type->isIntegerTy() ? b.CreateAdd(numerator_value, value_cast_to_numerator) : b.CreateFAdd(numerator_value, value_cast_to_numerator); - b.CreateStore(numerator_result_value, numerator_ptr); - - auto * denominator_type = toNativeType(b); - static constexpr size_t denominator_offset = offsetof(Fraction, denominator); - auto * denominator_ptr = b.CreatePointerCast(b.CreateConstGEP1_32(nullptr, aggregate_data_ptr, denominator_offset), denominator_type->getPointerTo()); - auto * denominator_value_updated = b.CreateAdd(b.CreateLoad(denominator_type, denominator_ptr), llvm::ConstantInt::get(denominator_type, 1)); - b.CreateStore(denominator_value_updated, denominator_ptr); - } - void compileMerge(llvm::IRBuilderBase & builder, llvm::Value * aggregate_data_dst_ptr, llvm::Value * aggregate_data_src_ptr) const override { llvm::IRBuilder<> & b = static_cast &>(builder); diff --git a/src/AggregateFunctions/AggregateFunctionAvgWeighted.h b/src/AggregateFunctions/AggregateFunctionAvgWeighted.h index 762aa23c21f..80e18f1a141 100644 --- a/src/AggregateFunctions/AggregateFunctionAvgWeighted.h +++ b/src/AggregateFunctions/AggregateFunctionAvgWeighted.h @@ -30,6 +30,7 @@ public: using Numerator = typename Base::Numerator; using Denominator = typename Base::Denominator; + using Fraction = typename Base::Fraction; void NO_SANITIZE_UNDEFINED add(AggregateDataPtr __restrict place, const IColumn ** columns, size_t row_num, Arena *) const override { @@ -72,10 +73,11 @@ public: auto * denominator_type = toNativeType(b); - auto * denominator_offset_ptr = b.CreateConstGEP1_32(nullptr, aggregate_data_ptr, sizeof(Numerator)); + static constexpr size_t denominator_offset = offsetof(Fraction, denominator); + auto * denominator_offset_ptr = b.CreateConstGEP1_32(nullptr, aggregate_data_ptr, denominator_offset); auto * denominator_ptr = b.CreatePointerCast(denominator_offset_ptr, denominator_type->getPointerTo()); - auto * weight_cast_to_denominator = nativeCast(b, arguments_types[1], argument_values[1], numerator_type); + auto * weight_cast_to_denominator = nativeCast(b, arguments_types[1], argument_values[1], denominator_type); auto * denominator_value = b.CreateLoad(denominator_type, denominator_ptr); auto * denominator_value_updated = denominator_type->isIntegerTy() ? 
b.CreateAdd(denominator_value, weight_cast_to_denominator) : b.CreateFAdd(denominator_value, weight_cast_to_denominator); diff --git a/src/AggregateFunctions/AggregateFunctionCount.h b/src/AggregateFunctions/AggregateFunctionCount.h index 1055036bf52..0b4052c987b 100644 --- a/src/AggregateFunctions/AggregateFunctionCount.h +++ b/src/AggregateFunctions/AggregateFunctionCount.h @@ -251,10 +251,10 @@ public: auto * return_type = toNativeType(b, getReturnType()); auto * is_null_value = b.CreateExtractValue(values[0], {1}); + auto * increment_value = b.CreateSelect(is_null_value, llvm::ConstantInt::get(return_type, 0), llvm::ConstantInt::get(return_type, 1)); auto * count_value_ptr = b.CreatePointerCast(aggregate_data_ptr, return_type->getPointerTo()); auto * count_value = b.CreateLoad(return_type, count_value_ptr); - auto * increment_value = b.CreateSelect(is_null_value, llvm::ConstantInt::get(return_type, 0), llvm::ConstantInt::get(return_type, 1)); auto * updated_count_value = b.CreateAdd(count_value, increment_value); b.CreateStore(updated_count_value, count_value_ptr); diff --git a/src/AggregateFunctions/AggregateFunctionIf.cpp b/src/AggregateFunctions/AggregateFunctionIf.cpp index a4108199b16..e7c48c8988c 100644 --- a/src/AggregateFunctions/AggregateFunctionIf.cpp +++ b/src/AggregateFunctions/AggregateFunctionIf.cpp @@ -183,7 +183,6 @@ public: void add(AggregateDataPtr __restrict place, const IColumn ** columns, size_t row_num, Arena * arena) const override { - std::cerr << "AggregateFunctionIfNullVariadic::add" << std::endl; /// This container stores the columns we really pass to the nested function. const IColumn * nested_columns[number_of_arguments]; @@ -215,6 +214,8 @@ public: void compileAdd(llvm::IRBuilderBase & builder, llvm::Value * aggregate_data_ptr, const DataTypes & arguments_types, const std::vector & argument_values) const override { + /// TODO: Check + llvm::IRBuilder<> & b = static_cast &>(builder); size_t arguments_size = arguments_types.size(); diff --git a/src/AggregateFunctions/IAggregateFunction.h b/src/AggregateFunctions/IAggregateFunction.h index 446850e83c3..4085675598b 100644 --- a/src/AggregateFunctions/IAggregateFunction.h +++ b/src/AggregateFunctions/IAggregateFunction.h @@ -220,15 +220,15 @@ public: /** Insert result of aggregate function into places with batch size. * Also all places must be destroyed if there was exception during insert. - * If destroy_place_after_insert == true. Then client must not destroy aggregate place if insert does not throw exception. + * If destroy_place is true. Then client must destroy aggregate places if insert throws exception. */ - virtual void insertResultIntoAndDestroyBatch( + virtual void insertResultIntoBatch( size_t batch_size, AggregateDataPtr * places, size_t place_offset, IColumn & to, Arena * arena, - bool destroy_place_after_insert) const = 0; + bool destroy_place) const = 0; /** Destroy batch of aggregate places. */ @@ -270,6 +270,7 @@ public: // of true window functions, so this hack-ish interface suffices. virtual bool isOnlyWindowFunction() const { return false; } + /// Description of AggregateFunction in form of name(argument_types)(parameters). 
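+ /// For example (illustrative, not taken from this patch): a call
+ /// `quantile(0.5)(x)` over a Float64 column is described as
+ /// "quantile(Float64)(0.5)", i.e. the argument types come first and the
+ /// parameters follow in a second pair of parentheses.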
virtual String getDescription() const { String description; @@ -291,34 +292,55 @@ public: description += ')'; + description += '('; + + for (const auto & parameter : parameters) + { + description += parameter.dump(); + description += ", "; + } + + if (!parameters.empty()) + { + description.pop_back(); + description.pop_back(); + } + + description += ')'; + return description; } - #if USE_EMBEDDED_COMPILER +#if USE_EMBEDDED_COMPILER + /// Is function JIT compilable virtual bool isCompilable() const { return false; } + /// compileCreate should generate code for initialization of aggregate function state in aggregate_data_ptr virtual void compileCreate(llvm::IRBuilderBase & /*builder*/, llvm::Value * /*aggregate_data_ptr*/) const { throw Exception(getName() + " is not JIT-compilable", ErrorCodes::NOT_IMPLEMENTED); } + /// compileAdd should generate code for updating aggregate function state stored in aggregate_data_ptr virtual void compileAdd(llvm::IRBuilderBase & /*builder*/, llvm::Value * /*aggregate_data_ptr*/, const DataTypes & /*arguments_types*/, const std::vector & /*arguments_values*/) const { throw Exception(getName() + " is not JIT-compilable", ErrorCodes::NOT_IMPLEMENTED); } + /// compileMerge should generate code for merging aggregate function states stored in aggregate_data_dst_ptr and aggregate_data_src_ptr virtual void compileMerge(llvm::IRBuilderBase & /*builder*/, llvm::Value * /*aggregate_data_dst_ptr*/, llvm::Value * /*aggregate_data_src_ptr*/) const { throw Exception(getName() + " is not JIT-compilable", ErrorCodes::NOT_IMPLEMENTED); } + /// compileGetResult should generate code for getting result value from aggregate function state stored in aggregate_data_ptr virtual llvm::Value * compileGetResult(llvm::IRBuilderBase & /*builder*/, llvm::Value * /*aggregate_data_ptr*/) const { throw Exception(getName() + " is not JIT-compilable", ErrorCodes::NOT_IMPLEMENTED); } - #endif +#endif protected: DataTypes argument_types; @@ -495,7 +517,7 @@ public: } } - void insertResultIntoAndDestroyBatch(size_t batch_size, AggregateDataPtr * places, size_t place_offset, IColumn & to, Arena * arena, bool destroy_place_after_insert) const override + void insertResultIntoBatch(size_t batch_size, AggregateDataPtr * places, size_t place_offset, IColumn & to, Arena * arena, bool destroy_place) const override { size_t batch_index = 0; @@ -505,14 +527,15 @@ public: { static_cast(this)->insertResultInto(places[batch_index] + place_offset, to, arena); - if (destroy_place_after_insert) + if (destroy_place) static_cast(this)->destroy(places[batch_index] + place_offset); } } catch (...) 
{ - for (; batch_index < batch_size; ++batch_index) - static_cast(this)->destroy(places[batch_index] + place_offset); + for (size_t destroy_index = 0; destroy_index < batch_index; ++destroy_index) + if (destroy_place) + static_cast(this)->destroy(places[destroy_index] + place_offset); throw; } diff --git a/src/Interpreters/Aggregator.cpp b/src/Interpreters/Aggregator.cpp index 9afcc5c1d82..800145cf330 100644 --- a/src/Interpreters/Aggregator.cpp +++ b/src/Interpreters/Aggregator.cpp @@ -1,7 +1,6 @@ #include #include -#include #include #include #include @@ -233,13 +232,6 @@ public: ~CompiledAggregateFunctionsHolder() override { - std::string symbol_names; - for (const auto & [name, _] : compiled_aggregate_functions.compiled_module.function_name_to_symbol) - { - symbol_names += name; - symbol_names += ' '; - } - getJITInstance().deleteCompiledModule(compiled_aggregate_functions.compiled_module); } @@ -1400,10 +1392,9 @@ void NO_INLINE Aggregator::convertToBlockImplFinal( ++aggregate_functions_destroy_index; bool is_state = aggregate_functions[destroy_index]->isState(); - bool destroy_place_after_insert = !is_state; + bool destroy_place = !is_state; - aggregate_functions[destroy_index]->insertResultIntoAndDestroyBatch( - places.size(), places.data(), offset, *final_aggregate_column, arena, destroy_place_after_insert); + aggregate_functions[destroy_index]->insertResultIntoBatch(places.size(), places.data(), offset, *final_aggregate_column, arena, destroy_place); } } catch (...) @@ -1423,7 +1414,10 @@ } size_t offset = offsets_of_aggregate_states[aggregate_functions_destroy_index]; - aggregate_functions[aggregate_functions_destroy_index]->destroyBatch(places.size(), places.data(), offset); + + bool is_state = aggregate_functions[aggregate_functions_destroy_index]->isState(); + if (!is_state) + aggregate_functions[aggregate_functions_destroy_index]->destroyBatch(places.size(), places.data(), offset); } if (exception) diff --git a/src/Interpreters/JIT/compileFunction.cpp b/src/Interpreters/JIT/compileFunction.cpp index f2e017a0b43..766c2290e42 100644 --- a/src/Interpreters/JIT/compileFunction.cpp +++ b/src/Interpreters/JIT/compileFunction.cpp @@ -581,7 +581,7 @@ CompiledAggregateFunctions compileAggregateFunctons(CHJIT & jit, const std::vect auto create_aggregate_states_function = reinterpret_cast(compiled_module.function_name_to_symbol[create_aggregate_states_functions_name]); auto add_into_aggregate_states_function = reinterpret_cast(compiled_module.function_name_to_symbol[add_aggregate_states_functions_name]); auto merge_aggregate_states_function = reinterpret_cast(compiled_module.function_name_to_symbol[merge_aggregate_states_functions_name]); - auto insert_aggregate_states_function = reinterpret_cast(compiled_module.function_name_to_symbol[insert_aggregate_states_functions_name]); + auto insert_aggregate_states_function = reinterpret_cast(compiled_module.function_name_to_symbol[insert_aggregate_states_functions_name]); assert(create_aggregate_states_function); assert(add_into_aggregate_states_function); diff --git a/src/Interpreters/JIT/compileFunction.h b/src/Interpreters/JIT/compileFunction.h index 2a2f2a0d20a..92c8cd93b35 100644 --- a/src/Interpreters/JIT/compileFunction.h +++ b/src/Interpreters/JIT/compileFunction.h @@ -55,19 +55,26 @@ struct AggregateFunctionWithOffset using JITCreateAggregateStatesFunction = void (*)(AggregateDataPtr); using JITAddIntoAggregateStatesFunction = void (*)(ColumnDataRowsSize, ColumnData *, AggregateDataPtr
*); using JITMergeAggregateStatesFunction = void (*)(AggregateDataPtr, AggregateDataPtr); -using JITInsertAggregatesIntoColumnsFunction = void (*)(ColumnDataRowsSize, ColumnData *, AggregateDataPtr *); +using JITInsertAggregateStatesIntoColumnsFunction = void (*)(ColumnDataRowsSize, ColumnData *, AggregateDataPtr *); struct CompiledAggregateFunctions { JITCreateAggregateStatesFunction create_aggregate_states_function; JITAddIntoAggregateStatesFunction add_into_aggregate_states_function; JITMergeAggregateStatesFunction merge_aggregate_states_function; - JITInsertAggregatesIntoColumnsFunction insert_aggregates_into_columns_function; + JITInsertAggregateStatesIntoColumnsFunction insert_aggregates_into_columns_function; size_t functions_count; CHJIT::CompiledModule compiled_module; }; +/** Compile aggregate function to native jit code using CHJIT instance. + * + * JITCreateAggregateStatesFunction will initialize aggregate data ptr with initial aggregate states values. + * JITAddIntoAggregateStatesFunction will update aggregate states for aggregate functions with specified ColumnData. + * JITMergeAggregateStatesFunction will merge aggregate states for aggregate functions. + * JITInsertAggregateStatesIntoColumnsFunction will insert aggregate states for aggregate functions into result columns. + */ CompiledAggregateFunctions compileAggregateFunctons(CHJIT & jit, const std::vector & functions, std::string functions_dump_name); } diff --git a/tests/performance/jit_aggregate_functions.xml b/tests/performance/jit_aggregate_functions.xml index cf64ee62fd1..31b621f7258 100644 --- a/tests/performance/jit_aggregate_functions.xml +++ b/tests/performance/jit_aggregate_functions.xml @@ -110,6 +110,17 @@ FORMAT Null + + SELECT + {function}(value_1), + {function}(value_2), + groupBitAnd(value_3), + {function}(value_3) + FROM {table} + GROUP BY key + FORMAT Null + + SELECT {function}If(value_1, predicate), @@ -120,6 +131,17 @@ FORMAT Null + + SELECT + {function}If(value_1, predicate), + {function}If(value_2, predicate), + groupBitAndIf(value_3, predicate), + {function}If(value_3, predicate) + FROM {table} + GROUP BY key + FORMAT Null + + SELECT {function}(value_1), @@ -132,6 +154,19 @@ FORMAT Null + + SELECT + {function}(value_1), + {function}(value_2), + groupBitAnd(value_3), + {function}(value_3), + {function}(value_4), + {function}(value_5) + FROM {table} + GROUP BY key + FORMAT Null + + SELECT {function}If(value_1, predicate), @@ -144,6 +179,20 @@ FORMAT Null + + SELECT + {function}If(value_1, predicate), + {function}If(value_2, predicate), + groupBitAndIf(value_3, predicate), + {function}If(value_3, predicate), + {function}If(value_4, predicate), + {function}If(value_5, predicate) + FROM {table} + GROUP BY key + FORMAT Null + + + SELECT {function}(WatchID), @@ -158,6 +207,30 @@ SELECT {function}(WatchID), {function}(CounterID), + groupBitAnd(ClientIP), + {function}(ClientIP) + FROM hits_100m_single + GROUP BY intHash32(UserID) % {group_scale} + FORMAT Null + + + + SELECT + {function}(WatchID), + {function}(CounterID), + {function}(ClientIP), + {function}(GoodEvent), + {function}(CounterClass) + FROM hits_100m_single + GROUP BY intHash32(UserID) % {group_scale} + FORMAT Null + + + + SELECT + {function}(WatchID), + {function}(CounterID), + groupBitAnd(ClientIP), {function}(ClientIP), {function}(GoodEvent), {function}(CounterClass) @@ -182,6 +255,32 @@ SELECT {function}If(WatchID, predicate), {function}If(CounterID, predicate), + groupBitAndIf(ClientIP, predicate), + {function}If(ClientIP, predicate) + FROM 
hits_100m_single + GROUP BY intHash32(UserID) % {group_scale} + FORMAT Null + + + + WITH (WatchID % 2 == 0) AS predicate + SELECT + {function}If(WatchID, predicate), + {function}If(CounterID, predicate), + {function}If(ClientIP, predicate), + {function}If(GoodEvent, predicate), + {function}If(CounterClass, predicate) + FROM hits_100m_single + GROUP BY intHash32(UserID) % {group_scale} + FORMAT Null + + + + WITH (WatchID % 2 == 0) AS predicate + SELECT + {function}If(WatchID, predicate), + {function}If(CounterID, predicate), + groupBitAndIf(ClientIP, predicate), {function}If(ClientIP, predicate), {function}If(GoodEvent, predicate), {function}If(CounterClass, predicate) From 012f67e6f865f81ec15c08fde846885e6eb63885 Mon Sep 17 00:00:00 2001 From: kssenii Date: Thu, 1 Jul 2021 23:29:00 +0300 Subject: [PATCH 659/931] Update libpq --- contrib/libpq | 2 +- contrib/libpq-cmake/CMakeLists.txt | 8 ++++++-- 2 files changed, 7 insertions(+), 3 deletions(-) diff --git a/contrib/libpq b/contrib/libpq index c7624588ddd..69e8a80e98f 160000 --- a/contrib/libpq +++ b/contrib/libpq @@ -1 +1 @@ -Subproject commit c7624588ddd84f153dd5990e81b886e4568bddde +Subproject commit 69e8a80e98f27e3a5deec617334e31db2b9ed7d7 diff --git a/contrib/libpq-cmake/CMakeLists.txt b/contrib/libpq-cmake/CMakeLists.txt index 028fabe52b8..4f6a1554d10 100644 --- a/contrib/libpq-cmake/CMakeLists.txt +++ b/contrib/libpq-cmake/CMakeLists.txt @@ -8,7 +8,7 @@ set(SRCS "${LIBPQ_SOURCE_DIR}/fe-lobj.c" "${LIBPQ_SOURCE_DIR}/fe-misc.c" "${LIBPQ_SOURCE_DIR}/fe-print.c" - "${LIBPQ_SOURCE_DIR}/fe-protocol2.c" + "${LIBPQ_SOURCE_DIR}/fe-trace.c" "${LIBPQ_SOURCE_DIR}/fe-protocol3.c" "${LIBPQ_SOURCE_DIR}/fe-secure.c" "${LIBPQ_SOURCE_DIR}/fe-secure-common.c" @@ -18,8 +18,12 @@ set(SRCS "${LIBPQ_SOURCE_DIR}/pqexpbuffer.c" "${LIBPQ_SOURCE_DIR}/common/scram-common.c" - "${LIBPQ_SOURCE_DIR}/common/sha2_openssl.c" + "${LIBPQ_SOURCE_DIR}/common/sha2.c" + "${LIBPQ_SOURCE_DIR}/common/sha1.c" "${LIBPQ_SOURCE_DIR}/common/md5.c" + "${LIBPQ_SOURCE_DIR}/common/md5_common.c" + "${LIBPQ_SOURCE_DIR}/common/hmac_openssl.c" + "${LIBPQ_SOURCE_DIR}/common/cryptohash.c" "${LIBPQ_SOURCE_DIR}/common/saslprep.c" "${LIBPQ_SOURCE_DIR}/common/unicode_norm.c" "${LIBPQ_SOURCE_DIR}/common/ip.c" From 531e48afa34409fdb942b22aaa939816f17ef346 Mon Sep 17 00:00:00 2001 From: alexey-milovidov Date: Fri, 2 Jul 2021 00:39:41 +0300 Subject: [PATCH 660/931] Update 01923_network_receive_time_metric_insert.sh --- .../0_stateless/01923_network_receive_time_metric_insert.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/queries/0_stateless/01923_network_receive_time_metric_insert.sh b/tests/queries/0_stateless/01923_network_receive_time_metric_insert.sh index 2a9807af10d..bcb3775f86a 100755 --- a/tests/queries/0_stateless/01923_network_receive_time_metric_insert.sh +++ b/tests/queries/0_stateless/01923_network_receive_time_metric_insert.sh @@ -11,7 +11,7 @@ seq 1 1000 | pv --quiet --rate-limit 1000 | ${CLICKHOUSE_CLIENT} --query "INSERT # We check that the value of NetworkReceiveElapsedMicroseconds correctly includes the time spent waiting data from the client. ${CLICKHOUSE_CLIENT} --multiquery --query "SYSTEM FLUSH LOGS; - WITH ProfileEvents.Values[indexOf(ProfileEvents.Names, 'NetworkReceiveElapsedMicroseconds')] AS time + WITH ProfileEvents['NetworkReceiveElapsedMicroseconds'] AS time SELECT time >= 1000000 ? 
1 : time FROM system.query_log WHERE current_database = currentDatabase() AND query_kind = 'Insert' AND event_date >= yesterday() AND type = 2 ORDER BY event_time DESC LIMIT 1;" From 2866d45681e6dd3b9429d5fd4347b1fb2216da98 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Fri, 2 Jul 2021 02:01:13 +0300 Subject: [PATCH 661/931] Add support for queries with `null` quoted identifier and ON CLUSTER --- src/Common/StringUtils/StringUtils.h | 6 +++++- .../0_stateless/01932_null_valid_identifier.reference | 3 +++ tests/queries/0_stateless/01932_null_valid_identifier.sql | 3 +++ 3 files changed, 11 insertions(+), 1 deletion(-) create mode 100644 tests/queries/0_stateless/01932_null_valid_identifier.reference create mode 100644 tests/queries/0_stateless/01932_null_valid_identifier.sql diff --git a/src/Common/StringUtils/StringUtils.h b/src/Common/StringUtils/StringUtils.h index 20c0a5ca380..f6ad61f8fd9 100644 --- a/src/Common/StringUtils/StringUtils.h +++ b/src/Common/StringUtils/StringUtils.h @@ -149,7 +149,11 @@ inline bool isPunctuationASCII(char c) inline bool isValidIdentifier(const std::string_view & str) { - return !str.empty() && isValidIdentifierBegin(str[0]) && std::all_of(str.begin() + 1, str.end(), isWordCharASCII); + return !str.empty() + && isValidIdentifierBegin(str[0]) + && std::all_of(str.begin() + 1, str.end(), isWordCharASCII) + /// NULL is not a valid identifier in SQL, any case. + && !(str.size() == strlen("null") && 0 == strncasecmp(str.data(), "null", strlen("null"))); } /// Works assuming isAlphaASCII. diff --git a/tests/queries/0_stateless/01932_null_valid_identifier.reference b/tests/queries/0_stateless/01932_null_valid_identifier.reference new file mode 100644 index 00000000000..8600160f48c --- /dev/null +++ b/tests/queries/0_stateless/01932_null_valid_identifier.reference @@ -0,0 +1,3 @@ +1 +1 +1 \N diff --git a/tests/queries/0_stateless/01932_null_valid_identifier.sql b/tests/queries/0_stateless/01932_null_valid_identifier.sql new file mode 100644 index 00000000000..31f1a771675 --- /dev/null +++ b/tests/queries/0_stateless/01932_null_valid_identifier.sql @@ -0,0 +1,3 @@ +SELECT `null` FROM remote('127.0.0.2', view(SELECT 1 AS `null`)); +SELECT `NULL` FROM remote('127.0.0.2', view(SELECT 1 AS `NULL`)); +SELECT `nULl`, null FROM remote('127.0.0.2', view(SELECT 1 AS `nULl`)); From e6f0997924cfd3f80cb842550d160eec793f4d57 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Fri, 2 Jul 2021 02:01:52 +0300 Subject: [PATCH 662/931] Remove Arcadia --- tests/queries/0_stateless/arcadia_skip_list.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/queries/0_stateless/arcadia_skip_list.txt b/tests/queries/0_stateless/arcadia_skip_list.txt index afd11cb5a7d..8453094cc65 100644 --- a/tests/queries/0_stateless/arcadia_skip_list.txt +++ b/tests/queries/0_stateless/arcadia_skip_list.txt @@ -251,3 +251,4 @@ 01924_argmax_bitmap_state 01914_exchange_dictionaries 01923_different_expression_name_alias +01932_null_valid_identifier From 63c71a7b4b330dc18ca57ded95364be6e5c758de Mon Sep 17 00:00:00 2001 From: Ilya Golshtein Date: Fri, 2 Jul 2021 02:12:31 +0300 Subject: [PATCH 663/931] kerberized HDFS test fix if run in parallel --- tests/integration/helpers/cluster.py | 9 ++++++--- .../hdfs_configs/bootstrap.sh | 1 + 2 files changed, 7 insertions(+), 3 deletions(-) diff --git a/tests/integration/helpers/cluster.py b/tests/integration/helpers/cluster.py index bd2f7d2bd8a..0799c8ed0e8 100644 --- a/tests/integration/helpers/cluster.py +++ b/tests/integration/helpers/cluster.py @@ 
-1188,15 +1188,18 @@ class ClickHouseCluster: time.sleep(1) - def wait_hdfs_to_start(self, timeout=300): + def wait_hdfs_to_start(self, timeout=300, check_marker=False): start = time.time() while time.time() - start < timeout: try: self.hdfs_api.write_data("/somefilewithrandomname222", "1") logging.debug("Connected to HDFS and SafeMode disabled! ") + if check_marker: + self.hdfs_api.read_data("/preparations_done_marker") + return except Exception as ex: - logging.exception("Can't connect to HDFS " + str(ex)) + logging.exception("Can't connect to HDFS or preparations are not done yet " + str(ex)) time.sleep(1) raise Exception("Can't wait HDFS to start") @@ -1443,7 +1446,7 @@ class ClickHouseCluster: os.chmod(self.hdfs_kerberized_logs_dir, stat.S_IRWXO) run_and_check(self.base_kerberized_hdfs_cmd + common_opts) self.make_hdfs_api(kerberized=True) - self.wait_hdfs_to_start() + self.wait_hdfs_to_start(check_marker=True) if self.with_mongo and self.base_mongo_cmd: logging.debug('Setup Mongo') diff --git a/tests/integration/test_storage_kerberized_hdfs/hdfs_configs/bootstrap.sh b/tests/integration/test_storage_kerberized_hdfs/hdfs_configs/bootstrap.sh index 971491d4053..769056d70b3 100755 --- a/tests/integration/test_storage_kerberized_hdfs/hdfs_configs/bootstrap.sh +++ b/tests/integration/test_storage_kerberized_hdfs/hdfs_configs/bootstrap.sh @@ -242,6 +242,7 @@ sleep 3 /usr/local/hadoop/bin/hdfs dfs -mkdir /user/specuser /usr/local/hadoop/bin/hdfs dfs -chown specuser /user/specuser +echo "chown_completed" | /usr/local/hadoop/bin/hdfs dfs -appendToFile - /preparations_done_marker kdestroy From 7a993404b4d222884bedf4a933f999213be48b5c Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Fri, 2 Jul 2021 02:30:18 +0300 Subject: [PATCH 664/931] Whitespace --- programs/benchmark/Benchmark.cpp | 1 + 1 file changed, 1 insertion(+) diff --git a/programs/benchmark/Benchmark.cpp b/programs/benchmark/Benchmark.cpp index c8f1a4eef47..859222c236e 100644 --- a/programs/benchmark/Benchmark.cpp +++ b/programs/benchmark/Benchmark.cpp @@ -37,6 +37,7 @@ #include #include + namespace fs = std::filesystem; /** A tool for evaluating ClickHouse performance. From b33d91412db32a9352c481f2e7d6b10f79bdb6c9 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Fri, 2 Jul 2021 03:07:23 +0300 Subject: [PATCH 665/931] Correctly throw exception on invalid dates --- src/IO/ReadHelpers.cpp | 2 +- src/IO/ReadHelpers.h | 40 +++++++++++++------ .../0_stateless/01933_invalid_date.reference | 1 + .../0_stateless/01933_invalid_date.sql | 10 +++++ 4 files changed, 40 insertions(+), 13 deletions(-) create mode 100644 tests/queries/0_stateless/01933_invalid_date.reference create mode 100644 tests/queries/0_stateless/01933_invalid_date.sql diff --git a/src/IO/ReadHelpers.cpp b/src/IO/ReadHelpers.cpp index 8e9a14a20fb..2a5594a6866 100644 --- a/src/IO/ReadHelpers.cpp +++ b/src/IO/ReadHelpers.cpp @@ -765,7 +765,7 @@ ReturnType readDateTextFallback(LocalDate & date, ReadBuffer & buf) auto ignore_delimiter = [&] { - if (!buf.eof()) + if (!buf.eof() && !isNumericASCII(*buf.position())) { ++buf.position(); return true; diff --git a/src/IO/ReadHelpers.h b/src/IO/ReadHelpers.h index d4e2db0b553..4e101aaaf63 100644 --- a/src/IO/ReadHelpers.h +++ b/src/IO/ReadHelpers.h @@ -572,27 +572,43 @@ inline ReturnType readDateTextImpl(LocalDate & date, ReadBuffer & buf) /// Optimistic path, when whole value is in buffer. 
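/// Worked examples, taken from the new 01933_invalid_date test below:
/// '2019^7^8' parses as 2019-07-08, because any non-digit byte is accepted
/// as a delimiter, while '2019-0708' is rejected: after month = 07 is read,
/// the byte preceding the day field is still a digit, so the
/// isNumericASCII(pos[-1]) guard fires and the parse returns false
/// (surfacing as serverError 38, CANNOT_PARSE_DATE, in the test).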
if (!buf.eof() && buf.position() + 10 <= buf.buffer().end()) { - UInt16 year = (buf.position()[0] - '0') * 1000 + (buf.position()[1] - '0') * 100 + (buf.position()[2] - '0') * 10 + (buf.position()[3] - '0'); - buf.position() += 5; + char * pos = buf.position(); - UInt8 month = buf.position()[0] - '0'; - if (isNumericASCII(buf.position()[1])) + /// YYYY-MM-DD + /// YYYY-MM-D + /// YYYY-M-DD + /// YYYY-M-D + + /// The delimiters can be arbitrary characters, like YYYY/MM!DD, but obviously not digits. + + UInt16 year = (pos[0] - '0') * 1000 + (pos[1] - '0') * 100 + (pos[2] - '0') * 10 + (pos[3] - '0'); + pos += 5; + + if (isNumericASCII(pos[-1])) + return ReturnType(false); + + UInt8 month = pos[0] - '0'; + if (isNumericASCII(pos[1])) { - month = month * 10 + buf.position()[1] - '0'; - buf.position() += 3; + month = month * 10 + pos[1] - '0'; + pos += 3; } else - buf.position() += 2; + pos += 2; - UInt8 day = buf.position()[0] - '0'; - if (isNumericASCII(buf.position()[1])) + if (isNumericASCII(pos[-1])) + return ReturnType(false); + + UInt8 day = pos[0] - '0'; + if (isNumericASCII(pos[1])) { - day = day * 10 + buf.position()[1] - '0'; - buf.position() += 2; + day = day * 10 + pos[1] - '0'; + pos += 2; } else - buf.position() += 1; + pos += 1; + buf.position() = pos; date = LocalDate(year, month, day); return ReturnType(true); } diff --git a/tests/queries/0_stateless/01933_invalid_date.reference b/tests/queries/0_stateless/01933_invalid_date.reference new file mode 100644 index 00000000000..829e7e8c420 --- /dev/null +++ b/tests/queries/0_stateless/01933_invalid_date.reference @@ -0,0 +1 @@ +2019-07-08 diff --git a/tests/queries/0_stateless/01933_invalid_date.sql b/tests/queries/0_stateless/01933_invalid_date.sql new file mode 100644 index 00000000000..aac09c99e60 --- /dev/null +++ b/tests/queries/0_stateless/01933_invalid_date.sql @@ -0,0 +1,10 @@ +SELECT toDate('07-08-2019'); -- { serverError 6 } +SELECT toDate('2019-0708'); -- { serverError 38 } +SELECT toDate('201907-08'); -- { serverError 38 } +SELECT toDate('2019^7^8'); + +CREATE TEMPORARY TABLE test (d Date); +INSERT INTO test VALUES ('2018-01-01'); + +SELECT * FROM test WHERE d >= '07-08-2019'; -- { serverError 53 } +SELECT * FROM test WHERE d >= '2019-07-08'; From 0e621788c7f8821b6a2b3fffb1885f15ba3e5bcb Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Fri, 2 Jul 2021 03:53:08 +0300 Subject: [PATCH 666/931] Allow constexpr parameters for aggregate functions --- .../parseAggregateFunctionParameters.cpp | 41 ++++++++++--------- .../parseAggregateFunctionParameters.h | 12 ++++-- src/Functions/array/arrayReduce.cpp | 7 ++-- src/Functions/array/arrayReduceInRanges.cpp | 7 ++-- src/Functions/initializeAggregation.cpp | 7 ++-- src/Interpreters/ExpressionAnalyzer.cpp | 4 +- .../evaluateConstantExpression.cpp | 9 ++-- .../MergeTree/registerStorageMergeTree.cpp | 15 ++++--- ...pr_aggregate_function_parameters.reference | 2 + ...onstexpr_aggregate_function_parameters.sql | 11 +++++ 10 files changed, 72 insertions(+), 43 deletions(-) create mode 100644 tests/queries/0_stateless/01934_constexpr_aggregate_function_parameters.reference create mode 100644 tests/queries/0_stateless/01934_constexpr_aggregate_function_parameters.sql diff --git a/src/AggregateFunctions/parseAggregateFunctionParameters.cpp b/src/AggregateFunctions/parseAggregateFunctionParameters.cpp index 3826d993c4a..64eb0932de9 100644 --- a/src/AggregateFunctions/parseAggregateFunctionParameters.cpp +++ b/src/AggregateFunctions/parseAggregateFunctionParameters.cpp @@ -4,6 +4,8 @@ 
#include #include +#include + namespace DB { @@ -15,7 +17,7 @@ namespace ErrorCodes extern const int PARAMETERS_TO_AGGREGATE_FUNCTIONS_MUST_BE_LITERALS; } -Array getAggregateFunctionParametersArray(const ASTPtr & expression_list, const std::string & error_context) +Array getAggregateFunctionParametersArray(const ASTPtr & expression_list, const std::string & error_context, ContextPtr context) { const ASTs & parameters = expression_list->children; if (parameters.empty()) @@ -25,25 +27,25 @@ Array getAggregateFunctionParametersArray(const ASTPtr & expression_list, const for (size_t i = 0; i < parameters.size(); ++i) { - const auto * literal = parameters[i]->as(); - - ASTPtr func_literal; - if (!literal) - if (const auto * func = parameters[i]->as()) - if ((func_literal = func->toLiteral())) - literal = func_literal->as(); - - if (!literal) + ASTPtr literal; + try { - throw Exception( - ErrorCodes::PARAMETERS_TO_AGGREGATE_FUNCTIONS_MUST_BE_LITERALS, - "Parameters to aggregate functions must be literals. " - "Got parameter '{}'{}", - parameters[i]->formatForErrorMessage(), - (error_context.empty() ? "" : " (in " + error_context +")")); + literal = evaluateConstantExpressionAsLiteral(parameters[i], context); + } + catch (Exception & e) + { + if (e.code() == ErrorCodes::BAD_ARGUMENTS) + throw Exception( + ErrorCodes::PARAMETERS_TO_AGGREGATE_FUNCTIONS_MUST_BE_LITERALS, + "Parameters to aggregate functions must be literals. " + "Got parameter '{}'{}", + parameters[i]->formatForErrorMessage(), + (error_context.empty() ? "" : " (in " + error_context +")")); + + throw; } - params_row[i] = literal->value; + params_row[i] = literal->as()->value; } return params_row; @@ -54,7 +56,8 @@ void getAggregateFunctionNameAndParametersArray( const std::string & aggregate_function_name_with_params, std::string & aggregate_function_name, Array & aggregate_function_parameters, - const std::string & error_context) + const std::string & error_context, + ContextPtr context) { if (aggregate_function_name_with_params.back() != ')') { @@ -84,7 +87,7 @@ void getAggregateFunctionNameAndParametersArray( throw Exception("Incorrect list of parameters to aggregate function " + aggregate_function_name, ErrorCodes::BAD_ARGUMENTS); - aggregate_function_parameters = getAggregateFunctionParametersArray(args_ast); + aggregate_function_parameters = getAggregateFunctionParametersArray(args_ast, error_context, context); } } diff --git a/src/AggregateFunctions/parseAggregateFunctionParameters.h b/src/AggregateFunctions/parseAggregateFunctionParameters.h index 37f1f1d5097..033e92714dd 100644 --- a/src/AggregateFunctions/parseAggregateFunctionParameters.h +++ b/src/AggregateFunctions/parseAggregateFunctionParameters.h @@ -1,19 +1,23 @@ #pragma once + #include #include +#include namespace DB { -struct Settings; - -Array getAggregateFunctionParametersArray(const ASTPtr & expression_list, const std::string & error_context = ""); +Array getAggregateFunctionParametersArray( + const ASTPtr & expression_list, + const std::string & error_context, + ContextPtr context); void getAggregateFunctionNameAndParametersArray( const std::string & aggregate_function_name_with_params, std::string & aggregate_function_name, Array & aggregate_function_parameters, - const std::string & error_context); + const std::string & error_context, + ContextPtr context); } diff --git a/src/Functions/array/arrayReduce.cpp b/src/Functions/array/arrayReduce.cpp index 9a4b5aafdb9..3387d50a1f6 100644 --- a/src/Functions/array/arrayReduce.cpp +++ 
b/src/Functions/array/arrayReduce.cpp @@ -33,11 +33,12 @@ namespace ErrorCodes * arrayReduce('agg', arr1, ...) - apply the aggregate function `agg` to arrays `arr1...` * If multiple arrays passed, then elements on corresponding positions are passed as multiple arguments to the aggregate function. */ -class FunctionArrayReduce : public IFunction +class FunctionArrayReduce : public IFunction, private WithContext { public: static constexpr auto name = "arrayReduce"; - static FunctionPtr create(ContextPtr) { return std::make_shared(); } + static FunctionPtr create(ContextPtr context_) { return std::make_shared(context_); } + FunctionArrayReduce(ContextPtr context_) : WithContext(context_) {} String getName() const override { return name; } @@ -95,7 +96,7 @@ DataTypePtr FunctionArrayReduce::getReturnTypeImpl(const ColumnsWithTypeAndName String aggregate_function_name; Array params_row; getAggregateFunctionNameAndParametersArray(aggregate_function_name_with_params, - aggregate_function_name, params_row, "function " + getName()); + aggregate_function_name, params_row, "function " + getName(), getContext()); AggregateFunctionProperties properties; aggregate_function = AggregateFunctionFactory::instance().get(aggregate_function_name, argument_types, params_row, properties); diff --git a/src/Functions/array/arrayReduceInRanges.cpp b/src/Functions/array/arrayReduceInRanges.cpp index 9a2e8e1ca95..ffb047f2231 100644 --- a/src/Functions/array/arrayReduceInRanges.cpp +++ b/src/Functions/array/arrayReduceInRanges.cpp @@ -35,12 +35,13 @@ namespace ErrorCodes * * arrayReduceInRanges('agg', indices, lengths, arr1, ...) */ -class FunctionArrayReduceInRanges : public IFunction +class FunctionArrayReduceInRanges : public IFunction, private WithContext { public: static const size_t minimum_step = 64; static constexpr auto name = "arrayReduceInRanges"; - static FunctionPtr create(ContextPtr) { return std::make_shared(); } + static FunctionPtr create(ContextPtr context_) { return std::make_shared(context_); } + FunctionArrayReduceInRanges(ContextPtr context_) : WithContext(context_) {} String getName() const override { return name; } @@ -113,7 +114,7 @@ DataTypePtr FunctionArrayReduceInRanges::getReturnTypeImpl(const ColumnsWithType String aggregate_function_name; Array params_row; getAggregateFunctionNameAndParametersArray(aggregate_function_name_with_params, - aggregate_function_name, params_row, "function " + getName()); + aggregate_function_name, params_row, "function " + getName(), getContext()); AggregateFunctionProperties properties; aggregate_function = AggregateFunctionFactory::instance().get(aggregate_function_name, argument_types, params_row, properties); diff --git a/src/Functions/initializeAggregation.cpp b/src/Functions/initializeAggregation.cpp index b9b3d219551..060788773b6 100644 --- a/src/Functions/initializeAggregation.cpp +++ b/src/Functions/initializeAggregation.cpp @@ -25,11 +25,12 @@ namespace ErrorCodes namespace { -class FunctionInitializeAggregation : public IFunction +class FunctionInitializeAggregation : public IFunction, private WithContext { public: static constexpr auto name = "initializeAggregation"; - static FunctionPtr create(ContextPtr) { return std::make_shared(); } + static FunctionPtr create(ContextPtr context_) { return std::make_shared(context_); } + FunctionInitializeAggregation(ContextPtr context_) : WithContext(context_) {} String getName() const override { return name; } @@ -78,7 +79,7 @@ DataTypePtr FunctionInitializeAggregation::getReturnTypeImpl(const ColumnsWithTy 
String aggregate_function_name; Array params_row; getAggregateFunctionNameAndParametersArray(aggregate_function_name_with_params, - aggregate_function_name, params_row, "function " + getName()); + aggregate_function_name, params_row, "function " + getName(), getContext()); AggregateFunctionProperties properties; aggregate_function = AggregateFunctionFactory::instance().get(aggregate_function_name, argument_types, params_row, properties); diff --git a/src/Interpreters/ExpressionAnalyzer.cpp b/src/Interpreters/ExpressionAnalyzer.cpp index 5b2339975c1..e693d4ba988 100644 --- a/src/Interpreters/ExpressionAnalyzer.cpp +++ b/src/Interpreters/ExpressionAnalyzer.cpp @@ -468,7 +468,7 @@ bool ExpressionAnalyzer::makeAggregateDescriptions(ActionsDAGPtr & actions) } AggregateFunctionProperties properties; - aggregate.parameters = (node->parameters) ? getAggregateFunctionParametersArray(node->parameters) : Array(); + aggregate.parameters = (node->parameters) ? getAggregateFunctionParametersArray(node->parameters, "", getContext()) : Array(); aggregate.function = AggregateFunctionFactory::instance().get(node->name, types, aggregate.parameters, properties); aggregate_descriptions.push_back(aggregate); @@ -651,7 +651,7 @@ void ExpressionAnalyzer::makeWindowDescriptions(ActionsDAGPtr actions) window_function.function_parameters = window_function.function_node->parameters ? getAggregateFunctionParametersArray( - window_function.function_node->parameters) + window_function.function_node->parameters, "", getContext()) : Array(); // Requiring a constant reference to a shared pointer to non-const AST diff --git a/src/Interpreters/evaluateConstantExpression.cpp b/src/Interpreters/evaluateConstantExpression.cpp index 90f6ac84afc..d91ea9208e4 100644 --- a/src/Interpreters/evaluateConstantExpression.cpp +++ b/src/Interpreters/evaluateConstantExpression.cpp @@ -49,17 +49,20 @@ std::pair> evaluateConstantExpression(co expr_for_constant_folding->execute(block_with_constants); if (!block_with_constants || block_with_constants.rows() == 0) - throw Exception("Logical error: empty block after evaluation of constant expression for IN, VALUES or LIMIT", ErrorCodes::LOGICAL_ERROR); + throw Exception("Logical error: empty block after evaluation of constant expression for IN, VALUES or LIMIT or aggregate function parameter", + ErrorCodes::LOGICAL_ERROR); if (!block_with_constants.has(name)) - throw Exception("Element of set in IN, VALUES or LIMIT is not a constant expression (result column not found): " + name, ErrorCodes::BAD_ARGUMENTS); + throw Exception(ErrorCodes::BAD_ARGUMENTS, + "Element of set in IN, VALUES or LIMIT or aggregate function parameter is not a constant expression (result column not found): {}", name); const ColumnWithTypeAndName & result = block_with_constants.getByName(name); const IColumn & result_column = *result.column; /// Expressions like rand() or now() are not constant if (!isColumnConst(result_column)) - throw Exception("Element of set in IN, VALUES or LIMIT is not a constant expression (result column is not const): " + name, ErrorCodes::BAD_ARGUMENTS); + throw Exception(ErrorCodes::BAD_ARGUMENTS, + "Element of set in IN, VALUES or LIMIT or aggregate function parameter is not a constant expression (result column is not const): {}", name); return std::make_pair(result_column[0], result.type); } diff --git a/src/Storages/MergeTree/registerStorageMergeTree.cpp b/src/Storages/MergeTree/registerStorageMergeTree.cpp index b3b9ce31ff5..539f7713320 100644 --- 
a/src/Storages/MergeTree/registerStorageMergeTree.cpp +++ b/src/Storages/MergeTree/registerStorageMergeTree.cpp @@ -116,8 +116,11 @@ static bool compareRetentions(const Graphite::Retention & a, const Graphite::Ret * * */ -static void -appendGraphitePattern(const Poco::Util::AbstractConfiguration & config, const String & config_element, Graphite::Patterns & patterns) +static void appendGraphitePattern( + const Poco::Util::AbstractConfiguration & config, + const String & config_element, + Graphite::Patterns & out_patterns, + ContextPtr context) { Graphite::Pattern pattern; @@ -137,7 +140,7 @@ appendGraphitePattern(const Poco::Util::AbstractConfiguration & config, const St String aggregate_function_name; Array params_row; getAggregateFunctionNameAndParametersArray( - aggregate_function_name_with_params, aggregate_function_name, params_row, "GraphiteMergeTree storage initialization"); + aggregate_function_name_with_params, aggregate_function_name, params_row, "GraphiteMergeTree storage initialization", context); /// TODO Not only Float64 AggregateFunctionProperties properties; @@ -181,7 +184,7 @@ appendGraphitePattern(const Poco::Util::AbstractConfiguration & config, const St if (pattern.type & pattern.TypeRetention) /// TypeRetention or TypeAll std::sort(pattern.retentions.begin(), pattern.retentions.end(), compareRetentions); - patterns.emplace_back(pattern); + out_patterns.emplace_back(pattern); } static void setGraphitePatternsFromConfig(ContextPtr context, const String & config_element, Graphite::Params & params) @@ -204,7 +207,7 @@ static void setGraphitePatternsFromConfig(ContextPtr context, const String & con { if (startsWith(key, "pattern")) { - appendGraphitePattern(config, config_element + "." + key, params.patterns); + appendGraphitePattern(config, config_element + "." + key, params.patterns, context); } else if (key == "default") { @@ -219,7 +222,7 @@ static void setGraphitePatternsFromConfig(ContextPtr context, const String & con } if (config.has(config_element + ".default")) - appendGraphitePattern(config, config_element + "." + ".default", params.patterns); + appendGraphitePattern(config, config_element + "." + ".default", params.patterns, context); } diff --git a/tests/queries/0_stateless/01934_constexpr_aggregate_function_parameters.reference b/tests/queries/0_stateless/01934_constexpr_aggregate_function_parameters.reference new file mode 100644 index 00000000000..61be3e78ae7 --- /dev/null +++ b/tests/queries/0_stateless/01934_constexpr_aggregate_function_parameters.reference @@ -0,0 +1,2 @@ +[0,1,2,3,4] +[0,1,2,3,4] diff --git a/tests/queries/0_stateless/01934_constexpr_aggregate_function_parameters.sql b/tests/queries/0_stateless/01934_constexpr_aggregate_function_parameters.sql new file mode 100644 index 00000000000..3ab969ca256 --- /dev/null +++ b/tests/queries/0_stateless/01934_constexpr_aggregate_function_parameters.sql @@ -0,0 +1,11 @@ +SELECT groupArray(2 + 3)(number) FROM numbers(10); +SELECT groupArray('5'::UInt8)(number) FROM numbers(10); + +SELECT groupArray()(number) FROM numbers(10); -- { serverError 36 } +SELECT groupArray(NULL)(number) FROM numbers(10); -- { serverError 36 } +SELECT groupArray(NULL + NULL)(number) FROM numbers(10); -- { serverError 36 } +SELECT groupArray([])(number) FROM numbers(10); -- { serverError 36 } +SELECT groupArray(throwIf(1))(number) FROM numbers(10); -- { serverError 395 } + +-- Not the best error message, can be improved. 
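-- A sketch of why: the parameter list is now fed through constant-expression evaluation
-- before the aggregate function itself is resolved, and `number` is a column rather than
-- a constant, so the query below fails while resolving the identifier
-- (error 47, UNKNOWN_IDENTIFIER) instead of with the dedicated "parameters must be literals" message.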
+SELECT groupArray(number)(number) FROM numbers(10); -- { serverError 47 } From b8a0b4caf48654aa92d47ce2b2e6e9abafcedd4e Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Fri, 2 Jul 2021 03:57:46 +0300 Subject: [PATCH 667/931] One more test --- ...ametrized_query_parametric_aggregate_function.reference | 1 + ...935_parametrized_query_parametric_aggregate_function.sh | 7 +++++++ 2 files changed, 8 insertions(+) create mode 100644 tests/queries/0_stateless/01935_parametrized_query_parametric_aggregate_function.reference create mode 100755 tests/queries/0_stateless/01935_parametrized_query_parametric_aggregate_function.sh diff --git a/tests/queries/0_stateless/01935_parametrized_query_parametric_aggregate_function.reference b/tests/queries/0_stateless/01935_parametrized_query_parametric_aggregate_function.reference new file mode 100644 index 00000000000..0cfbf08886f --- /dev/null +++ b/tests/queries/0_stateless/01935_parametrized_query_parametric_aggregate_function.reference @@ -0,0 +1 @@ +2 diff --git a/tests/queries/0_stateless/01935_parametrized_query_parametric_aggregate_function.sh b/tests/queries/0_stateless/01935_parametrized_query_parametric_aggregate_function.sh new file mode 100755 index 00000000000..bbc24af1214 --- /dev/null +++ b/tests/queries/0_stateless/01935_parametrized_query_parametric_aggregate_function.sh @@ -0,0 +1,7 @@ +#!/usr/bin/env bash + +CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. "$CUR_DIR"/../shell_config.sh + +${CLICKHOUSE_CURL} -sS -XPOST "${CLICKHOUSE_URL}&param_lim=2" --data-binary 'select length(topKArray({lim:UInt32})([1,1,2,3,4,5,6,7,7,7]))' From 23912c606609e257fa1ecb282b294674999ee8a2 Mon Sep 17 00:00:00 2001 From: alexey-milovidov Date: Fri, 2 Jul 2021 04:05:28 +0300 Subject: [PATCH 668/931] Update adopters.md --- docs/en/introduction/adopters.md | 1 + 1 file changed, 1 insertion(+) diff --git a/docs/en/introduction/adopters.md b/docs/en/introduction/adopters.md index fdced7f354c..47927cd306a 100644 --- a/docs/en/introduction/adopters.md +++ b/docs/en/introduction/adopters.md @@ -155,5 +155,6 @@ toc_title: Adopters | Argedor | ClickHouse support | — | — | — | [Official website](https://www.argedor.com/en/clickhouse/) | | SigNoz | Observability Platform | Main Product | — | — | [Source code](https://github.com/SigNoz/signoz) | | ChelPipe Group | Analytics | — | — | — | [Blog post, June 2021](https://vc.ru/trade/253172-tyazhelomu-proizvodstvu-user-friendly-sayt-internet-magazin-trub-dlya-chtpz) | +| Zagrava Trading | — | — | — | — | [Job offer, May 2021](https://twitter.com/datastackjobs/status/1394707267082063874) | [Original article](https://clickhouse.tech/docs/en/introduction/adopters/) From a41a1b7c74d7eb1a15bc731f902174d628754b07 Mon Sep 17 00:00:00 2001 From: alexey-milovidov Date: Fri, 2 Jul 2021 04:25:07 +0300 Subject: [PATCH 669/931] Update ReplxxLineReader.cpp --- base/common/ReplxxLineReader.cpp | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/base/common/ReplxxLineReader.cpp b/base/common/ReplxxLineReader.cpp index 3d665744223..9c65b1dfe4c 100644 --- a/base/common/ReplxxLineReader.cpp +++ b/base/common/ReplxxLineReader.cpp @@ -25,7 +25,10 @@ void trim(String & s) s.erase(std::find_if(s.rbegin(), s.rend(), [](int ch) { return !std::isspace(ch); }).base(), s.end()); } -/// Copied from replxx::src/util.cxx::now_ms_str() +/// Copied from replxx::src/util.cxx::now_ms_str() under the terms of 3-clause BSD license of Replxx.
+/// Copyright (c) 2017-2018, Marcin Konarski (amok at codestation.org) +/// Copyright (c) 2010, Salvatore Sanfilippo (antirez at gmail dot com) +/// Copyright (c) 2010, Pieter Noordhuis (pcnoordhuis at gmail dot com) std::string replxx_now_ms_str() { std::chrono::milliseconds ms(std::chrono::duration_cast(std::chrono::system_clock::now().time_since_epoch())); From 24759a9b67f5e002349f0161546dce3aa98fcd54 Mon Sep 17 00:00:00 2001 From: alexey-milovidov Date: Fri, 2 Jul 2021 04:26:49 +0300 Subject: [PATCH 670/931] Update ReplxxLineReader.cpp --- base/common/ReplxxLineReader.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/base/common/ReplxxLineReader.cpp b/base/common/ReplxxLineReader.cpp index 9c65b1dfe4c..de169b1581b 100644 --- a/base/common/ReplxxLineReader.cpp +++ b/base/common/ReplxxLineReader.cpp @@ -29,7 +29,7 @@ void trim(String & s) /// Copyright (c) 2017-2018, Marcin Konarski (amok at codestation.org) /// Copyright (c) 2010, Salvatore Sanfilippo (antirez at gmail dot com) /// Copyright (c) 2010, Pieter Noordhuis (pcnoordhuis at gmail dot com) -std::string replxx_now_ms_str() +static std::string replxx_now_ms_str() { std::chrono::milliseconds ms(std::chrono::duration_cast(std::chrono::system_clock::now().time_since_epoch())); time_t t = ms.count() / 1000; From 3789e23dc9c7a1a6eb39201ea8b0e02943f07f03 Mon Sep 17 00:00:00 2001 From: feng lv Date: Fri, 2 Jul 2021 02:11:28 +0000 Subject: [PATCH 671/931] fix update fix --- src/Storages/StorageMerge.h | 2 +- src/TableFunctions/TableFunctionMerge.cpp | 13 ++++++++----- src/TableFunctions/TableFunctionMerge.h | 5 +++-- .../00717_merge_and_distributed.reference | 6 +++--- 4 files changed, 15 insertions(+), 11 deletions(-) diff --git a/src/Storages/StorageMerge.h b/src/Storages/StorageMerge.h index 7258f9cebaf..20460e95156 100644 --- a/src/Storages/StorageMerge.h +++ b/src/Storages/StorageMerge.h @@ -49,7 +49,7 @@ public: const ASTPtr & left_in_operand, ContextPtr query_context, const StorageMetadataPtr & metadata_snapshot) const override; private: - using DbToTableSetMap = std::unordered_map>; + using DbToTableSetMap = std::map>; std::optional source_database_regexp; std::optional source_table_regexp; diff --git a/src/TableFunctions/TableFunctionMerge.cpp b/src/TableFunctions/TableFunctionMerge.cpp index 7a905bf2ae7..40d0d1b7921 100644 --- a/src/TableFunctions/TableFunctionMerge.cpp +++ b/src/TableFunctions/TableFunctionMerge.cpp @@ -106,9 +106,12 @@ ColumnsDescription TableFunctionMerge::getActualTableStructure(ContextPtr contex { for (const auto & db_with_tables : getSourceDatabasesAndTables(context)) { - auto storage = DatabaseCatalog::instance().tryGetTable(StorageID{db_with_tables.first, *db_with_tables.second.begin()}, context); - if (storage) - return ColumnsDescription{storage->getInMemoryMetadataPtr()->getColumns().getAllPhysical()}; + for (const auto & table : db_with_tables.second) + { + auto storage = DatabaseCatalog::instance().tryGetTable(StorageID{db_with_tables.first, table}, context); + if (storage) + return ColumnsDescription{storage->getInMemoryMetadataPtr()->getColumns().getAllPhysical()}; + } } throwNoTablesMatchRegexp(source_database_name_or_regexp, source_table_regexp); @@ -130,7 +133,7 @@ StoragePtr TableFunctionMerge::executeImpl(const ASTPtr & /*ast_function*/, Cont return res; } -NameSet +TableFunctionMerge::TableSet TableFunctionMerge::getMatchedTablesWithAccess(const String & database_name, const String & table_regexp, const ContextPtr & context) { OptimizedRegularExpression 
table_re(table_regexp); @@ -144,7 +147,7 @@ TableFunctionMerge::getMatchedTablesWithAccess(const String & database_name, con bool granted_show_on_all_tables = access->isGranted(AccessType::SHOW_TABLES, database_name); bool granted_select_on_all_tables = access->isGranted(AccessType::SELECT, database_name); - NameSet tables; + TableSet tables; for (auto it = database->getTablesIterator(context, table_name_match); it->isValid(); it->next()) { diff --git a/src/TableFunctions/TableFunctionMerge.h b/src/TableFunctions/TableFunctionMerge.h index 438221ed95e..73b61f8eb79 100644 --- a/src/TableFunctions/TableFunctionMerge.h +++ b/src/TableFunctions/TableFunctionMerge.h @@ -20,11 +20,12 @@ private: StoragePtr executeImpl(const ASTPtr & ast_function, ContextPtr context, const std::string & table_name, ColumnsDescription cached_columns) const override; const char * getStorageTypeName() const override { return "Merge"; } - using DbToTableSetMap = std::unordered_map>; + using TableSet = std::set; + using DbToTableSetMap = std::map; const DbToTableSetMap & getSourceDatabasesAndTables(ContextPtr context) const; ColumnsDescription getActualTableStructure(ContextPtr context) const override; void parseArguments(const ASTPtr & ast_function, ContextPtr context) override; - static NameSet getMatchedTablesWithAccess(const String & database_name, const String & table_regexp, const ContextPtr & context); + static TableSet getMatchedTablesWithAccess(const String & database_name, const String & table_regexp, const ContextPtr & context); String source_database_name_or_regexp; String source_table_regexp; diff --git a/tests/queries/0_stateless/00717_merge_and_distributed.reference b/tests/queries/0_stateless/00717_merge_and_distributed.reference index ec6c2725a2a..073df56d693 100644 --- a/tests/queries/0_stateless/00717_merge_and_distributed.reference +++ b/tests/queries/0_stateless/00717_merge_and_distributed.reference @@ -49,9 +49,9 @@ 2018-08-01 100 2018-08-01 200 --------------Implicit type conversion------------ +2018-08-01 -1 +2018-08-01 1 +2018-08-01 -1 2018-08-01 1 -2018-08-01 18446744073709551615 -2018-08-01 1 -2018-08-01 18446744073709551615 2018-08-01 1 2018-08-01 1 From 1bf7a175b7047015e74014d1c4218a73321d2c3c Mon Sep 17 00:00:00 2001 From: feng lv Date: Fri, 2 Jul 2021 03:44:42 +0000 Subject: [PATCH 672/931] fix style --- src/Storages/StorageMerge.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Storages/StorageMerge.cpp b/src/Storages/StorageMerge.cpp index 89ff817e393..2d5bbfc712d 100644 --- a/src/Storages/StorageMerge.cpp +++ b/src/Storages/StorageMerge.cpp @@ -470,7 +470,7 @@ Pipe StorageMerge::createSources( /// Subordinary tables could have different but convertible types, like numeric types of different width. /// We must return streams with structure equals to structure of Merge table. 
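/// A concrete example of such a conversion: if one matched table declares a column as
/// UInt32 and another as UInt64, rows read from the narrower table are cast here so that
/// every source stream carries the single header declared by the Merge table itself.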
- convertingSourceStream(header, metadata_snapshot, aliases, modified_context, modified_query_info.query, pipe, processed_stage); + convertingSourceStream(header, metadata_snapshot, aliases, modified_context, modified_query_info.query, pipe, processed_stage); pipe.addTableLock(struct_lock); pipe.addStorageHolder(storage); From 1d332da0ed8f5e9f042da8584ab8ce21f38cf7b9 Mon Sep 17 00:00:00 2001 From: feng lv Date: Fri, 2 Jul 2021 05:51:53 +0000 Subject: [PATCH 673/931] fix special build on clang 11 --- base/common/ReplxxLineReader.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/base/common/ReplxxLineReader.cpp b/base/common/ReplxxLineReader.cpp index de169b1581b..9c65b1dfe4c 100644 --- a/base/common/ReplxxLineReader.cpp +++ b/base/common/ReplxxLineReader.cpp @@ -29,7 +29,7 @@ void trim(String & s) /// Copyright (c) 2017-2018, Marcin Konarski (amok at codestation.org) /// Copyright (c) 2010, Salvatore Sanfilippo (antirez at gmail dot com) /// Copyright (c) 2010, Pieter Noordhuis (pcnoordhuis at gmail dot com) -static std::string replxx_now_ms_str() +std::string replxx_now_ms_str() { std::chrono::milliseconds ms(std::chrono::duration_cast(std::chrono::system_clock::now().time_since_epoch())); time_t t = ms.count() / 1000; From 3ae127839188a1da088c5c1681831f02183ee098 Mon Sep 17 00:00:00 2001 From: Maksim Kita Date: Fri, 2 Jul 2021 11:22:30 +0300 Subject: [PATCH 674/931] Fixed tests --- src/Functions/array/arrayReduce.cpp | 2 +- src/Functions/array/arrayReduceInRanges.cpp | 2 +- tests/queries/skip_list.json | 3 ++- 3 files changed, 4 insertions(+), 3 deletions(-) diff --git a/src/Functions/array/arrayReduce.cpp b/src/Functions/array/arrayReduce.cpp index 3387d50a1f6..e070596e5ee 100644 --- a/src/Functions/array/arrayReduce.cpp +++ b/src/Functions/array/arrayReduce.cpp @@ -38,7 +38,7 @@ class FunctionArrayReduce : public IFunction, private WithContext public: static constexpr auto name = "arrayReduce"; static FunctionPtr create(ContextPtr context_) { return std::make_shared(context_); } - FunctionArrayReduce(ContextPtr context_) : WithContext(context_) {} + explicit FunctionArrayReduce(ContextPtr context_) : WithContext(context_) {} String getName() const override { return name; } diff --git a/src/Functions/array/arrayReduceInRanges.cpp b/src/Functions/array/arrayReduceInRanges.cpp index ffb047f2231..18140fe504d 100644 --- a/src/Functions/array/arrayReduceInRanges.cpp +++ b/src/Functions/array/arrayReduceInRanges.cpp @@ -41,7 +41,7 @@ public: static const size_t minimum_step = 64; static constexpr auto name = "arrayReduceInRanges"; static FunctionPtr create(ContextPtr context_) { return std::make_shared(context_); } - FunctionArrayReduceInRanges(ContextPtr context_) : WithContext(context_) {} + explicit FunctionArrayReduceInRanges(ContextPtr context_) : WithContext(context_) {} String getName() const override { return name; } diff --git a/tests/queries/skip_list.json b/tests/queries/skip_list.json index 7c1f998e91d..64aef86ec9c 100644 --- a/tests/queries/skip_list.json +++ b/tests/queries/skip_list.json @@ -520,7 +520,8 @@ "01914_exchange_dictionaries", "01915_create_or_replace_dictionary", "01913_names_of_tuple_literal", - "01925_merge_prewhere_table" + "01925_merge_prewhere_table", + "01934_constexpr_aggregate_function_parameters" ], "parallel": [ From 55889eacf507f3173c851fc83543d961b59967d8 Mon Sep 17 00:00:00 2001 From: Maksim Kita Date: Fri, 2 Jul 2021 11:24:45 +0300 Subject: [PATCH 675/931] Add test to ANTLR skip list --- tests/queries/skip_list.json | 3 ++- 1 file 
changed, 2 insertions(+), 1 deletion(-) diff --git a/tests/queries/skip_list.json b/tests/queries/skip_list.json index 7c1f998e91d..803199b9121 100644 --- a/tests/queries/skip_list.json +++ b/tests/queries/skip_list.json @@ -520,7 +520,8 @@ "01914_exchange_dictionaries", "01915_create_or_replace_dictionary", "01913_names_of_tuple_literal", - "01925_merge_prewhere_table" + "01925_merge_prewhere_table", + "01932_null_valid_identifier" ], "parallel": [ From e992ed780a23af724113c9f2f619de2e61a8a06f Mon Sep 17 00:00:00 2001 From: Maksim Kita Date: Fri, 2 Jul 2021 11:30:57 +0300 Subject: [PATCH 676/931] Update rabbitmq.md --- docs/zh/engines/table-engines/integrations/rabbitmq.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/zh/engines/table-engines/integrations/rabbitmq.md b/docs/zh/engines/table-engines/integrations/rabbitmq.md index c43218da14f..a4a5be5f685 100644 --- a/docs/zh/engines/table-engines/integrations/rabbitmq.md +++ b/docs/zh/engines/table-engines/integrations/rabbitmq.md @@ -96,7 +96,7 @@ RabbitMQ 服务器配置应使用 ClickHouse 配置文件添加。 ## 描述 {#description} -`SELECT`对于读取消息不是特别有用(除了调试),因为每个消息只能读取一次。使用[物化视图](../../../sql-reference/statements/create/view.md)创建实时线程更为实用。要做到这一点: +`SELECT`对于读取消息不是特别有用(除了调试),因为每个消息只能读取一次。使用[物化视图](../../../sql-reference/statements/create.md#create-view)创建实时线程更为实用。要做到这一点: 1. 使用引擎创建一个 RabbitMQ 消费者,并将其视为一个数据流。 2. 创建一个具有所需结构的表。 From 5e79c68e8e80370d8a0c0e2d9e16e93285b1d55d Mon Sep 17 00:00:00 2001 From: alesapin Date: Fri, 2 Jul 2021 11:54:22 +0300 Subject: [PATCH 677/931] Debug --- src/Storages/MergeTree/BackgroundJobsExecutor.cpp | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/Storages/MergeTree/BackgroundJobsExecutor.cpp b/src/Storages/MergeTree/BackgroundJobsExecutor.cpp index 16e1702824e..7c9c771a845 100644 --- a/src/Storages/MergeTree/BackgroundJobsExecutor.cpp +++ b/src/Storages/MergeTree/BackgroundJobsExecutor.cpp @@ -58,9 +58,10 @@ void IBackgroundJobExecutor::scheduleTask(bool with_backoff) } else { - no_work_done_count = 0; /// We have work, but run without backoff + no_work_done_count = 0; next_time_to_execute = 1000 * sleep_settings.thread_sleep_seconds_if_nothing_to_do; } + LOG_DEBUG(&Poco::Logger::get("DEBUG"), "NO WORK DONE TIMES {}", no_work_done_count); scheduling_task->scheduleAfter(next_time_to_execute, false); } From 2e29dc297560d80cb92be01965977b9885ec6416 Mon Sep 17 00:00:00 2001 From: alesapin Date: Fri, 2 Jul 2021 12:29:45 +0300 Subject: [PATCH 678/931] More safe empty parts creation --- src/Storages/MergeTree/ReplicatedMergeTreeQueue.cpp | 8 +++++--- src/Storages/StorageReplicatedMergeTree.cpp | 11 ++++++++++- 2 files changed, 15 insertions(+), 4 deletions(-) diff --git a/src/Storages/MergeTree/ReplicatedMergeTreeQueue.cpp b/src/Storages/MergeTree/ReplicatedMergeTreeQueue.cpp index f656dc7b7e8..bb4d0888c56 100644 --- a/src/Storages/MergeTree/ReplicatedMergeTreeQueue.cpp +++ b/src/Storages/MergeTree/ReplicatedMergeTreeQueue.cpp @@ -68,16 +68,18 @@ bool ReplicatedMergeTreeQueue::checkPartInQueueAndGetSourceParts(const String & { std::lock_guard lock(state_mutex); + bool found = false; for (const auto & entry : queue) { - if (entry->new_part_name == part_name) + if (entry->new_part_name == part_name && entry->source_parts.size() > source_parts.size()) { + source_parts.clear(); source_parts.insert(source_parts.end(), entry->source_parts.begin(), entry->source_parts.end()); - return true; + found = true; } } - return false; + return found; } diff --git a/src/Storages/StorageReplicatedMergeTree.cpp 
b/src/Storages/StorageReplicatedMergeTree.cpp index ac33d915958..35e7772e215 100644 --- a/src/Storages/StorageReplicatedMergeTree.cpp +++ b/src/Storages/StorageReplicatedMergeTree.cpp @@ -7486,7 +7486,16 @@ bool StorageReplicatedMergeTree::createEmptyPartInsteadOfLost(zkutil::ZooKeeperP try { MergeTreeData::Transaction transaction(*this); - renameTempPartAndReplace(new_data_part, nullptr, &transaction); + auto replaced_parts = renameTempPartAndReplace(new_data_part, nullptr, &transaction); + + if (!replaced_parts.empty()) + { + Strings part_names; + for (const auto & part : replaced_parts) + part_names.emplace_back(part->name); + + throw Exception(ErrorCodes::LOGICAL_ERROR, "Tried to create empty part {}, but it replaces existing parts {}.", lost_part_name, fmt::join(part_names, ", ")); + } while (true) { From f8f734d3dc80ca527e499c9f20e2c5fef179014c Mon Sep 17 00:00:00 2001 From: alesapin Date: Fri, 2 Jul 2021 12:40:13 +0300 Subject: [PATCH 679/931] Better comment --- src/Storages/MergeTree/ReplicatedMergeTreeQueue.h | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/Storages/MergeTree/ReplicatedMergeTreeQueue.h b/src/Storages/MergeTree/ReplicatedMergeTreeQueue.h index 0a2c092dfdb..820d2794a31 100644 --- a/src/Storages/MergeTree/ReplicatedMergeTreeQueue.h +++ b/src/Storages/MergeTree/ReplicatedMergeTreeQueue.h @@ -373,7 +373,9 @@ public: /// Checks that part is already in virtual parts bool isVirtualPart(const MergeTreeData::DataPartPtr & data_part) const; - /// Check that part produced by some entry in queue and get source parts for it + /// Check that part produced by some entry in queue and get source parts for it. + /// If there are several entries return largest source_parts set. This rarely possible + /// for example after replica clone. bool checkPartInQueueAndGetSourceParts(const String & part_name, Strings & source_parts) const; /// Check that part isn't in currently generating parts and isn't covered by them and add it to future_parts. From aa377bcd4249221863e39056afe6cc3e86d06c9a Mon Sep 17 00:00:00 2001 From: George Date: Fri, 2 Jul 2021 13:23:00 +0300 Subject: [PATCH 680/931] Deleted wrong doc --- docs/en/sql-reference/functions/logical-functions.md | 2 +- docs/en/sql-reference/operators/index.md | 4 ---- docs/ru/sql-reference/functions/logical-functions.md | 2 +- docs/ru/sql-reference/operators/index.md | 4 ---- 4 files changed, 2 insertions(+), 10 deletions(-) diff --git a/docs/en/sql-reference/functions/logical-functions.md b/docs/en/sql-reference/functions/logical-functions.md index 4b188184074..9d451dfe2b5 100644 --- a/docs/en/sql-reference/functions/logical-functions.md +++ b/docs/en/sql-reference/functions/logical-functions.md @@ -153,7 +153,7 @@ Result: ## xor {#logical-xor-function} -Calculates the result of the logical exclusive disjunction between two or more values. For more than two values the function works as if it calculates `XOR` of the first two values and then uses the result with the next value to calculate `XOR` and so on. Corresponds to [Logical XOR Operator](../../sql-reference/operators/index.md#logical-xor-operator). +Calculates the result of the logical exclusive disjunction between two or more values. For more than two values the function works as if it calculates `XOR` of the first two values and then uses the result with the next value to calculate `XOR` and so on. 
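For example, `xor(1, 0, 1)` is computed as `(1 XOR 0) XOR 1` and returns `0`.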
**Syntax** diff --git a/docs/en/sql-reference/operators/index.md b/docs/en/sql-reference/operators/index.md index 54239d48082..55da4afd145 100644 --- a/docs/en/sql-reference/operators/index.md +++ b/docs/en/sql-reference/operators/index.md @@ -223,10 +223,6 @@ Syntax `SELECT a OR b` — calculates logical disjunction of `a` and `b` with th Syntax `SELECT NOT a` — calculates logical negation of `a` with the function [not](../../sql-reference/functions/logical-functions.md#logical-not-function). -## Logical XOR operator {#logical-xor-operator} - -Syntax `SELECT a XOR b` — calculates logical exclusive disjunction of `a` and `b` with the function [xor](../../sql-reference/functions/logical-functions.md#logical-xor-function). - ## Conditional Operator {#conditional-operator} `a ? b : c` – The `if(a, b, c)` function. diff --git a/docs/ru/sql-reference/functions/logical-functions.md b/docs/ru/sql-reference/functions/logical-functions.md index e3fc75402ac..f4dee477ee0 100644 --- a/docs/ru/sql-reference/functions/logical-functions.md +++ b/docs/ru/sql-reference/functions/logical-functions.md @@ -153,7 +153,7 @@ SELECT NOT(1); ## xor {#logical-xor-function} -Вычисляет результат логической исключающей дизъюнкции между двумя и более значениями. При более чем двух значениях функция работает так: сначала вычисляет `XOR` для первых двух значений, а потом использует полученный результат при вычислении `XOR` со следующим значением и так далее. Соответствует [Оператору логического исключающего "ИЛИ"](../../sql-reference/operators/index.md#logical-xor-operator). +Вычисляет результат логической исключающей дизъюнкции между двумя и более значениями. При более чем двух значениях функция работает так: сначала вычисляет `XOR` для первых двух значений, а потом использует полученный результат при вычислении `XOR` со следующим значением и так далее. **Синтаксис** diff --git a/docs/ru/sql-reference/operators/index.md b/docs/ru/sql-reference/operators/index.md index 030de6a7574..785c142cca7 100644 --- a/docs/ru/sql-reference/operators/index.md +++ b/docs/ru/sql-reference/operators/index.md @@ -223,10 +223,6 @@ SELECT toDateTime('2014-10-26 00:00:00', 'Europe/Moscow') AS time, time + 60 * 6 Синтаксис `SELECT NOT a` — вычисляет логическое отрицание `a` функцией [not](../../sql-reference/functions/logical-functions.md#logical-not-function). -## Оператор логического исключающего "ИЛИ" {#logical-xor-operator} - -Синтаксис `SELECT a XOR b` — вычисляет логическую исключающую дизъюнкцию между `a` и `b` функцией [xor](../../sql-reference/functions/logical-functions.md#logical-xor-function). - ## Условный оператор {#uslovnyi-operator} `a ? 
b : c` - функция `if(a, b, c)` From e4a0e831f0e3a51ddc787475fb7ae7a7b0e7f415 Mon Sep 17 00:00:00 2001 From: Maksim Kita Date: Fri, 2 Jul 2021 14:08:11 +0300 Subject: [PATCH 681/931] Update skip_list.json --- tests/queries/skip_list.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/queries/skip_list.json b/tests/queries/skip_list.json index 0cd57ed39fb..be52bee71b1 100644 --- a/tests/queries/skip_list.json +++ b/tests/queries/skip_list.json @@ -520,7 +520,7 @@ "01914_exchange_dictionaries", "01915_create_or_replace_dictionary", "01913_names_of_tuple_literal", - "01925_merge_prewhere_table" + "01925_merge_prewhere_table", "01932_null_valid_identifier", "01934_constexpr_aggregate_function_parameters" ], From 8b4fabe60ce6aa3c5e62c2bb799ff76a36a71181 Mon Sep 17 00:00:00 2001 From: Vitaly Baranov Date: Fri, 2 Jul 2021 14:20:41 +0300 Subject: [PATCH 682/931] Fix crash on call dictGet() with bad arguments. --- .../MarkTableIdentifiersVisitor.cpp | 43 +++++++++++-------- .../MarkTableIdentifiersVisitor.h | 2 +- ...arts_identifiers_in_wrong_places.reference | 1 + ...hree_parts_identifiers_in_wrong_places.sql | 7 +++ 4 files changed, 33 insertions(+), 20 deletions(-) create mode 100644 tests/queries/0_stateless/01936_three_parts_identifiers_in_wrong_places.reference create mode 100644 tests/queries/0_stateless/01936_three_parts_identifiers_in_wrong_places.sql diff --git a/src/Interpreters/MarkTableIdentifiersVisitor.cpp b/src/Interpreters/MarkTableIdentifiersVisitor.cpp index 52f180aa199..1f418e759e7 100644 --- a/src/Interpreters/MarkTableIdentifiersVisitor.cpp +++ b/src/Interpreters/MarkTableIdentifiersVisitor.cpp @@ -11,6 +11,26 @@ namespace DB { +namespace +{ + void replaceArgumentWithTableIdentifierIfNotAlias(ASTFunction & func, size_t argument_pos, const Aliases & aliases) + { + if (!func.arguments || (func.arguments->children.size() <= argument_pos)) + return; + auto arg = func.arguments->children[argument_pos]; + auto identifier = arg->as(); + if (!identifier) + return; + if (aliases.contains(identifier->name())) + return; + auto table_identifier = identifier->createTable(); + if (!table_identifier) + return; + func.arguments->children[argument_pos] = table_identifier; + } +} + + bool MarkTableIdentifiersMatcher::needChildVisit(ASTPtr & node, const ASTPtr & child) { if (child->as()) @@ -23,37 +43,22 @@ bool MarkTableIdentifiersMatcher::needChildVisit(ASTPtr & node, const ASTPtr & c void MarkTableIdentifiersMatcher::visit(ASTPtr & ast, Data & data) { if (auto * node_func = ast->as()) - visit(*node_func, ast, data); + visit(*node_func, data); } -void MarkTableIdentifiersMatcher::visit(const ASTFunction & func, ASTPtr & ptr, Data & data) +void MarkTableIdentifiersMatcher::visit(ASTFunction & func, const Data & data) { /// `IN t` can be specified, where t is a table, which is equivalent to `IN (SELECT * FROM t)`. if (checkFunctionIsInOrGlobalInOperator(func)) { - auto ast = func.arguments->children.at(1); - auto opt_name = tryGetIdentifierName(ast); - if (opt_name && !data.aliases.count(*opt_name) && ast->as()) - { - ptr->as()->arguments->children[1] = ast->as()->createTable(); - assert(ptr->as()->arguments->children[1]); - } + replaceArgumentWithTableIdentifierIfNotAlias(func, 1, data.aliases); } // First argument of joinGet can be a table name, perhaps with a database. // First argument of dictGet can be a dictionary name, perhaps with a database. 
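// For example, in `joinGet(db.join_table, 'col', key)` or `dictGet(db.dict, 'attr', key)`
// the first argument may be written as a bare identifier, which has to be rewritten into
// a table identifier when it is not an alias. An identifier that cannot form a table name
// at all (a three-part name such as `t.nest.a` in the 01936 test below) now simply stays
// untouched instead of tripping the assert in the old code.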
else if (functionIsJoinGet(func.name) || functionIsDictGet(func.name)) { - if (!func.arguments || func.arguments->children.empty()) - return; - - auto ast = func.arguments->children.at(0); - auto opt_name = tryGetIdentifierName(ast); - if (opt_name && !data.aliases.count(*opt_name) && ast->as()) - { - ptr->as()->arguments->children[0] = ast->as()->createTable(); - assert(ptr->as()->arguments->children[0]); - } + replaceArgumentWithTableIdentifierIfNotAlias(func, 0, data.aliases); } } diff --git a/src/Interpreters/MarkTableIdentifiersVisitor.h b/src/Interpreters/MarkTableIdentifiersVisitor.h index 0d80b865e53..d05c067397b 100644 --- a/src/Interpreters/MarkTableIdentifiersVisitor.h +++ b/src/Interpreters/MarkTableIdentifiersVisitor.h @@ -24,7 +24,7 @@ public: static void visit(ASTPtr & ast, Data & data); private: - static void visit(const ASTFunction & func, ASTPtr &, Data &); + static void visit(ASTFunction & func, const Data & data); }; using MarkTableIdentifiersVisitor = MarkTableIdentifiersMatcher::Visitor; diff --git a/tests/queries/0_stateless/01936_three_parts_identifiers_in_wrong_places.reference b/tests/queries/0_stateless/01936_three_parts_identifiers_in_wrong_places.reference new file mode 100644 index 00000000000..bbf76e61257 --- /dev/null +++ b/tests/queries/0_stateless/01936_three_parts_identifiers_in_wrong_places.reference @@ -0,0 +1 @@ +still alive diff --git a/tests/queries/0_stateless/01936_three_parts_identifiers_in_wrong_places.sql b/tests/queries/0_stateless/01936_three_parts_identifiers_in_wrong_places.sql new file mode 100644 index 00000000000..d2ca771edc5 --- /dev/null +++ b/tests/queries/0_stateless/01936_three_parts_identifiers_in_wrong_places.sql @@ -0,0 +1,7 @@ +SELECT dictGet(t.nest.a, concat(currentDatabase(), '.dict.dict'), 's', number) FROM numbers(5); -- { serverError 47 } + +SELECT dictGetFloat64(t.b.s, 'database_for_dict.dict1', dictGetFloat64('Ta\0', toUInt64('databas\0_for_dict.dict1databas\0_for_dict.dict1', dictGetFloat64('', '', toUInt64(1048577), toDate(NULL)), NULL), toDate(dictGetFloat64(257, 'database_for_dict.dict1database_for_dict.dict1', '', toUInt64(NULL), 2, toDate(NULL)), '2019-05-2\0')), NULL, toUInt64(dictGetFloat64('', '', toUInt64(-9223372036854775808), toDate(NULL)), NULL)); -- { serverError 47 } + +SELECT NULL AND (2147483648 AND NULL) AND -2147483647, toUUID(((1048576 AND NULL) AND (2147483647 AND 257 AND NULL AND -2147483649) AND NULL) IN (test_01103.t1_distr.id), '00000000-e1fe-11e\0-bb8f\0853d60c00749'), stringToH3('89184926cc3ffff89184926cc3ffff89184926cc3ffff89184926cc3ffff89184926cc3ffff89184926cc3ffff89184926cc3ffff89184926cc3ffff'); -- { serverError 47 } + +SELECT 'still alive'; From b3e3a3cde0d3c152fa33062f9016b199b524cee9 Mon Sep 17 00:00:00 2001 From: Maksim Kita Date: Fri, 2 Jul 2021 16:36:02 +0300 Subject: [PATCH 683/931] Fixed tests --- src/AggregateFunctions/IAggregateFunction.h | 4 ++-- src/Interpreters/ActionsDAG.h | 2 -- src/Interpreters/Context.cpp | 1 + 3 files changed, 3 insertions(+), 4 deletions(-) diff --git a/src/AggregateFunctions/IAggregateFunction.h b/src/AggregateFunctions/IAggregateFunction.h index 4085675598b..74cd0890903 100644 --- a/src/AggregateFunctions/IAggregateFunction.h +++ b/src/AggregateFunctions/IAggregateFunction.h @@ -533,9 +533,9 @@ public: } catch (...) 
{ - for (size_t destroy_index = 0; destroy_index < batch_index; ++destroy_index) + for (size_t destroy_index = batch_index; destroy_index < batch_size; ++destroy_index) if (destroy_place) - static_cast(this)->destroy(places[batch_index] + place_offset); + static_cast(this)->destroy(places[destroy_index] + place_offset); throw; } diff --git a/src/Interpreters/ActionsDAG.h b/src/Interpreters/ActionsDAG.h index 6c2513d0d92..9cd0057bb1a 100644 --- a/src/Interpreters/ActionsDAG.h +++ b/src/Interpreters/ActionsDAG.h @@ -27,8 +27,6 @@ using FunctionOverloadResolverPtr = std::shared_ptr; class IDataType; using DataTypePtr = std::shared_ptr; -class CompiledExpressionCache; - namespace JSONBuilder { class JSONMap; diff --git a/src/Interpreters/Context.cpp b/src/Interpreters/Context.cpp index 2992bc010ab..9b204f12ab2 100644 --- a/src/Interpreters/Context.cpp +++ b/src/Interpreters/Context.cpp @@ -73,6 +73,7 @@ #include #include #include +#include #include #include #include From 13c008c7a8a75abec0e4b07741790bf2411dcfde Mon Sep 17 00:00:00 2001 From: alesapin Date: Fri, 2 Jul 2021 16:38:46 +0300 Subject: [PATCH 684/931] Change exception type --- src/Storages/StorageReplicatedMergeTree.cpp | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/src/Storages/StorageReplicatedMergeTree.cpp b/src/Storages/StorageReplicatedMergeTree.cpp index 35e7772e215..b51b39f7d68 100644 --- a/src/Storages/StorageReplicatedMergeTree.cpp +++ b/src/Storages/StorageReplicatedMergeTree.cpp @@ -7494,7 +7494,15 @@ bool StorageReplicatedMergeTree::createEmptyPartInsteadOfLost(zkutil::ZooKeeperP for (const auto & part : replaced_parts) part_names.emplace_back(part->name); - throw Exception(ErrorCodes::LOGICAL_ERROR, "Tried to create empty part {}, but it replaces existing parts {}.", lost_part_name, fmt::join(part_names, ", ")); + /// Why this exception is not a LOGICAL_ERROR? Because it's possible + /// to have some source parts for the lost part if replica currently + /// cloning from another replica, but source replica lost covering + /// part and finished MERGE_PARTS before clone. It's an extremely + /// rare case and it's unclear how to resolve it better. Eventually + /// source replica will replace lost part with empty part and we + /// will fetch this empty part instead of our source parts. This + /// will make replicas consistent, but some data will be lost. 
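        /// (INCORRECT_DATA also matches the error-code convention: it marks a rare but
        /// legitimate runtime condition, whereas LOGICAL_ERROR is reserved for situations
        /// that could only be produced by a bug in the server itself.)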
+ throw Exception(ErrorCodes::INCORRECT_DATA, "Tried to create empty part {}, but it replaces existing parts {}.", lost_part_name, fmt::join(part_names, ", ")); } while (true) From 12aea188b04db3a44c35d9b5099c578cf5b4f41d Mon Sep 17 00:00:00 2001 From: zxc111 Date: Thu, 24 Jun 2021 19:35:19 +0800 Subject: [PATCH 685/931] add bin/unbin support --- src/Common/hex.cpp | 34 ++ src/Common/hex.h | 11 + src/Functions/FunctionsCoding.cpp | 2 + src/Functions/FunctionsCoding.h | 389 ++++++++++++++++++ .../0_stateless/01926_bin_unbin.reference | 16 + tests/queries/0_stateless/01926_bin_unbin.sql | 17 + 6 files changed, 469 insertions(+) create mode 100644 tests/queries/0_stateless/01926_bin_unbin.reference create mode 100644 tests/queries/0_stateless/01926_bin_unbin.sql diff --git a/src/Common/hex.cpp b/src/Common/hex.cpp index bad1bf19b8d..e8f9b981062 100644 --- a/src/Common/hex.cpp +++ b/src/Common/hex.cpp @@ -56,3 +56,37 @@ const char * const hex_char_to_digit_table = "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff" "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff" "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff"; + +const char * const bin_byte_to_char_table = + "0000000000000001000000100000001100000100000001010000011000000111" + "0000100000001001000010100000101100001100000011010000111000001111" + "0001000000010001000100100001001100010100000101010001011000010111" + "0001100000011001000110100001101100011100000111010001111000011111" + "0010000000100001001000100010001100100100001001010010011000100111" + "0010100000101001001010100010101100101100001011010010111000101111" + "0011000000110001001100100011001100110100001101010011011000110111" + "0011100000111001001110100011101100111100001111010011111000111111" + "0100000001000001010000100100001101000100010001010100011001000111" + "0100100001001001010010100100101101001100010011010100111001001111" + "0101000001010001010100100101001101010100010101010101011001010111" + "0101100001011001010110100101101101011100010111010101111001011111" + "0110000001100001011000100110001101100100011001010110011001100111" + "0110100001101001011010100110101101101100011011010110111001101111" + "0111000001110001011100100111001101110100011101010111011001110111" + "0111100001111001011110100111101101111100011111010111111001111111" + "1000000010000001100000101000001110000100100001011000011010000111" + "1000100010001001100010101000101110001100100011011000111010001111" + "1001000010010001100100101001001110010100100101011001011010010111" + "1001100010011001100110101001101110011100100111011001111010011111" + "1010000010100001101000101010001110100100101001011010011010100111" + "1010100010101001101010101010101110101100101011011010111010101111" + "1011000010110001101100101011001110110100101101011011011010110111" + "1011100010111001101110101011101110111100101111011011111010111111" + "1100000011000001110000101100001111000100110001011100011011000111" + "1100100011001001110010101100101111001100110011011100111011001111" + "1101000011010001110100101101001111010100110101011101011011010111" + "1101100011011001110110101101101111011100110111011101111011011111" + "1110000011100001111000101110001111100100111001011110011011100111" + "1110100011101001111010101110101111101100111011011110111011101111" + "1111000011110001111100101111001111110100111101011111011011110111" + "1111100011111001111110101111101111111100111111011111111011111111"; diff --git a/src/Common/hex.h b/src/Common/hex.h index a1fa7b32465..62867f99c48 100644 --- a/src/Common/hex.h +++ 
b/src/Common/hex.h @@ -39,6 +39,17 @@ inline void writeHexByteLowercase(UInt8 byte, void * out) memcpy(out, &hex_byte_to_char_lowercase_table[static_cast(byte) * 2], 2); } +extern const char * const bin_byte_to_char_table; + +inline void writeBinByte(UInt8 byte, void * out) +{ + memcpy(out, &bin_byte_to_char_table[static_cast(byte) * 8], 8); +} + +inline void writeSingleBinByte(UInt8 byte, void * out) +{ + memcpy(out, &hex_digit_to_char_uppercase_table[static_cast(byte)], 1); +} /// Produces hex representation of an unsigned int with leading zeros (for checksums) template diff --git a/src/Functions/FunctionsCoding.cpp b/src/Functions/FunctionsCoding.cpp index 150d792f63b..f1bbeb5c43f 100644 --- a/src/Functions/FunctionsCoding.cpp +++ b/src/Functions/FunctionsCoding.cpp @@ -21,6 +21,8 @@ void registerFunctionsCoding(FunctionFactory & factory) factory.registerFunction(); factory.registerFunction(FunctionFactory::CaseInsensitive); factory.registerFunction(FunctionFactory::CaseInsensitive); + factory.registerFunction(FunctionFactory::CaseInsensitive); + factory.registerFunction(FunctionFactory::CaseInsensitive); factory.registerFunction(FunctionFactory::CaseInsensitive); factory.registerFunction(); factory.registerFunction(); diff --git a/src/Functions/FunctionsCoding.h b/src/Functions/FunctionsCoding.h index da667bfc691..5004905863f 100644 --- a/src/Functions/FunctionsCoding.h +++ b/src/Functions/FunctionsCoding.h @@ -1326,6 +1326,395 @@ public: } }; +class FunctionBin : public IFunction +{ +public: + static constexpr auto name = "bin"; + static FunctionPtr create(ContextPtr) { return std::make_shared(); } + + String getName() const override { return name; } + + size_t getNumberOfArguments() const override { return 1; } + bool isInjective(const ColumnsWithTypeAndName &) const override { return true; } + + DataTypePtr getReturnTypeImpl(const DataTypes & arguments) const override + { + WhichDataType which(arguments[0]); + + if (!which.isStringOrFixedString() && + !which.isDate() && + !which.isDateTime() && + !which.isDateTime64() && + !which.isUInt() && + !which.isFloat() && + !which.isDecimal()) + throw Exception("Illegal type " + arguments[0]->getName() + " of argument of function " + getName(), + ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT); + + return std::make_shared(); + } + + template + void executeOneUInt(T x, char *& out) const + { + UInt8 result[sizeof(x) * 8] = {0}; + int cnt = 0; + if (0 == x) + { + writeSingleBinByte(0, out); + ++out; + *out = '\0'; + ++out; + return; + } + for (; x != 0; x = x >> 1) + { + result[cnt] = (x & 1); + cnt += 1; + } + for (int i = cnt - 1; i >= 0; --i) + { + writeSingleBinByte(result[i], out); + out += 1; + } + + *out = '\0'; + ++out; + } + + template + bool tryExecuteUInt(const IColumn * col, ColumnPtr & col_res) const + { + const ColumnVector * col_vec = checkAndGetColumn>(col); + + static constexpr size_t MAX_UINT_HEX_LENGTH = sizeof(T) * 8 + 1; /// Including trailing zero byte. + + if (col_vec) + { + auto col_str = ColumnString::create(); + ColumnString::Chars & out_vec = col_str->getChars(); + ColumnString::Offsets & out_offsets = col_str->getOffsets(); + + const typename ColumnVector::Container & in_vec = col_vec->getData(); + + size_t size = in_vec.size(); + out_offsets.resize(size); + out_vec.resize(MAX_UINT_HEX_LENGTH); + + size_t pos = 0; + for (size_t i = 0; i < size; ++i) + { + /// Manual exponential growth, so as not to rely on the linear amortized work time of `resize` (no one guarantees it). 
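                /// (Despite its name, MAX_UINT_HEX_LENGTH above is sized for the binary form:
                /// sizeof(T) * 8 digits plus the terminating zero byte. The name looks carried
                /// over from the hex implementation this function was modelled on.)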
+ if (pos + MAX_UINT_HEX_LENGTH > out_vec.size()) + out_vec.resize(out_vec.size() * 8 + MAX_UINT_HEX_LENGTH); + + char * begin = reinterpret_cast(&out_vec[pos]); + char * end = begin; + + executeOneUInt(in_vec[i], end); + + pos += end - begin; + out_offsets[i] = pos; + } + + out_vec.resize(pos); + + col_res = std::move(col_str); + return true; + } + else + { + return false; + } + } + + template + void executeFloatAndDecimal(const T & in_vec, ColumnPtr & col_res, const size_t type_size_in_bytes) const + { + const size_t hex_length = type_size_in_bytes * 8 + 1; /// Including trailing zero byte. + auto col_str = ColumnString::create(); + + ColumnString::Chars & out_vec = col_str->getChars(); + ColumnString::Offsets & out_offsets = col_str->getOffsets(); + + size_t size = in_vec.size(); + out_offsets.resize(size); + out_vec.resize(size * hex_length); + + size_t pos = 0; + char * begin = reinterpret_cast(out_vec.data()); + char * out = begin; + for (size_t i = 0; i < size; ++i) + { + const UInt8 * in_pos = reinterpret_cast(&in_vec[i]); + executeOneString(in_pos, in_pos + type_size_in_bytes, out); + + pos += hex_length; + out_offsets[i] = pos; + } + col_res = std::move(col_str); + } + + template + bool tryExecuteFloat(const IColumn * col, ColumnPtr & col_res) const + { + const ColumnVector * col_vec = checkAndGetColumn>(col); + if (col_vec) + { + const typename ColumnVector::Container & in_vec = col_vec->getData(); + executeFloatAndDecimal::Container>(in_vec, col_res, sizeof(T)); + return true; + } + else + { + return false; + } + } + + template + bool tryExecuteDecimal(const IColumn * col, ColumnPtr & col_res) const + { + const ColumnDecimal * col_dec = checkAndGetColumn>(col); + if (col_dec) + { + const typename ColumnDecimal::Container & in_vec = col_dec->getData(); + executeFloatAndDecimal::Container>(in_vec, col_res, sizeof(T)); + return true; + } + else + { + return false; + } + } + + + static void executeOneString(const UInt8 * pos, const UInt8 * end, char *& out) + { + while (pos < end) + { + writeBinByte(*pos, out); + + ++pos; + out += 8; + } + *out = '\0'; + ++out; + } + + static bool tryExecuteString(const IColumn * col, ColumnPtr & col_res) + { + const ColumnString * col_str_in = checkAndGetColumn(col); + + if (col_str_in) + { + auto col_str = ColumnString::create(); + ColumnString::Chars & out_vec = col_str->getChars(); + ColumnString::Offsets & out_offsets = col_str->getOffsets(); + + const ColumnString::Chars & in_vec = col_str_in->getChars(); + const ColumnString::Offsets & in_offsets = col_str_in->getOffsets(); + + size_t size = in_offsets.size(); + + out_offsets.resize(size); + out_vec.resize((in_vec.size() - 1) * 8 + size); + + char * begin = reinterpret_cast(out_vec.data()); + char * pos = begin; + size_t prev_offset = 0; + + for (size_t i = 0; i < size; ++i) + { + size_t new_offset = in_offsets[i]; + executeOneString(&in_vec[prev_offset], &in_vec[new_offset - 1], pos); + + out_offsets[i] = pos - begin; + + prev_offset = new_offset; + } + if (!out_offsets.empty() && out_offsets.back() != out_vec.size()) + throw Exception("Column size mismatch (internal logical error)", ErrorCodes::LOGICAL_ERROR); + + col_res = std::move(col_str); + return true; + } + else + { + return false; + } + } + + static bool tryExecuteFixedString(const IColumn * col, ColumnPtr & col_res) + { + const ColumnFixedString * col_fstr_in = checkAndGetColumn(col); + + if (col_fstr_in) + { + auto col_str = ColumnString::create(); + ColumnString::Chars & out_vec = col_str->getChars(); + ColumnString::Offsets & 
out_offsets = col_str->getOffsets(); + + const ColumnString::Chars & in_vec = col_fstr_in->getChars(); + + size_t size = col_fstr_in->size(); + + out_offsets.resize(size); + out_vec.resize(in_vec.size() * 8 + size); + + char * begin = reinterpret_cast(out_vec.data()); + char * pos = begin; + + size_t n = col_fstr_in->getN(); + + size_t prev_offset = 0; + + for (size_t i = 0; i < size; ++i) + { + size_t new_offset = prev_offset + n; + + executeOneString(&in_vec[prev_offset], &in_vec[new_offset], pos); + + out_offsets[i] = pos - begin; + prev_offset = new_offset; + } + + if (!out_offsets.empty() && out_offsets.back() != out_vec.size()) + throw Exception("Column size mismatch (internal logical error)", ErrorCodes::LOGICAL_ERROR); + + col_res = std::move(col_str); + return true; + } + else + { + return false; + } + } + + bool useDefaultImplementationForConstants() const override { return true; } + + ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t /*input_rows_count*/) const override + { + const IColumn * column = arguments[0].column.get(); + ColumnPtr res_column; + + if (tryExecuteUInt(column, res_column) || tryExecuteUInt(column, res_column) + || tryExecuteUInt(column, res_column) || tryExecuteUInt(column, res_column) + || tryExecuteString(column, res_column) || tryExecuteFixedString(column, res_column) + || tryExecuteFloat(column, res_column) || tryExecuteFloat(column, res_column) + || tryExecuteDecimal(column, res_column) || tryExecuteDecimal(column, res_column) + || tryExecuteDecimal(column, res_column)) + return res_column; + + throw Exception( + "Illegal column " + arguments[0].column->getName() + " of argument of function " + getName(), ErrorCodes::ILLEGAL_COLUMN); + } +}; + +class FunctionUnbin : public IFunction +{ +public: + static constexpr auto name = "unbin"; + static FunctionPtr create(ContextPtr) { return std::make_shared(); } + + String getName() const override { return name; } + + size_t getNumberOfArguments() const override { return 1; } + bool isInjective(const ColumnsWithTypeAndName &) const override { return true; } + + DataTypePtr getReturnTypeImpl(const DataTypes & arguments) const override + { + if (!isString(arguments[0])) + throw Exception( + "Illegal type " + arguments[0]->getName() + " of argument of function " + getName(), ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT); + + return std::make_shared(); + } + + static void unbinOne(const char * pos, const char * end, char *& out) + { + uint8_t left = 0; + for (int left_cnt = (end - pos) & 7; left_cnt > 0; --left_cnt) + { + left = left << 1; + if (*pos == '1') + { + left += 1; + } + ++pos; + } + if (0 != left) + { + *out = left; + ++out; + } + + while (end - pos != 0) + { + int c = 0; + for (int i = 0; i < 8; ++i) + { + c = c << 1; + if (*pos == '1') + { + c += 1; + } + ++pos; + } + *out = c; + ++out; + } + + *out = '\0'; + ++out; + } + + bool useDefaultImplementationForConstants() const override { return true; } + + ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t /*input_rows_count*/) const override + { + const ColumnPtr & column = arguments[0].column; + + if (const ColumnString * col = checkAndGetColumn(column.get())) + { + auto col_res = ColumnString::create(); + + ColumnString::Chars & out_vec = col_res->getChars(); + ColumnString::Offsets & out_offsets = col_res->getOffsets(); + + const ColumnString::Chars & in_vec = col->getChars(); + const ColumnString::Offsets & in_offsets = col->getOffsets(); + + size_t size = in_offsets.size(); + 
out_offsets.resize(size); + out_vec.resize(in_vec.size() / 8 + size); + + char * begin = reinterpret_cast(out_vec.data()); + char * pos = begin; + size_t prev_offset = 0; + for (size_t i = 0; i < size; ++i) + { + size_t new_offset = in_offsets[i]; + + unbinOne(reinterpret_cast(&in_vec[prev_offset]), reinterpret_cast(&in_vec[new_offset - 1]), pos); + + out_offsets[i] = pos - begin; + + prev_offset = new_offset; + } + + out_vec.resize(pos - begin); + + return col_res; + } + else + { + throw Exception("Illegal column " + arguments[0].column->getName() + + " of argument of function " + getName(), + ErrorCodes::ILLEGAL_COLUMN); + } + } +}; + class FunctionChar : public IFunction { public: diff --git a/tests/queries/0_stateless/01926_bin_unbin.reference b/tests/queries/0_stateless/01926_bin_unbin.reference new file mode 100644 index 00000000000..b9ddf2f1db7 --- /dev/null +++ b/tests/queries/0_stateless/01926_bin_unbin.reference @@ -0,0 +1,16 @@ +0 +1 +1010 +1111111 +11111111 +00110000 +0011000100110000 +111001101011010110001011111010001010111110010101 +11100110101101011000101111101000101011111001010100000000000000000000000000000000 +10011010100110011001100100111111 +0011001100110011001100110011001100110011001100111111001100111111 +00000000000011100010011100000111 +0000000000000000000011000011110101011101010100111010101000000001 +0 +10 +测试 diff --git a/tests/queries/0_stateless/01926_bin_unbin.sql b/tests/queries/0_stateless/01926_bin_unbin.sql new file mode 100644 index 00000000000..fd7a77bd2fc --- /dev/null +++ b/tests/queries/0_stateless/01926_bin_unbin.sql @@ -0,0 +1,17 @@ +select bin(0); +select bin(1); +select bin(10); +select bin(127); +select bin(255); +select bin('0'); +select bin('10'); +select bin('测试'); +select bin(toFixedString('测试', 10)); +select bin(toFloat32(1.2)); +select bin(toFloat64(1.2)); +select bin(toDecimal32(1.2, 8)); +select bin(toDecimal64(1.2, 17)); + +select unbin('00110000'); -- 0 +select unbin('0011000100110000'); -- 10 +select unbin('111001101011010110001011111010001010111110010101'); -- 测试 From b4b54a05491c8ecbf96e1552742d1eb1d05f4ebe Mon Sep 17 00:00:00 2001 From: alesapin Date: Fri, 2 Jul 2021 18:25:05 +0300 Subject: [PATCH 686/931] Fix bug --- src/Storages/MergeTree/BackgroundJobsExecutor.cpp | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/src/Storages/MergeTree/BackgroundJobsExecutor.cpp b/src/Storages/MergeTree/BackgroundJobsExecutor.cpp index 7c9c771a845..36803ba5197 100644 --- a/src/Storages/MergeTree/BackgroundJobsExecutor.cpp +++ b/src/Storages/MergeTree/BackgroundJobsExecutor.cpp @@ -61,7 +61,6 @@ void IBackgroundJobExecutor::scheduleTask(bool with_backoff) no_work_done_count = 0; next_time_to_execute = 1000 * sleep_settings.thread_sleep_seconds_if_nothing_to_do; } - LOG_DEBUG(&Poco::Logger::get("DEBUG"), "NO WORK DONE TIMES {}", no_work_done_count); scheduling_task->scheduleAfter(next_time_to_execute, false); } @@ -177,7 +176,7 @@ void IBackgroundJobExecutor::triggerTask() { std::lock_guard lock(scheduling_task_mutex); if (scheduling_task) - scheduling_task->schedule(); + runTaskWithoutDelay(); } void IBackgroundJobExecutor::backgroundTaskFunction() From ace487278fb7ba7de852c68c993ac3055a18aae2 Mon Sep 17 00:00:00 2001 From: zxc111 Date: Fri, 2 Jul 2021 01:09:44 +0800 Subject: [PATCH 687/931] refactory hex/unhex/bin/unbin --- src/Common/hex.h | 5 - src/Functions/FunctionsCoding.h | 900 +++++++----------- .../0_stateless/01926_bin_unbin.reference | 4 +- tests/queries/0_stateless/01926_bin_unbin.sql | 2 + 4 files changed, 346 
insertions(+), 565 deletions(-) diff --git a/src/Common/hex.h b/src/Common/hex.h index 62867f99c48..82eff776244 100644 --- a/src/Common/hex.h +++ b/src/Common/hex.h @@ -46,11 +46,6 @@ inline void writeBinByte(UInt8 byte, void * out) memcpy(out, &bin_byte_to_char_table[static_cast(byte) * 8], 8); } -inline void writeSingleBinByte(UInt8 byte, void * out) -{ - memcpy(out, &hex_digit_to_char_uppercase_table[static_cast(byte)], 1); -} - /// Produces hex representation of an unsigned int with leading zeros (for checksums) template inline void writeHexUIntImpl(TUInt uint_, char * out, const char * const table) diff --git a/src/Functions/FunctionsCoding.h b/src/Functions/FunctionsCoding.h index 5004905863f..f2e340aaeef 100644 --- a/src/Functions/FunctionsCoding.h +++ b/src/Functions/FunctionsCoding.h @@ -65,6 +65,10 @@ namespace ErrorCodes constexpr size_t uuid_bytes_length = 16; constexpr size_t uuid_text_length = 36; +namespace ErrorCodes +{ +extern const int NOT_IMPLEMENTED; +} class FunctionIPv6NumToString : public IFunction { @@ -951,19 +955,20 @@ public: } }; - -class FunctionHex : public IFunction +template +class Conversion : public IFunction { public: - static constexpr auto name = "hex"; - static FunctionPtr create(ContextPtr) { return std::make_shared(); } + static constexpr auto name = Impl::name; + static constexpr size_t word_size = Impl::word_size; + static FunctionPtr create(ContextPtr) { return std::make_shared(); } - String getName() const override - { - return name; - } + String getName() const override { return name; } size_t getNumberOfArguments() const override { return 1; } + + bool useDefaultImplementationForConstants() const override { return true; } + bool isInjective(const ColumnsWithTypeAndName &) const override { return true; } DataTypePtr getReturnTypeImpl(const DataTypes & arguments) const override @@ -983,235 +988,6 @@ public: return std::make_shared(); } - template - void executeOneUInt(T x, char *& out) const - { - bool was_nonzero = false; - for (int offset = (sizeof(T) - 1) * 8; offset >= 0; offset -= 8) - { - UInt8 byte = x >> offset; - - /// Leading zeros. - if (byte == 0 && !was_nonzero && offset) // -V560 - continue; - - was_nonzero = true; - - writeHexByteUppercase(byte, out); - out += 2; - } - *out = '\0'; - ++out; - } - - template - bool tryExecuteUInt(const IColumn * col, ColumnPtr & col_res) const - { - const ColumnVector * col_vec = checkAndGetColumn>(col); - - static constexpr size_t MAX_UINT_HEX_LENGTH = sizeof(T) * 2 + 1; /// Including trailing zero byte. - - if (col_vec) - { - auto col_str = ColumnString::create(); - ColumnString::Chars & out_vec = col_str->getChars(); - ColumnString::Offsets & out_offsets = col_str->getOffsets(); - - const typename ColumnVector::Container & in_vec = col_vec->getData(); - - size_t size = in_vec.size(); - out_offsets.resize(size); - out_vec.resize(size * 3 + MAX_UINT_HEX_LENGTH); /// 3 is length of one byte in hex plus zero byte. - - size_t pos = 0; - for (size_t i = 0; i < size; ++i) - { - /// Manual exponential growth, so as not to rely on the linear amortized work time of `resize` (no one guarantees it). 
- if (pos + MAX_UINT_HEX_LENGTH > out_vec.size()) - out_vec.resize(out_vec.size() * 2 + MAX_UINT_HEX_LENGTH); - - char * begin = reinterpret_cast(&out_vec[pos]); - char * end = begin; - executeOneUInt(in_vec[i], end); - - pos += end - begin; - out_offsets[i] = pos; - } - - out_vec.resize(pos); - - col_res = std::move(col_str); - return true; - } - else - { - return false; - } - } - - template - void executeFloatAndDecimal(const T & in_vec, ColumnPtr & col_res, const size_t type_size_in_bytes) const - { - const size_t hex_length = type_size_in_bytes * 2 + 1; /// Including trailing zero byte. - auto col_str = ColumnString::create(); - - ColumnString::Chars & out_vec = col_str->getChars(); - ColumnString::Offsets & out_offsets = col_str->getOffsets(); - - size_t size = in_vec.size(); - out_offsets.resize(size); - out_vec.resize(size * hex_length); - - size_t pos = 0; - char * out = reinterpret_cast(&out_vec[0]); - for (size_t i = 0; i < size; ++i) - { - const UInt8 * in_pos = reinterpret_cast(&in_vec[i]); - executeOneString(in_pos, in_pos + type_size_in_bytes, out); - - pos += hex_length; - out_offsets[i] = pos; - } - col_res = std::move(col_str); - } - - template - bool tryExecuteFloat(const IColumn * col, ColumnPtr & col_res) const - { - const ColumnVector * col_vec = checkAndGetColumn>(col); - if (col_vec) - { - const typename ColumnVector::Container & in_vec = col_vec->getData(); - executeFloatAndDecimal::Container>(in_vec, col_res, sizeof(T)); - return true; - } - else - { - return false; - } - } - - template - bool tryExecuteDecimal(const IColumn * col, ColumnPtr & col_res) const - { - const ColumnDecimal * col_dec = checkAndGetColumn>(col); - if (col_dec) - { - const typename ColumnDecimal::Container & in_vec = col_dec->getData(); - executeFloatAndDecimal::Container>(in_vec, col_res, sizeof(T)); - return true; - } - else - { - return false; - } - } - - - static void executeOneString(const UInt8 * pos, const UInt8 * end, char *& out) - { - while (pos < end) - { - writeHexByteUppercase(*pos, out); - ++pos; - out += 2; - } - *out = '\0'; - ++out; - } - - static bool tryExecuteString(const IColumn * col, ColumnPtr & col_res) - { - const ColumnString * col_str_in = checkAndGetColumn(col); - - if (col_str_in) - { - auto col_str = ColumnString::create(); - ColumnString::Chars & out_vec = col_str->getChars(); - ColumnString::Offsets & out_offsets = col_str->getOffsets(); - - const ColumnString::Chars & in_vec = col_str_in->getChars(); - const ColumnString::Offsets & in_offsets = col_str_in->getOffsets(); - - size_t size = in_offsets.size(); - out_offsets.resize(size); - out_vec.resize(in_vec.size() * 2 - size); - - char * begin = reinterpret_cast(out_vec.data()); - char * pos = begin; - size_t prev_offset = 0; - - for (size_t i = 0; i < size; ++i) - { - size_t new_offset = in_offsets[i]; - - executeOneString(&in_vec[prev_offset], &in_vec[new_offset - 1], pos); - - out_offsets[i] = pos - begin; - - prev_offset = new_offset; - } - - if (!out_offsets.empty() && out_offsets.back() != out_vec.size()) - throw Exception("Column size mismatch (internal logical error)", ErrorCodes::LOGICAL_ERROR); - - col_res = std::move(col_str); - return true; - } - else - { - return false; - } - } - - static bool tryExecuteFixedString(const IColumn * col, ColumnPtr & col_res) - { - const ColumnFixedString * col_fstr_in = checkAndGetColumn(col); - - if (col_fstr_in) - { - auto col_str = ColumnString::create(); - ColumnString::Chars & out_vec = col_str->getChars(); - ColumnString::Offsets & out_offsets = 
col_str->getOffsets(); - - const ColumnString::Chars & in_vec = col_fstr_in->getChars(); - - size_t size = col_fstr_in->size(); - - out_offsets.resize(size); - out_vec.resize(in_vec.size() * 2 + size); - - char * begin = reinterpret_cast(out_vec.data()); - char * pos = begin; - - size_t n = col_fstr_in->getN(); - - size_t prev_offset = 0; - - for (size_t i = 0; i < size; ++i) - { - size_t new_offset = prev_offset + n; - - executeOneString(&in_vec[prev_offset], &in_vec[new_offset], pos); - - out_offsets[i] = pos - begin; - prev_offset = new_offset; - } - - if (!out_offsets.empty() && out_offsets.back() != out_vec.size()) - throw Exception("Column size mismatch (internal logical error)", ErrorCodes::LOGICAL_ERROR); - - col_res = std::move(col_str); - return true; - } - else - { - return false; - } - } - - bool useDefaultImplementationForConstants() const override { return true; } - ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t /*input_rows_count*/) const override { const IColumn * column = arguments[0].column.get(); @@ -1234,19 +1010,192 @@ public: + " of argument of function " + getName(), ErrorCodes::ILLEGAL_COLUMN); } + + template + bool tryExecuteUInt(const IColumn *col, ColumnPtr &col_res) const + { + const ColumnVector * col_vec = checkAndGetColumn>(col); + + static constexpr size_t MAX_LENGTH = sizeof(T) * word_size + 1; /// Including trailing zero byte. + + if (col_vec) + { + auto col_str = ColumnString::create(); + ColumnString::Chars & out_vec = col_str->getChars(); + ColumnString::Offsets & out_offsets = col_str->getOffsets(); + + const typename ColumnVector::Container & in_vec = col_vec->getData(); + + size_t size = in_vec.size(); + out_offsets.resize(size); + out_vec.resize(size * (word_size+1) + MAX_LENGTH); /// word_size+1 is length of one byte in hex/bin plus zero byte. + + size_t pos = 0; + for (size_t i = 0; i < size; ++i) + { + /// Manual exponential growth, so as not to rely on the linear amortized work time of `resize` (no one guarantees it). 
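+                    /// Each value appends at most MAX_LENGTH bytes, so growing by a factor of word_size
+                    /// keeps the number of reallocations logarithmic in the final output size.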
+ if (pos + MAX_LENGTH > out_vec.size()) + out_vec.resize(out_vec.size() * word_size + MAX_LENGTH); + + char * begin = reinterpret_cast(&out_vec[pos]); + char * end = begin; + Impl::executeOneUInt(in_vec[i], end); + + pos += end - begin; + out_offsets[i] = pos; + } + out_vec.resize(pos); + + col_res = std::move(col_str); + return true; + } + else + { + return false; + } + } + + bool tryExecuteString(const IColumn *col, ColumnPtr &col_res) const + { + const ColumnString * col_str_in = checkAndGetColumn(col); + + if (col_str_in) + { + auto col_str = ColumnString::create(); + ColumnString::Chars & out_vec = col_str->getChars(); + ColumnString::Offsets & out_offsets = col_str->getOffsets(); + + const ColumnString::Chars & in_vec = col_str_in->getChars(); + const ColumnString::Offsets & in_offsets = col_str_in->getOffsets(); + + size_t size = in_offsets.size(); + + out_offsets.resize(size); + if (getName() == "bin") + { + out_vec.resize((in_vec.size() - size) * word_size + size); + } else if (getName() == "hex") + { + out_vec.resize(in_vec.size() * word_size - size); + } else + { + throw Exception("new function is not implemented for " + getName(), ErrorCodes::NOT_IMPLEMENTED); + } + + char * begin = reinterpret_cast(out_vec.data()); + char * pos = begin; + size_t prev_offset = 0; + + for (size_t i = 0; i < size; ++i) + { + size_t new_offset = in_offsets[i]; + + Impl::executeOneString(&in_vec[prev_offset], &in_vec[new_offset - 1], pos); + + out_offsets[i] = pos - begin; + + prev_offset = new_offset; + } + if (!out_offsets.empty() && out_offsets.back() != out_vec.size()) + throw Exception("Column size mismatch (internal logical error)", ErrorCodes::LOGICAL_ERROR); + + col_res = std::move(col_str); + return true; + } + else + { + return false; + } + } + + template + bool tryExecuteDecimal(const IColumn * col, ColumnPtr & col_res) const + { + const ColumnDecimal * col_dec = checkAndGetColumn>(col); + if (col_dec) + { + const typename ColumnDecimal::Container & in_vec = col_dec->getData(); + Impl::executeFloatAndDecimal(in_vec, col_res, sizeof(T)); + return true; + } + else + { + return false; + } + } + + static bool tryExecuteFixedString(const IColumn * col, ColumnPtr & col_res) + { + const ColumnFixedString * col_fstr_in = checkAndGetColumn(col); + + if (col_fstr_in) + { + auto col_str = ColumnString::create(); + ColumnString::Chars & out_vec = col_str->getChars(); + ColumnString::Offsets & out_offsets = col_str->getOffsets(); + + const ColumnString::Chars & in_vec = col_fstr_in->getChars(); + + size_t size = col_fstr_in->size(); + + out_offsets.resize(size); + out_vec.resize(in_vec.size() * word_size + size); + + char * begin = reinterpret_cast(out_vec.data()); + char * pos = begin; + + size_t n = col_fstr_in->getN(); + + size_t prev_offset = 0; + + for (size_t i = 0; i < size; ++i) + { + size_t new_offset = prev_offset + n; + + Impl::executeOneString(&in_vec[prev_offset], &in_vec[new_offset], pos); + + out_offsets[i] = pos - begin; + prev_offset = new_offset; + } + + if (!out_offsets.empty() && out_offsets.back() != out_vec.size()) + throw Exception("Column size mismatch (internal logical error)", ErrorCodes::LOGICAL_ERROR); + + col_res = std::move(col_str); + return true; + } + else + { + return false; + } + } + + template + bool tryExecuteFloat(const IColumn * col, ColumnPtr & col_res) const + { + const ColumnVector * col_vec = checkAndGetColumn>(col); + if (col_vec) + { + const typename ColumnVector::Container & in_vec = col_vec->getData(); + Impl::executeFloatAndDecimal(in_vec, col_res, 
sizeof(T)); + return true; + } + else + { + return false; + } + } }; - -class FunctionUnhex : public IFunction +template +class UnConversion : public IFunction { public: - static constexpr auto name = "unhex"; - static FunctionPtr create(ContextPtr) { return std::make_shared(); } + static constexpr auto name = Impl::name; + static constexpr size_t word_size = Impl::word_size; + static FunctionPtr create(ContextPtr) { return std::make_shared(); } - String getName() const override - { - return name; - } + String getName() const override { return name; } size_t getNumberOfArguments() const override { return 1; } bool isInjective(const ColumnsWithTypeAndName &) const override { return true; } @@ -1255,29 +1204,11 @@ public: { if (!isString(arguments[0])) throw Exception("Illegal type " + arguments[0]->getName() + " of argument of function " + getName(), - ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT); + ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT); return std::make_shared(); } - static void unhexOne(const char * pos, const char * end, char *& out) - { - if ((end - pos) & 1) - { - *out = unhex(*pos); - ++out; - ++pos; - } - while (pos < end) - { - *out = unhex2(pos); - pos += 2; - ++out; - } - *out = '\0'; - ++out; - } - bool useDefaultImplementationForConstants() const override { return true; } ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t /*input_rows_count*/) const override @@ -1296,7 +1227,18 @@ public: size_t size = in_offsets.size(); out_offsets.resize(size); - out_vec.resize(in_vec.size() / 2 + size); + if (getName() == "unhex") + { + out_vec.resize(in_vec.size() / 2 + size); + } + else if (getName() == "unbin") + { + out_vec.resize(in_vec.size() / 8 + size); + } + else + { + throw Exception("new function is not implemented for " + getName(), ErrorCodes::NOT_IMPLEMENTED); + } char * begin = reinterpret_cast(out_vec.data()); char * pos = begin; @@ -1306,7 +1248,7 @@ public: { size_t new_offset = in_offsets[i]; - unhexOne(reinterpret_cast(&in_vec[prev_offset]), reinterpret_cast(&in_vec[new_offset - 1]), pos); + Impl::unConversion(reinterpret_cast(&in_vec[prev_offset]), reinterpret_cast(&in_vec[new_offset - 1]), pos); out_offsets[i] = pos - begin; @@ -1326,56 +1268,130 @@ public: } }; -class FunctionBin : public IFunction +struct HexImpl { public: - static constexpr auto name = "bin"; - static FunctionPtr create(ContextPtr) { return std::make_shared(); } - - String getName() const override { return name; } - - size_t getNumberOfArguments() const override { return 1; } - bool isInjective(const ColumnsWithTypeAndName &) const override { return true; } - - DataTypePtr getReturnTypeImpl(const DataTypes & arguments) const override + static constexpr auto name = "hex"; + static const size_t word_size = 2; + template + static void executeOneUInt(T x, char *& out) { - WhichDataType which(arguments[0]); + bool was_nonzero = false; + for (int offset = (sizeof(T) - 1) * 8; offset >= 0; offset -= 8) + { + UInt8 byte = x >> offset; - if (!which.isStringOrFixedString() && - !which.isDate() && - !which.isDateTime() && - !which.isDateTime64() && - !which.isUInt() && - !which.isFloat() && - !which.isDecimal()) - throw Exception("Illegal type " + arguments[0]->getName() + " of argument of function " + getName(), - ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT); + /// Leading zeros. 
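+            /// They are skipped here, except that the final byte (offset == 0) is always written, so e.g. hex(0) yields "00".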
+ if (byte == 0 && !was_nonzero && offset) + continue; - return std::make_shared(); + was_nonzero = true; + writeHexByteUppercase(byte, out); + out += 2; + } + *out = '\0'; + ++out; + } + + static void executeOneString(const UInt8 * pos, const UInt8 * end, char *& out) + { + while (pos < end) + { + writeHexByteUppercase(*pos, out); + ++pos; + out += 2; + } + *out = '\0'; + ++out; } template - void executeOneUInt(T x, char *& out) const + static void executeFloatAndDecimal(const T & in_vec, ColumnPtr & col_res, const size_t type_size_in_bytes) { - UInt8 result[sizeof(x) * 8] = {0}; - int cnt = 0; - if (0 == x) + const size_t hex_length = type_size_in_bytes * word_size + 1; /// Including trailing zero byte. + auto col_str = ColumnString::create(); + + ColumnString::Chars & out_vec = col_str->getChars(); + ColumnString::Offsets & out_offsets = col_str->getOffsets(); + + size_t size = in_vec.size(); + out_offsets.resize(size); + out_vec.resize(size * hex_length); + + size_t pos = 0; + char * out = reinterpret_cast(&out_vec[0]); + for (size_t i = 0; i < size; ++i) { - writeSingleBinByte(0, out); - ++out; - *out = '\0'; - ++out; - return; + const UInt8 * in_pos = reinterpret_cast(&in_vec[i]); + executeOneString(in_pos, in_pos + type_size_in_bytes, out); + + pos += hex_length; + out_offsets[i] = pos; } - for (; x != 0; x = x >> 1) + col_res = std::move(col_str); + } +}; + +struct UnhexImpl +{ +public: + static constexpr auto name = "unhex"; + + static String getName() + { + return name; + } + + static void unConversion(const char * pos, const char * end, char *& out) + { + if ((end - pos) & 1) { - result[cnt] = (x & 1); - cnt += 1; + *out = unhex(*pos); + ++out; + ++pos; } - for (int i = cnt - 1; i >= 0; --i) + while (pos < end) { - writeSingleBinByte(result[i], out); - out += 1; + *out = unhex2(pos); + pos += 2; + ++out; + } + *out = '\0'; + ++out; + } +}; + +struct BinImpl +{ +public: + static constexpr auto name = "bin"; + static constexpr size_t word_size = 8; + template + static void executeOneUInt(T x, char *& out) + { + bool was_nonzero = false; + T t = 1; + + for (int8_t offset = sizeof(x) * 8 - 1; offset >= 0; --offset) + { + t = t << offset; + if ((x & t) == t) + { + x = x - t; + was_nonzero = true; + *out = '1'; + t = 1; + } + else + { + t = 1; + if (!was_nonzero) + { + continue; + } + *out = '0'; + } + ++out; } *out = '\0'; @@ -1383,53 +1399,7 @@ public: } template - bool tryExecuteUInt(const IColumn * col, ColumnPtr & col_res) const - { - const ColumnVector * col_vec = checkAndGetColumn>(col); - - static constexpr size_t MAX_UINT_HEX_LENGTH = sizeof(T) * 8 + 1; /// Including trailing zero byte. - - if (col_vec) - { - auto col_str = ColumnString::create(); - ColumnString::Chars & out_vec = col_str->getChars(); - ColumnString::Offsets & out_offsets = col_str->getOffsets(); - - const typename ColumnVector::Container & in_vec = col_vec->getData(); - - size_t size = in_vec.size(); - out_offsets.resize(size); - out_vec.resize(MAX_UINT_HEX_LENGTH); - - size_t pos = 0; - for (size_t i = 0; i < size; ++i) - { - /// Manual exponential growth, so as not to rely on the linear amortized work time of `resize` (no one guarantees it). 
- if (pos + MAX_UINT_HEX_LENGTH > out_vec.size()) - out_vec.resize(out_vec.size() * 8 + MAX_UINT_HEX_LENGTH); - - char * begin = reinterpret_cast(&out_vec[pos]); - char * end = begin; - - executeOneUInt(in_vec[i], end); - - pos += end - begin; - out_offsets[i] = pos; - } - - out_vec.resize(pos); - - col_res = std::move(col_str); - return true; - } - else - { - return false; - } - } - - template - void executeFloatAndDecimal(const T & in_vec, ColumnPtr & col_res, const size_t type_size_in_bytes) const + static void executeFloatAndDecimal(const T & in_vec, ColumnPtr & col_res, const size_t type_size_in_bytes) { const size_t hex_length = type_size_in_bytes * 8 + 1; /// Including trailing zero byte. auto col_str = ColumnString::create(); @@ -1455,188 +1425,39 @@ public: col_res = std::move(col_str); } - template - bool tryExecuteFloat(const IColumn * col, ColumnPtr & col_res) const - { - const ColumnVector * col_vec = checkAndGetColumn>(col); - if (col_vec) - { - const typename ColumnVector::Container & in_vec = col_vec->getData(); - executeFloatAndDecimal::Container>(in_vec, col_res, sizeof(T)); - return true; - } - else - { - return false; - } - } - - template - bool tryExecuteDecimal(const IColumn * col, ColumnPtr & col_res) const - { - const ColumnDecimal * col_dec = checkAndGetColumn>(col); - if (col_dec) - { - const typename ColumnDecimal::Container & in_vec = col_dec->getData(); - executeFloatAndDecimal::Container>(in_vec, col_res, sizeof(T)); - return true; - } - else - { - return false; - } - } - - static void executeOneString(const UInt8 * pos, const UInt8 * end, char *& out) { while (pos < end) { writeBinByte(*pos, out); - ++pos; - out += 8; + out += word_size; } *out = '\0'; ++out; } - - static bool tryExecuteString(const IColumn * col, ColumnPtr & col_res) - { - const ColumnString * col_str_in = checkAndGetColumn(col); - - if (col_str_in) - { - auto col_str = ColumnString::create(); - ColumnString::Chars & out_vec = col_str->getChars(); - ColumnString::Offsets & out_offsets = col_str->getOffsets(); - - const ColumnString::Chars & in_vec = col_str_in->getChars(); - const ColumnString::Offsets & in_offsets = col_str_in->getOffsets(); - - size_t size = in_offsets.size(); - - out_offsets.resize(size); - out_vec.resize((in_vec.size() - 1) * 8 + size); - - char * begin = reinterpret_cast(out_vec.data()); - char * pos = begin; - size_t prev_offset = 0; - - for (size_t i = 0; i < size; ++i) - { - size_t new_offset = in_offsets[i]; - executeOneString(&in_vec[prev_offset], &in_vec[new_offset - 1], pos); - - out_offsets[i] = pos - begin; - - prev_offset = new_offset; - } - if (!out_offsets.empty() && out_offsets.back() != out_vec.size()) - throw Exception("Column size mismatch (internal logical error)", ErrorCodes::LOGICAL_ERROR); - - col_res = std::move(col_str); - return true; - } - else - { - return false; - } - } - - static bool tryExecuteFixedString(const IColumn * col, ColumnPtr & col_res) - { - const ColumnFixedString * col_fstr_in = checkAndGetColumn(col); - - if (col_fstr_in) - { - auto col_str = ColumnString::create(); - ColumnString::Chars & out_vec = col_str->getChars(); - ColumnString::Offsets & out_offsets = col_str->getOffsets(); - - const ColumnString::Chars & in_vec = col_fstr_in->getChars(); - - size_t size = col_fstr_in->size(); - - out_offsets.resize(size); - out_vec.resize(in_vec.size() * 8 + size); - - char * begin = reinterpret_cast(out_vec.data()); - char * pos = begin; - - size_t n = col_fstr_in->getN(); - - size_t prev_offset = 0; - - for (size_t i = 0; i < size; 
++i) - { - size_t new_offset = prev_offset + n; - - executeOneString(&in_vec[prev_offset], &in_vec[new_offset], pos); - - out_offsets[i] = pos - begin; - prev_offset = new_offset; - } - - if (!out_offsets.empty() && out_offsets.back() != out_vec.size()) - throw Exception("Column size mismatch (internal logical error)", ErrorCodes::LOGICAL_ERROR); - - col_res = std::move(col_str); - return true; - } - else - { - return false; - } - } - - bool useDefaultImplementationForConstants() const override { return true; } - - ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t /*input_rows_count*/) const override - { - const IColumn * column = arguments[0].column.get(); - ColumnPtr res_column; - - if (tryExecuteUInt(column, res_column) || tryExecuteUInt(column, res_column) - || tryExecuteUInt(column, res_column) || tryExecuteUInt(column, res_column) - || tryExecuteString(column, res_column) || tryExecuteFixedString(column, res_column) - || tryExecuteFloat(column, res_column) || tryExecuteFloat(column, res_column) - || tryExecuteDecimal(column, res_column) || tryExecuteDecimal(column, res_column) - || tryExecuteDecimal(column, res_column)) - return res_column; - - throw Exception( - "Illegal column " + arguments[0].column->getName() + " of argument of function " + getName(), ErrorCodes::ILLEGAL_COLUMN); - } }; -class FunctionUnbin : public IFunction +struct UnbinImpl { public: static constexpr auto name = "unbin"; - static FunctionPtr create(ContextPtr) { return std::make_shared(); } - String getName() const override { return name; } + static String getName() { return name; } - size_t getNumberOfArguments() const override { return 1; } - bool isInjective(const ColumnsWithTypeAndName &) const override { return true; } - - DataTypePtr getReturnTypeImpl(const DataTypes & arguments) const override + static void unConversion(const char * pos, const char * end, char *& out) { - if (!isString(arguments[0])) - throw Exception( - "Illegal type " + arguments[0]->getName() + " of argument of function " + getName(), ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT); + UInt8 left = 0; - return std::make_shared(); - } - - static void unbinOne(const char * pos, const char * end, char *& out) - { - uint8_t left = 0; - for (int left_cnt = (end - pos) & 7; left_cnt > 0; --left_cnt) + /// end - pos is the length of the input. + /// (length & 7) is the number of leading bits that do not form a whole byte; + /// they are consumed first, so that the remaining length is a multiple of 8. + /// E.g. for the 9-character input "101000001", left_cnt starts at 1: the single + /// leading bit is accumulated into `left` (left = 1), and "01000001" remains. + for (uint8_t left_cnt = (end - pos) & 7; left_cnt > 0; --left_cnt) { left = left << 1; - if (*pos == '1') + if (*pos != '0') { left += 1; } @@ -1648,13 +1469,15 @@ public: ++out; } + + /// The remaining input length is now a multiple of 8; + /// decode it one full byte (8 input characters) at a time.
while (end - pos != 0) { - int c = 0; - for (int i = 0; i < 8; ++i) + UInt8 c = 0; + for (uint8_t i = 0; i < 8; ++i) { c = c << 1; - if (*pos == '1') + if (*pos != '0') { c += 1; } @@ -1667,54 +1490,13 @@ public: *out = '\0'; ++out; } - - bool useDefaultImplementationForConstants() const override { return true; } - - ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t /*input_rows_count*/) const override - { - const ColumnPtr & column = arguments[0].column; - - if (const ColumnString * col = checkAndGetColumn(column.get())) - { - auto col_res = ColumnString::create(); - - ColumnString::Chars & out_vec = col_res->getChars(); - ColumnString::Offsets & out_offsets = col_res->getOffsets(); - - const ColumnString::Chars & in_vec = col->getChars(); - const ColumnString::Offsets & in_offsets = col->getOffsets(); - - size_t size = in_offsets.size(); - out_offsets.resize(size); - out_vec.resize(in_vec.size() / 8 + size); - - char * begin = reinterpret_cast(out_vec.data()); - char * pos = begin; - size_t prev_offset = 0; - for (size_t i = 0; i < size; ++i) - { - size_t new_offset = in_offsets[i]; - - unbinOne(reinterpret_cast(&in_vec[prev_offset]), reinterpret_cast(&in_vec[new_offset - 1]), pos); - - out_offsets[i] = pos - begin; - - prev_offset = new_offset; - } - - out_vec.resize(pos - begin); - - return col_res; - } - else - { - throw Exception("Illegal column " + arguments[0].column->getName() - + " of argument of function " + getName(), - ErrorCodes::ILLEGAL_COLUMN); - } - } }; +using FunctionHex = Conversion; +using FunctionUnhex = UnConversion; +using FunctionBin = Conversion; +using FunctionUnbin = UnConversion; + class FunctionChar : public IFunction { public: diff --git a/tests/queries/0_stateless/01926_bin_unbin.reference b/tests/queries/0_stateless/01926_bin_unbin.reference index b9ddf2f1db7..54c01c5d145 100644 --- a/tests/queries/0_stateless/01926_bin_unbin.reference +++ b/tests/queries/0_stateless/01926_bin_unbin.reference @@ -1,4 +1,4 @@ -0 + 1 1010 1111111 @@ -11,6 +11,8 @@ 0011001100110011001100110011001100110011001100111111001100111111 00000000000011100010011100000111 0000000000000000000011000011110101011101010100111010101000000001 +0011000100110010001100110011001100110010001101000011001000110100 +0011000100110010001100110011001100110010001101000011001000110100 0 10 测试 diff --git a/tests/queries/0_stateless/01926_bin_unbin.sql b/tests/queries/0_stateless/01926_bin_unbin.sql index fd7a77bd2fc..40635091120 100644 --- a/tests/queries/0_stateless/01926_bin_unbin.sql +++ b/tests/queries/0_stateless/01926_bin_unbin.sql @@ -11,6 +11,8 @@ select bin(toFloat32(1.2)); select bin(toFloat64(1.2)); select bin(toDecimal32(1.2, 8)); select bin(toDecimal64(1.2, 17)); +select bin('12332424'); +select bin(toLowCardinality(materialize('12332424'))); select unbin('00110000'); -- 0 select unbin('0011000100110000'); -- 10 From 23dd7544922fdc62369a8271169702203419b6e0 Mon Sep 17 00:00:00 2001 From: kssenii Date: Fri, 2 Jul 2021 22:26:33 +0300 Subject: [PATCH 688/931] Update libpq --- contrib/libpq | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/contrib/libpq b/contrib/libpq index 69e8a80e98f..e071ea570f8 160000 --- a/contrib/libpq +++ b/contrib/libpq @@ -1 +1 @@ -Subproject commit 69e8a80e98f27e3a5deec617334e31db2b9ed7d7 +Subproject commit e071ea570f8985aa00e34f5b9d50a3cfe666327e From fb6fc028bd7047e16a6379026e9fa9e255cadc49 Mon Sep 17 00:00:00 2001 From: Kseniia Sumarokova <54203879+kssenii@users.noreply.github.com> Date: Fri, 2 Jul 2021 22:37:09 
+0300 Subject: [PATCH 689/931] Update FunctionsJSON.h --- src/Functions/FunctionsJSON.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Functions/FunctionsJSON.h b/src/Functions/FunctionsJSON.h index 0cee3264c0f..4b087feac9c 100644 --- a/src/Functions/FunctionsJSON.h +++ b/src/Functions/FunctionsJSON.h @@ -679,7 +679,7 @@ struct JSONExtractTree return false; const auto * type = assert_cast *>(data_type.get()); - std::stringstream ss; + std::stringstream ss; // STYLE_CHECK_ALLOW_STD_STRING_STREAM ss << std::setprecision(type->getPrecision()) << element.getDouble(); auto str = ss.str(); ReadBufferFromString res(str); From 55ce7de2484f8888ecd14e4e3a928cf450cc6354 Mon Sep 17 00:00:00 2001 From: Kseniia Sumarokova <54203879+kssenii@users.noreply.github.com> Date: Fri, 2 Jul 2021 22:39:21 +0300 Subject: [PATCH 690/931] Remove trailing spaces -- style check fix --- src/Storages/StorageS3.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Storages/StorageS3.cpp b/src/Storages/StorageS3.cpp index 4f6c55d1fe4..b4fec69e075 100644 --- a/src/Storages/StorageS3.cpp +++ b/src/Storages/StorageS3.cpp @@ -436,7 +436,7 @@ BlockOutputStreamPtr StorageS3::write(const ASTPtr & /*query*/, const StorageMet max_single_part_upload_size); } - + void StorageS3::truncate(const ASTPtr & /* query */, const StorageMetadataPtr &, ContextPtr local_context, TableExclusiveLockHolder &) { updateClientAndAuthSettings(local_context, client_auth); From 132edc9e2217ae99e7936560779ff6b2daefa327 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Fri, 2 Jul 2021 22:47:43 +0300 Subject: [PATCH 691/931] Allow quantiles* functions to work with `aggregate_functions_null_for_empty` --- .../AggregateFunctionQuantile.cpp | 29 ++++++++++--------- ...936_quantiles_cannot_return_null.reference | 4 +++ .../01936_quantiles_cannot_return_null.sql | 9 ++++++ 3 files changed, 29 insertions(+), 13 deletions(-) create mode 100644 tests/queries/0_stateless/01936_quantiles_cannot_return_null.reference create mode 100644 tests/queries/0_stateless/01936_quantiles_cannot_return_null.sql diff --git a/src/AggregateFunctions/AggregateFunctionQuantile.cpp b/src/AggregateFunctions/AggregateFunctionQuantile.cpp index cae0021082f..11b14585653 100644 --- a/src/AggregateFunctions/AggregateFunctionQuantile.cpp +++ b/src/AggregateFunctions/AggregateFunctionQuantile.cpp @@ -125,44 +125,47 @@ AggregateFunctionPtr createAggregateFunctionQuantile( void registerAggregateFunctionsQuantile(AggregateFunctionFactory & factory) { + /// For aggregate functions returning array we cannot return NULL on empty set. 
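+    /// With aggregate_functions_null_for_empty enabled they return the default result instead,
+    /// e.g. quantiles* return an array of NaNs (covered by 01936_quantiles_cannot_return_null below).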
+ AggregateFunctionProperties properties = { .returns_default_when_only_null = true }; + factory.registerFunction(NameQuantile::name, createAggregateFunctionQuantile); - factory.registerFunction(NameQuantiles::name, createAggregateFunctionQuantile); + factory.registerFunction(NameQuantiles::name, { createAggregateFunctionQuantile, properties }); factory.registerFunction(NameQuantileDeterministic::name, createAggregateFunctionQuantile); - factory.registerFunction(NameQuantilesDeterministic::name, createAggregateFunctionQuantile); + factory.registerFunction(NameQuantilesDeterministic::name, { createAggregateFunctionQuantile, properties }); factory.registerFunction(NameQuantileExact::name, createAggregateFunctionQuantile); - factory.registerFunction(NameQuantilesExact::name, createAggregateFunctionQuantile); + factory.registerFunction(NameQuantilesExact::name, { createAggregateFunctionQuantile, properties }); factory.registerFunction(NameQuantileExactLow::name, createAggregateFunctionQuantile); - factory.registerFunction(NameQuantilesExactLow::name, createAggregateFunctionQuantile); + factory.registerFunction(NameQuantilesExactLow::name, { createAggregateFunctionQuantile, properties }); factory.registerFunction(NameQuantileExactHigh::name, createAggregateFunctionQuantile); - factory.registerFunction(NameQuantilesExactHigh::name, createAggregateFunctionQuantile); + factory.registerFunction(NameQuantilesExactHigh::name, { createAggregateFunctionQuantile, properties }); factory.registerFunction(NameQuantileExactExclusive::name, createAggregateFunctionQuantile); - factory.registerFunction(NameQuantilesExactExclusive::name, createAggregateFunctionQuantile); + factory.registerFunction(NameQuantilesExactExclusive::name, { createAggregateFunctionQuantile, properties }); factory.registerFunction(NameQuantileExactInclusive::name, createAggregateFunctionQuantile); - factory.registerFunction(NameQuantilesExactInclusive::name, createAggregateFunctionQuantile); + factory.registerFunction(NameQuantilesExactInclusive::name, { createAggregateFunctionQuantile, properties }); factory.registerFunction(NameQuantileExactWeighted::name, createAggregateFunctionQuantile); - factory.registerFunction(NameQuantilesExactWeighted::name, createAggregateFunctionQuantile); + factory.registerFunction(NameQuantilesExactWeighted::name, { createAggregateFunctionQuantile, properties }); factory.registerFunction(NameQuantileTiming::name, createAggregateFunctionQuantile); - factory.registerFunction(NameQuantilesTiming::name, createAggregateFunctionQuantile); + factory.registerFunction(NameQuantilesTiming::name, { createAggregateFunctionQuantile, properties }); factory.registerFunction(NameQuantileTimingWeighted::name, createAggregateFunctionQuantile); - factory.registerFunction(NameQuantilesTimingWeighted::name, createAggregateFunctionQuantile); + factory.registerFunction(NameQuantilesTimingWeighted::name, { createAggregateFunctionQuantile, properties }); factory.registerFunction(NameQuantileTDigest::name, createAggregateFunctionQuantile); - factory.registerFunction(NameQuantilesTDigest::name, createAggregateFunctionQuantile); + factory.registerFunction(NameQuantilesTDigest::name, { createAggregateFunctionQuantile, properties }); factory.registerFunction(NameQuantileTDigestWeighted::name, createAggregateFunctionQuantile); - factory.registerFunction(NameQuantilesTDigestWeighted::name, createAggregateFunctionQuantile); + factory.registerFunction(NameQuantilesTDigestWeighted::name, { createAggregateFunctionQuantile, properties }); 
factory.registerFunction(NameQuantileBFloat16::name, createAggregateFunctionQuantile); - factory.registerFunction(NameQuantilesBFloat16::name, createAggregateFunctionQuantile); + factory.registerFunction(NameQuantilesBFloat16::name, { createAggregateFunctionQuantile, properties }); /// 'median' is an alias for 'quantile' factory.registerAlias("median", NameQuantile::name); diff --git a/tests/queries/0_stateless/01936_quantiles_cannot_return_null.reference b/tests/queries/0_stateless/01936_quantiles_cannot_return_null.reference new file mode 100644 index 00000000000..f9b4a3157f7 --- /dev/null +++ b/tests/queries/0_stateless/01936_quantiles_cannot_return_null.reference @@ -0,0 +1,4 @@ +[nan] +[nan] +[nan] +[nan] diff --git a/tests/queries/0_stateless/01936_quantiles_cannot_return_null.sql b/tests/queries/0_stateless/01936_quantiles_cannot_return_null.sql new file mode 100644 index 00000000000..81ac6224268 --- /dev/null +++ b/tests/queries/0_stateless/01936_quantiles_cannot_return_null.sql @@ -0,0 +1,9 @@ +set aggregate_functions_null_for_empty=0; + +SELECT quantiles(0.95)(x) FROM (SELECT 1 x WHERE 0); +SELECT quantiles(0.95)(number) FROM (SELECT number FROM numbers(10) WHERE number > 10); + +set aggregate_functions_null_for_empty=1; + +SELECT quantiles(0.95)(x) FROM (SELECT 1 x WHERE 0); +SELECT quantiles(0.95)(number) FROM (SELECT number FROM numbers(10) WHERE number > 10); From 7d6e08c6adaf34d9d772ac504f9804ddd0f169d6 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Fri, 2 Jul 2021 23:01:26 +0300 Subject: [PATCH 692/931] Remove obsolete code from init script --- debian/clickhouse-server.init | 23 ----------------------- 1 file changed, 23 deletions(-) diff --git a/debian/clickhouse-server.init b/debian/clickhouse-server.init index d7d87c6d53c..89f97697c61 100755 --- a/debian/clickhouse-server.init +++ b/debian/clickhouse-server.init @@ -43,29 +43,6 @@ command -v flock >/dev/null && FLOCK=flock # Override defaults from optional config file test -f /etc/default/clickhouse && . /etc/default/clickhouse -# On x86_64, check for required instruction set. -if uname -mpi | grep -q 'x86_64'; then - if ! grep -q 'sse4_2' /proc/cpuinfo; then - # On KVM, cpuinfo could falsely not report SSE 4.2 support, so skip the check. - if ! grep -q 'Common KVM processor' /proc/cpuinfo; then - - # Some other VMs also report wrong flags in cpuinfo. - # Tricky way to test for instruction set: - # create temporary binary and run it; - # if it get caught illegal instruction signal, - # then required instruction set is not supported really. - # - # Generated this way: - # gcc -xc -Os -static -nostdlib - <<< 'void _start() { __asm__("pcmpgtq %%xmm0, %%xmm1; mov $0x3c, %%rax; xor %%rdi, %%rdi; syscall":::"memory"); }' && strip -R .note.gnu.build-id -R .comment -R .eh_frame -s ./a.out && gzip -c -9 ./a.out | base64 -w0; echo - - if ! (echo -n 'H4sICAwAW1cCA2Eub3V0AKt39XFjYmRkgAEmBjsGEI+H0QHMd4CKGyCUAMUsGJiBJDNQNUiYlQEZOKDQclB9cnD9CmCSBYqJBRxQOvBpSQobGfqIAWn8FuYnPI4fsAGyPQz/87MeZtArziguKSpJTGLQK0mtKGGgGHADMSgoYH6AhTMPNHyE0NQzYuEzYzEXFr6CBPQDANAsXKTwAQAA' | base64 -d | gzip -d > /tmp/clickhouse_test_sse42 && chmod a+x /tmp/clickhouse_test_sse42 && /tmp/clickhouse_test_sse42); then - echo 'Warning! 
SSE 4.2 instruction set is not supported' - #exit 3 - fi - fi - fi -fi - die() { From a879234192ef9d1d716ffd08839bf9522d50a19c Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Fri, 2 Jul 2021 23:02:27 +0300 Subject: [PATCH 693/931] Fix for systems with systemd --- debian/clickhouse-server.init | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/debian/clickhouse-server.init b/debian/clickhouse-server.init index d7d87c6d53c..7470959a948 100755 --- a/debian/clickhouse-server.init +++ b/debian/clickhouse-server.init @@ -116,7 +116,7 @@ forcestop() service_or_func() { if [ -x "/bin/systemctl" ] && [ -f /etc/systemd/system/clickhouse-server.service ] && [ -d /run/systemd/system ]; then - service $PROGRAM $1 + systemctl $1 $PROGRAM else $1 fi From 7e372a2e5349be67d04ca958c1b149978cf446e0 Mon Sep 17 00:00:00 2001 From: Alexey Date: Fri, 2 Jul 2021 20:18:55 +0000 Subject: [PATCH 694/931] Ru translation + some updates + link fixes --- .../reference/quantilebfloat16.md | 4 +- .../aggregate-functions/reference/median.md | 22 ++++--- .../reference/quantilebfloat16.md | 64 +++++++++++++++ .../reference/quantiles.md | 6 +- 4 files changed, 81 insertions(+), 15 deletions(-) create mode 100644 docs/ru/sql-reference/aggregate-functions/reference/quantilebfloat16.md diff --git a/docs/en/sql-reference/aggregate-functions/reference/quantilebfloat16.md b/docs/en/sql-reference/aggregate-functions/reference/quantilebfloat16.md index f8ecd1d71ce..cdbb60f2fe8 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/quantilebfloat16.md +++ b/docs/en/sql-reference/aggregate-functions/reference/quantilebfloat16.md @@ -43,10 +43,10 @@ Input table has an integer and a float columns: └───┴───────┘ ``` -Query: +Query to calculate 0.75-quantile (third quartile): ``` sql -SELECT quantileBFloat16(0.75)(a), quantileBFloat16(0.75)(b) FROM example_table; +SELECT quantileBFloat16(0.75)(a), quantileBFloat16(0.75)(b) FROM example_table; ``` Result: diff --git a/docs/ru/sql-reference/aggregate-functions/reference/median.md b/docs/ru/sql-reference/aggregate-functions/reference/median.md index a208c21dd21..1472809e2e3 100644 --- a/docs/ru/sql-reference/aggregate-functions/reference/median.md +++ b/docs/ru/sql-reference/aggregate-functions/reference/median.md @@ -1,17 +1,19 @@ # median {#median} -Функции `median*` — алиасы для соответствущих функций `quantile*`. Они вычисляют медиану числовой последовательности. +Функции `median*` — синонимы для соответствующих функций `quantile*`. Они вычисляют медиану числовой последовательности. -Functions: +Функции: -- `median` — алиас [quantile](#quantile). -- `medianDeterministic` — алиас [quantileDeterministic](#quantiledeterministic). -- `medianExact` — алиас [quantileExact](#quantileexact). -- `medianExactWeighted` — алиас [quantileExactWeighted](#quantileexactweighted). -- `medianTiming` — алиас [quantileTiming](#quantiletiming). -- `medianTimingWeighted` — алиас [quantileTimingWeighted](#quantiletimingweighted). -- `medianTDigest` — алиас [quantileTDigest](#quantiletdigest). -- `medianTDigestWeighted` — алиас [quantileTDigestWeighted](#quantiletdigestweighted). + +- `median` — синоним для [quantile](../../../sql-reference/aggregate-functions/reference/quantile.md#quantile). +- `medianDeterministic` — синоним для [quantileDeterministic](../../../sql-reference/aggregate-functions/reference/quantiledeterministic.md#quantiledeterministic).
+- `medianExact` — синоним для [quantileExact](../../../sql-reference/aggregate-functions/reference/quantileexact.md#quantileexact). +- `medianExactWeighted` — синоним для [quantileExactWeighted](../../../sql-reference/aggregate-functions/reference/quantileexactweighted.md#quantileexactweighted). +- `medianTiming` — синоним для [quantileTiming](../../../sql-reference/aggregate-functions/reference/quantiletiming.md#quantiletiming). +- `medianTimingWeighted` — синоним для [quantileTimingWeighted](../../../sql-reference/aggregate-functions/reference/quantiletimingweighted.md#quantiletimingweighted). +- `medianTDigest` — синоним для [quantileTDigest](../../../sql-reference/aggregate-functions/reference/quantiletdigest.md#quantiletdigest). +- `medianTDigestWeighted` — синоним для [quantileTDigestWeighted](../../../sql-reference/aggregate-functions/reference/quantiletdigestweighted.md#quantiletdigestweighted). +- `medianBFloat16` — синоним для [quantileBFloat16](../../../sql-reference/aggregate-functions/reference/quantilebfloat16.md#quantilebfloat16). **Пример** diff --git a/docs/ru/sql-reference/aggregate-functions/reference/quantilebfloat16.md b/docs/ru/sql-reference/aggregate-functions/reference/quantilebfloat16.md new file mode 100644 index 00000000000..217da78d1d1 --- /dev/null +++ b/docs/ru/sql-reference/aggregate-functions/reference/quantilebfloat16.md @@ -0,0 +1,64 @@ +--- +toc_priority: 209 +--- + +# quantileBFloat16 {#quantilebfloat16} + +Приближенно вычисляет [квантиль](https://ru.wikipedia.org/wiki/Квантиль) выборки чисел в формате [bfloat16](https://en.wikipedia.org/wiki/Bfloat16_floating-point_format). bfloat16 — это формат с плавающей точкой, в котором для представления числа используется 1 знаковый бит, 8 бит для порядка и 7 бит для мантиссы. +Функция преобразует входное число в 32-битное с плавающей точкой и обрабатывает его старшие 16 бит. Она вычисляет квантиль в формате bfloat16 и преобразует его в 64-битное число с плавающей точкой, добавляя нулевые биты. +Эта функция выполняет быстрые приближенные вычисления с относительной ошибкой не более 0.390625%. + +**Синтаксис** + +``` sql +quantileBFloat16[(level)](expr) +``` + +Синоним: `medianBFloat16` + +**Аргументы** + +- `expr` — столбец с числовыми данными. [Integer](../../../sql-reference/data-types/int-uint.md), [Float](../../../sql-reference/data-types/float.md). + +**Параметры** + +- `level` — уровень квантиля. Необязательный. Допустимый диапазон значений от 0 до 1. Значение по умолчанию: 0.5. [Float](../../../sql-reference/data-types/float.md). + +**Возвращаемое значение** + +- Приближенное значение квантиля. + +Тип: [Float64](../../../sql-reference/data-types/float.md#float32-float64). + +**Пример** + +В таблице есть столбцы с целыми числами и с числами с плавающей точкой: + +``` text +┌─a─┬─────b─┐ +│ 1 │ 1.001 │ +│ 2 │ 1.002 │ +│ 3 │ 1.003 │ +│ 4 │ 1.004 │ +└───┴───────┘ +``` + +Запрос для вычисления 0.75-квантиля (верхнего квартиля): + +``` sql +SELECT quantileBFloat16(0.75)(a), quantileBFloat16(0.75)(b) FROM example_table; +``` + +Результат: + +``` text +┌─quantileBFloat16(0.75)(a)─┬─quantileBFloat16(0.75)(b)─┐ +│ 3 │ 1 │ +└───────────────────────────┴───────────────────────────┘ +``` +Обратите внимание, что все числа с плавающей точкой в примере были округлены до 1.0 при преобразовании к bfloat16. 
+ +**See Also** + +- [median](../../../sql-reference/aggregate-functions/reference/median.md#median) +- [quantiles](../../../sql-reference/aggregate-functions/reference/quantiles.md#quantiles) diff --git a/docs/ru/sql-reference/aggregate-functions/reference/quantiles.md b/docs/ru/sql-reference/aggregate-functions/reference/quantiles.md index d2e7003e4e7..2417d6de139 100644 --- a/docs/ru/sql-reference/aggregate-functions/reference/quantiles.md +++ b/docs/ru/sql-reference/aggregate-functions/reference/quantiles.md @@ -8,7 +8,7 @@ toc_priority: 201 Синтаксис: `quantiles(level1, level2, …)(x)` -Все функции для вычисления квантилей имеют соответствующие функции для вычисления нескольких квантилей: `quantiles`, `quantilesDeterministic`, `quantilesTiming`, `quantilesTimingWeighted`, `quantilesExact`, `quantilesExactWeighted`, `quantilesTDigest`. Эти функции вычисляют все квантили указанных уровней в один проход и возвращают массив с вычисленными значениями. +Все функции для вычисления квантилей имеют соответствующие функции для вычисления нескольких квантилей: `quantiles`, `quantilesDeterministic`, `quantilesTiming`, `quantilesTimingWeighted`, `quantilesExact`, `quantilesExactWeighted`, `quantilesTDigest`, `quantilesBFloat16`. Эти функции вычисляют все квантили указанных уровней в один проход и возвращают массив с вычисленными значениями. ## quantilesExactExclusive {#quantilesexactexclusive} @@ -18,7 +18,7 @@ toc_priority: 201 Эта функция эквивалентна Excel функции [PERCENTILE.EXC](https://support.microsoft.com/en-us/office/percentile-exc-function-bbaa7204-e9e1-4010-85bf-c31dc5dce4ba), [тип R6](https://en.wikipedia.org/wiki/Quantile#Estimating_quantiles_from_a_sample). -С наборами уровней работает эффективнее, чем [quantilesExactExclusive](../../../sql-reference/aggregate-functions/reference/quantileexact.md#quantileexactexclusive). +С наборами уровней работает эффективнее, чем [quantileExactExclusive](../../../sql-reference/aggregate-functions/reference/quantileexact.md#quantileexactexclusive). **Синтаксис** @@ -70,7 +70,7 @@ SELECT quantilesExactExclusive(0.25, 0.5, 0.75, 0.9, 0.95, 0.99, 0.999)(x) FROM Эта функция эквивалентна Excel функции [PERCENTILE.INC](https://support.microsoft.com/en-us/office/percentile-inc-function-680f9539-45eb-410b-9a5e-c1355e5fe2ed), [тип R7](https://en.wikipedia.org/wiki/Quantile#Estimating_quantiles_from_a_sample). -С наборами уровней работает эффективнее, чем [quantilesExactInclusive](../../../sql-reference/aggregate-functions/reference/quantileexact.md#quantilesexactinclusive). +С наборами уровней работает эффективнее, чем [quantileExactInclusive](../../../sql-reference/aggregate-functions/reference/quantileexact.md#quantileexactinclusive). 
**Синтаксис** From 24803bd84689a4ec16b1deedcc872d71d218ca4c Mon Sep 17 00:00:00 2001 From: Alexey Date: Fri, 2 Jul 2021 20:21:38 +0000 Subject: [PATCH 695/931] bfloat16 wrapped into back ticks --- .../aggregate-functions/reference/quantilebfloat16.md | 6 +++--- .../aggregate-functions/reference/quantilebfloat16.md | 6 +++--- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/docs/en/sql-reference/aggregate-functions/reference/quantilebfloat16.md b/docs/en/sql-reference/aggregate-functions/reference/quantilebfloat16.md index cdbb60f2fe8..b914e1feedf 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/quantilebfloat16.md +++ b/docs/en/sql-reference/aggregate-functions/reference/quantilebfloat16.md @@ -4,8 +4,8 @@ toc_priority: 209 # quantileBFloat16 {#quantilebfloat16} -Computes an approximate [quantile](https://en.wikipedia.org/wiki/Quantile) of a sample consisting of [bfloat16](https://en.wikipedia.org/wiki/Bfloat16_floating-point_format) numbers. bfloat16 is a floating-point data type with 1 sign bit, 8 exponent bits and 7 fraction bits. -The function converts input values to 32-bit floats and takes the most significant 16 bits. Then it calculates bfloat16 quantile value and converts the result to a 64-bit float by appending zero bits. +Computes an approximate [quantile](https://en.wikipedia.org/wiki/Quantile) of a sample consisting of [bfloat16](https://en.wikipedia.org/wiki/Bfloat16_floating-point_format) numbers. `bfloat16` is a floating-point data type with 1 sign bit, 8 exponent bits and 7 fraction bits. +The function converts input values to 32-bit floats and takes the most significant 16 bits. Then it calculates `bfloat16` quantile value and converts the result to a 64-bit float by appending zero bits. The function is a fast quantile estimator with a relative error no more than 0.390625%. **Syntax** @@ -56,7 +56,7 @@ Result: │ 3 │ 1 │ └───────────────────────────┴───────────────────────────┘ ``` -Note that all floating point values in the example are truncated to 1.0 when converting to bfloat16. +Note that all floating point values in the example are truncated to 1.0 when converting to `bfloat16`. **See Also** diff --git a/docs/ru/sql-reference/aggregate-functions/reference/quantilebfloat16.md b/docs/ru/sql-reference/aggregate-functions/reference/quantilebfloat16.md index 217da78d1d1..1b882525c61 100644 --- a/docs/ru/sql-reference/aggregate-functions/reference/quantilebfloat16.md +++ b/docs/ru/sql-reference/aggregate-functions/reference/quantilebfloat16.md @@ -4,8 +4,8 @@ toc_priority: 209 # quantileBFloat16 {#quantilebfloat16} -Приближенно вычисляет [квантиль](https://ru.wikipedia.org/wiki/Квантиль) выборки чисел в формате [bfloat16](https://en.wikipedia.org/wiki/Bfloat16_floating-point_format). bfloat16 — это формат с плавающей точкой, в котором для представления числа используется 1 знаковый бит, 8 бит для порядка и 7 бит для мантиссы. -Функция преобразует входное число в 32-битное с плавающей точкой и обрабатывает его старшие 16 бит. Она вычисляет квантиль в формате bfloat16 и преобразует его в 64-битное число с плавающей точкой, добавляя нулевые биты. +Приближенно вычисляет [квантиль](https://ru.wikipedia.org/wiki/Квантиль) выборки чисел в формате [bfloat16](https://en.wikipedia.org/wiki/Bfloat16_floating-point_format). `bfloat16` — это формат с плавающей точкой, в котором для представления числа используется 1 знаковый бит, 8 бит для порядка и 7 бит для мантиссы. 
+Функция преобразует входное число в 32-битное с плавающей точкой и обрабатывает его старшие 16 бит. Она вычисляет квантиль в формате `bfloat16` и преобразует его в 64-битное число с плавающей точкой, добавляя нулевые биты. Эта функция выполняет быстрые приближенные вычисления с относительной ошибкой не более 0.390625%. **Синтаксис** @@ -56,7 +56,7 @@ SELECT quantileBFloat16(0.75)(a), quantileBFloat16(0.75)(b) FROM example_table; │ 3 │ 1 │ └───────────────────────────┴───────────────────────────┘ ``` -Обратите внимание, что все числа с плавающей точкой в примере были округлены до 1.0 при преобразовании к bfloat16. +Обратите внимание, что все числа с плавающей точкой в примере были округлены до 1.0 при преобразовании к `bfloat16`. **See Also** From 9f52e64805c6c4dd832b54c48543c4183e3a167e Mon Sep 17 00:00:00 2001 From: Maksim Kita Date: Sat, 3 Jul 2021 01:22:04 +0300 Subject: [PATCH 696/931] FunctionInitializeAggregation build fix --- src/Functions/initializeAggregation.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Functions/initializeAggregation.cpp b/src/Functions/initializeAggregation.cpp index 060788773b6..b097d81e385 100644 --- a/src/Functions/initializeAggregation.cpp +++ b/src/Functions/initializeAggregation.cpp @@ -30,7 +30,7 @@ class FunctionInitializeAggregation : public IFunction, private WithContext public: static constexpr auto name = "initializeAggregation"; static FunctionPtr create(ContextPtr context_) { return std::make_shared(context_); } - FunctionInitializeAggregation(ContextPtr context_) : WithContext(context_) {} + explicit FunctionInitializeAggregation(ContextPtr context_) : WithContext(context_) {} String getName() const override { return name; } From b26feb2b19c61ed1d90b64d81841406250591045 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sat, 3 Jul 2021 03:13:21 +0300 Subject: [PATCH 697/931] Add support for Chinese components in Nested data types --- src/DataTypes/NestedUtils.cpp | 34 +++---------------- .../01937_nested_chinese.reference | 12 +++++++ .../0_stateless/01937_nested_chinese.sql | 8 +++++ 3 files changed, 24 insertions(+), 30 deletions(-) create mode 100644 tests/queries/0_stateless/01937_nested_chinese.reference create mode 100644 tests/queries/0_stateless/01937_nested_chinese.sql diff --git a/src/DataTypes/NestedUtils.cpp b/src/DataTypes/NestedUtils.cpp index 6c13eea0a1b..ed9ea3e1b5c 100644 --- a/src/DataTypes/NestedUtils.cpp +++ b/src/DataTypes/NestedUtils.cpp @@ -34,41 +34,15 @@ std::string concatenateName(const std::string & nested_table_name, const std::st } -/** Name can be treated as compound if and only if both parts are simple identifiers. +/** Name can be treated as compound if it contains dot (.) in the middle. 
*/ std::pair splitName(const std::string & name) { - const char * begin = name.data(); - const char * pos = begin; - const char * end = begin + name.size(); - - if (pos >= end || !isValidIdentifierBegin(*pos)) + auto idx = name.find_first_of('.'); + if (idx == std::string::npos || idx == 0 || idx + 1 == name.size()) return {name, {}}; - ++pos; - - while (pos < end && isWordCharASCII(*pos)) - ++pos; - - if (pos >= end || *pos != '.') - return {name, {}}; - - const char * first_end = pos; - ++pos; - const char * second_begin = pos; - - if (pos >= end || !isValidIdentifierBegin(*pos)) - return {name, {}}; - - ++pos; - - while (pos < end && isWordCharASCII(*pos)) - ++pos; - - if (pos != end) - return {name, {}}; - - return {{ begin, first_end }, { second_begin, end }}; + return {name.substr(0, idx), name.substr(idx + 1)}; } diff --git a/tests/queries/0_stateless/01937_nested_chinese.reference b/tests/queries/0_stateless/01937_nested_chinese.reference new file mode 100644 index 00000000000..54b6175d7fc --- /dev/null +++ b/tests/queries/0_stateless/01937_nested_chinese.reference @@ -0,0 +1,12 @@ +id String +products.产品 Array(Array(String)) +products.销量 Array(Array(Int32)) +id String +products.产品 Array(Array(String)) +products.销量 Array(Array(Int32)) +id String +products.产品 Array(String) +products.销量 Array(Int32) +p.产品 Array(String) +p.销量 Array(Int32) +0 diff --git a/tests/queries/0_stateless/01937_nested_chinese.sql b/tests/queries/0_stateless/01937_nested_chinese.sql new file mode 100644 index 00000000000..94c6598480e --- /dev/null +++ b/tests/queries/0_stateless/01937_nested_chinese.sql @@ -0,0 +1,8 @@ +CREATE TEMPORARY TABLE test (`id` String, `products` Nested (`产品` Array(String), `销量` Array(Int32))); + +DESCRIBE test; +DESCRIBE (SELECT * FROM test); +DESCRIBE (SELECT * FROM test ARRAY JOIN products); +DESCRIBE (SELECT p.`产品`, p.`销量` FROM test ARRAY JOIN products AS p); +SELECT * FROM test ARRAY JOIN products; +SELECT count() FROM (SELECT * FROM test ARRAY JOIN products); From b20c0e2674f3a15319ff8a76f06499cc06fb7a24 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sat, 3 Jul 2021 03:23:14 +0300 Subject: [PATCH 698/931] Partially fix idiotic code in JOINs --- src/Interpreters/JoinedTables.cpp | 2 +- .../0_stateless/01938_joins_identifiers.reference | 1 + .../queries/0_stateless/01938_joins_identifiers.sql | 13 +++++++++++++ 3 files changed, 15 insertions(+), 1 deletion(-) create mode 100644 tests/queries/0_stateless/01938_joins_identifiers.reference create mode 100644 tests/queries/0_stateless/01938_joins_identifiers.sql diff --git a/src/Interpreters/JoinedTables.cpp b/src/Interpreters/JoinedTables.cpp index 45466ae5ca1..421fe7fcddd 100644 --- a/src/Interpreters/JoinedTables.cpp +++ b/src/Interpreters/JoinedTables.cpp @@ -48,7 +48,7 @@ void replaceJoinedTable(const ASTSelectQuery & select_query) if (table_expr.database_and_table_name) { const auto & table_id = table_expr.database_and_table_name->as(); - String expr = "(select * from " + table_id.name() + ") as " + table_id.shortName(); + String expr = "(SELECT * FROM " + backQuote(table_id.name()) + ") AS " + backQuote(table_id.shortName()); // FIXME: since the expression "a as b" exposes both "a" and "b" names, which is not equivalent to "(select * from a) as b", // we can't replace aliased tables. 
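For orientation, the rewrite this hunk hardens can be sketched in SQL, using the table names from the new test below; the back-quoted subquery is a rough rendering of the fragment the patched code now generates, not the analyzer's literal output:

``` sql
-- A join over tables whose names are not plain identifiers:
SELECT * FROM `/t0` JOIN `/t1` USING a;

-- replaceJoinedTable conceptually turns the right-hand side into a subquery;
-- back-quoting keeps the generated fragment parseable:
SELECT * FROM `/t0` JOIN (SELECT * FROM `/t1`) AS `/t1` USING a;

-- The previously generated form, (select * from /t1) as /t1, was not valid SQL.
```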
diff --git a/tests/queries/0_stateless/01938_joins_identifiers.reference b/tests/queries/0_stateless/01938_joins_identifiers.reference
new file mode 100644
index 00000000000..4ce2f5c2505
--- /dev/null
+++ b/tests/queries/0_stateless/01938_joins_identifiers.reference
@@ -0,0 +1 @@
+0 0 1
diff --git a/tests/queries/0_stateless/01938_joins_identifiers.sql b/tests/queries/0_stateless/01938_joins_identifiers.sql
new file mode 100644
index 00000000000..b518080b116
--- /dev/null
+++ b/tests/queries/0_stateless/01938_joins_identifiers.sql
@@ -0,0 +1,13 @@
+DROP TABLE IF EXISTS "/t0";
+DROP TABLE IF EXISTS "/t1";
+
+create table "/t0" (a Int64, b Int64) engine = MergeTree() partition by a order by a;
+create table "/t1" (a Int64, b Int64) engine = MergeTree() partition by a order by a;
+
+insert into "/t0" values (0, 0);
+insert into "/t1" values (0, 1);
+
+select * from "/t0" join "/t1" using a;
+
+DROP TABLE "/t0";
+DROP TABLE "/t1";
From acd1342df8444f136df814ac5c4d5df3b93cc6ca Mon Sep 17 00:00:00 2001
From: Alexey Milovidov
Date: Sat, 3 Jul 2021 04:54:43 +0300
Subject: [PATCH 699/931] Skip test for ANTLR #25904

---
 tests/queries/skip_list.json | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/tests/queries/skip_list.json b/tests/queries/skip_list.json
index be52bee71b1..e0a96ef8ded 100644
--- a/tests/queries/skip_list.json
+++ b/tests/queries/skip_list.json
@@ -522,7 +522,8 @@
         "01913_names_of_tuple_literal",
         "01925_merge_prewhere_table",
         "01932_null_valid_identifier",
-        "01934_constexpr_aggregate_function_parameters"
+        "01934_constexpr_aggregate_function_parameters",
+        "01932_alter_index_with_order"
     ],
     "parallel": [
From 5bce3d35f8e042093a730aeb12ddaa57b4cd05b9 Mon Sep 17 00:00:00 2001
From: Olga Revyakina
Date: Sat, 3 Jul 2021 07:57:57 +0300
Subject: [PATCH 700/931] Web UI + new adopter

---
 docs/en/interfaces/http.md            | 28 ++++++++++------
 docs/en/introduction/adopters.md      |  1 +
 docs/ru/getting-started/playground.md |  2 +-
 docs/ru/interfaces/http.md            | 48 +++++++++++++++------------
 4 files changed, 46 insertions(+), 33 deletions(-)

diff --git a/docs/en/interfaces/http.md b/docs/en/interfaces/http.md
index dec3c839020..5f3eae34f92 100644
--- a/docs/en/interfaces/http.md
+++ b/docs/en/interfaces/http.md
@@ -7,16 +7,22 @@ toc_title: HTTP Interface

 The HTTP interface lets you use ClickHouse on any platform from any programming language. We use it for working from Java and Perl, as well as shell scripts. In other departments, the HTTP interface is used from Perl, Python, and Go. The HTTP interface is more limited than the native interface, but it has better compatibility.

-By default, clickhouse-server listens for HTTP on port 8123 (this can be changed in the config).
+By default, `clickhouse-server` listens for HTTP on port 8123 (this can be changed in the config).

-If you make a GET / request without parameters, it returns 200 response code and the string which defined in [http_server_default_response](../operations/server-configuration-parameters/settings.md#server_configuration_parameters-http_server_default_response) default value “Ok.” (with a line feed at the end)
+If you make a `GET /` request without parameters, it returns a 200 response code and the string defined in [http_server_default_response](../operations/server-configuration-parameters/settings.md#server_configuration_parameters-http_server_default_response), default value “Ok.” (with a line feed at the end).

 ``` bash
 $ curl 'http://localhost:8123/'
 Ok.
 ```

-Use GET /ping request in health-check scripts.
This handler always returns “Ok.” (with a line feed at the end). Available from version 18.12.13. +Web UI can be accessed here: + +``` bash +$ curl 'http://localhost:8123/play' +``` + +In health-check scripts use `GET /ping` request. This handler always returns “Ok.” (with a line feed at the end). Available from version 18.12.13. ``` bash $ curl 'http://localhost:8123/ping' @@ -51,8 +57,8 @@ X-ClickHouse-Summary: {"read_rows":"0","read_bytes":"0","written_rows":"0","writ 1 ``` -As you can see, curl is somewhat inconvenient in that spaces must be URL escaped. -Although wget escapes everything itself, we do not recommend using it because it does not work well over HTTP 1.1 when using keep-alive and Transfer-Encoding: chunked. +As you can see, `curl` is somewhat inconvenient in that spaces must be URL escaped. +Although `wget` escapes everything itself, we do not recommend using it because it does not work well over HTTP 1.1 when using keep-alive and Transfer-Encoding: chunked. ``` bash $ echo 'SELECT 1' | curl 'http://localhost:8123/' --data-binary @- @@ -75,7 +81,7 @@ ECT 1 , expected One of: SHOW TABLES, SHOW DATABASES, SELECT, INSERT, CREATE, ATTACH, RENAME, DROP, DETACH, USE, SET, OPTIMIZE., e.what() = DB::Exception ``` -By default, data is returned in TabSeparated format (for more information, see the “Formats” section). +By default, data is returned in [TabSeparated](formats.md#tabseparated) format. You use the FORMAT clause of the query to request any other format. @@ -90,9 +96,11 @@ $ echo 'SELECT 1 FORMAT Pretty' | curl 'http://localhost:8123/?' --data-binary @ └───┘ ``` -The POST method of transmitting data is necessary for INSERT queries. In this case, you can write the beginning of the query in the URL parameter, and use POST to pass the data to insert. The data to insert could be, for example, a tab-separated dump from MySQL. In this way, the INSERT query replaces LOAD DATA LOCAL INFILE from MySQL. +The POST method of transmitting data is necessary for `INSERT` queries. In this case, you can write the beginning of the query in the URL parameter, and use POST to pass the data to insert. The data to insert could be, for example, a tab-separated dump from MySQL. In this way, the `INSERT` query replaces `LOAD DATA LOCAL INFILE` from MySQL. 
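As a rough illustration of how such a request is split (the table `t` is the one created in the examples that follow), only the statement header travels as the query text, while the rows stream in the request body:

``` sql
-- Sent as the query part, in the URL parameter or at the start of the POST body:
INSERT INTO t FORMAT TabSeparated
-- The tab-separated rows themselves (e.g. a dump from another database) follow
-- as raw data in the POST body rather than as part of the SQL text.
```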
-Examples: Creating a table: +**Examples** + +Creating a table: ``` bash $ echo 'CREATE TABLE t (a UInt8) ENGINE = Memory' | curl 'http://localhost:8123/' --data-binary @- @@ -632,6 +640,4 @@ $ curl -vv -H 'XXX:xxx' 'http://localhost:8123/get_relative_path_static_handler' < Relative Path File * Connection #0 to host localhost left intact -``` - -[Original article](https://clickhouse.tech/docs/en/interfaces/http_interface/) +``` \ No newline at end of file diff --git a/docs/en/introduction/adopters.md b/docs/en/introduction/adopters.md index 8d72e12f01b..34d3580c8ca 100644 --- a/docs/en/introduction/adopters.md +++ b/docs/en/introduction/adopters.md @@ -59,6 +59,7 @@ toc_title: Adopters | HUYA | Video Streaming | Analytics | — | — | [Slides in Chinese, October 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup19/7.%20ClickHouse万亿数据分析实践%20李本旺(sundy-li)%20虎牙.pdf) | | ICA | FinTech | Risk Management | — | — | [Blog Post in English, Sep 2020](https://altinity.com/blog/clickhouse-vs-redshift-performance-for-fintech-risk-management?utm_campaign=ClickHouse%20vs%20RedShift&utm_content=143520807&utm_medium=social&utm_source=twitter&hss_channel=tw-3894792263) | | Idealista | Real Estate | Analytics | — | — | [Blog Post in English, April 2019](https://clickhouse.tech/blog/en/clickhouse-meetup-in-madrid-on-april-2-2019) | +| Infobaleen | Marketing | Analytics | — | — | [Official site](https://infobaleen.com) | | Infovista | Networks | Analytics | — | — | [Slides in English, October 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup30/infovista.pdf) | | InnoGames | Games | Metrics, Logging | — | — | [Slides in Russian, September 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup28/graphite_and_clickHouse.pdf) | | Instabug | APM Platform | Main product | — | — | [A quote from Co-Founder](https://altinity.com/) | diff --git a/docs/ru/getting-started/playground.md b/docs/ru/getting-started/playground.md index b51a9b2b436..d3101213b78 100644 --- a/docs/ru/getting-started/playground.md +++ b/docs/ru/getting-started/playground.md @@ -61,4 +61,4 @@ clickhouse client --secure -h play-api.clickhouse.tech --port 9440 -u playground Бэкэнд Playground - это кластер ClickHouse без дополнительных серверных приложений. Как упоминалось выше, способы подключения по HTTPS и TCP/TLS общедоступны как часть Playground. Они проксируются через [Cloudflare Spectrum](https://www.cloudflare.com/products/cloudflare-spectrum/) для добавления дополнительного уровня защиты и улучшенного глобального подключения. !!! warning "Предупреждение" -Открывать сервер ClickHouse для публичного доступа в любой другой ситуации **настоятельно не рекомендуется**. Убедитесь, что он настроен только на частную сеть и защищен брандмауэром. + Открывать сервер ClickHouse для публичного доступа в любой другой ситуации **настоятельно не рекомендуется**. Убедитесь, что он настроен только на частную сеть и защищен брандмауэром. diff --git a/docs/ru/interfaces/http.md b/docs/ru/interfaces/http.md index 9e553c12dc0..cf62045b61c 100644 --- a/docs/ru/interfaces/http.md +++ b/docs/ru/interfaces/http.md @@ -5,30 +5,36 @@ toc_title: "HTTP-интерфейс" # HTTP-интерфейс {#http-interface} -HTTP интерфейс позволяет использовать ClickHouse на любой платформе, из любого языка программирования. У нас он используется для работы из Java и Perl, а также из shell-скриптов. В других отделах, HTTP интерфейс используется из Perl, Python и Go. 
HTTP интерфейс более ограничен по сравнению с родным интерфейсом, но является более совместимым. +HTTP интерфейс позволяет использовать ClickHouse на любой платформе, из любого языка программирования. У нас он используется для работы из Java и Perl, а также из shell-скриптов. В других отделах HTTP интерфейс используется из Perl, Python и Go. HTTP интерфейс более ограничен по сравнению с родным интерфейсом, но является более совместимым. -По умолчанию, clickhouse-server слушает HTTP на порту 8123 (это можно изменить в конфиге). -Если запросить GET / без параметров, то вернётся строка заданная с помощью настройки [http_server_default_response](../operations/server-configuration-parameters/settings.md#server_configuration_parameters-http_server_default_response). Значение по умолчанию «Ok.» (с переводом строки на конце). +По умолчанию `clickhouse-server` слушает HTTP на порту 8123 (это можно изменить в конфиге). +Если запросить `GET /` без параметров, то вернётся строка заданная с помощью настройки [http_server_default_response](../operations/server-configuration-parameters/settings.md#server_configuration_parameters-http_server_default_response). Значение по умолчанию «Ok.» (с переводом строки на конце). ``` bash $ curl 'http://localhost:8123/' Ok. ``` -В скриптах проверки доступности вы можете использовать GET /ping без параметров. Если сервер доступен всегда возвращается «Ok.» (с переводом строки на конце). +Веб-интерфейс доступен по адресу: + +``` bash +$ curl 'http://localhost:8123/play' +``` + +В скриптах проверки доступности вы можете использовать `GET /ping` без параметров. Если сервер доступен всегда возвращается «Ok.» (с переводом строки на конце). ``` bash $ curl 'http://localhost:8123/ping' Ok. ``` -Запрос отправляется в виде URL параметра с именем query. Или как тело запроса при использовании метода POST. +Запрос отправляется в виде URL параметра с именем `query`. Или как тело запроса при использовании метода POST. Или начало запроса в URL параметре query, а продолжение POST-ом (зачем это нужно, будет объяснено ниже). Размер URL ограничен 16KB, это следует учитывать при отправке больших запросов. -В случае успеха, вам вернётся код ответа 200 и результат обработки запроса в теле ответа. -В случае ошибки, вам вернётся код ответа 500 и текст с описанием ошибки в теле ответа. +В случае успеха вам вернётся код ответа 200 и результат обработки запроса в теле ответа. +В случае ошибки вам вернётся код ответа 500 и текст с описанием ошибки в теле ответа. -При использовании метода GET, выставляется настройка readonly. То есть, для запросов, модифицирующие данные, можно использовать только метод POST. Сам запрос при этом можно отправлять как в теле POST-а, так и в параметре URL. +При использовании метода GET выставляется настройка readonly. То есть, для запросов, модифицирующих данные, можно использовать только метод POST. Сам запрос при этом можно отправлять как в теле POST запроса, так и в параметре URL. Примеры: @@ -51,8 +57,8 @@ X-ClickHouse-Summary: {"read_rows":"0","read_bytes":"0","written_rows":"0","writ 1 ``` -Как видно, curl немного неудобен тем, что надо URL-эскейпить пробелы. -Хотя wget сам всё эскейпит, но его не рекомендуется использовать, так как он плохо работает по HTTP 1.1 при использовании keep-alive и Transfer-Encoding: chunked. +Как видно, `curl` немного неудобен тем, что надо URL-эскейпить пробелы. +Хотя `wget` сам всё эскейпит, но его не рекомендуется использовать, так как он плохо работает по HTTP 1.1 при использовании `keep-alive` и `Transfer-Encoding: chunked`. 
``` bash $ echo 'SELECT 1' | curl 'http://localhost:8123/' --data-binary @- @@ -65,7 +71,7 @@ $ echo '1' | curl 'http://localhost:8123/?query=SELECT' --data-binary @- 1 ``` -Если часть запроса отправляется в параметре, а часть POST-ом, то между этими двумя кусками данных ставится перевод строки. +Если часть запроса отправляется в параметре, а часть POST запросом, то между этими двумя кусками данных ставится перевод строки. Пример (так работать не будет): ``` bash @@ -75,9 +81,9 @@ ECT 1 , expected One of: SHOW TABLES, SHOW DATABASES, SELECT, INSERT, CREATE, ATTACH, RENAME, DROP, DETACH, USE, SET, OPTIMIZE., e.what() = DB::Exception ``` -По умолчанию, данные возвращаются в формате TabSeparated (подробнее смотри раздел «Форматы»). +По умолчанию данные возвращаются в формате [TabSeparated](formats.md#tabseparated). -Можно попросить любой другой формат - с помощью секции FORMAT запроса. +Можно попросить любой другой формат с помощью секции FORMAT запроса. Кроме того, вы можете использовать параметр URL-адреса `default_format` или заголовок `X-ClickHouse-Format`, чтобы указать формат по умолчанию, отличный от `TabSeparated`. @@ -90,9 +96,10 @@ $ echo 'SELECT 1 FORMAT Pretty' | curl 'http://localhost:8123/?' --data-binary @ └───┘ ``` -Возможность передавать данные POST-ом нужна для INSERT-запросов. В этом случае вы можете написать начало запроса в параметре URL, а вставляемые данные передать POST-ом. Вставляемыми данными может быть, например, tab-separated дамп, полученный из MySQL. Таким образом, запрос INSERT заменяет LOAD DATA LOCAL INFILE из MySQL. +Возможность передавать данные с помощью POST нужна для запросов `INSERT`. В этом случае вы можете написать начало запроса в параметре URL, а вставляемые данные передать POST запросом. Вставляемыми данными может быть, например, tab-separated дамп, полученный из MySQL. Таким образом, запрос `INSERT` заменяет `LOAD DATA LOCAL INFILE` из MySQL. + +**Примеры** -Примеры: Создаём таблицу: ``` bash @@ -147,7 +154,7 @@ $ curl 'http://localhost:8123/?query=SELECT%20a%20FROM%20t' $ echo 'DROP TABLE t' | curl 'http://localhost:8123/' --data-binary @- ``` -Для запросов, которые не возвращают таблицу с данными, в случае успеха, выдаётся пустое тело ответа. +Для запросов, которые не возвращают таблицу с данными, в случае успеха выдаётся пустое тело ответа. ## Сжатие {#compression} @@ -165,7 +172,7 @@ $ echo 'DROP TABLE t' | curl 'http://localhost:8123/' --data-binary @- - `deflate` - `xz` -Для отправки сжатого запроса `POST`, добавьте заголовок `Content-Encoding: compression_method`. +Для отправки сжатого запроса `POST` добавьте заголовок `Content-Encoding: compression_method`. Чтобы ClickHouse сжимал ответ, разрешите сжатие настройкой [enable_http_compression](../operations/settings/settings.md#settings-enable_http_compression) и добавьте заголовок `Accept-Encoding: compression_method`. Уровень сжатия данных для всех методов сжатия можно задать с помощью настройки [http_zlib_compression_level](../operations/settings/settings.md#settings-http_zlib_compression_level). !!! note "Примечание" @@ -281,13 +288,13 @@ X-ClickHouse-Progress: {"read_rows":"8783786","read_bytes":"819092887","total_ro HTTP интерфейс позволяет передать внешние данные (внешние временные таблицы) для использования запроса. Подробнее смотрите раздел «Внешние данные для обработки запроса» -## Буферизация ответа {#buferizatsiia-otveta} +## Буферизация ответа {#response-buffering} Существует возможность включить буферизацию ответа на стороне сервера. 
Для этого предусмотрены параметры URL `buffer_size` и `wait_end_of_query`.
`buffer_size` определяет количество байт результата которые будут буферизованы в памяти сервера. Если тело результата больше этого порога, то буфер будет переписан в HTTP канал, а оставшиеся данные будут отправляться в HTTP-канал напрямую.

-Чтобы гарантировать буферизацию всего ответа необходимо выставить `wait_end_of_query=1`. В этом случае данные, не поместившиеся в памяти, будут буферизованы во временном файле сервера.
+Чтобы гарантировать буферизацию всего ответа, необходимо выставить `wait_end_of_query=1`. В этом случае данные, не поместившиеся в памяти, будут буферизованы во временном файле сервера.

 Пример:

@@ -295,7 +302,7 @@ HTTP интерфейс позволяет передать внешние да
 $ curl -sS 'http://localhost:8123/?max_result_bytes=4000000&buffer_size=3000000&wait_end_of_query=1' -d 'SELECT toUInt8(number) FROM system.numbers LIMIT 9000000 FORMAT RowBinary'
 ```

-Буферизация позволяет избежать ситуации когда код ответа и HTTP-заголовки были отправлены клиенту, после чего возникла ошибка выполнения запроса. В такой ситуации сообщение об ошибке записывается в конце тела ответа, и на стороне клиента ошибка может быть обнаружена только на этапе парсинга.
+Буферизация позволяет избежать ситуации, когда код ответа и HTTP-заголовки были отправлены клиенту, после чего возникла ошибка выполнения запроса. В такой ситуации сообщение об ошибке записывается в конце тела ответа, и на стороне клиента ошибка может быть обнаружена только на этапе парсинга.

 ### Запросы с параметрами {#cli-queries-with-parameters}

@@ -634,4 +641,3 @@ $ curl -vv -H 'XXX:xxx' 'http://localhost:8123/get_relative_path_static_handler'
 Relative Path File
 * Connection #0 to host localhost left intact
 ```
-
From 5ae0d19cb4f8602bbd9a16da384dbb0c4feca0d8 Mon Sep 17 00:00:00 2001
From: Olga Revyakina
Date: Sat, 3 Jul 2021 08:10:10 +0300
Subject: [PATCH 701/931] Update adopters.md

---
 docs/en/introduction/adopters.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/docs/en/introduction/adopters.md b/docs/en/introduction/adopters.md
index 34d3580c8ca..2c7496197bc 100644
--- a/docs/en/introduction/adopters.md
+++ b/docs/en/introduction/adopters.md
@@ -59,7 +59,7 @@ toc_title: Adopters
 | HUYA | Video Streaming | Analytics | — | — | [Slides in Chinese, October 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup19/7.%20ClickHouse万亿数据分析实践%20李本旺(sundy-li)%20虎牙.pdf) |
 | ICA | FinTech | Risk Management | — | — | [Blog Post in English, Sep 2020](https://altinity.com/blog/clickhouse-vs-redshift-performance-for-fintech-risk-management?utm_campaign=ClickHouse%20vs%20RedShift&utm_content=143520807&utm_medium=social&utm_source=twitter&hss_channel=tw-3894792263) |
 | Idealista | Real Estate | Analytics | — | — | [Blog Post in English, April 2019](https://clickhouse.tech/blog/en/clickhouse-meetup-in-madrid-on-april-2-2019) |
-| Infobaleen | Marketing | Analytics | — | — | [Official site](https://infobaleen.com) |
+| Infobaleen | AI marketing tool | Analytics | — | — | [Official site](https://infobaleen.com) |
 | Infovista | Networks | Analytics | — | — | [Slides in English, October 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup30/infovista.pdf) |
 | InnoGames | Games | Metrics, Logging | — | — | [Slides in Russian, September 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup28/graphite_and_clickHouse.pdf) |
 | Instabug | APM Platform | Main product | — | — | [A quote from
Co-Founder](https://altinity.com/) | From 26c4f3047bedbfd6020f406f32b4b944ec928859 Mon Sep 17 00:00:00 2001 From: lehasm Date: Sat, 3 Jul 2021 10:06:17 +0300 Subject: [PATCH 702/931] Update docs/ru/sql-reference/aggregate-functions/reference/quantilebfloat16.md Co-authored-by: olgarev <56617294+olgarev@users.noreply.github.com> --- .../aggregate-functions/reference/quantilebfloat16.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/ru/sql-reference/aggregate-functions/reference/quantilebfloat16.md b/docs/ru/sql-reference/aggregate-functions/reference/quantilebfloat16.md index 1b882525c61..47038d279df 100644 --- a/docs/ru/sql-reference/aggregate-functions/reference/quantilebfloat16.md +++ b/docs/ru/sql-reference/aggregate-functions/reference/quantilebfloat16.md @@ -22,7 +22,7 @@ quantileBFloat16[(level)](expr) **Параметры** -- `level` — уровень квантиля. Необязательный. Допустимый диапазон значений от 0 до 1. Значение по умолчанию: 0.5. [Float](../../../sql-reference/data-types/float.md). +- `level` — уровень квантиля. Необязательный параметр. Допустимый диапазон значений от 0 до 1. Значение по умолчанию: 0.5. [Float](../../../sql-reference/data-types/float.md). **Возвращаемое значение** From 5268f64b144949090913b4003373e03ac8629d2e Mon Sep 17 00:00:00 2001 From: lehasm Date: Sat, 3 Jul 2021 10:06:32 +0300 Subject: [PATCH 703/931] Update docs/ru/sql-reference/aggregate-functions/reference/quantilebfloat16.md Co-authored-by: olgarev <56617294+olgarev@users.noreply.github.com> --- .../aggregate-functions/reference/quantilebfloat16.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/ru/sql-reference/aggregate-functions/reference/quantilebfloat16.md b/docs/ru/sql-reference/aggregate-functions/reference/quantilebfloat16.md index 47038d279df..ba4a762dff7 100644 --- a/docs/ru/sql-reference/aggregate-functions/reference/quantilebfloat16.md +++ b/docs/ru/sql-reference/aggregate-functions/reference/quantilebfloat16.md @@ -58,7 +58,7 @@ SELECT quantileBFloat16(0.75)(a), quantileBFloat16(0.75)(b) FROM example_table; ``` Обратите внимание, что все числа с плавающей точкой в примере были округлены до 1.0 при преобразовании к `bfloat16`. -**See Also** +**См. также** - [median](../../../sql-reference/aggregate-functions/reference/median.md#median) - [quantiles](../../../sql-reference/aggregate-functions/reference/quantiles.md#quantiles) From 02681019f86743bc8d5d063623e9877588a6f6c2 Mon Sep 17 00:00:00 2001 From: kssenii Date: Sat, 3 Jul 2021 07:45:37 +0000 Subject: [PATCH 704/931] Fix --- src/Interpreters/ExpressionAnalyzer.cpp | 7 ++++++- src/Interpreters/ExpressionAnalyzer.h | 2 ++ src/Interpreters/InterpreterSelectQuery.cpp | 2 +- src/Parsers/ASTSelectQuery.h | 1 + .../01925_test_group_by_const_consistency.reference | 6 +----- .../0_stateless/01925_test_group_by_const_consistency.sql | 7 +------ 6 files changed, 12 insertions(+), 13 deletions(-) diff --git a/src/Interpreters/ExpressionAnalyzer.cpp b/src/Interpreters/ExpressionAnalyzer.cpp index e693d4ba988..d4d0c0d0a9b 100644 --- a/src/Interpreters/ExpressionAnalyzer.cpp +++ b/src/Interpreters/ExpressionAnalyzer.cpp @@ -231,7 +231,6 @@ void ExpressionAnalyzer::analyzeAggregation() if (has_aggregation) { - /// Find out aggregation keys. if (select_query) { @@ -252,6 +251,8 @@ void ExpressionAnalyzer::analyzeAggregation() /// Constant expressions have non-null column pointer at this stage. 
if (node->column && isColumnConst(*node->column)) { + select_query->group_by_with_constant_keys = true; + /// But don't remove last key column if no aggregate functions, otherwise aggregation will not work. if (!aggregate_descriptions.empty() || size > 1) { @@ -288,6 +289,10 @@ void ExpressionAnalyzer::analyzeAggregation() else aggregated_columns = temp_actions->getNamesAndTypesList(); + /// Constant expressions are already removed during first 'analyze' run. + /// So for second `analyze` information is taken from select_query. + has_const_aggregation_keys = select_query->group_by_with_constant_keys; + for (const auto & desc : aggregate_descriptions) aggregated_columns.emplace_back(desc.column_name, desc.function->getReturnType()); } diff --git a/src/Interpreters/ExpressionAnalyzer.h b/src/Interpreters/ExpressionAnalyzer.h index fe00b3e9f88..ac5d281f337 100644 --- a/src/Interpreters/ExpressionAnalyzer.h +++ b/src/Interpreters/ExpressionAnalyzer.h @@ -65,6 +65,7 @@ struct ExpressionAnalyzerData bool has_aggregation = false; NamesAndTypesList aggregation_keys; + bool has_const_aggregation_keys = false; AggregateDescriptions aggregate_descriptions; WindowDescriptions window_descriptions; @@ -309,6 +310,7 @@ public: bool hasTableJoin() const { return syntax->ast_join; } const NamesAndTypesList & aggregationKeys() const { return aggregation_keys; } + bool hasConstAggregationKeys() const { return has_const_aggregation_keys; } const AggregateDescriptions & aggregates() const { return aggregate_descriptions; } const PreparedSets & getPreparedSets() const { return prepared_sets; } diff --git a/src/Interpreters/InterpreterSelectQuery.cpp b/src/Interpreters/InterpreterSelectQuery.cpp index 181b60b7bf3..324340e4635 100644 --- a/src/Interpreters/InterpreterSelectQuery.cpp +++ b/src/Interpreters/InterpreterSelectQuery.cpp @@ -2035,7 +2035,7 @@ void InterpreterSelectQuery::executeAggregation(QueryPlan & query_plan, const Ac settings.group_by_two_level_threshold, settings.group_by_two_level_threshold_bytes, settings.max_bytes_before_external_group_by, - settings.empty_result_for_aggregation_by_empty_set, + settings.empty_result_for_aggregation_by_empty_set || (keys.empty() && query_analyzer->hasConstAggregationKeys()), context->getTemporaryVolume(), settings.max_threads, settings.min_free_disk_space_for_temporary_data); diff --git a/src/Parsers/ASTSelectQuery.h b/src/Parsers/ASTSelectQuery.h index e9aaa4ab83b..3fc8efb5311 100644 --- a/src/Parsers/ASTSelectQuery.h +++ b/src/Parsers/ASTSelectQuery.h @@ -44,6 +44,7 @@ public: bool group_by_with_totals = false; bool group_by_with_rollup = false; bool group_by_with_cube = false; + bool group_by_with_constant_keys = false; bool limit_with_ties = false; ASTPtr & refSelect() { return getExpression(Expression::SELECT); } diff --git a/tests/queries/0_stateless/01925_test_group_by_const_consistency.reference b/tests/queries/0_stateless/01925_test_group_by_const_consistency.reference index f2342900cb9..573541ac970 100644 --- a/tests/queries/0_stateless/01925_test_group_by_const_consistency.reference +++ b/tests/queries/0_stateless/01925_test_group_by_const_consistency.reference @@ -1,5 +1 @@ -1 0 -1 0 -1 0 -2 1 0 -D 0 +0 diff --git a/tests/queries/0_stateless/01925_test_group_by_const_consistency.sql b/tests/queries/0_stateless/01925_test_group_by_const_consistency.sql index f31d22a88ac..8a5de0e7c4f 100644 --- a/tests/queries/0_stateless/01925_test_group_by_const_consistency.sql +++ b/tests/queries/0_stateless/01925_test_group_by_const_consistency.sql @@ -1,7 +1,2 @@ 
SELECT 1 as a, count() FROM numbers(10) WHERE 0 GROUP BY a; - -SELECT materialize(1) as a, count() FROM numbers(10) WHERE 0 GROUP BY a; -SELECT materialize(1) as a, count() FROM numbers(10) WHERE 0 ORDER BY a; - -SELECT 2 as b, less(1, b) as a, count() FROM numbers(10) WHERE 0 GROUP BY a; -SELECT upper('d') as a, count() FROM numbers(10) WHERE 0 GROUP BY a; +SELECT count() FROM numbers(10) WHERE 0 From 80335713ccbba5daf1832ac5fc9483cb780a97cf Mon Sep 17 00:00:00 2001 From: Tiaonmmn Date: Sat, 3 Jul 2021 18:10:30 +0800 Subject: [PATCH 705/931] Update syntax.md --- docs/zh/sql-reference/syntax.md | 88 +++++++++++++++++---------------- 1 file changed, 46 insertions(+), 42 deletions(-) diff --git a/docs/zh/sql-reference/syntax.md b/docs/zh/sql-reference/syntax.md index c05c5a1a7bf..d3bdd208059 100644 --- a/docs/zh/sql-reference/syntax.md +++ b/docs/zh/sql-reference/syntax.md @@ -1,39 +1,42 @@ --- toc_priority: 31 toc_title: SQL语法 + --- # SQL语法 {#syntax} -CH有2类解析器:完整SQL解析器(递归式解析器),以及数据格式解析器(快速流式解析器) +ClickHouse有2类解析器:完整SQL解析器(递归式解析器),以及数据格式解析器(快速流式解析器) 除了 `INSERT` 查询,其它情况下仅使用完整SQL解析器。 `INSERT`查询会同时使用2种解析器: + ``` sql INSERT INTO t VALUES (1, 'Hello, world'), (2, 'abc'), (3, 'def') ``` -含`INSERT INTO t VALUES` 的部分由完整SQL解析器处理,包含数据的部分 `(1, 'Hello, world'), (2, 'abc'), (3, 'def')` 交给快速流式解析器解析。通过设置参数 [input_format_values_interpret_expressions](../operations/settings/settings.md#settings-input_format_values_interpret_expressions),你也可以对数据部分开启完整SQL解析器。当 `input_format_values_interpret_expressions = 1` 时,CH优先采用快速流式解析器来解析数据。如果失败,CH再尝试用完整SQL解析器来处理,就像处理SQL [expression](#syntax-expressions) 一样。 +含`INSERT INTO t VALUES` 的部分由完整SQL解析器处理,包含数据的部分 `(1, 'Hello, world'), (2, 'abc'), (3, 'def')` 交给快速流式解析器解析。通过设置参数 [input_format_values_interpret_expressions](../operations/settings/settings.md#settings-input_format_values_interpret_expressions),你也可以对数据部分开启完整SQL解析器。当 `input_format_values_interpret_expressions = 1` 时,ClickHouse优先采用快速流式解析器来解析数据。如果失败,ClickHouse再尝试用完整SQL解析器来处理,就像处理SQL [expression](#syntax-expressions) 一样。 数据可以采用任何格式。当CH接收到请求时,服务端先在内存中计算不超过 [max_query_size](../operations/settings/settings.md#settings-max_query_size) 字节的请求数据(默认1 mb),然后剩下部分交给快速流式解析器。 -这将避免在处理大型的 `INSERT`语句时出现问题。 +当 `INSERT` 语句中使用 `Values` 格式时,看起来数据部分的解析和解析`SELECT` 中的表达式相同,但并不是这样的。 `Values` 格式有非常多的限制。 -当 `INSERT` 语句中使用 `Values` 形式时,看起来 数据部分的解析和解析`SELECT` 中的表达式相同,但并不是这样的。 `Values` 形式非常有限。 -该篇的剩余部分涵盖了完整SQL解析器。关于格式解析的更多信息,参见 [Formats](../interfaces/formats.md) 章节。 +本文的剩余部分涵盖了完整SQL解析器。关于格式解析的更多信息,参见 [Formats](../interfaces/formats.md) 章节。 -## 空字符 {#spaces} +## 空白{#spaces} -sql语句中(包含sql的起始和结束)可以有任意的空字符,这些空字符类型包括:空格字符,tab制表符,换行符,CR符,换页符等。 +sql语句的语法结构部分之间(标识符之间、部分符号之间、包括sql的起始和结束)可以有任意的空白字符,这些空字符类型包括:空格字符,tab制表符,换行符,CR符,换页符等。 ## 注释 {#comments} -CH支持SQL风格或C语言风格的注释: +ClickHouse支持SQL风格或C语言风格的注释: + - SQL风格的注释以 `--` 开始,直到行末,`--` 后紧跟的空格可以忽略 -- C语言风格的注释以 `/*` 开始,以 `*/` 结束,支持多行形式,同样可以省略 `/*` 后的空格 +- C语言风格的注释以 `/*` 开始,以 `*/` 结束,可以跨行,同样可以省略 `/*` 后的空格 ## 关键字 {#syntax-keywords} 以下场景的关键字是大小写不敏感的: + - 标准SQL。例如,`SELECT`, `select` 和 `SeLeCt` 都是允许的 - 在某些流行的RDBMS中被实现的关键字,例如,`DateTime` 和 `datetime`是一样的 @@ -41,38 +44,36 @@ CH支持SQL风格或C语言风格的注释: 你可以在系统表 [system.data_type_families](../operations/system-tables/data_type_families.md#system_tables-data_type_families) 中检查某个数据类型的名称是否是大小写敏感型。 和标准SQL相反,所有其它的关键字都是 **大小写敏感的**,包括函数名称。 -In contrast to standard SQL, all other keywords (including functions names) are **case-sensitive**. 
-关键字不是保留的;它们仅在相应的上下文中才会被处理。如果你使用和关键字同名的 [变量名](#syntax-identifiers) ,需要使用双引号或转移符将它们包含起来。例如:如果表 `table_name` 包含列 `"FROM"`,那么 `SELECT "FROM" FROM table_name` 是合法的 +关键字不是保留的;它们仅在相应的上下文中才会被认为是关键字。如果你使用和关键字同名的 [标识符](#syntax-identifiers) ,需要使用双引号或反引号将它们包含起来。例如:如果表 `table_name` 包含列 `"FROM"`,那么 `SELECT "FROM" FROM table_name` 是合法的 -## 变量名 {#syntax-identifiers} +## 标识符 {#syntax-identifiers} -变量包括: -Identifiers are: +标识符包括: -- 集群,数据库,表,分区,列名称 +- 集群、数据库、表、分区、列的名称 - 函数 - 数据类型 -- 表达式别名 +- [表达式别名](https://clickhouse.tech/docs/zh/sql-reference/syntax/#syntax-expression_aliases) -变量名可以使用反引号包含起来 +变量名可以被括起或不括起,后者是推荐做法。 -没有使用反引号包含的变量名,必须匹配正则表达式 `^[a-zA-Z_][0-9a-zA-Z_]*$`,并且不能和 [关键字]相同 +没有括起的变量名,必须匹配正则表达式 `^[a-zA-Z_][0-9a-zA-Z_]*$`,并且不能和 [关键字](#syntax-keywords)相同,合法的标识符名称:`x`,`_1`,`X_y__Z123_`等。 -如果想使用和关键字同名的变量名称,或者在变量名称中包含其它符号,你需要通过双引号或转义符号,例如: `"id"`, `` `id` `` +如果想使用和关键字同名的变量名称,或者在变量名称中包含其它符号,你需要通过双引号或反引号,例如: `"id"`, `` `id` `` ## 字符 {#literals} -CH包含数字,字母,括号,NULL值等字符 +字符包含数字,字母,括号,NULL值等字符。 ### 数字 {#numeric} 数字类型字符会被做如下解析: -- 首先,当做64位的有符号整数,使用该函数 [strtoull](https://en.cppreference.com/w/cpp/string/byte/strtoul) + +- 首先,当做64位的有符号整数,使用函数 [strtoull](https://en.cppreference.com/w/cpp/string/byte/strtoul) - 如果失败,解析成64位无符号整数,同样使用函数 [strtoull](https://en.cppreference.com/w/cpp/string/byte/strtoul) - 如果还失败了,试图解析成浮点型数值,使用函数 [strtod](https://en.cppreference.com/w/cpp/string/byte/strtof) -Numeric literal tries to be parsed: - 最后,以上情形都不符合时,返回异常 @@ -82,13 +83,14 @@ Numeric literal tries to be parsed: 例如: `1`, `18446744073709551615`, `0xDEADBEEF`, `01`, `0.1`, `1e100`, `-1e-100`, `inf`, `nan`. -### 字母 {#syntax-string-literal} -CH只支持用单引号包含的字母。特殊字符可通过反斜杠进行转义。下列转义字符都有相应的实际值: `\b`, `\f`, `\r`, `\n`, `\t`, `\0`, `\a`, `\v`, `\xHH`。其它情况下,以 `\c`形式出现的转义字符,当`c`表示任意字符时,转义字符会转换成`c`。这意味着你可以使用 `\'`和`\\`。该值将拥有[String](../sql-reference/data-types/string.md)类型。 +### 字符串 {#syntax-string-literal} + +ClickHouse只支持用单引号包含的字符串。特殊字符可通过反斜杠进行转义。下列转义字符都有相应的实际值: `\b`, `\f`, `\r`, `\n`, `\t`, `\0`, `\a`, `\v`, `\xHH`。其它情况下,以 `\c`形式出现的转义字符,当`c`表示任意字符时,转义字符会转换成`c`。这意味着你可以使用 `\'`和`\\`。该值将拥有[String](../sql-reference/data-types/string.md)类型。 在字符串中,你至少需要对 `'` 和 `\` 进行转义。单引号可以使用单引号转义,例如 `'It\'s'` 和 `'It''s'` 是相同的。 -### 括号 {#compound} +### 复合字符串 {#compound} 数组都是使用方括号进行构造 `[1, 2, 3]`,元组则使用圆括号 `(1, 'Hello, world!', 2)` 从技术上来讲,这些都不是字符串,而是包含创建数组和元组运算符的表达式。 @@ -97,17 +99,18 @@ CH只支持用单引号包含的字母。特殊字符可通过反斜杠进行转 ### NULL值 {#null-literal} -代表不存在的值 +代表不存在的值。 -为了能在表字段中存储NULL值,该字段必须声明为 [空值](../sql-reference/data-types/nullable.md) 类型 +为了能在表字段中存储NULL值,该字段必须声明为 [空值](../sql-reference/data-types/nullable.md) 类型。 根据数据的格式(输入或输出),NULL值有不同的表现形式。更多信息参见文档 [数据格式](../interfaces/formats.md#formats) -在处理 `NULL`时存在很多细微差别。例如,比较运算的至少一个参数为 `NULL` ,该结果也是 `NULL` 。与之类似的还有乘法运算, 加法运算,以及其它运算。更多信息,请参阅每种运算的文档部分。 +在处理 `NULL`时存在很多细微差别。例如,比较运算的至少一个参数为 `NULL` ,则该结果也是 `NULL` 。与之类似的还有乘法运算, 加法运算,以及其它运算。更多信息,请参阅每种运算的文档部分。 -在语句中,可以通过 [是否为NULL](operators/index.md#operator-is-null) 以及 [是否不为NULL](operators/index.md) 运算符,以及 `isNull` 、 `isNotNull` 函数来检查 `NULL` 值 +在语句中,可以通过 [IS NULL](operators/index.md#operator-is-null) 以及 [IS NOT NULL](operators/index.md) 运算符,以及 `isNull` 、 `isNotNull` 函数来检查 `NULL` 值 ## 函数 {#functions} -函数调用的写法,类似于变量并带有被圆括号包含的参数列表(可能为空)。与标准SQL不同,圆括号是必须的,不管参数列表是否为空。例如: `now()`。 + +函数调用的写法,类似于一个标识符后接被圆括号包含的参数列表(可能为空)。与标准SQL不同,圆括号是必须的,不管参数列表是否为空。例如: `now()`。 函数分为常规函数和聚合函数(参见“Aggregate functions”一章)。有些聚合函数包含2个参数列表,第一个参数列表中的参数被称为“parameters”。不包含“parameters”的聚合函数语法和常规函数是一样的。 @@ -116,12 +119,12 @@ CH只支持用单引号包含的字母。特殊字符可通过反斜杠进行转 在查询解析阶段,运算符会被转换成对应的函数,使用时请注意它们的优先级。例如: 表达式 `1 + 2 * 3 + 4` 会被解析成 
`plus(plus(1, multiply(2, 3)), 4)`. - + ## 数据类型及数据库/表引擎 {#data_types-and-database-table-engines} `CREATE` 语句中的数据类型和表引擎写法与变量或函数类似。 -换句话说,它们可以用括号包含参数列表。更多信息,参见“数据类型,” “数据表引擎” 和 “CREATE语句”等章节 +换句话说,它们可以包含或不包含用括号包含的参数列表。更多信息,参见“数据类型,” “数据表引擎” 和 “CREATE语句”等章节 ## 表达式别名 {#syntax-expression_aliases} @@ -131,29 +134,30 @@ CH只支持用单引号包含的字母。特殊字符可通过反斜杠进行转 expr AS alias ``` -- `AS` — 用于定义别名的关键字。可以对表或select语句中的列定义别名(`AS` 可以省略) - 例如, `SELECT table_name_alias.column_name FROM table_name table_name_alias`. +- `AS` — 用于定义别名的关键字。可以对表或select语句中的列定义别名(`AS` 可以省略) + 例如, `SELECT table_name_alias.column_name FROM table_name table_name_alias`. - 在 [CAST函数](sql_reference/functions/type_conversion_functions.md#type_conversion_function-cast) 中,`AS`有其它含义。请参见该函数的说明部分。 + 在 [CAST函数](sql_reference/functions/type_conversion_functions.md#type_conversion_function-cast) 中,`AS`有其它含义。请参见该函数的说明部分。 -- `expr` — 任意CH支持的表达式. +- `expr` — 任意CH支持的表达式. - 例如, `SELECT column_name * 2 AS double FROM some_table`. + 例如, `SELECT column_name * 2 AS double FROM some_table`. -- `alias` — `expr` 的名称。别名必须符合 [变量名]](#syntax-identifiers) 语法. +- `alias` — `expr` 的名称。别名必须符合 [标识符](#syntax-identifiers) 语法. - 例如, `SELECT "table t".column_name FROM table_name AS "table t"`. + 例如, `SELECT "table t".column_name FROM table_name AS "table t"`. ### 用法注意 {#notes-on-usage} 别名在当前查询或子查询中是全局可见的,你可以在查询语句的任何位置对表达式定义别名 -别名在当前查询的子查询及不同子查询中是不可见的。例如,执行如下查询SQL: `SELECT (SELECT sum(b.a) + num FROM b) - a.a AS num FROM a` ,CH会提示异常 `Unknown identifier: num`. +别名在当前查询的子查询及不同子查询中是不可见的。例如,执行如下查询SQL: `SELECT (SELECT sum(b.a) + num FROM b) - a.a AS num FROM a` ,ClickHouse会提示异常 `Unknown identifier: num`. 如果给select子查询语句的结果列定义其别名,那么在外层可以使用该别名。例如, `SELECT n + m FROM (SELECT 1 AS n, 2 AS m)`. 注意列的别名和表的别名相同时的情形,考虑如下示例: + ``` sql CREATE TABLE t ( @@ -175,7 +179,7 @@ Received exception from server (version 18.14.17): Code: 184. DB::Exception: Received from localhost:9000, 127.0.0.1. DB::Exception: Aggregate function sum(b) is found inside another aggregate function in query. 
``` -在这个示例中,先声明了表 `t` 以及列 `b`。然后,在查询数据时,又定义了别名 `sum(b) AS b`。由于别名是全局的,CH使用表达式 `sum(b)` 来替换表达式 `argMax(a, b)` 中的变量 `b`。这种替换导致出现异常。 +在这个示例中,先声明了表 `t` 以及列 `b`。然后,在查询数据时,又定义了别名 `sum(b) AS b`。由于别名是全局的,ClickHouse使用表达式 `sum(b)` 来替换表达式 `argMax(a, b)` 中的变量 `b`。这种替换导致出现异常。 ## 星号 {#asterisk} @@ -184,7 +188,7 @@ select查询中,星号可以代替表达式使用。详情请参见“select ## 表达式 {#syntax-expressions} -表达式是函数、标识符、字符、运算符的应用程序、括号中的表达式、子查询或星号。它也可以包含别名。 +表达式是函数、标识符、字符、使用运算符的语句、括号中的表达式、子查询或星号。它也可以包含别名。 表达式列表是用逗号分隔的一个或多个表达式。 反过来,函数和运算符可以将表达式作为参数。 From ae1bb3f18b0851247cdf4e1b75c742d813c96241 Mon Sep 17 00:00:00 2001 From: kssenii Date: Sat, 3 Jul 2021 13:16:55 +0300 Subject: [PATCH 706/931] Fix clang-tidy --- src/Interpreters/MarkTableIdentifiersVisitor.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Interpreters/MarkTableIdentifiersVisitor.cpp b/src/Interpreters/MarkTableIdentifiersVisitor.cpp index 1f418e759e7..3010dbf57af 100644 --- a/src/Interpreters/MarkTableIdentifiersVisitor.cpp +++ b/src/Interpreters/MarkTableIdentifiersVisitor.cpp @@ -18,7 +18,7 @@ namespace if (!func.arguments || (func.arguments->children.size() <= argument_pos)) return; auto arg = func.arguments->children[argument_pos]; - auto identifier = arg->as(); + auto * identifier = arg->as(); if (!identifier) return; if (aliases.contains(identifier->name())) From a9a91a4f15065585faf156cc8a917586ffafd4a5 Mon Sep 17 00:00:00 2001 From: kssenii Date: Sat, 3 Jul 2021 11:28:24 +0000 Subject: [PATCH 707/931] Setting move to prewhere if final --- src/Core/Settings.h | 1 + src/Interpreters/InterpreterSelectQuery.cpp | 11 +++++- src/Interpreters/InterpreterSelectQuery.h | 1 + ...der_key_to_prewhere_select_final.reference | 38 +++++++++++++++++++ ...ove_order_key_to_prewhere_select_final.sql | 21 ++++++++++ 5 files changed, 70 insertions(+), 2 deletions(-) diff --git a/src/Core/Settings.h b/src/Core/Settings.h index 0197bfac7e4..f9765f0278f 100644 --- a/src/Core/Settings.h +++ b/src/Core/Settings.h @@ -93,6 +93,7 @@ class IColumn; M(Bool, distributed_directory_monitor_split_batch_on_failure, false, "Should StorageDistributed DirectoryMonitors try to split batch into smaller in case of failures.", 0) \ \ M(Bool, optimize_move_to_prewhere, true, "Allows disabling WHERE to PREWHERE optimization in SELECT queries from MergeTree.", 0) \ + M(Bool, optimize_move_to_prewhere_if_final, false, "If query has `final`, optimization `move_to_prewhere` is enabled only if `optimize_move_to_prewhere` and `optimize_move_to_prewhere_if_final` are enabled", 0) \ \ M(UInt64, replication_alter_partitions_sync, 1, "Wait for actions to manipulate the partitions. 0 - do not wait, 1 - wait for execution only of itself, 2 - wait for everyone.", 0) \ M(UInt64, replication_alter_columns_timeout, 60, "Wait for actions to change the table structure within the specified number of seconds. 
0 - wait unlimited time.", 0) \ diff --git a/src/Interpreters/InterpreterSelectQuery.cpp b/src/Interpreters/InterpreterSelectQuery.cpp index 181b60b7bf3..612fcac2dd0 100644 --- a/src/Interpreters/InterpreterSelectQuery.cpp +++ b/src/Interpreters/InterpreterSelectQuery.cpp @@ -506,7 +506,7 @@ InterpreterSelectQuery::InterpreterSelectQuery( result_header = getSampleBlockImpl(); }; - analyze(settings.optimize_move_to_prewhere); + analyze(settings.optimize_move_to_prewhere && moveToPrewhereIfFinal()); bool need_analyze_again = false; if (analysis_result.prewhere_constant_filter_description.always_false || analysis_result.prewhere_constant_filter_description.always_true) @@ -1532,6 +1532,13 @@ void InterpreterSelectQuery::addEmptySourceToQueryPlan( } } +bool InterpreterSelectQuery::moveToPrewhereIfFinal() +{ + const Settings & settings = context->getSettingsRef(); + const ASTSelectQuery & query = getSelectQuery(); + return !query.final() || settings.optimize_move_to_prewhere_if_final; +} + void InterpreterSelectQuery::addPrewhereAliasActions() { const Settings & settings = context->getSettingsRef(); @@ -1541,7 +1548,7 @@ void InterpreterSelectQuery::addPrewhereAliasActions() if (!expressions.prewhere_info) { const bool does_storage_support_prewhere = !input && !input_pipe && storage && storage->supportsPrewhere(); - if (does_storage_support_prewhere && settings.optimize_move_to_prewhere) + if (does_storage_support_prewhere && settings.optimize_move_to_prewhere && moveToPrewhereIfFinal()) { /// Execute row level filter in prewhere as a part of "move to prewhere" optimization. expressions.prewhere_info = std::make_shared( diff --git a/src/Interpreters/InterpreterSelectQuery.h b/src/Interpreters/InterpreterSelectQuery.h index c155ce0bc13..ae0bd5d5681 100644 --- a/src/Interpreters/InterpreterSelectQuery.h +++ b/src/Interpreters/InterpreterSelectQuery.h @@ -118,6 +118,7 @@ private: ASTSelectQuery & getSelectQuery() { return query_ptr->as(); } void addPrewhereAliasActions(); + bool moveToPrewhereIfFinal(); Block getSampleBlockImpl(); diff --git a/tests/queries/0_stateless/01737_move_order_key_to_prewhere_select_final.reference b/tests/queries/0_stateless/01737_move_order_key_to_prewhere_select_final.reference index 95479cf37ba..71d10397326 100644 --- a/tests/queries/0_stateless/01737_move_order_key_to_prewhere_select_final.reference +++ b/tests/queries/0_stateless/01737_move_order_key_to_prewhere_select_final.reference @@ -1,9 +1,12 @@ +optimize_move_to_prewhere_if_final = 1 + SELECT x, y, z FROM prewhere_move_select_final PREWHERE y > 100 + SELECT x, y, @@ -11,6 +14,7 @@ SELECT FROM prewhere_move_select_final FINAL PREWHERE y > 100 + SELECT x, y, @@ -18,6 +22,7 @@ SELECT FROM prewhere_move_select_final FINAL WHERE z > 400 + SELECT x, y, @@ -26,3 +31,36 @@ FROM prewhere_move_select_final FINAL PREWHERE y > 100 WHERE (y > 100) AND (z > 400) + +optimize_move_to_prewhere_if_final = 0 + +SELECT + x, + y, + z +FROM prewhere_move_select_final +PREWHERE y > 100 + +SELECT + x, + y, + z +FROM prewhere_move_select_final +FINAL +WHERE y > 100 + +SELECT + x, + y, + z +FROM prewhere_move_select_final +FINAL +WHERE z > 400 + +SELECT + x, + y, + z +FROM prewhere_move_select_final +FINAL +WHERE (y > 100) AND (z > 400) diff --git a/tests/queries/0_stateless/01737_move_order_key_to_prewhere_select_final.sql b/tests/queries/0_stateless/01737_move_order_key_to_prewhere_select_final.sql index a3a882c461a..ecc11c625e3 100644 --- a/tests/queries/0_stateless/01737_move_order_key_to_prewhere_select_final.sql +++ 
b/tests/queries/0_stateless/01737_move_order_key_to_prewhere_select_final.sql @@ -1,15 +1,36 @@ DROP TABLE IF EXISTS prewhere_move_select_final; + CREATE TABLE prewhere_move_select_final (x Int, y Int, z Int) ENGINE = ReplacingMergeTree() ORDER BY (x, y); INSERT INTO prewhere_move_select_final SELECT number, number * 2, number * 3 FROM numbers(1000); +select 'optimize_move_to_prewhere_if_final = 1'; +SET optimize_move_to_prewhere_if_final = 1; + -- order key can be pushed down with final +select ''; EXPLAIN SYNTAX SELECT * FROM prewhere_move_select_final WHERE y > 100; +select ''; EXPLAIN SYNTAX SELECT * FROM prewhere_move_select_final FINAL WHERE y > 100; -- can not be pushed down +select ''; EXPLAIN SYNTAX SELECT * FROM prewhere_move_select_final FINAL WHERE z > 400; -- only y can be pushed down +select ''; +EXPLAIN SYNTAX SELECT * FROM prewhere_move_select_final FINAL WHERE y > 100 and z > 400; + +select ''; +select 'optimize_move_to_prewhere_if_final = 0'; +SET optimize_move_to_prewhere_if_final = 0; + +select ''; +EXPLAIN SYNTAX SELECT * FROM prewhere_move_select_final WHERE y > 100; +select ''; +EXPLAIN SYNTAX SELECT * FROM prewhere_move_select_final FINAL WHERE y > 100; +select ''; +EXPLAIN SYNTAX SELECT * FROM prewhere_move_select_final FINAL WHERE z > 400; +select ''; EXPLAIN SYNTAX SELECT * FROM prewhere_move_select_final FINAL WHERE y > 100 and z > 400; DROP TABLE prewhere_move_select_final; From a1706f20ff12c44fa8ad6e529a044f31febc50af Mon Sep 17 00:00:00 2001 From: Maksim Kita Date: Sat, 3 Jul 2021 15:05:08 +0300 Subject: [PATCH 708/931] Update syntax.md --- docs/zh/sql-reference/syntax.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/zh/sql-reference/syntax.md b/docs/zh/sql-reference/syntax.md index d3bdd208059..644dc646726 100644 --- a/docs/zh/sql-reference/syntax.md +++ b/docs/zh/sql-reference/syntax.md @@ -137,7 +137,7 @@ expr AS alias - `AS` — 用于定义别名的关键字。可以对表或select语句中的列定义别名(`AS` 可以省略) 例如, `SELECT table_name_alias.column_name FROM table_name table_name_alias`. - 在 [CAST函数](sql_reference/functions/type_conversion_functions.md#type_conversion_function-cast) 中,`AS`有其它含义。请参见该函数的说明部分。 + 在 [CAST函数](../sql-reference/functions/type-conversion-functions.md#type_conversion_function-cast) 中,`AS`有其它含义。请参见该函数的说明部分。 - `expr` — 任意CH支持的表达式. From 317ade9b4cff79c3c7de2f01c56e982f3a24791c Mon Sep 17 00:00:00 2001 From: vdimir Date: Sat, 3 Jul 2021 16:24:41 +0300 Subject: [PATCH 709/931] Build subquery in replaceJoinedTable without parsing --- src/Interpreters/JoinedTables.cpp | 45 ++++++++++++++++++++++++++----- 1 file changed, 39 insertions(+), 6 deletions(-) diff --git a/src/Interpreters/JoinedTables.cpp b/src/Interpreters/JoinedTables.cpp index 421fe7fcddd..7647b60458a 100644 --- a/src/Interpreters/JoinedTables.cpp +++ b/src/Interpreters/JoinedTables.cpp @@ -5,6 +5,8 @@ #include #include #include + +#include #include #include #include @@ -12,8 +14,7 @@ #include #include #include -#include -#include + #include #include #include @@ -33,6 +34,23 @@ namespace ErrorCodes namespace { +template +std::shared_ptr addASTChildrenTo(IAST & node, ASTPtr & children, Args && ... 
args) +{ + auto new_children = std::make_shared(std::forward(args)...); + children = new_children; + node.children.push_back(children); + return new_children; +} + +template +std::shared_ptr addASTChildren(IAST & node) +{ + auto children = std::make_shared(); + node.children.push_back(children); + return children; +} + void replaceJoinedTable(const ASTSelectQuery & select_query) { const ASTTablesInSelectQueryElement * join = select_query.join(); @@ -48,15 +66,30 @@ void replaceJoinedTable(const ASTSelectQuery & select_query) if (table_expr.database_and_table_name) { const auto & table_id = table_expr.database_and_table_name->as(); - String expr = "(SELECT * FROM " + backQuote(table_id.name()) + ") AS " + backQuote(table_id.shortName()); - + String table_name = table_id.name(); + String table_short_name = table_id.shortName(); // FIXME: since the expression "a as b" exposes both "a" and "b" names, which is not equivalent to "(select * from a) as b", // we can't replace aliased tables. // FIXME: long table names include database name, which we can't save within alias. if (table_id.alias.empty() && table_id.isShort()) { - ParserTableExpression parser; - table_expr = parseQuery(parser, expr, 0, DBMS_DEFAULT_MAX_PARSER_DEPTH)->as(); + /// Build query of form '(SELECT * FROM table_name) AS table_short_name' + table_expr = ASTTableExpression(); + + auto subquery = addASTChildrenTo(table_expr, table_expr.subquery); + subquery->setAlias(table_short_name); + + auto sub_select_with_union = addASTChildren(*subquery); + auto list_of_selects = addASTChildrenTo(*sub_select_with_union, sub_select_with_union->list_of_selects); + + auto new_select = addASTChildren(*list_of_selects); + new_select->setExpression(ASTSelectQuery::Expression::SELECT, std::make_shared()); + addASTChildren(*new_select->select()); + new_select->setExpression(ASTSelectQuery::Expression::TABLES, std::make_shared()); + + auto tables_elem = addASTChildren(*new_select->tables()); + auto sub_table_expr = addASTChildrenTo(*tables_elem, tables_elem->table_expression); + addASTChildrenTo(*sub_table_expr, sub_table_expr->database_and_table_name, table_name); } } } From 55220d49f9df9f0186b95d9023a202cab6836978 Mon Sep 17 00:00:00 2001 From: Maksim Kita Date: Sat, 3 Jul 2021 16:29:32 +0300 Subject: [PATCH 710/931] Fixed code review issues --- base/common/FunctorToStaticMethodAdaptor.h | 16 ++--- src/AggregateFunctions/IAggregateFunction.cpp | 40 +++++++++++ src/AggregateFunctions/IAggregateFunction.h | 66 ++++--------------- src/Interpreters/Aggregator.cpp | 19 +++--- src/Interpreters/JIT/compileFunction.h | 3 + 5 files changed, 73 insertions(+), 71 deletions(-) diff --git a/base/common/FunctorToStaticMethodAdaptor.h b/base/common/FunctorToStaticMethodAdaptor.h index 273c436e9d7..9f55b52a79a 100644 --- a/base/common/FunctorToStaticMethodAdaptor.h +++ b/base/common/FunctorToStaticMethodAdaptor.h @@ -12,15 +12,15 @@ template class FunctorToStaticMethodAdaptor { public: - static R call(C * ptr, Args... arguments) + static R call(C * ptr, Args &&... arguments) { - return std::invoke(&C::operator(), ptr, arguments...); + return std::invoke(&C::operator(), ptr, std::forward(arguments)...); } - static R unsafeCall(char * ptr, Args... arguments) + static R unsafeCall(char * ptr, Args &&... 
arguments) { C * ptr_typed = reinterpret_cast(ptr); - return std::invoke(&C::operator(), ptr_typed, arguments...); + return std::invoke(&C::operator(), ptr_typed, std::forward(arguments)...); } }; @@ -28,14 +28,14 @@ template class FunctorToStaticMethodAdaptor { public: - static R call(C * ptr, Args... arguments) + static R call(C * ptr, Args &&... arguments) { - return std::invoke(&C::operator(), ptr, arguments...); + return std::invoke(&C::operator(), ptr, std::forward(arguments)...); } - static R unsafeCall(char * ptr, Args... arguments) + static R unsafeCall(char * ptr, Args &&... arguments) { C * ptr_typed = static_cast(ptr); - return std::invoke(&C::operator(), ptr_typed, arguments...); + return std::invoke(&C::operator(), ptr_typed, std::forward(arguments)...); } }; diff --git a/src/AggregateFunctions/IAggregateFunction.cpp b/src/AggregateFunctions/IAggregateFunction.cpp index 49e68449e18..55998d963bf 100644 --- a/src/AggregateFunctions/IAggregateFunction.cpp +++ b/src/AggregateFunctions/IAggregateFunction.cpp @@ -10,4 +10,44 @@ DataTypePtr IAggregateFunction::getStateType() const return std::make_shared(shared_from_this(), argument_types, parameters); } +String IAggregateFunction::getDescription() const +{ + String description; + + description += getName(); + + description += '('; + + for (const auto & parameter : parameters) + { + description += parameter.dump(); + description += ", "; + } + + if (!parameters.empty()) + { + description.pop_back(); + description.pop_back(); + } + + description += ')'; + + description += '('; + + for (const auto & argument_type : argument_types) + { + description += argument_type->getName(); + description += ", "; + } + + if (!argument_types.empty()) + { + description.pop_back(); + description.pop_back(); + } + + description += ')'; + + return description; +} } diff --git a/src/AggregateFunctions/IAggregateFunction.h b/src/AggregateFunctions/IAggregateFunction.h index 74cd0890903..7acfa82a139 100644 --- a/src/AggregateFunctions/IAggregateFunction.h +++ b/src/AggregateFunctions/IAggregateFunction.h @@ -218,9 +218,10 @@ public: const IColumn ** columns, Arena * arena) const = 0; - /** Insert result of aggregate function into places with batch size. - * Also all places must be destroyed if there was exception during insert. - * If destroy_place is true. Then client must destroy aggregate places if insert throws exception. + /** Insert result of aggregate function into result column with batch size. + * If destroy_place_after_insert is true. Then implementation of this method + * must destroy aggregate place if insert state into result column was successful. + * All places that were not inserted must be destroyed if there was exception during insert into result column. */ virtual void insertResultIntoBatch( size_t batch_size, @@ -228,7 +229,7 @@ public: size_t place_offset, IColumn & to, Arena * arena, - bool destroy_place) const = 0; + bool destroy_place_after_insert) const = 0; /** Destroy batch of aggregate places. */ @@ -270,46 +271,8 @@ public: // of true window functions, so this hack-ish interface suffices. virtual bool isOnlyWindowFunction() const { return false; } - /// Description of AggregateFunction in form of name(argument_types)(parameters). 
-    virtual String getDescription() const
-    {
-        String description;
-
-        description += getName();
-        description += '(';
-
-        for (const auto & argument_type : argument_types)
-        {
-            description += argument_type->getName();
-            description += ", ";
-        }
-
-        if (!argument_types.empty())
-        {
-            description.pop_back();
-            description.pop_back();
-        }
-
-        description += ')';
-
-        description += '(';
-
-        for (const auto & parameter : parameters)
-        {
-            description += parameter.dump();
-            description += ", ";
-        }
-
-        if (!parameters.empty())
-        {
-            description.pop_back();
-            description.pop_back();
-        }
-
-        description += ')';
-
-        return description;
-    }
+    /// Description of AggregateFunction in the form name(parameters)(argument_types).
+    String getDescription() const;
 
 #if USE_EMBEDDED_COMPILER
 
@@ -319,25 +282,25 @@ public:
     /// compileCreate should generate code for initialization of aggregate function state in aggregate_data_ptr
     virtual void compileCreate(llvm::IRBuilderBase & /*builder*/, llvm::Value * /*aggregate_data_ptr*/) const
     {
-        throw Exception(getName() + " is not JIT-compilable", ErrorCodes::NOT_IMPLEMENTED);
+        throw Exception(ErrorCodes::NOT_IMPLEMENTED, "{} is not JIT-compilable", getName());
     }
 
     /// compileAdd should generate code for updating aggregate function state stored in aggregate_data_ptr
     virtual void compileAdd(llvm::IRBuilderBase & /*builder*/, llvm::Value * /*aggregate_data_ptr*/, const DataTypes & /*arguments_types*/, const std::vector<llvm::Value *> & /*arguments_values*/) const
     {
-        throw Exception(getName() + " is not JIT-compilable", ErrorCodes::NOT_IMPLEMENTED);
+        throw Exception(ErrorCodes::NOT_IMPLEMENTED, "{} is not JIT-compilable", getName());
     }
 
     /// compileMerge should generate code for merging aggregate function states stored in aggregate_data_dst_ptr and aggregate_data_src_ptr
     virtual void compileMerge(llvm::IRBuilderBase & /*builder*/, llvm::Value * /*aggregate_data_dst_ptr*/, llvm::Value * /*aggregate_data_src_ptr*/) const
     {
-        throw Exception(getName() + " is not JIT-compilable", ErrorCodes::NOT_IMPLEMENTED);
+        throw Exception(ErrorCodes::NOT_IMPLEMENTED, "{} is not JIT-compilable", getName());
     }
 
     /// compileGetResult should generate code for getting result value from aggregate function state stored in aggregate_data_ptr
     virtual llvm::Value * compileGetResult(llvm::IRBuilderBase & /*builder*/, llvm::Value * /*aggregate_data_ptr*/) const
     {
-        throw Exception(getName() + " is not JIT-compilable", ErrorCodes::NOT_IMPLEMENTED);
+        throw Exception(ErrorCodes::NOT_IMPLEMENTED, "{} is not JIT-compilable", getName());
     }
 
 #endif
 
@@ -517,7 +480,7 @@ public:
         }
     }
 
-    void insertResultIntoBatch(size_t batch_size, AggregateDataPtr * places, size_t place_offset, IColumn & to, Arena * arena, bool destroy_place) const override
+    void insertResultIntoBatch(size_t batch_size, AggregateDataPtr * places, size_t place_offset, IColumn & to, Arena * arena, bool destroy_place_after_insert) const override
     {
         size_t batch_index = 0;
 
@@ -527,15 +490,14 @@ public:
             {
                 static_cast<const Derived *>(this)->insertResultInto(places[batch_index] + place_offset, to, arena);
 
-                if (destroy_place)
+                if (destroy_place_after_insert)
                     static_cast<const Derived *>(this)->destroy(places[batch_index] + place_offset);
             }
         }
         catch (...)
        {
             for (size_t destroy_index = batch_index; destroy_index < batch_size; ++destroy_index)
-                if (destroy_place)
-                    static_cast<const Derived *>(this)->destroy(places[destroy_index] + place_offset);
+                static_cast<const Derived *>(this)->destroy(places[destroy_index] + place_offset);
 
             throw;
         }
diff --git a/src/Interpreters/Aggregator.cpp b/src/Interpreters/Aggregator.cpp
index 800145cf330..715d44eecf0 100644
--- a/src/Interpreters/Aggregator.cpp
+++ b/src/Interpreters/Aggregator.cpp
@@ -341,7 +341,6 @@ void Aggregator::compileAggregateFunctions()
         is_aggregate_function_compiled[i] = function->isCompilable();
     }
 
-    /// TODO: Probably better to compile more than 2 functions
     if (functions_to_compile.empty())
         return;
 
@@ -636,7 +635,7 @@ void NO_INLINE Aggregator::executeImplBatch(
         return;
     }
 
-    /// Optimization for special case when aggregating by 8bit key.`
+    /// Optimization for special case when aggregating by 8bit key.
     if constexpr (!no_more_keys && std::is_same_v)
     {
         /// We use another method if there are aggregate functions with -Array combinator.
@@ -702,6 +701,8 @@ void NO_INLINE Aggregator::executeImplBatch(
     }
 
 #if defined(MEMORY_SANITIZER)
+
+    /// We compile only functions that do not allocate any data in Arena and only store the necessary state in the AggregateData place.
     for (size_t aggregate_function_index = 0; aggregate_function_index < aggregate_functions.size(); ++aggregate_function_index)
     {
         if (!is_aggregate_function_compiled[aggregate_function_index])
@@ -1384,17 +1385,18 @@ void NO_INLINE Aggregator::convertToBlockImplFinal(
             auto & final_aggregate_column = final_aggregate_columns[aggregate_functions_destroy_index];
             size_t offset = offsets_of_aggregate_states[aggregate_functions_destroy_index];
 
-            /** We increase aggregate_functions_destroy_index because by function contract if insertResultIntoAndDestroyBatch
+            /** We increase aggregate_functions_destroy_index because by function contract if insertResultIntoBatch
              * throws exception, it also must destroy all necessary states.
              * Then code need to continue to destroy other aggregate function states with next function index.
              */
             size_t destroy_index = aggregate_functions_destroy_index;
             ++aggregate_functions_destroy_index;
 
+            /// For State aggregate functions, ownership of the aggregate place is passed to the result column after insert.
             bool is_state = aggregate_functions[destroy_index]->isState();
-            bool destroy_place = !is_state;
+            bool destroy_place_after_insert = !is_state;
 
-            aggregate_functions[destroy_index]->insertResultIntoBatch(places.size(), places.data(), offset, *final_aggregate_column, arena, destroy_place);
+            aggregate_functions[destroy_index]->insertResultIntoBatch(places.size(), places.data(), offset, *final_aggregate_column, arena, destroy_place_after_insert);
         }
     }
     catch (...)
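The two hunks above pin down the ownership contract of insertResultIntoBatch: on success a place is destroyed only when destroy_place_after_insert is set, while on exception every place that was not yet inserted is destroyed unconditionally (which is why the destroy_place check inside the catch block was dropped). A minimal self-contained sketch of that contract, with illustrative State/insertInto/destroy names rather than ClickHouse's actual types:

#include <cstddef>
#include <iostream>
#include <stdexcept>
#include <vector>

struct State { bool destroyed = false; };

void insertInto(State &, size_t index)
{
    if (index == 2)                               /// Simulate a failing insert mid-batch.
        throw std::runtime_error("insert failed");
}

void destroy(State & state) noexcept { state.destroyed = true; }

void insertBatch(std::vector<State> & states, bool destroy_place_after_insert)
{
    size_t i = 0;
    try
    {
        for (; i < states.size(); ++i)
        {
            insertInto(states[i], i);             /// May throw.
            if (destroy_place_after_insert)
                destroy(states[i]);               /// State is consumed on success.
        }
    }
    catch (...)
    {
        /// States from index i onwards were never inserted, so they are still
        /// owned here and must be destroyed unconditionally before rethrowing.
        for (size_t j = i; j < states.size(); ++j)
            destroy(states[j]);
        throw;
    }
}

int main()
{
    std::vector<State> states(4);
    try { insertBatch(states, /*destroy_place_after_insert=*/ false); }
    catch (const std::exception & e) { std::cout << e.what() << '\n'; }
    /// states[2] and states[3] are destroyed by the catch block; states[0] and
    /// states[1] are not, because their ownership moved to the (hypothetical)
    /// result column on successful insert.
    return 0;
}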
@@ -1414,10 +1416,7 @@ void NO_INLINE Aggregator::convertToBlockImplFinal(
         }
 
         size_t offset = offsets_of_aggregate_states[aggregate_functions_destroy_index];
-
-        bool is_state = aggregate_functions[aggregate_functions_destroy_index]->isState();
-        if (!is_state)
-            aggregate_functions[aggregate_functions_destroy_index]->destroyBatch(places.size(), places.data(), offset);
+        aggregate_functions[aggregate_functions_destroy_index]->destroyBatch(places.size(), places.data(), offset);
     }
 
     if (exception)
@@ -2015,7 +2014,6 @@ void NO_INLINE Aggregator::mergeSingleLevelDataImpl(
     }
     else if (res->without_key)
     {
-        /// TODO: Use compile function
         mergeDataNoMoreKeysImpl(
             getDataVariant<Method>(*res).data,
             res->without_key,
@@ -2024,7 +2022,6 @@ void NO_INLINE Aggregator::mergeSingleLevelDataImpl(
     }
     else
     {
-        /// TODO: Use compile function
         mergeDataOnlyExistingKeysImpl(
             getDataVariant<Method>(*res).data,
             getDataVariant<Method>(current).data,
diff --git a/src/Interpreters/JIT/compileFunction.h b/src/Interpreters/JIT/compileFunction.h
index 92c8cd93b35..5355227defe 100644
--- a/src/Interpreters/JIT/compileFunction.h
+++ b/src/Interpreters/JIT/compileFunction.h
@@ -64,7 +64,10 @@ struct CompiledAggregateFunctions
     JITMergeAggregateStatesFunction merge_aggregate_states_function;
     JITInsertAggregateStatesIntoColumnsFunction insert_aggregates_into_columns_function;
 
+    /// Count of functions that were compiled.
     size_t functions_count;
+
+    /// Compiled module. It is the client's responsibility to destroy it once the functions are no longer required.
     CHJIT::CompiledModule compiled_module;
 };

From a0623ddb08e0ec9aea0509563d899acc6f531192 Mon Sep 17 00:00:00 2001
From: kssenii
Date: Sat, 3 Jul 2021 13:35:11 +0000
Subject: [PATCH 711/931] Correct test

---
 .../test.py | 58 ++++++++++---------
 1 file changed, 30 insertions(+), 28 deletions(-)

diff --git a/tests/integration/test_postgresql_replica_database_engine/test.py b/tests/integration/test_postgresql_replica_database_engine/test.py
index d13a7501b35..2d8689f31e8 100644
--- a/tests/integration/test_postgresql_replica_database_engine/test.py
+++ b/tests/integration/test_postgresql_replica_database_engine/test.py
@@ -72,8 +72,11 @@ def create_materialized_db(ip, port,
     instance.query(create_query)
     assert materialized_database in instance.query('SHOW DATABASES')
 
-def drop_materialized_db(materialized_database='test_database'):
-    instance.query('DROP DATABASE {}'.format(materialized_database))
+def drop_materialized_db(materialized_database='test_database', sync=False):
+    if sync:
+        instance.query('DROP DATABASE IF EXISTS {} SYNC'.format(materialized_database))
+    else:
+        instance.query('DROP DATABASE IF EXISTS {}'.format(materialized_database))
     assert materialized_database not in instance.query('SHOW DATABASES')
 
 def create_postgres_table(cursor, table_name, replica_identity_full=False, template=postgres_table_template):
@@ -148,7 +151,7 @@ def started_cluster():
 
 @pytest.mark.timeout(120)
 def test_load_and_sync_all_database_tables(started_cluster):
-    instance.query("DROP DATABASE IF EXISTS test_database")
+    drop_materialized_db(sync=True)
     conn = get_postgres_conn(ip=started_cluster.postgres_ip,
                              port=started_cluster.postgres_port,
                              database=True)

     result = instance.query('''SELECT count() FROM system.tables WHERE database = 'test_database';''')
     assert(int(result) == NUM_TABLES)
-
-    instance.query("DROP DATABASE test_database")
-    assert 'test_database' not in instance.query('SHOW DATABASES')
+    drop_materialized_db()
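On the ownership note added to CompiledAggregateFunctions in the previous patch: since compiled_module is not released automatically, a caller would typically wrap it in an RAII holder so it is destroyed exactly once, even on exceptions. A sketch of that idea, using hypothetical CompiledModule/deleteCompiledModule stand-ins rather than the real CHJIT interface:

#include <utility>

/// Stand-ins for the real JIT types (assumption: the actual CHJIT API differs).
struct CompiledModule { int id = 0; };
void deleteCompiledModule(const CompiledModule &) { /* release JIT memory */ }

/// RAII holder: the module is destroyed exactly once when the holder dies.
class CompiledModuleHolder
{
public:
    explicit CompiledModuleHolder(CompiledModule module_) : module(module_), owns(true) {}

    CompiledModuleHolder(CompiledModuleHolder && other) noexcept
        : module(other.module), owns(std::exchange(other.owns, false)) {}

    CompiledModuleHolder(const CompiledModuleHolder &) = delete;
    CompiledModuleHolder & operator=(const CompiledModuleHolder &) = delete;

    ~CompiledModuleHolder()
    {
        if (owns)
            deleteCompiledModule(module);
    }

private:
    CompiledModule module;
    bool owns;
};

int main()
{
    CompiledModuleHolder holder(CompiledModule{42});
    /// Use the compiled functions while the holder is alive; the module is
    /// released when the holder goes out of scope.
    return 0;
}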
@pytest.mark.timeout(120) def test_replicating_dml(started_cluster): - instance.query("DROP DATABASE IF EXISTS test_database") + drop_materialized_db() conn = get_postgres_conn(ip=started_cluster.postgres_ip, port=started_cluster.postgres_port, database=True) @@ -217,13 +218,12 @@ def test_replicating_dml(started_cluster): for i in range(NUM_TABLES): cursor.execute('drop table postgresql_replica_{};'.format(i)) - instance.query("DROP DATABASE test_database") - assert 'test_database' not in instance.query('SHOW DATABASES') + drop_materialized_db() @pytest.mark.timeout(120) def test_different_data_types(started_cluster): - instance.query("DROP DATABASE IF EXISTS test_database") + drop_materialized_db() conn = get_postgres_conn(ip=started_cluster.postgres_ip, port=started_cluster.postgres_port, database=True) @@ -303,13 +303,13 @@ def test_different_data_types(started_cluster): check_tables_are_synchronized('test_array_data_type'); result = instance.query('SELECT * FROM test_database.test_array_data_type ORDER BY key;') - instance.query("DROP DATABASE test_database") assert(result == expected) + drop_materialized_db() @pytest.mark.timeout(120) def test_load_and_sync_subset_of_database_tables(started_cluster): - instance.query("DROP DATABASE IF EXISTS test_database") + drop_materialized_db(sync=True) conn = get_postgres_conn(ip=started_cluster.postgres_ip, port=started_cluster.postgres_port, database=True) @@ -355,14 +355,12 @@ def test_load_and_sync_subset_of_database_tables(started_cluster): if i < int(NUM_TABLES/2): check_tables_are_synchronized(table_name); cursor.execute('drop table {};'.format(table_name)) - - instance.query("DROP DATABASE test_database") - assert 'test_database' not in instance.query('SHOW DATABASES') + drop_materialized_db() @pytest.mark.timeout(120) def test_changing_replica_identity_value(started_cluster): - instance.query("DROP DATABASE IF EXISTS test_database") + drop_materialized_db() conn = get_postgres_conn(ip=started_cluster.postgres_ip, port=started_cluster.postgres_port, database=True) @@ -377,11 +375,12 @@ def test_changing_replica_identity_value(started_cluster): check_tables_are_synchronized('postgresql_replica'); cursor.execute("UPDATE postgresql_replica SET key=key-25 WHERE key<100 ") check_tables_are_synchronized('postgresql_replica'); + drop_materialized_db() @pytest.mark.timeout(320) def test_clickhouse_restart(started_cluster): - instance.query("DROP DATABASE IF EXISTS test_database") + drop_materialized_db() conn = get_postgres_conn(ip=started_cluster.postgres_ip, port=started_cluster.postgres_port, database=True) @@ -405,11 +404,12 @@ def test_clickhouse_restart(started_cluster): for i in range(NUM_TABLES): check_tables_are_synchronized('postgresql_replica_{}'.format(i)); + drop_materialized_db() @pytest.mark.timeout(120) def test_replica_identity_index(started_cluster): - instance.query("DROP DATABASE IF EXISTS test_database") + drop_materialized_db() conn = get_postgres_conn(ip=started_cluster.postgres_ip, port=started_cluster.postgres_port, database=True) @@ -433,11 +433,12 @@ def test_replica_identity_index(started_cluster): cursor.execute('DELETE FROM postgresql_replica WHERE key2<75;') check_tables_are_synchronized('postgresql_replica', order_by='key1'); + drop_materialized_db() @pytest.mark.timeout(320) def test_table_schema_changes(started_cluster): - instance.query("DROP DATABASE IF EXISTS test_database") + drop_materialized_db() conn = get_postgres_conn(ip=started_cluster.postgres_ip, port=started_cluster.postgres_port, database=True) @@ 
-493,7 +494,7 @@ def test_table_schema_changes(started_cluster): @pytest.mark.timeout(120) def test_many_concurrent_queries(started_cluster): - instance.query("DROP DATABASE IF EXISTS test_database") + drop_materialized_db() conn = get_postgres_conn(ip=started_cluster.postgres_ip, port=started_cluster.postgres_port, database=True) @@ -573,7 +574,7 @@ def test_many_concurrent_queries(started_cluster): @pytest.mark.timeout(120) def test_single_transaction(started_cluster): - instance.query("DROP DATABASE IF EXISTS test_database") + drop_materialized_db() conn = get_postgres_conn(ip=started_cluster.postgres_ip, port=started_cluster.postgres_port, database=True, auto_commit=False) @@ -601,6 +602,7 @@ def test_single_transaction(started_cluster): def test_virtual_columns(started_cluster): + drop_materialized_db() conn = get_postgres_conn(ip=started_cluster.postgres_ip, port=started_cluster.postgres_port, database=True) @@ -634,8 +636,8 @@ def test_virtual_columns(started_cluster): def test_multiple_databases(started_cluster): - instance.query("DROP DATABASE IF EXISTS test_database_1") - instance.query("DROP DATABASE IF EXISTS test_database_2") + drop_materialized_db('test_database_1') + drop_materialized_db('test_database_2') NUM_TABLES = 5 conn = get_postgres_conn(ip=started_cluster.postgres_ip, @@ -692,7 +694,7 @@ def test_multiple_databases(started_cluster): @pytest.mark.timeout(320) def test_concurrent_transactions(started_cluster): - instance.query("DROP DATABASE IF EXISTS test_database") + drop_materialized_db() conn = get_postgres_conn(ip=started_cluster.postgres_ip, port=started_cluster.postgres_port, database=True) @@ -737,7 +739,7 @@ def test_concurrent_transactions(started_cluster): @pytest.mark.timeout(320) def test_abrupt_connection_loss_while_heavy_replication(started_cluster): - instance.query("DROP DATABASE IF EXISTS test_database") + drop_materialized_db() conn = get_postgres_conn(ip=started_cluster.postgres_ip, port=started_cluster.postgres_port, database=True) @@ -798,7 +800,7 @@ def test_abrupt_connection_loss_while_heavy_replication(started_cluster): def test_drop_database_while_replication_startup_not_finished(started_cluster): - instance.query("DROP DATABASE IF EXISTS test_database") + drop_materialized_db() conn = get_postgres_conn(ip=started_cluster.postgres_ip, port=started_cluster.postgres_port, database=True) @@ -817,7 +819,7 @@ def test_drop_database_while_replication_startup_not_finished(started_cluster): def test_restart_server_while_replication_startup_not_finished(started_cluster): - instance.query("DROP DATABASE IF EXISTS test_database") + drop_materialized_db() conn = get_postgres_conn(ip=started_cluster.postgres_ip, port=started_cluster.postgres_port, database=True) @@ -839,7 +841,7 @@ def test_restart_server_while_replication_startup_not_finished(started_cluster): @pytest.mark.timeout(320) def test_abrupt_server_restart_while_heavy_replication(started_cluster): - instance.query("DROP DATABASE IF EXISTS test_database") + drop_materialized_db() conn = get_postgres_conn(ip=started_cluster.postgres_ip, port=started_cluster.postgres_port, database=True) From 313117c7d8505b44b2e60f606ee7881667da1657 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sat, 3 Jul 2021 18:28:42 +0300 Subject: [PATCH 712/931] Skip ANTLR test --- tests/queries/skip_list.json | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tests/queries/skip_list.json b/tests/queries/skip_list.json index e0a96ef8ded..8fb95741bab 100644 --- a/tests/queries/skip_list.json +++ 
b/tests/queries/skip_list.json @@ -523,7 +523,8 @@ "01925_merge_prewhere_table", "01932_null_valid_identifier", "01934_constexpr_aggregate_function_parameters", - "01932_alter_index_with_order" + "01932_alter_index_with_order", + "01936_quantiles_cannot_return_null" ], "parallel": [ From 127455f15fe8ce7463f025619c6d5905c0d3d822 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sat, 3 Jul 2021 18:48:03 +0300 Subject: [PATCH 713/931] Remove experimental ANTLR parser --- .gitmodules | 3 - contrib/CMakeLists.txt | 1 - contrib/antlr4-runtime | 1 - contrib/antlr4-runtime-cmake/CMakeLists.txt | 156 - docker/test/fasttest/run.sh | 1 - src/CMakeLists.txt | 5 +- src/Core/Settings.h | 1 - src/Interpreters/executeQuery.cpp | 22 +- src/Parsers/New/AST/AlterTableQuery.cpp | 814 - src/Parsers/New/AST/AlterTableQuery.h | 191 - src/Parsers/New/AST/AttachQuery.cpp | 57 - src/Parsers/New/AST/AttachQuery.h | 32 - src/Parsers/New/AST/CheckQuery.cpp | 44 - src/Parsers/New/AST/CheckQuery.h | 24 - src/Parsers/New/AST/ColumnExpr.cpp | 588 - src/Parsers/New/AST/ColumnExpr.h | 82 - src/Parsers/New/AST/ColumnTypeExpr.cpp | 166 - src/Parsers/New/AST/ColumnTypeExpr.h | 62 - src/Parsers/New/AST/CreateDatabaseQuery.cpp | 51 - src/Parsers/New/AST/CreateDatabaseQuery.h | 26 - src/Parsers/New/AST/CreateDictionaryQuery.cpp | 361 - src/Parsers/New/AST/CreateDictionaryQuery.h | 183 - src/Parsers/New/AST/CreateLiveViewQuery.cpp | 86 - src/Parsers/New/AST/CreateLiveViewQuery.h | 39 - .../New/AST/CreateMaterializedViewQuery.cpp | 99 - .../New/AST/CreateMaterializedViewQuery.h | 40 - src/Parsers/New/AST/CreateTableQuery.cpp | 224 - src/Parsers/New/AST/CreateTableQuery.h | 76 - src/Parsers/New/AST/CreateViewQuery.cpp | 62 - src/Parsers/New/AST/CreateViewQuery.h | 34 - src/Parsers/New/AST/DDLQuery.cpp | 6 - src/Parsers/New/AST/DDLQuery.h | 29 - src/Parsers/New/AST/DescribeQuery.cpp | 36 - src/Parsers/New/AST/DescribeQuery.h | 27 - src/Parsers/New/AST/DropQuery.cpp | 126 - src/Parsers/New/AST/DropQuery.h | 46 - src/Parsers/New/AST/EngineExpr.cpp | 199 - src/Parsers/New/AST/EngineExpr.h | 85 - src/Parsers/New/AST/ExistsQuery.cpp | 87 - src/Parsers/New/AST/ExistsQuery.h | 37 - src/Parsers/New/AST/ExplainQuery.cpp | 62 - src/Parsers/New/AST/ExplainQuery.h | 34 - src/Parsers/New/AST/INode.h | 103 - src/Parsers/New/AST/Identifier.cpp | 174 - src/Parsers/New/AST/Identifier.h | 66 - src/Parsers/New/AST/InsertQuery.cpp | 125 - src/Parsers/New/AST/InsertQuery.h | 73 - src/Parsers/New/AST/JoinExpr.cpp | 326 - src/Parsers/New/AST/JoinExpr.h | 103 - src/Parsers/New/AST/KillQuery.cpp | 56 - src/Parsers/New/AST/KillQuery.h | 33 - src/Parsers/New/AST/LimitExpr.cpp | 39 - src/Parsers/New/AST/LimitExpr.h | 24 - src/Parsers/New/AST/Literal.cpp | 222 - src/Parsers/New/AST/Literal.h | 96 - src/Parsers/New/AST/OptimizeQuery.cpp | 59 - src/Parsers/New/AST/OptimizeQuery.h | 27 - src/Parsers/New/AST/OrderExpr.cpp | 62 - src/Parsers/New/AST/OrderExpr.h | 33 - src/Parsers/New/AST/Query.cpp | 34 - src/Parsers/New/AST/Query.h | 29 - src/Parsers/New/AST/README.md | 32 - src/Parsers/New/AST/RatioExpr.cpp | 43 - src/Parsers/New/AST/RatioExpr.h | 24 - src/Parsers/New/AST/RenameQuery.cpp | 58 - src/Parsers/New/AST/RenameQuery.h | 23 - src/Parsers/New/AST/SelectUnionQuery.cpp | 444 - src/Parsers/New/AST/SelectUnionQuery.h | 193 - src/Parsers/New/AST/SetQuery.cpp | 43 - src/Parsers/New/AST/SetQuery.h | 23 - src/Parsers/New/AST/SettingExpr.cpp | 33 - src/Parsers/New/AST/SettingExpr.h | 25 - src/Parsers/New/AST/ShowCreateQuery.cpp | 96 - 
src/Parsers/New/AST/ShowCreateQuery.h | 36 - src/Parsers/New/AST/ShowQuery.cpp | 49 - src/Parsers/New/AST/ShowQuery.h | 32 - src/Parsers/New/AST/SystemQuery.cpp | 191 - src/Parsers/New/AST/SystemQuery.h | 50 - src/Parsers/New/AST/TableElementExpr.cpp | 264 - src/Parsers/New/AST/TableElementExpr.h | 123 - src/Parsers/New/AST/TableExpr.cpp | 190 - src/Parsers/New/AST/TableExpr.h | 81 - src/Parsers/New/AST/TruncateQuery.cpp | 47 - src/Parsers/New/AST/TruncateQuery.h | 25 - src/Parsers/New/AST/UseQuery.cpp | 37 - src/Parsers/New/AST/UseQuery.h | 23 - src/Parsers/New/AST/WatchQuery.cpp | 51 - src/Parsers/New/AST/WatchQuery.h | 26 - src/Parsers/New/AST/fwd_decl.h | 91 - src/Parsers/New/CMakeLists.txt | 93 - src/Parsers/New/CharInputStream.cpp | 79 - src/Parsers/New/CharInputStream.h | 34 - src/Parsers/New/ClickHouseLexer.cpp | 1603 -- src/Parsers/New/ClickHouseLexer.h | 98 - src/Parsers/New/ClickHouseParser.cpp | 20220 ---------------- src/Parsers/New/ClickHouseParser.h | 3435 --- src/Parsers/New/ClickHouseParserVisitor.cpp | 9 - src/Parsers/New/ClickHouseParserVisitor.h | 422 - src/Parsers/New/LexerErrorListener.cpp | 26 - src/Parsers/New/LexerErrorListener.h | 21 - src/Parsers/New/ParseTreeVisitor.cpp | 150 - src/Parsers/New/ParseTreeVisitor.h | 304 - src/Parsers/New/ParserErrorListener.cpp | 37 - src/Parsers/New/ParserErrorListener.h | 21 - src/Parsers/New/parseQuery.cpp | 89 - src/Parsers/New/parseQuery.h | 14 - tests/ci/ci_config.json | 12 - tests/clickhouse-test | 12 - tests/queries/skip_list.json | 345 - utils/CMakeLists.txt | 1 - .../New => utils/antlr}/ClickHouseLexer.g4 | 0 .../New => utils/antlr}/ClickHouseParser.g4 | 0 {src/Parsers/New => utils/antlr}/README.md | 0 utils/syntax-analyzer/CMakeLists.txt | 3 - utils/syntax-analyzer/main.cpp | 63 - 115 files changed, 3 insertions(+), 35430 deletions(-) delete mode 160000 contrib/antlr4-runtime delete mode 100644 contrib/antlr4-runtime-cmake/CMakeLists.txt delete mode 100644 src/Parsers/New/AST/AlterTableQuery.cpp delete mode 100644 src/Parsers/New/AST/AlterTableQuery.h delete mode 100644 src/Parsers/New/AST/AttachQuery.cpp delete mode 100644 src/Parsers/New/AST/AttachQuery.h delete mode 100644 src/Parsers/New/AST/CheckQuery.cpp delete mode 100644 src/Parsers/New/AST/CheckQuery.h delete mode 100644 src/Parsers/New/AST/ColumnExpr.cpp delete mode 100644 src/Parsers/New/AST/ColumnExpr.h delete mode 100644 src/Parsers/New/AST/ColumnTypeExpr.cpp delete mode 100644 src/Parsers/New/AST/ColumnTypeExpr.h delete mode 100644 src/Parsers/New/AST/CreateDatabaseQuery.cpp delete mode 100644 src/Parsers/New/AST/CreateDatabaseQuery.h delete mode 100644 src/Parsers/New/AST/CreateDictionaryQuery.cpp delete mode 100644 src/Parsers/New/AST/CreateDictionaryQuery.h delete mode 100644 src/Parsers/New/AST/CreateLiveViewQuery.cpp delete mode 100644 src/Parsers/New/AST/CreateLiveViewQuery.h delete mode 100644 src/Parsers/New/AST/CreateMaterializedViewQuery.cpp delete mode 100644 src/Parsers/New/AST/CreateMaterializedViewQuery.h delete mode 100644 src/Parsers/New/AST/CreateTableQuery.cpp delete mode 100644 src/Parsers/New/AST/CreateTableQuery.h delete mode 100644 src/Parsers/New/AST/CreateViewQuery.cpp delete mode 100644 src/Parsers/New/AST/CreateViewQuery.h delete mode 100644 src/Parsers/New/AST/DDLQuery.cpp delete mode 100644 src/Parsers/New/AST/DDLQuery.h delete mode 100644 src/Parsers/New/AST/DescribeQuery.cpp delete mode 100644 src/Parsers/New/AST/DescribeQuery.h delete mode 100644 src/Parsers/New/AST/DropQuery.cpp delete mode 100644 
src/Parsers/New/AST/DropQuery.h delete mode 100644 src/Parsers/New/AST/EngineExpr.cpp delete mode 100644 src/Parsers/New/AST/EngineExpr.h delete mode 100644 src/Parsers/New/AST/ExistsQuery.cpp delete mode 100644 src/Parsers/New/AST/ExistsQuery.h delete mode 100644 src/Parsers/New/AST/ExplainQuery.cpp delete mode 100644 src/Parsers/New/AST/ExplainQuery.h delete mode 100644 src/Parsers/New/AST/INode.h delete mode 100644 src/Parsers/New/AST/Identifier.cpp delete mode 100644 src/Parsers/New/AST/Identifier.h delete mode 100644 src/Parsers/New/AST/InsertQuery.cpp delete mode 100644 src/Parsers/New/AST/InsertQuery.h delete mode 100644 src/Parsers/New/AST/JoinExpr.cpp delete mode 100644 src/Parsers/New/AST/JoinExpr.h delete mode 100644 src/Parsers/New/AST/KillQuery.cpp delete mode 100644 src/Parsers/New/AST/KillQuery.h delete mode 100644 src/Parsers/New/AST/LimitExpr.cpp delete mode 100644 src/Parsers/New/AST/LimitExpr.h delete mode 100644 src/Parsers/New/AST/Literal.cpp delete mode 100644 src/Parsers/New/AST/Literal.h delete mode 100644 src/Parsers/New/AST/OptimizeQuery.cpp delete mode 100644 src/Parsers/New/AST/OptimizeQuery.h delete mode 100644 src/Parsers/New/AST/OrderExpr.cpp delete mode 100644 src/Parsers/New/AST/OrderExpr.h delete mode 100644 src/Parsers/New/AST/Query.cpp delete mode 100644 src/Parsers/New/AST/Query.h delete mode 100644 src/Parsers/New/AST/README.md delete mode 100644 src/Parsers/New/AST/RatioExpr.cpp delete mode 100644 src/Parsers/New/AST/RatioExpr.h delete mode 100644 src/Parsers/New/AST/RenameQuery.cpp delete mode 100644 src/Parsers/New/AST/RenameQuery.h delete mode 100644 src/Parsers/New/AST/SelectUnionQuery.cpp delete mode 100644 src/Parsers/New/AST/SelectUnionQuery.h delete mode 100644 src/Parsers/New/AST/SetQuery.cpp delete mode 100644 src/Parsers/New/AST/SetQuery.h delete mode 100644 src/Parsers/New/AST/SettingExpr.cpp delete mode 100644 src/Parsers/New/AST/SettingExpr.h delete mode 100644 src/Parsers/New/AST/ShowCreateQuery.cpp delete mode 100644 src/Parsers/New/AST/ShowCreateQuery.h delete mode 100644 src/Parsers/New/AST/ShowQuery.cpp delete mode 100644 src/Parsers/New/AST/ShowQuery.h delete mode 100644 src/Parsers/New/AST/SystemQuery.cpp delete mode 100644 src/Parsers/New/AST/SystemQuery.h delete mode 100644 src/Parsers/New/AST/TableElementExpr.cpp delete mode 100644 src/Parsers/New/AST/TableElementExpr.h delete mode 100644 src/Parsers/New/AST/TableExpr.cpp delete mode 100644 src/Parsers/New/AST/TableExpr.h delete mode 100644 src/Parsers/New/AST/TruncateQuery.cpp delete mode 100644 src/Parsers/New/AST/TruncateQuery.h delete mode 100644 src/Parsers/New/AST/UseQuery.cpp delete mode 100644 src/Parsers/New/AST/UseQuery.h delete mode 100644 src/Parsers/New/AST/WatchQuery.cpp delete mode 100644 src/Parsers/New/AST/WatchQuery.h delete mode 100644 src/Parsers/New/AST/fwd_decl.h delete mode 100644 src/Parsers/New/CMakeLists.txt delete mode 100644 src/Parsers/New/CharInputStream.cpp delete mode 100644 src/Parsers/New/CharInputStream.h delete mode 100644 src/Parsers/New/ClickHouseLexer.cpp delete mode 100644 src/Parsers/New/ClickHouseLexer.h delete mode 100644 src/Parsers/New/ClickHouseParser.cpp delete mode 100644 src/Parsers/New/ClickHouseParser.h delete mode 100644 src/Parsers/New/ClickHouseParserVisitor.cpp delete mode 100644 src/Parsers/New/ClickHouseParserVisitor.h delete mode 100644 src/Parsers/New/LexerErrorListener.cpp delete mode 100644 src/Parsers/New/LexerErrorListener.h delete mode 100644 src/Parsers/New/ParseTreeVisitor.cpp delete mode 100644 
src/Parsers/New/ParseTreeVisitor.h delete mode 100644 src/Parsers/New/ParserErrorListener.cpp delete mode 100644 src/Parsers/New/ParserErrorListener.h delete mode 100644 src/Parsers/New/parseQuery.cpp delete mode 100644 src/Parsers/New/parseQuery.h rename {src/Parsers/New => utils/antlr}/ClickHouseLexer.g4 (100%) rename {src/Parsers/New => utils/antlr}/ClickHouseParser.g4 (100%) rename {src/Parsers/New => utils/antlr}/README.md (100%) delete mode 100644 utils/syntax-analyzer/CMakeLists.txt delete mode 100644 utils/syntax-analyzer/main.cpp diff --git a/.gitmodules b/.gitmodules index 0a7a6b4a3f9..1d9d4d25baf 100644 --- a/.gitmodules +++ b/.gitmodules @@ -168,9 +168,6 @@ [submodule "contrib/fmtlib"] path = contrib/fmtlib url = https://github.com/fmtlib/fmt.git -[submodule "contrib/antlr4-runtime"] - path = contrib/antlr4-runtime - url = https://github.com/ClickHouse-Extras/antlr4-runtime.git [submodule "contrib/sentry-native"] path = contrib/sentry-native url = https://github.com/ClickHouse-Extras/sentry-native.git diff --git a/contrib/CMakeLists.txt b/contrib/CMakeLists.txt index 710c8c7fca5..164692fb893 100644 --- a/contrib/CMakeLists.txt +++ b/contrib/CMakeLists.txt @@ -34,7 +34,6 @@ endif() set_property(DIRECTORY PROPERTY EXCLUDE_FROM_ALL 1) add_subdirectory (abseil-cpp-cmake) -add_subdirectory (antlr4-runtime-cmake) add_subdirectory (boost-cmake) add_subdirectory (cctz-cmake) add_subdirectory (consistent-hashing) diff --git a/contrib/antlr4-runtime b/contrib/antlr4-runtime deleted file mode 160000 index 672643e9a42..00000000000 --- a/contrib/antlr4-runtime +++ /dev/null @@ -1 +0,0 @@ -Subproject commit 672643e9a427ef803abf13bc8cb4989606553d64 diff --git a/contrib/antlr4-runtime-cmake/CMakeLists.txt b/contrib/antlr4-runtime-cmake/CMakeLists.txt deleted file mode 100644 index 4f639a33ebf..00000000000 --- a/contrib/antlr4-runtime-cmake/CMakeLists.txt +++ /dev/null @@ -1,156 +0,0 @@ -set (LIBRARY_DIR "${ClickHouse_SOURCE_DIR}/contrib/antlr4-runtime") - -set (SRCS - "${LIBRARY_DIR}/ANTLRErrorListener.cpp" - "${LIBRARY_DIR}/ANTLRErrorStrategy.cpp" - "${LIBRARY_DIR}/ANTLRFileStream.cpp" - "${LIBRARY_DIR}/ANTLRInputStream.cpp" - "${LIBRARY_DIR}/atn/AbstractPredicateTransition.cpp" - "${LIBRARY_DIR}/atn/ActionTransition.cpp" - "${LIBRARY_DIR}/atn/AmbiguityInfo.cpp" - "${LIBRARY_DIR}/atn/ArrayPredictionContext.cpp" - "${LIBRARY_DIR}/atn/ATN.cpp" - "${LIBRARY_DIR}/atn/ATNConfig.cpp" - "${LIBRARY_DIR}/atn/ATNConfigSet.cpp" - "${LIBRARY_DIR}/atn/ATNDeserializationOptions.cpp" - "${LIBRARY_DIR}/atn/ATNDeserializer.cpp" - "${LIBRARY_DIR}/atn/ATNSerializer.cpp" - "${LIBRARY_DIR}/atn/ATNSimulator.cpp" - "${LIBRARY_DIR}/atn/ATNState.cpp" - "${LIBRARY_DIR}/atn/AtomTransition.cpp" - "${LIBRARY_DIR}/atn/BasicBlockStartState.cpp" - "${LIBRARY_DIR}/atn/BasicState.cpp" - "${LIBRARY_DIR}/atn/BlockEndState.cpp" - "${LIBRARY_DIR}/atn/BlockStartState.cpp" - "${LIBRARY_DIR}/atn/ContextSensitivityInfo.cpp" - "${LIBRARY_DIR}/atn/DecisionEventInfo.cpp" - "${LIBRARY_DIR}/atn/DecisionInfo.cpp" - "${LIBRARY_DIR}/atn/DecisionState.cpp" - "${LIBRARY_DIR}/atn/EmptyPredictionContext.cpp" - "${LIBRARY_DIR}/atn/EpsilonTransition.cpp" - "${LIBRARY_DIR}/atn/ErrorInfo.cpp" - "${LIBRARY_DIR}/atn/LexerAction.cpp" - "${LIBRARY_DIR}/atn/LexerActionExecutor.cpp" - "${LIBRARY_DIR}/atn/LexerATNConfig.cpp" - "${LIBRARY_DIR}/atn/LexerATNSimulator.cpp" - "${LIBRARY_DIR}/atn/LexerChannelAction.cpp" - "${LIBRARY_DIR}/atn/LexerCustomAction.cpp" - "${LIBRARY_DIR}/atn/LexerIndexedCustomAction.cpp" - "${LIBRARY_DIR}/atn/LexerModeAction.cpp" - 
"${LIBRARY_DIR}/atn/LexerMoreAction.cpp" - "${LIBRARY_DIR}/atn/LexerPopModeAction.cpp" - "${LIBRARY_DIR}/atn/LexerPushModeAction.cpp" - "${LIBRARY_DIR}/atn/LexerSkipAction.cpp" - "${LIBRARY_DIR}/atn/LexerTypeAction.cpp" - "${LIBRARY_DIR}/atn/LL1Analyzer.cpp" - "${LIBRARY_DIR}/atn/LookaheadEventInfo.cpp" - "${LIBRARY_DIR}/atn/LoopEndState.cpp" - "${LIBRARY_DIR}/atn/NotSetTransition.cpp" - "${LIBRARY_DIR}/atn/OrderedATNConfigSet.cpp" - "${LIBRARY_DIR}/atn/ParseInfo.cpp" - "${LIBRARY_DIR}/atn/ParserATNSimulator.cpp" - "${LIBRARY_DIR}/atn/PlusBlockStartState.cpp" - "${LIBRARY_DIR}/atn/PlusLoopbackState.cpp" - "${LIBRARY_DIR}/atn/PrecedencePredicateTransition.cpp" - "${LIBRARY_DIR}/atn/PredicateEvalInfo.cpp" - "${LIBRARY_DIR}/atn/PredicateTransition.cpp" - "${LIBRARY_DIR}/atn/PredictionContext.cpp" - "${LIBRARY_DIR}/atn/PredictionMode.cpp" - "${LIBRARY_DIR}/atn/ProfilingATNSimulator.cpp" - "${LIBRARY_DIR}/atn/RangeTransition.cpp" - "${LIBRARY_DIR}/atn/RuleStartState.cpp" - "${LIBRARY_DIR}/atn/RuleStopState.cpp" - "${LIBRARY_DIR}/atn/RuleTransition.cpp" - "${LIBRARY_DIR}/atn/SemanticContext.cpp" - "${LIBRARY_DIR}/atn/SetTransition.cpp" - "${LIBRARY_DIR}/atn/SingletonPredictionContext.cpp" - "${LIBRARY_DIR}/atn/StarBlockStartState.cpp" - "${LIBRARY_DIR}/atn/StarLoopbackState.cpp" - "${LIBRARY_DIR}/atn/StarLoopEntryState.cpp" - "${LIBRARY_DIR}/atn/TokensStartState.cpp" - "${LIBRARY_DIR}/atn/Transition.cpp" - "${LIBRARY_DIR}/atn/WildcardTransition.cpp" - "${LIBRARY_DIR}/BailErrorStrategy.cpp" - "${LIBRARY_DIR}/BaseErrorListener.cpp" - "${LIBRARY_DIR}/BufferedTokenStream.cpp" - "${LIBRARY_DIR}/CharStream.cpp" - "${LIBRARY_DIR}/CommonToken.cpp" - "${LIBRARY_DIR}/CommonTokenFactory.cpp" - "${LIBRARY_DIR}/CommonTokenStream.cpp" - "${LIBRARY_DIR}/ConsoleErrorListener.cpp" - "${LIBRARY_DIR}/DefaultErrorStrategy.cpp" - "${LIBRARY_DIR}/dfa/DFA.cpp" - "${LIBRARY_DIR}/dfa/DFASerializer.cpp" - "${LIBRARY_DIR}/dfa/DFAState.cpp" - "${LIBRARY_DIR}/dfa/LexerDFASerializer.cpp" - "${LIBRARY_DIR}/DiagnosticErrorListener.cpp" - "${LIBRARY_DIR}/Exceptions.cpp" - "${LIBRARY_DIR}/FailedPredicateException.cpp" - "${LIBRARY_DIR}/InputMismatchException.cpp" - "${LIBRARY_DIR}/InterpreterRuleContext.cpp" - "${LIBRARY_DIR}/IntStream.cpp" - "${LIBRARY_DIR}/Lexer.cpp" - "${LIBRARY_DIR}/LexerInterpreter.cpp" - "${LIBRARY_DIR}/LexerNoViableAltException.cpp" - "${LIBRARY_DIR}/ListTokenSource.cpp" - "${LIBRARY_DIR}/misc/InterpreterDataReader.cpp" - "${LIBRARY_DIR}/misc/Interval.cpp" - "${LIBRARY_DIR}/misc/IntervalSet.cpp" - "${LIBRARY_DIR}/misc/MurmurHash.cpp" - "${LIBRARY_DIR}/misc/Predicate.cpp" - "${LIBRARY_DIR}/NoViableAltException.cpp" - "${LIBRARY_DIR}/Parser.cpp" - "${LIBRARY_DIR}/ParserInterpreter.cpp" - "${LIBRARY_DIR}/ParserRuleContext.cpp" - "${LIBRARY_DIR}/ProxyErrorListener.cpp" - "${LIBRARY_DIR}/RecognitionException.cpp" - "${LIBRARY_DIR}/Recognizer.cpp" - "${LIBRARY_DIR}/RuleContext.cpp" - "${LIBRARY_DIR}/RuleContextWithAltNum.cpp" - "${LIBRARY_DIR}/RuntimeMetaData.cpp" - "${LIBRARY_DIR}/support/Any.cpp" - "${LIBRARY_DIR}/support/Arrays.cpp" - "${LIBRARY_DIR}/support/CPPUtils.cpp" - "${LIBRARY_DIR}/support/guid.cpp" - "${LIBRARY_DIR}/support/StringUtils.cpp" - "${LIBRARY_DIR}/Token.cpp" - "${LIBRARY_DIR}/TokenSource.cpp" - "${LIBRARY_DIR}/TokenStream.cpp" - "${LIBRARY_DIR}/TokenStreamRewriter.cpp" - "${LIBRARY_DIR}/tree/ErrorNode.cpp" - "${LIBRARY_DIR}/tree/ErrorNodeImpl.cpp" - "${LIBRARY_DIR}/tree/IterativeParseTreeWalker.cpp" - "${LIBRARY_DIR}/tree/ParseTree.cpp" - "${LIBRARY_DIR}/tree/ParseTreeListener.cpp" - 
"${LIBRARY_DIR}/tree/ParseTreeVisitor.cpp" - "${LIBRARY_DIR}/tree/ParseTreeWalker.cpp" - "${LIBRARY_DIR}/tree/pattern/Chunk.cpp" - "${LIBRARY_DIR}/tree/pattern/ParseTreeMatch.cpp" - "${LIBRARY_DIR}/tree/pattern/ParseTreePattern.cpp" - "${LIBRARY_DIR}/tree/pattern/ParseTreePatternMatcher.cpp" - "${LIBRARY_DIR}/tree/pattern/RuleTagToken.cpp" - "${LIBRARY_DIR}/tree/pattern/TagChunk.cpp" - "${LIBRARY_DIR}/tree/pattern/TextChunk.cpp" - "${LIBRARY_DIR}/tree/pattern/TokenTagToken.cpp" - "${LIBRARY_DIR}/tree/TerminalNode.cpp" - "${LIBRARY_DIR}/tree/TerminalNodeImpl.cpp" - "${LIBRARY_DIR}/tree/Trees.cpp" - "${LIBRARY_DIR}/tree/xpath/XPath.cpp" - "${LIBRARY_DIR}/tree/xpath/XPathElement.cpp" - "${LIBRARY_DIR}/tree/xpath/XPathLexer.cpp" - "${LIBRARY_DIR}/tree/xpath/XPathLexerErrorListener.cpp" - "${LIBRARY_DIR}/tree/xpath/XPathRuleAnywhereElement.cpp" - "${LIBRARY_DIR}/tree/xpath/XPathRuleElement.cpp" - "${LIBRARY_DIR}/tree/xpath/XPathTokenAnywhereElement.cpp" - "${LIBRARY_DIR}/tree/xpath/XPathTokenElement.cpp" - "${LIBRARY_DIR}/tree/xpath/XPathWildcardAnywhereElement.cpp" - "${LIBRARY_DIR}/tree/xpath/XPathWildcardElement.cpp" - "${LIBRARY_DIR}/UnbufferedCharStream.cpp" - "${LIBRARY_DIR}/UnbufferedTokenStream.cpp" - "${LIBRARY_DIR}/Vocabulary.cpp" - "${LIBRARY_DIR}/WritableToken.cpp" -) - -add_library (antlr4-runtime ${SRCS}) - -target_include_directories (antlr4-runtime SYSTEM PUBLIC ${LIBRARY_DIR}) diff --git a/docker/test/fasttest/run.sh b/docker/test/fasttest/run.sh index 6efdbf6178c..71c430bdbb5 100755 --- a/docker/test/fasttest/run.sh +++ b/docker/test/fasttest/run.sh @@ -160,7 +160,6 @@ function clone_submodules SUBMODULES_TO_UPDATE=( contrib/abseil-cpp - contrib/antlr4-runtime contrib/boost contrib/zlib-ng contrib/libxml2 diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index 88a6113b8fa..fc1064ffcc7 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -54,7 +54,6 @@ add_subdirectory (Dictionaries) add_subdirectory (Disks) add_subdirectory (Storages) add_subdirectory (Parsers) -add_subdirectory (Parsers/New) add_subdirectory (IO) add_subdirectory (Functions) add_subdirectory (Interpreters) @@ -222,12 +221,12 @@ endif() if (MAKE_STATIC_LIBRARIES OR NOT SPLIT_SHARED_LIBRARIES) add_library (dbms STATIC ${dbms_headers} ${dbms_sources}) - target_link_libraries (dbms PRIVATE clickhouse_parsers_new jemalloc libdivide ${DBMS_COMMON_LIBRARIES}) + target_link_libraries (dbms PRIVATE jemalloc libdivide ${DBMS_COMMON_LIBRARIES}) set (all_modules dbms) else() add_library (dbms SHARED ${dbms_headers} ${dbms_sources}) target_link_libraries (dbms PUBLIC ${all_modules} ${DBMS_COMMON_LIBRARIES}) - target_link_libraries (clickhouse_interpreters PRIVATE clickhouse_parsers_new jemalloc libdivide) + target_link_libraries (clickhouse_interpreters PRIVATE jemalloc libdivide) list (APPEND all_modules dbms) # force all split libs to be linked if (OS_DARWIN) diff --git a/src/Core/Settings.h b/src/Core/Settings.h index 0197bfac7e4..9eec38f9788 100644 --- a/src/Core/Settings.h +++ b/src/Core/Settings.h @@ -445,7 +445,6 @@ class IColumn; M(Bool, allow_experimental_window_functions, false, "Allow experimental window functions", 0) \ M(Bool, allow_experimental_projection_optimization, false, "Enable projection optimization when processing SELECT queries", 0) \ M(Bool, force_optimize_projection, false, "If projection optimization is enabled, SELECT queries need to use projection", 0) \ - M(Bool, use_antlr_parser, false, "Parse incoming queries using ANTLR-generated experimental parser", 0) \ M(Bool, 
async_socket_for_remote, true, "Asynchronously read from socket executing remote query", 0) \ M(Bool, insert_null_as_default, true, "Insert DEFAULT values instead of NULL in INSERT SELECT (UNION ALL)", 0) \ \ diff --git a/src/Interpreters/executeQuery.cpp b/src/Interpreters/executeQuery.cpp index c69a5bcd3e1..5b55754f00a 100644 --- a/src/Interpreters/executeQuery.cpp +++ b/src/Interpreters/executeQuery.cpp @@ -26,11 +26,6 @@ #include #include #include - -#if !defined(ARCADIA_BUILD) -# include // Y_IGNORE -#endif - #include #include #include @@ -166,11 +161,10 @@ static void logQuery(const String & query, ContextPtr context, bool internal) if (!comment.empty()) comment = fmt::format(" (comment: {})", comment); - LOG_DEBUG(&Poco::Logger::get("executeQuery"), "(from {}{}{}, using {} parser){} {}", + LOG_DEBUG(&Poco::Logger::get("executeQuery"), "(from {}{}{}){} {}", client_info.current_address.toString(), (current_user != "default" ? ", user: " + current_user : ""), (!initial_query_id.empty() && current_query_id != initial_query_id ? ", initial_query_id: " + initial_query_id : std::string()), - (context->getSettingsRef().use_antlr_parser ? "experimental" : "production"), comment, joinLines(query)); @@ -386,24 +380,10 @@ static std::tuple executeQueryImpl( String query_table; try { -#if !defined(ARCADIA_BUILD) - if (settings.use_antlr_parser) - { - ast = parseQuery(begin, end, max_query_size, settings.max_parser_depth, context->getCurrentDatabase()); - } - else - { - ParserQuery parser(end); - - /// TODO: parser should fail early when max_query_size limit is reached. - ast = parseQuery(parser, begin, end, "", max_query_size, settings.max_parser_depth); - } -#else ParserQuery parser(end); /// TODO: parser should fail early when max_query_size limit is reached. ast = parseQuery(parser, begin, end, "", max_query_size, settings.max_parser_depth); -#endif /// Interpret SETTINGS clauses as early as possible (before invoking the corresponding interpreter), /// to allow settings to take effect. 
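After this change, executeQueryImpl is left with the single production parser path shown above. A minimal sketch of how a query string goes through it; the parseQuery call mirrors the one kept in the hunk, while the size and depth limits are illustrative values standing in for the corresponding Settings:

#include <string>

#include <Parsers/ParserQuery.h>
#include <Parsers/parseQuery.h>

/// Parse a query string with the production parser, as executeQueryImpl does.
DB::ASTPtr parseWithProductionParser(const std::string & query)
{
    const char * begin = query.data();
    const char * end = begin + query.size();

    DB::ParserQuery parser(end);

    /// Same overload as in executeQueryImpl: empty description string, then
    /// the max_query_size and max_parser_depth limits.
    return DB::parseQuery(parser, begin, end, "", /*max_query_size=*/ 262144, /*max_parser_depth=*/ 1000);
}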
diff --git a/src/Parsers/New/AST/AlterTableQuery.cpp b/src/Parsers/New/AST/AlterTableQuery.cpp deleted file mode 100644 index e1ea54b61e5..00000000000 --- a/src/Parsers/New/AST/AlterTableQuery.cpp +++ /dev/null @@ -1,814 +0,0 @@ -#include - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - - -namespace DB::AST -{ - -AssignmentExpr::AssignmentExpr(PtrTo identifier, PtrTo expr) : INode{identifier, expr} -{ -} - -ASTPtr AssignmentExpr::convertToOld() const -{ - auto expr = std::make_shared(); - - expr->column_name = get(IDENTIFIER)->convertToOld()->getColumnName(); - expr->children.push_back(get(EXPR)->convertToOld()); - - return expr; -} - -PartitionClause::PartitionClause(PtrTo id) : PartitionClause(ClauseType::ID, {id}) -{ -} - -PartitionClause::PartitionClause(PtrTo> list) : PartitionClause(ClauseType::LIST, {list}) -{ -} - -PartitionClause::PartitionClause(ClauseType type, PtrList exprs) : INode(exprs), clause_type(type) -{ -} - -ASTPtr PartitionClause::convertToOld() const -{ - auto partition = std::make_shared(); - - switch(clause_type) - { - case ClauseType::ID: - partition->id = get(ID)->as(); - break; - case ClauseType::LIST: - { - auto tuple = std::make_shared(); - - tuple->name = "tuple"; - tuple->arguments = std::make_shared(); - for (const auto & child : get(LIST)->as &>()) - tuple->arguments->children.push_back(child->convertToOld()); - tuple->children.push_back(tuple->arguments); - - partition->value = tuple; - partition->children.push_back(partition->value); - partition->fields_count = get>(LIST)->size(); - partition->fields_str = get(LIST)->toString(); - } - break; - } - - return partition; -} - -// static -PtrTo AlterTableClause::createAddColumn(bool if_not_exists, PtrTo element, PtrTo after) -{ - assert(element->getType() == TableElementExpr::ExprType::COLUMN); - PtrTo query(new AlterTableClause(ClauseType::ADD_COLUMN, {element, after})); - query->if_not_exists = if_not_exists; - return query; -} - -// static -PtrTo AlterTableClause::createAddIndex(bool if_not_exists, PtrTo element, PtrTo after) -{ - assert(element->getType() == TableElementExpr::ExprType::INDEX); - PtrTo query(new AlterTableClause(ClauseType::ADD_INDEX, {element, after})); - query->if_not_exists = if_not_exists; - return query; -} - -// static -PtrTo AlterTableClause::createAddProjection(bool if_not_exists, PtrTo element, PtrTo after) -{ - assert(element->getType() == TableElementExpr::ExprType::PROJECTION); - PtrTo query(new AlterTableClause(ClauseType::ADD_PROJECTION, {element, after})); - query->if_not_exists = if_not_exists; - return query; -} - -// static -PtrTo AlterTableClause::createAttach(PtrTo clause, PtrTo from) -{ - return PtrTo(new AlterTableClause(ClauseType::ATTACH, {clause, from})); -} - -// static -PtrTo AlterTableClause::createClearColumn(bool if_exists, PtrTo identifier, PtrTo in) -{ - PtrTo query(new AlterTableClause(ClauseType::CLEAR_COLUMN, {identifier, in})); - query->if_exists = if_exists; - return query; -} - -PtrTo AlterTableClause::createClearIndex(bool if_exists, PtrTo identifier, PtrTo in) -{ - PtrTo query(new AlterTableClause(ClauseType::CLEAR_INDEX, {identifier, in})); - query->if_exists = if_exists; - return query; -} - -PtrTo AlterTableClause::createClearProjection(bool if_exists, PtrTo identifier, PtrTo in) -{ - PtrTo query(new AlterTableClause(ClauseType::CLEAR_PROJECTION, {identifier, in})); - query->if_exists = if_exists; - return query; -} - -// static -PtrTo AlterTableClause::createCodec(bool 
if_exists, PtrTo identifier, PtrTo codec) -{ - PtrTo query(new AlterTableClause(ClauseType::CODEC, {identifier, codec})); - query->if_exists = if_exists; - return query; -} - -// static -PtrTo AlterTableClause::createComment(bool if_exists, PtrTo identifier, PtrTo comment) -{ - PtrTo query(new AlterTableClause(ClauseType::COMMENT, {identifier, comment})); - query->if_exists = if_exists; - return query; -} - -// static -PtrTo AlterTableClause::createDelete(PtrTo expr) -{ - return PtrTo(new AlterTableClause(ClauseType::DELETE, {expr})); -} - -// static -PtrTo AlterTableClause::createDetach(PtrTo clause) -{ - return PtrTo(new AlterTableClause(ClauseType::DETACH, {clause})); -} - -// static -PtrTo AlterTableClause::createDropColumn(bool if_exists, PtrTo identifier) -{ - PtrTo query(new AlterTableClause(ClauseType::DROP_COLUMN, {identifier})); - query->if_exists = if_exists; - return query; -} - -// static -PtrTo AlterTableClause::createDropIndex(bool if_exists, PtrTo identifier) -{ - PtrTo query(new AlterTableClause(ClauseType::DROP_INDEX, {identifier})); - query->if_exists = if_exists; - return query; -} - -// static -PtrTo AlterTableClause::createDropProjection(bool if_exists, PtrTo identifier) -{ - PtrTo query(new AlterTableClause(ClauseType::DROP_PROJECTION, {identifier})); - query->if_exists = if_exists; - return query; -} - -// static -PtrTo AlterTableClause::createDropPartition(PtrTo clause) -{ - return PtrTo(new AlterTableClause(ClauseType::DROP_PARTITION, {clause})); -} - -// static -PtrTo AlterTableClause::createFreezePartition(PtrTo clause) -{ - return PtrTo(new AlterTableClause(ClauseType::FREEZE_PARTITION, {clause})); -} - -// static -PtrTo AlterTableClause::createMaterializeIndex(bool if_exists, PtrTo identifier, PtrTo in) -{ - PtrTo query(new AlterTableClause(ClauseType::MATERIALIZE_INDEX, {identifier, in})); - query->if_exists = if_exists; - return query; -} - -// static -PtrTo AlterTableClause::createMaterializeProjection(bool if_exists, PtrTo identifier, PtrTo in) -{ - PtrTo query(new AlterTableClause(ClauseType::MATERIALIZE_PROJECTION, {identifier, in})); - query->if_exists = if_exists; - return query; -} - -// static -PtrTo AlterTableClause::createModify(bool if_exists, PtrTo element) -{ - // TODO: assert(element->getType() == TableElementExpr::ExprType::COLUMN); - PtrTo query(new AlterTableClause(ClauseType::MODIFY, {element})); - query->if_exists = if_exists; - return query; -} - -// static -PtrTo AlterTableClause::createMovePartitionToDisk(PtrTo clause, PtrTo literal) -{ - return PtrTo(new AlterTableClause(ClauseType::MOVE_PARTITION_TO_DISK, {clause, literal})); -} - -// static -PtrTo AlterTableClause::createMovePartitionToTable(PtrTo clause, PtrTo identifier) -{ - return PtrTo(new AlterTableClause(ClauseType::MOVE_PARTITION_TO_TABLE, {clause, identifier})); -} - -// static -PtrTo AlterTableClause::createMovePartitionToVolume(PtrTo clause, PtrTo literal) -{ - return PtrTo(new AlterTableClause(ClauseType::MOVE_PARTITION_TO_VOLUME, {clause, literal})); -} - -// static -PtrTo AlterTableClause::createOrderBy(PtrTo expr) -{ - return PtrTo(new AlterTableClause(ClauseType::ORDER_BY, {expr})); -} - -// static -PtrTo AlterTableClause::createRemove(bool if_exists, PtrTo identifier, TableColumnPropertyType type) -{ - PtrTo query(new AlterTableClause(ClauseType::REMOVE, {identifier})); - query->if_exists = if_exists; - query->property_type = type; - return query; -} - -// static -PtrTo AlterTableClause::createRemoveTTL() -{ - return PtrTo(new AlterTableClause(ClauseType::REMOVE_TTL, 
{})); -} - -// static -PtrTo AlterTableClause::createRename(bool if_exists, PtrTo identifier, PtrTo to) -{ - PtrTo query(new AlterTableClause(ClauseType::RENAME, {identifier, to})); - query->if_exists = if_exists; - return query; -} - -// static -PtrTo AlterTableClause::createReplace(PtrTo clause, PtrTo from) -{ - return PtrTo(new AlterTableClause(ClauseType::REPLACE, {clause, from})); -} - -// static -PtrTo AlterTableClause::createTTL(PtrTo clause) -{ - return PtrTo(new AlterTableClause(ClauseType::TTL, {clause})); -} - -// static -PtrTo AlterTableClause::createUpdate(PtrTo list, PtrTo where) -{ - return PtrTo(new AlterTableClause(ClauseType::UPDATE, {list, where})); -} - -ASTPtr AlterTableClause::convertToOld() const -{ - auto command = std::make_shared(); - - switch(clause_type) - { - case ClauseType::ADD_COLUMN: - command->type = ASTAlterCommand::ADD_COLUMN; - command->if_not_exists = if_not_exists; - // TODO: command->first - command->col_decl = get(ELEMENT)->convertToOld(); - if (has(AFTER)) command->column = get(AFTER)->convertToOld(); - break; - - case ClauseType::ADD_INDEX: - command->type = ASTAlterCommand::ADD_INDEX; - command->if_not_exists = if_not_exists; - command->index_decl = get(ELEMENT)->convertToOld(); - if (has(AFTER)) command->index = get(AFTER)->convertToOld(); - break; - - case ClauseType::ADD_PROJECTION: - command->type = ASTAlterCommand::ADD_PROJECTION; - command->if_not_exists = if_not_exists; - command->projection_decl = get(ELEMENT)->convertToOld(); - if (has(AFTER)) command->projection = get(AFTER)->convertToOld(); - break; - - case ClauseType::ATTACH: - command->type = ASTAlterCommand::ATTACH_PARTITION; - command->partition = get(PARTITION)->convertToOld(); - - if (has(FROM)) - { - auto table = get(FROM)->convertToOld(); - command->from_database = table->as()->getDatabaseName(); - command->from_table = table->as()->shortName(); - command->replace = false; - command->type = ASTAlterCommand::REPLACE_PARTITION; - } - break; - - case ClauseType::CLEAR_COLUMN: - command->type = ASTAlterCommand::DROP_COLUMN; - command->if_exists = if_exists; - command->clear_column = true; - command->detach = false; - command->column = get(ELEMENT)->convertToOld(); - if (has(IN)) command->partition = get(IN)->convertToOld(); - break; - - case ClauseType::CLEAR_INDEX: - command->type = ASTAlterCommand::DROP_INDEX; - command->if_exists = if_exists; - command->clear_index = true; - command->detach = false; - command->index = get(ELEMENT)->convertToOld(); - if (has(IN)) command->partition = get(IN)->convertToOld(); - break; - - case ClauseType::CLEAR_PROJECTION: - command->type = ASTAlterCommand::DROP_PROJECTION; - command->if_exists = if_exists; - command->clear_projection = true; - command->detach = false; - command->projection = get(ELEMENT)->convertToOld(); - if (has(IN)) command->partition = get(IN)->convertToOld(); - break; - - case ClauseType::CODEC: - command->type = ASTAlterCommand::MODIFY_COLUMN; - command->if_exists = if_exists; - - { - auto column = std::make_shared(); - column->name = get(COLUMN)->toString(); - column->codec = get(CODEC)->convertToOld(); - - command->col_decl = column; - } - break; - - case ClauseType::COMMENT: - command->type = ASTAlterCommand::COMMENT_COLUMN; - command->if_exists = if_exists; - command->column = get(COLUMN)->convertToOld(); - command->comment = get(COMMENT)->convertToOld(); - break; - - case ClauseType::DELETE: - command->type = ASTAlterCommand::DELETE; - command->predicate = get(EXPR)->convertToOld(); - break; - - case 
ClauseType::DETACH:
-            command->type = ASTAlterCommand::DROP_PARTITION;
-            command->detach = true;
-            command->partition = get(PARTITION)->convertToOld();
-            break;
-
-        case ClauseType::DROP_COLUMN:
-            command->type = ASTAlterCommand::DROP_COLUMN;
-            command->if_exists = if_exists;
-            command->detach = false;
-            command->column = get(ELEMENT)->convertToOld();
-            break;
-
-        case ClauseType::DROP_INDEX:
-            command->type = ASTAlterCommand::DROP_INDEX;
-            command->if_exists = if_exists;
-            command->detach = false;
-            command->index = get(ELEMENT)->convertToOld();
-            break;
-
-        case ClauseType::DROP_PROJECTION:
-            command->type = ASTAlterCommand::DROP_PROJECTION;
-            command->if_exists = if_exists;
-            command->detach = false;
-            command->projection = get(ELEMENT)->convertToOld();
-            break;
-
-        case ClauseType::DROP_PARTITION:
-            command->type = ASTAlterCommand::DROP_PARTITION;
-            command->partition = get(PARTITION)->convertToOld();
-            break;
-
-        case ClauseType::FREEZE_PARTITION:
-            if (has(PARTITION))
-            {
-                command->type = ASTAlterCommand::FREEZE_PARTITION;
-                command->partition = get(PARTITION)->convertToOld();
-            }
-            else
-                command->type = ASTAlterCommand::FREEZE_ALL;
-            break;
-
-        case ClauseType::MATERIALIZE_INDEX:
-            command->type = ASTAlterCommand::MATERIALIZE_INDEX;
-            command->if_exists = if_exists;
-            command->index = get(ELEMENT)->convertToOld();
-            if (has(IN)) command->partition = get(IN)->convertToOld();
-            break;
-
-        case ClauseType::MATERIALIZE_PROJECTION:
-            command->type = ASTAlterCommand::MATERIALIZE_PROJECTION;
-            command->if_exists = if_exists;
-            command->projection = get(ELEMENT)->convertToOld();
-            if (has(IN)) command->partition = get(IN)->convertToOld();
-            break;
-
-        case ClauseType::MODIFY:
-            command->type = ASTAlterCommand::MODIFY_COLUMN;
-            command->if_exists = if_exists;
-            command->col_decl = get(ELEMENT)->convertToOld();
-            break;
-
-        case ClauseType::MOVE_PARTITION_TO_DISK:
-            command->type = ASTAlterCommand::MOVE_PARTITION;
-            command->partition = get(PARTITION)->convertToOld();
-            command->move_destination_type = DataDestinationType::DISK;
-            command->move_destination_name = get(TO)->convertToOld()->as()->value.get();
-            break;
-
-        case ClauseType::MOVE_PARTITION_TO_TABLE:
-            command->type = ASTAlterCommand::MOVE_PARTITION;
-            command->partition = get(PARTITION)->convertToOld();
-            command->move_destination_type = DataDestinationType::TABLE;
-            {
-                auto table = get(TO)->convertToOld();
-                command->to_database = table->as()->getDatabaseName();
-                command->to_table = table->as()->shortName();
-            }
-            break;
-
-        case ClauseType::MOVE_PARTITION_TO_VOLUME:
-            command->type = ASTAlterCommand::MOVE_PARTITION;
-            command->partition = get(PARTITION)->convertToOld();
-            command->move_destination_type = DataDestinationType::VOLUME;
-            command->move_destination_name = get(TO)->convertToOld()->as()->value.get();
-            break;
-
-        case ClauseType::REMOVE:
-            command->type = ASTAlterCommand::MODIFY_COLUMN;
-            command->if_exists = if_exists;
-            {
-                auto col_decl = std::make_shared();
-                col_decl->name = get(ELEMENT)->convertToOld()->getColumnName();
-                command->col_decl = col_decl;
-            }
-            switch(property_type)
-            {
-                case TableColumnPropertyType::ALIAS:
-                    command->remove_property = "ALIAS";
-                    break;
-                case TableColumnPropertyType::CODEC:
-                    command->remove_property = "CODEC";
-                    break;
-                case TableColumnPropertyType::COMMENT:
-                    command->remove_property = "COMMENT";
-                    break;
-                case TableColumnPropertyType::DEFAULT:
-                    command->remove_property = "DEFAULT";
-                    break;
-                case TableColumnPropertyType::MATERIALIZED:
-                    command->remove_property = "MATERIALIZED";
-                    break;
-                case TableColumnPropertyType::TTL:
-                    command->remove_property = "TTL";
-                    break;
-            }
-            break;
-
-        case ClauseType::REMOVE_TTL:
-            command->type = ASTAlterCommand::REMOVE_TTL;
-            break;
-
-        case ClauseType::RENAME:
-            command->type = ASTAlterCommand::RENAME_COLUMN;
-            command->column = get(COLUMN)->convertToOld();
-            command->rename_to = get(TO)->convertToOld();
-            break;
-
-        case ClauseType::ORDER_BY:
-            command->type = ASTAlterCommand::MODIFY_ORDER_BY;
-            command->order_by = get(EXPR)->convertToOld();
-            break;
-
-        case ClauseType::REPLACE:
-            command->type = ASTAlterCommand::REPLACE_PARTITION;
-            command->replace = true;
-            command->partition = get(PARTITION)->convertToOld();
-            {
-                auto table = get(FROM)->convertToOld();
-                command->from_database = table->as()->getDatabaseName();
-                command->from_table = table->as()->shortName();
-            }
-            break;
-
-        case ClauseType::TTL:
-            command->type = ASTAlterCommand::MODIFY_TTL;
-            command->ttl = get(CLAUSE)->convertToOld();
-            break;
-
-        case ClauseType::UPDATE:
-            command->type = ASTAlterCommand::UPDATE;
-            command->update_assignments = get(ASSIGNMENTS)->convertToOld();
-            command->predicate = get(WHERE)->convertToOld();
-            break;
-    }
-
-    if (command->col_decl)
-        command->children.push_back(command->col_decl);
-    if (command->column)
-        command->children.push_back(command->column);
-    if (command->partition)
-        command->children.push_back(command->partition);
-    if (command->order_by)
-        command->children.push_back(command->order_by);
-    if (command->sample_by)
-        command->children.push_back(command->sample_by);
-    if (command->predicate)
-        command->children.push_back(command->predicate);
-    if (command->update_assignments)
-        command->children.push_back(command->update_assignments);
-    if (command->values)
-        command->children.push_back(command->values);
-    if (command->comment)
-        command->children.push_back(command->comment);
-    if (command->ttl)
-        command->children.push_back(command->ttl);
-    if (command->settings_changes)
-        command->children.push_back(command->settings_changes);
-
-    return command;
-}
-
-AlterTableClause::AlterTableClause(ClauseType type, PtrList exprs) : INode(exprs), clause_type(type)
-{
-}
-
-AlterTableQuery::AlterTableQuery(PtrTo cluster, PtrTo identifier, PtrTo> clauses)
-    : DDLQuery(cluster, {identifier, clauses})
-{
-}
-
-ASTPtr AlterTableQuery::convertToOld() const
-{
-    auto query = std::make_shared();
-
-    {
-        auto table = get(TABLE)->convertToOld();
-        query->database = table->as()->getDatabaseName();
-        query->table = table->as()->shortName();
-    }
-
-    query->cluster = cluster_name;
-
-    query->set(query->command_list, get(CLAUSES)->convertToOld());
-
-    return query;
-}
-
-}
-
-namespace DB
-{
-
-using namespace AST;
-
-antlrcpp::Any ParseTreeVisitor::visitAlterTableClauseAddColumn(ClickHouseParser::AlterTableClauseAddColumnContext * ctx)
-{
-    auto after = ctx->AFTER() ? visit(ctx->nestedIdentifier()).as>() : nullptr;
-    return AlterTableClause::createAddColumn(!!ctx->IF(), visit(ctx->tableColumnDfnt()), after);
-}
-
-antlrcpp::Any ParseTreeVisitor::visitAlterTableClauseAddIndex(ClickHouseParser::AlterTableClauseAddIndexContext * ctx)
-{
-    auto after = ctx->AFTER() ? visit(ctx->nestedIdentifier()).as>() : nullptr;
-    return AlterTableClause::createAddIndex(!!ctx->IF(), visit(ctx->tableIndexDfnt()), after);
-}
-
-antlrcpp::Any ParseTreeVisitor::visitAlterTableClauseAddProjection(ClickHouseParser::AlterTableClauseAddProjectionContext * ctx)
-{
-    auto after = ctx->AFTER() ? visit(ctx->nestedIdentifier()).as>() : nullptr;
-    return AlterTableClause::createAddProjection(!!ctx->IF(), visit(ctx->tableProjectionDfnt()), after);
-}
-
-antlrcpp::Any ParseTreeVisitor::visitAlterTableClauseAttach(ClickHouseParser::AlterTableClauseAttachContext *ctx)
-{
-    auto from = ctx->tableIdentifier() ? visit(ctx->tableIdentifier()).as>() : nullptr;
-    return AlterTableClause::createAttach(visit(ctx->partitionClause()), from);
-}
-
-antlrcpp::Any ParseTreeVisitor::visitAlterTableClauseClearColumn(ClickHouseParser::AlterTableClauseClearColumnContext * ctx)
-{
-    auto partition = ctx->partitionClause() ? visit(ctx->partitionClause()).as>() : nullptr;
-    return AlterTableClause::createClearColumn(!!ctx->IF(), visit(ctx->nestedIdentifier()), partition);
-}
-
-antlrcpp::Any ParseTreeVisitor::visitAlterTableClauseClearIndex(ClickHouseParser::AlterTableClauseClearIndexContext * ctx)
-{
-    auto partition = ctx->partitionClause() ? visit(ctx->partitionClause()).as>() : nullptr;
-    return AlterTableClause::createClearIndex(!!ctx->IF(), visit(ctx->nestedIdentifier()), partition);
-}
-
-antlrcpp::Any ParseTreeVisitor::visitAlterTableClauseClearProjection(ClickHouseParser::AlterTableClauseClearProjectionContext * ctx)
-{
-    auto partition = ctx->partitionClause() ? visit(ctx->partitionClause()).as>() : nullptr;
-    return AlterTableClause::createClearProjection(!!ctx->IF(), visit(ctx->nestedIdentifier()), partition);
-}
-
-antlrcpp::Any ParseTreeVisitor::visitAlterTableClauseComment(ClickHouseParser::AlterTableClauseCommentContext * ctx)
-{
-    return AlterTableClause::createComment(!!ctx->IF(), visit(ctx->nestedIdentifier()), Literal::createString(ctx->STRING_LITERAL()));
-}
-
-antlrcpp::Any ParseTreeVisitor::visitAlterTableClauseDelete(ClickHouseParser::AlterTableClauseDeleteContext *ctx)
-{
-    return AlterTableClause::createDelete(visit(ctx->columnExpr()));
-}
-
-antlrcpp::Any ParseTreeVisitor::visitAlterTableClauseDetach(ClickHouseParser::AlterTableClauseDetachContext *ctx)
-{
-    return AlterTableClause::createDetach(visit(ctx->partitionClause()));
-}
-
-antlrcpp::Any ParseTreeVisitor::visitAlterTableClauseDropColumn(ClickHouseParser::AlterTableClauseDropColumnContext * ctx)
-{
-    return AlterTableClause::createDropColumn(!!ctx->IF(), visit(ctx->nestedIdentifier()));
-}
-
-antlrcpp::Any ParseTreeVisitor::visitAlterTableClauseDropIndex(ClickHouseParser::AlterTableClauseDropIndexContext * ctx)
-{
-    return AlterTableClause::createDropIndex(!!ctx->IF(), visit(ctx->nestedIdentifier()));
-}
-
-antlrcpp::Any ParseTreeVisitor::visitAlterTableClauseDropProjection(ClickHouseParser::AlterTableClauseDropProjectionContext * ctx)
-{
-    return AlterTableClause::createDropProjection(!!ctx->IF(), visit(ctx->nestedIdentifier()));
-}
-
-antlrcpp::Any ParseTreeVisitor::visitAlterTableClauseDropPartition(ClickHouseParser::AlterTableClauseDropPartitionContext *ctx)
-{
-    return AlterTableClause::createDropPartition(visit(ctx->partitionClause()));
-}
-
-antlrcpp::Any ParseTreeVisitor::visitAlterTableClauseFreezePartition(ClickHouseParser::AlterTableClauseFreezePartitionContext *ctx)
-{
-    auto clause = ctx->partitionClause() ? visit(ctx->partitionClause()).as>() : nullptr;
-    return AlterTableClause::createFreezePartition(clause);
-}
-
-antlrcpp::Any ParseTreeVisitor::visitAlterTableClauseMaterializeIndex(ClickHouseParser::AlterTableClauseMaterializeIndexContext * ctx)
-{
-    auto partition = ctx->partitionClause() ? visit(ctx->partitionClause()).as>() : nullptr;
-    return AlterTableClause::createMaterializeIndex(!!ctx->IF(), visit(ctx->nestedIdentifier()), partition);
-}
-
-antlrcpp::Any ParseTreeVisitor::visitAlterTableClauseMaterializeProjection(ClickHouseParser::AlterTableClauseMaterializeProjectionContext * ctx)
-{
-    auto partition = ctx->partitionClause() ? visit(ctx->partitionClause()).as>() : nullptr;
-    return AlterTableClause::createMaterializeProjection(!!ctx->IF(), visit(ctx->nestedIdentifier()), partition);
-}
-
-antlrcpp::Any ParseTreeVisitor::visitAlterTableClauseModify(ClickHouseParser::AlterTableClauseModifyContext * ctx)
-{
-    return AlterTableClause::createModify(!!ctx->IF(), visit(ctx->tableColumnDfnt()));
-}
-
-antlrcpp::Any ParseTreeVisitor::visitAlterTableClauseModifyCodec(ClickHouseParser::AlterTableClauseModifyCodecContext * ctx)
-{
-    return AlterTableClause::createCodec(!!ctx->IF(), visit(ctx->nestedIdentifier()), visit(ctx->codecExpr()));
-}
-
-antlrcpp::Any ParseTreeVisitor::visitAlterTableClauseModifyComment(ClickHouseParser::AlterTableClauseModifyCommentContext *ctx)
-{
-    return AlterTableClause::createComment(!!ctx->IF(), visit(ctx->nestedIdentifier()), Literal::createString(ctx->STRING_LITERAL()));
-}
-
-antlrcpp::Any ParseTreeVisitor::visitAlterTableClauseModifyOrderBy(ClickHouseParser::AlterTableClauseModifyOrderByContext * ctx)
-{
-    return AlterTableClause::createOrderBy(visit(ctx->columnExpr()));
-}
-
-antlrcpp::Any ParseTreeVisitor::visitAlterTableClauseModifyRemove(ClickHouseParser::AlterTableClauseModifyRemoveContext *ctx)
-{
-    return AlterTableClause::createRemove(!!ctx->IF(), visit(ctx->nestedIdentifier()), visit(ctx->tableColumnPropertyType()));
-}
-
-antlrcpp::Any ParseTreeVisitor::visitAlterTableClauseModifyTTL(ClickHouseParser::AlterTableClauseModifyTTLContext *ctx)
-{
-    return AlterTableClause::createTTL(visit(ctx->ttlClause()));
-}
-
-antlrcpp::Any ParseTreeVisitor::visitAlterTableClauseMovePartition(ClickHouseParser::AlterTableClauseMovePartitionContext *ctx)
-{
-    if (ctx->DISK())
-        return AlterTableClause::createMovePartitionToDisk(visit(ctx->partitionClause()), Literal::createString(ctx->STRING_LITERAL()));
-    if (ctx->TABLE())
-        return AlterTableClause::createMovePartitionToTable(visit(ctx->partitionClause()), visit(ctx->tableIdentifier()));
-    if (ctx->VOLUME())
-        return AlterTableClause::createMovePartitionToVolume(visit(ctx->partitionClause()), Literal::createString(ctx->STRING_LITERAL()));
-    __builtin_unreachable();
-}
-
-antlrcpp::Any ParseTreeVisitor::visitAlterTableClauseRemoveTTL(ClickHouseParser::AlterTableClauseRemoveTTLContext *)
-{
-    return AlterTableClause::createRemoveTTL();
-}
-
-antlrcpp::Any ParseTreeVisitor::visitAlterTableClauseRename(ClickHouseParser::AlterTableClauseRenameContext *ctx)
-{
-    return AlterTableClause::createRename(!!ctx->IF(), visit(ctx->nestedIdentifier(0)), visit(ctx->nestedIdentifier(1)));
-}
-
-antlrcpp::Any ParseTreeVisitor::visitAlterTableClauseReplace(ClickHouseParser::AlterTableClauseReplaceContext *ctx)
-{
-    return AlterTableClause::createReplace(visit(ctx->partitionClause()), visit(ctx->tableIdentifier()));
-}
-
-antlrcpp::Any ParseTreeVisitor::visitAlterTableClauseUpdate(ClickHouseParser::AlterTableClauseUpdateContext *ctx)
-{
-    return AlterTableClause::createUpdate(visit(ctx->assignmentExprList()), visit(ctx->whereClause()));
-}
-
-antlrcpp::Any ParseTreeVisitor::visitAlterTableStmt(ClickHouseParser::AlterTableStmtContext * ctx)
-{
-    auto cluster = ctx->clusterClause() ? visit(ctx->clusterClause()).as>() : nullptr;
-    auto list = std::make_shared>();
-    for (auto * clause : ctx->alterTableClause()) list->push(visit(clause));
-    return std::make_shared(cluster, visit(ctx->tableIdentifier()), list);
-}
-
-antlrcpp::Any ParseTreeVisitor::visitAssignmentExpr(ClickHouseParser::AssignmentExprContext *ctx)
-{
-    return std::make_shared(visit(ctx->nestedIdentifier()), visit(ctx->columnExpr()));
-}
-
-antlrcpp::Any ParseTreeVisitor::visitAssignmentExprList(ClickHouseParser::AssignmentExprListContext *ctx)
-{
-    auto list = std::make_shared();
-    for (auto * expr : ctx->assignmentExpr()) list->push(visit(expr));
-    return list;
-}
-
-antlrcpp::Any ParseTreeVisitor::visitTableColumnPropertyType(ClickHouseParser::TableColumnPropertyTypeContext *ctx)
-{
-    if (ctx->ALIAS()) return TableColumnPropertyType::ALIAS;
-    if (ctx->CODEC()) return TableColumnPropertyType::CODEC;
-    if (ctx->COMMENT()) return TableColumnPropertyType::COMMENT;
-    if (ctx->DEFAULT()) return TableColumnPropertyType::DEFAULT;
-    if (ctx->MATERIALIZED()) return TableColumnPropertyType::MATERIALIZED;
-    if (ctx->TTL()) return TableColumnPropertyType::TTL;
-    __builtin_unreachable();
-}
-
-antlrcpp::Any ParseTreeVisitor::visitPartitionClause(ClickHouseParser::PartitionClauseContext *ctx)
-{
-    if (ctx->STRING_LITERAL())
-        return std::make_shared(Literal::createString(ctx->STRING_LITERAL()));
-
-    auto expr = visit(ctx->columnExpr()).as>();
-
-    if (expr->getType() == ColumnExpr::ExprType::LITERAL)
-        return std::make_shared(PtrTo>(new List{expr->getLiteral()}));
-
-    if (expr->getType() == ColumnExpr::ExprType::FUNCTION && expr->getFunctionName() == "tuple")
-    {
-        auto list = std::make_shared>();
-
-        for (auto it = expr->argumentsBegin(); it != expr->argumentsEnd(); ++it)
-        {
-            auto * literal = (*it)->as();
-
-            if (literal->getType() == ColumnExpr::ExprType::LITERAL)
-                list->push(literal->getLiteral());
-            else
-            {
-                // TODO: 'Expected tuple of literals as Partition Expression'.
-            }
-        }
-
-        return std::make_shared(list);
-    }
-
-    // TODO: 'Expected tuple of literals as Partition Expression'.
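// [Editor's aside — illustration only, not part of the original patch.]
// visitPartitionClause above accepts three shapes of PARTITION expression:
// a string id, a single literal, or a tuple() call whose arguments must all
// be literals; the tuple case is flattened into a plain list. A minimal
// standalone sketch of that flattening over a simplified expression type
// (SimpleExpr and flattenPartitionExpr are hypothetical stand-ins for the
// real ColumnExpr/Literal classes):

#include <memory>
#include <optional>
#include <string>
#include <variant>
#include <vector>

struct SimpleExpr
{
    using Literal = std::variant<int64_t, std::string>;

    std::optional<Literal> literal;                // set for literal nodes
    std::string function_name;                     // e.g. "tuple" for function nodes
    std::vector<std::shared_ptr<SimpleExpr>> args; // function arguments
};

/// Returns the flattened literal list, or std::nullopt when the expression
/// is neither a literal nor a tuple of literals (the TODO branches above).
std::optional<std::vector<SimpleExpr::Literal>> flattenPartitionExpr(const SimpleExpr & expr)
{
    if (expr.literal)
        return std::vector<SimpleExpr::Literal>{*expr.literal};

    if (expr.function_name != "tuple")
        return std::nullopt;

    std::vector<SimpleExpr::Literal> out;
    for (const auto & arg : expr.args)
    {
        if (!arg->literal)
            return std::nullopt; // e.g. PARTITION (1, x + 1) is rejected
        out.push_back(*arg->literal);
    }
    return out;
}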
-    __builtin_unreachable();
-}
-
-}
diff --git a/src/Parsers/New/AST/AlterTableQuery.h b/src/Parsers/New/AST/AlterTableQuery.h
deleted file mode 100644
index 7e7783c49ad..00000000000
--- a/src/Parsers/New/AST/AlterTableQuery.h
+++ /dev/null
@@ -1,191 +0,0 @@
-#pragma once
-
-#include
-
-
-namespace DB::AST
-{
-
-class AssignmentExpr : public INode
-{
-    public:
-        AssignmentExpr(PtrTo identifier, PtrTo expr);
-
-        ASTPtr convertToOld() const override;
-
-    private:
-        enum ChildIndex : UInt8
-        {
-            IDENTIFIER = 0, // Identifier
-            EXPR = 1,       // ColumnExpr
-        };
-};
-
-enum class TableColumnPropertyType
-{
-    ALIAS,
-    CODEC,
-    COMMENT,
-    DEFAULT,
-    MATERIALIZED,
-    TTL,
-};
-
-class PartitionClause : public INode
-{
-    public:
-        explicit PartitionClause(PtrTo id);
-        explicit PartitionClause(PtrTo> list);
-
-        ASTPtr convertToOld() const override;
-
-    private:
-        enum ChildIndex : UInt8
-        {
-            ID = 0,   // Literal
-            LIST = 0, // List
-        };
-        enum class ClauseType
-        {
-            ID,
-            LIST,
-        };
-
-        const ClauseType clause_type;
-
-        PartitionClause(ClauseType type, PtrList exprs);
-};
-
-class AlterTableClause : public INode
-{
-    public:
-        static PtrTo createAddColumn(bool if_not_exists, PtrTo element, PtrTo after);
-        static PtrTo createAddIndex(bool if_not_exists, PtrTo element, PtrTo after);
-        static PtrTo createAddProjection(bool if_not_exists, PtrTo element, PtrTo after);
-        static PtrTo createAttach(PtrTo clause, PtrTo from);
-        static PtrTo createClearColumn(bool if_exists, PtrTo identifier, PtrTo in);
-        static PtrTo createClearIndex(bool if_exists, PtrTo identifier, PtrTo in);
-        static PtrTo createClearProjection(bool if_exists, PtrTo identifier, PtrTo in);
-        static PtrTo createCodec(bool if_exists, PtrTo identifier, PtrTo codec);
-        static PtrTo createComment(bool if_exists, PtrTo identifier, PtrTo comment);
-        static PtrTo createDelete(PtrTo expr);
-        static PtrTo createDetach(PtrTo clause);
-        static PtrTo createDropColumn(bool if_exists, PtrTo identifier);
-        static PtrTo createDropIndex(bool if_exists, PtrTo identifier);
-        static PtrTo createDropProjection(bool if_exists, PtrTo identifier);
-        static PtrTo createDropPartition(PtrTo clause);
-        static PtrTo createFreezePartition(PtrTo clause);
-        static PtrTo createMaterializeIndex(bool if_exists, PtrTo identifier, PtrTo in);
-        static PtrTo createMaterializeProjection(bool if_exists, PtrTo identifier, PtrTo in);
-        static PtrTo createModify(bool if_exists, PtrTo element);
-        static PtrTo createMovePartitionToDisk(PtrTo clause, PtrTo literal);
-        static PtrTo createMovePartitionToTable(PtrTo clause, PtrTo identifier);
-        static PtrTo createMovePartitionToVolume(PtrTo clause, PtrTo literal);
-        static PtrTo createRemove(bool if_exists, PtrTo identifier, TableColumnPropertyType type);
-        static PtrTo createRemoveTTL();
-        static PtrTo createRename(bool if_exists, PtrTo identifier, PtrTo to);
-        static PtrTo createOrderBy(PtrTo expr);
-        static PtrTo createReplace(PtrTo clause, PtrTo from);
-        static PtrTo createTTL(PtrTo clause);
-        static PtrTo createUpdate(PtrTo list, PtrTo where);
-
-        ASTPtr convertToOld() const override;
-
-    private:
-        enum ChildIndex : UInt8
-        {
-            // ADD COLUMN, INDEX or PROJECTION
-            ELEMENT = 0, // TableElementExpr (COLUMN, CONSTRAINT, INDEX, PROJECTION)
-            AFTER = 1,   // Identifier (optional)
-
-            // ATTACH/REPLACE
-            PARTITION = 0, // PartitionClause
-            FROM = 1,      // TableIdentifier (optional)
-
-            // CLEAR COLUMN, INDEX or PROJECTION
-            IN = 1, // PartitionClause
-
-            // CODEC, COMMENT and RENAME
-            COLUMN = 0, // Identifier
-            CODEC = 1,  // CodecExpr
-
-            // COMMENT
-            COMMENT = 1, // StringLiteral
-
-            // DELETE
-            EXPR = 0, // ColumnExpr
-
-            // MOVE
-            // TO = 1, // TableIdentifier or StringLiteral
-
-            // RENAME
-            TO = 1, // Identifier
-
-            // TTL
-            CLAUSE = 0, // TTLClause
-
-            // UPDATE
-            ASSIGNMENTS = 0, // AssignmentExprList
-            WHERE = 1,       // WhereClause
-        };
-
-        enum class ClauseType
-        {
-            ADD_COLUMN,
-            ADD_INDEX,
-            ADD_PROJECTION,
-            ATTACH,
-            CLEAR_COLUMN,
-            CLEAR_INDEX,
-            CLEAR_PROJECTION,
-            CODEC,
-            COMMENT,
-            DELETE,
-            DETACH,
-            DROP_COLUMN,
-            DROP_INDEX,
-            DROP_PROJECTION,
-            DROP_PARTITION,
-            FREEZE_PARTITION,
-            MATERIALIZE_INDEX,
-            MATERIALIZE_PROJECTION,
-            MODIFY,
-            MOVE_PARTITION_TO_DISK,
-            MOVE_PARTITION_TO_TABLE,
-            MOVE_PARTITION_TO_VOLUME,
-            ORDER_BY,
-            REMOVE,
-            REMOVE_TTL,
-            RENAME,
-            REPLACE,
-            TTL,
-            UPDATE,
-        };
-
-        const ClauseType clause_type;
-        TableColumnPropertyType property_type = TableColumnPropertyType::ALIAS; // default value to silence PVS-Studio
-        union
-        {
-            bool if_exists;
-            bool if_not_exists;
-        };
-
-        AlterTableClause(ClauseType type, PtrList exprs);
-};
-
-class AlterTableQuery : public DDLQuery
-{
-    public:
-        AlterTableQuery(PtrTo cluster, PtrTo identifier, PtrTo> clauses);
-
-        ASTPtr convertToOld() const override;
-
-    private:
-        enum ChildIndex : UInt8
-        {
-            TABLE = 0,   // TableIdentifier
-            CLAUSES = 1, // List
-        };
-};
-
-}
diff --git a/src/Parsers/New/AST/AttachQuery.cpp b/src/Parsers/New/AST/AttachQuery.cpp
deleted file mode 100644
index 5fba573972b..00000000000
--- a/src/Parsers/New/AST/AttachQuery.cpp
+++ /dev/null
@@ -1,57 +0,0 @@
-#include
-
-#include
-#include
-#include
-
-
-namespace DB::AST
-{
-
-// static
-PtrTo AttachQuery::createDictionary(PtrTo clause, PtrTo identifier)
-{
-    return PtrTo(new AttachQuery(clause, QueryType::DICTIONARY, {identifier}));
-}
-
-AttachQuery::AttachQuery(PtrTo clause, QueryType type, PtrList exprs) : DDLQuery(clause, exprs), query_type(type)
-{
-}
-
-ASTPtr AttachQuery::convertToOld() const
-{
-    auto query = std::make_shared();
-
-    query->attach = true;
-
-    switch(query_type)
-    {
-        case QueryType::DICTIONARY:
-            query->is_dictionary = true;
-            {
-                auto table = get(NAME)->convertToOld();
-                query->database = table->as()->getDatabaseName();
-                query->table = table->as()->shortName();
-            }
-            break;
-    }
-
-    query->cluster = cluster_name;
-
-    return query;
-}
-
-}
-
-namespace DB
-{
-
-using namespace AST;
-
-antlrcpp::Any ParseTreeVisitor::visitAttachDictionaryStmt(ClickHouseParser::AttachDictionaryStmtContext *ctx)
-{
-    auto cluster = ctx->clusterClause() ? visit(ctx->clusterClause()).as>() : nullptr;
-    return AttachQuery::createDictionary(cluster, visit(ctx->tableIdentifier()));
-}
-
-}
diff --git a/src/Parsers/New/AST/AttachQuery.h b/src/Parsers/New/AST/AttachQuery.h
deleted file mode 100644
index f9b495b5b46..00000000000
--- a/src/Parsers/New/AST/AttachQuery.h
+++ /dev/null
@@ -1,32 +0,0 @@
-#pragma once
-
-#include
-
-
-namespace DB::AST
-{
-
-class AttachQuery : public DDLQuery
-{
-    public:
-        static PtrTo createDictionary(PtrTo clause, PtrTo identifier);
-
-        ASTPtr convertToOld() const override;
-
-    private:
-        enum ChildIndex : UInt8
-        {
-            NAME = 0, // TableIdentifier
-        };
-
-        enum class QueryType
-        {
-            DICTIONARY,
-        };
-
-        const QueryType query_type;
-
-        AttachQuery(PtrTo clause, QueryType type, PtrList exprs);
-};
-
-}
diff --git a/src/Parsers/New/AST/CheckQuery.cpp b/src/Parsers/New/AST/CheckQuery.cpp
deleted file mode 100644
index 87a7544ec34..00000000000
--- a/src/Parsers/New/AST/CheckQuery.cpp
+++ /dev/null
@@ -1,44 +0,0 @@
-#include
-
-#include
-#include
-#include
-#include
-#include
-#include
-
-
-namespace DB::AST
-{
-
-CheckQuery::CheckQuery(PtrTo identifier, PtrTo clause) : Query{identifier, clause}
-{
-}
-
-ASTPtr CheckQuery::convertToOld() const
-{
-    auto query = std::make_shared();
-
-    auto table = std::static_pointer_cast(get(NAME)->convertToOld());
-    query->database = table->getDatabaseName();
-    query->table = table->shortName();
-
-    if (has(PARTITION)) query->partition = get(PARTITION)->convertToOld();
-
-    return query;
-}
-
-}
-
-namespace DB
-{
-
-using namespace AST;
-
-antlrcpp::Any ParseTreeVisitor::visitCheckStmt(ClickHouseParser::CheckStmtContext *ctx)
-{
-    auto partition = ctx->partitionClause() ? visit(ctx->partitionClause()).as>() : nullptr;
-    return std::make_shared(visit(ctx->tableIdentifier()), partition);
-}
-
-}
diff --git a/src/Parsers/New/AST/CheckQuery.h b/src/Parsers/New/AST/CheckQuery.h
deleted file mode 100644
index d29d2c42acd..00000000000
--- a/src/Parsers/New/AST/CheckQuery.h
+++ /dev/null
@@ -1,24 +0,0 @@
-#pragma once
-
-#include
-
-
-namespace DB::AST
-{
-
-class CheckQuery : public Query
-{
-    public:
-        CheckQuery(PtrTo identifier, PtrTo clause);
-
-        ASTPtr convertToOld() const override;
-
-    private:
-        enum ChildIndex : UInt8
-        {
-            NAME = 0,      // TableIdentifier
-            PARTITION = 1, // PartitionClause (optional)
-        };
-};
-
-}
diff --git a/src/Parsers/New/AST/ColumnExpr.cpp b/src/Parsers/New/AST/ColumnExpr.cpp
deleted file mode 100644
index 0bfcee594cd..00000000000
--- a/src/Parsers/New/AST/ColumnExpr.cpp
+++ /dev/null
@@ -1,588 +0,0 @@
-#include
-
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-
-
-namespace DB::ErrorCodes
-{
-    extern int SYNTAX_ERROR;
-}
-
-namespace DB::AST
-{
-
-// static
-PtrTo ColumnExpr::createAlias(PtrTo expr, PtrTo alias)
-{
-    return PtrTo(new ColumnExpr(ExprType::ALIAS, {expr, alias}));
-}
-
-// static
-PtrTo ColumnExpr::createAsterisk(PtrTo identifier, bool single_column)
-{
-    auto expr = PtrTo(new ColumnExpr(ExprType::ASTERISK, {identifier}));
-    expr->expect_single_column = single_column;
-    return expr;
-}
-
-// static
-PtrTo ColumnExpr::createFunction(PtrTo name, PtrTo params, PtrTo args)
-{
-    // FIXME: make sure that all function names are camel-case.
-
-    // Flatten some consequent binary operators to a single multi-operator, because they are left-associative.
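// [Editor's aside — illustration only, not part of the original patch.]
// The flattening below turns a parse tree like and(and(a, b), c) — produced
// because AND/OR are parsed left-associatively — into the flat call
// and(a, b, c) that the old AST expects. A minimal sketch over a toy node
// type (ToyNode and flattenSameOp are hypothetical names, simplified from
// the real ColumnExpr; the real code flattens one nested side of a binary
// call, while this sketch handles either side):

#include <memory>
#include <string>
#include <vector>

struct ToyNode
{
    std::string name;                           // "and", "or", or a leaf label
    std::vector<std::shared_ptr<ToyNode>> args; // empty for leaves
};

/// Merges a nested operand of the same operator into one argument list:
/// and(and(a, b), c) -> and(a, b, c).
std::vector<std::shared_ptr<ToyNode>> flattenSameOp(
    const std::string & op, std::shared_ptr<ToyNode> lhs, std::shared_ptr<ToyNode> rhs)
{
    std::vector<std::shared_ptr<ToyNode>> args;
    if (lhs->name == op)
        args.insert(args.end(), lhs->args.begin(), lhs->args.end());
    else
        args.push_back(lhs);
    if (rhs->name == op)
        args.insert(args.end(), rhs->args.begin(), rhs->args.end());
    else
        args.push_back(rhs);
    return args;
}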
- if ((name->getName() == "or" || name->getName() == "and") && args && args->size() == 2) - { - const auto * left = (*args->begin())->as(); - const auto * right = (*++args->begin())->as(); - - if (left && left->getType() == ExprType::FUNCTION && left->getFunctionName() == name->getName()) - { - auto new_args = std::make_shared(); - for (const auto & arg : left->get(ARGS)->as()) - new_args->push(std::static_pointer_cast(arg)); - new_args->push(std::static_pointer_cast(*++args->begin())); - args = new_args; - } - else if (right && right->getType() == ExprType::FUNCTION && right->getFunctionName() == name->getName()) - { - auto new_args = std::make_shared(); - new_args->push(std::static_pointer_cast(*args->begin())); - for (const auto & arg : right->get(ARGS)->as()) - new_args->push(std::static_pointer_cast(arg)); - args = new_args; - } - } - - return PtrTo(new ColumnExpr(ExprType::FUNCTION, {name, params, args})); -} - -// static -PtrTo ColumnExpr::createIdentifier(PtrTo identifier) -{ - return PtrTo(new ColumnExpr(ExprType::IDENTIFIER, {identifier})); -} - -// static -PtrTo ColumnExpr::createLambda(PtrTo> params, PtrTo expr) -{ - return PtrTo(new ColumnExpr(ExprType::LAMBDA, {params, expr})); -} - -// static -PtrTo ColumnExpr::createLiteral(PtrTo literal) -{ - return PtrTo(new ColumnExpr(ExprType::LITERAL, {literal})); -} - -// static -PtrTo ColumnExpr::createSubquery(PtrTo query, bool scalar) -{ - if (scalar) query->shouldBeScalar(); - return PtrTo(new ColumnExpr(ExprType::SUBQUERY, {query})); -} - -ColumnExpr::ColumnExpr(ColumnExpr::ExprType type, PtrList exprs) : INode(exprs), expr_type(type) -{ -} - -ASTPtr ColumnExpr::convertToOld() const -{ - switch (expr_type) - { - case ExprType::ALIAS: - { - ASTPtr expr = get(EXPR)->convertToOld(); - - if (auto * expr_with_alias = dynamic_cast(expr.get())) - expr_with_alias->setAlias(get(ALIAS)->getName()); - else - throw std::runtime_error("Trying to convert new expression with alias to old one without alias support: " + expr->getID()); - - return expr; - } - case ExprType::ASTERISK: - if (has(TABLE)) - { - auto expr = std::make_shared(); - expr->children.push_back(get(TABLE)->convertToOld()); - return expr; - } - return std::make_shared(); - case ExprType::FUNCTION: - { - auto func = std::make_shared(); - - func->name = get(NAME)->getName(); - if (has(ARGS)) - { - func->arguments = get(ARGS)->convertToOld(); - func->children.push_back(func->arguments); - } - if (has(PARAMS)) - { - func->parameters = get(PARAMS)->convertToOld(); - func->children.push_back(func->parameters); - } - - return func; - } - case ExprType::IDENTIFIER: - return get(IDENTIFIER)->convertToOld(); - case ExprType::LAMBDA: - { - auto func = std::make_shared(); - auto tuple = std::make_shared(); - - func->name = "lambda"; - func->arguments = std::make_shared(); - func->arguments->children.push_back(tuple); - func->arguments->children.push_back(get(LAMBDA_EXPR)->convertToOld()); - func->children.push_back(func->arguments); - - tuple->name = "tuple"; - tuple->arguments = get(LAMBDA_ARGS)->convertToOld(); - tuple->children.push_back(tuple->arguments); - - return func; - } - case ExprType::LITERAL: - return get(LITERAL)->convertToOld(); - case ExprType::SUBQUERY: - { - auto subquery = std::make_shared(); - subquery->children.push_back(get(SUBQUERY)->convertToOld()); - return subquery; - } - } - __builtin_unreachable(); -} - -String ColumnExpr::toString() const -{ - switch(expr_type) - { - case ExprType::LITERAL: return get(LITERAL)->toString(); - default: return {}; - } - 
__builtin_unreachable(); -} - -String ColumnExpr::dumpInfo() const -{ - switch(expr_type) - { - case ExprType::ALIAS: return "ALIAS"; - case ExprType::ASTERISK: return "ASTERISK"; - case ExprType::FUNCTION: return "FUNCTION"; - case ExprType::IDENTIFIER: return "IDENTIFIER"; - case ExprType::LAMBDA: return "LAMBDA"; - case ExprType::LITERAL: return "LITERAL"; - case ExprType::SUBQUERY: return "SUBQUERY"; - } - __builtin_unreachable(); -} - -} - -namespace DB -{ - -using namespace AST; - -antlrcpp::Any ParseTreeVisitor::visitColumnArgExpr(ClickHouseParser::ColumnArgExprContext *ctx) -{ - if (ctx->columnExpr()) return visit(ctx->columnExpr()); - if (ctx->columnLambdaExpr()) return visit(ctx->columnLambdaExpr()); - __builtin_unreachable(); -} - -antlrcpp::Any ParseTreeVisitor::visitColumnArgList(ClickHouseParser::ColumnArgListContext *ctx) -{ - auto list = std::make_shared(); - for (auto * arg : ctx->columnArgExpr()) list->push(visit(arg)); - return list; -} - -antlrcpp::Any ParseTreeVisitor::visitColumnExprAlias(ClickHouseParser::ColumnExprAliasContext *ctx) -{ - if (ctx->AS()) return ColumnExpr::createAlias(visit(ctx->columnExpr()), visit(ctx->identifier())); - else return ColumnExpr::createAlias(visit(ctx->columnExpr()), visit(ctx->alias())); -} - -antlrcpp::Any ParseTreeVisitor::visitColumnExprAnd(ClickHouseParser::ColumnExprAndContext *ctx) -{ - auto name = std::make_shared("and"); - auto args = std::make_shared(); - - for (auto * expr : ctx->columnExpr()) args->push(visit(expr)); - - return ColumnExpr::createFunction(name, nullptr, args); -} - -antlrcpp::Any ParseTreeVisitor::visitColumnExprArray(ClickHouseParser::ColumnExprArrayContext *ctx) -{ - auto name = std::make_shared("array"); - auto args = ctx->columnExprList() ? visit(ctx->columnExprList()).as>() : nullptr; - return ColumnExpr::createFunction(name, nullptr, args); -} - -antlrcpp::Any ParseTreeVisitor::visitColumnExprArrayAccess(ClickHouseParser::ColumnExprArrayAccessContext *ctx) -{ - auto name = std::make_shared("arrayElement"); - auto args = std::make_shared(); - - for (auto * expr : ctx->columnExpr()) args->push(visit(expr)); - - return ColumnExpr::createFunction(name, nullptr, args); -} - -antlrcpp::Any ParseTreeVisitor::visitColumnExprAsterisk(ClickHouseParser::ColumnExprAsteriskContext *ctx) -{ - auto table = ctx->tableIdentifier() ? visit(ctx->tableIdentifier()).as>() : nullptr; - return ColumnExpr::createAsterisk(table, true); -} - -antlrcpp::Any ParseTreeVisitor::visitColumnExprBetween(ClickHouseParser::ColumnExprBetweenContext *ctx) -{ - PtrTo expr1, expr2; - - { - auto name = std::make_shared(ctx->NOT() ? "lessOrEquals" : "greaterOrEquals"); - auto args = std::make_shared(); - args->push(visit(ctx->columnExpr(0))); - args->push(visit(ctx->columnExpr(1))); - expr1 = ColumnExpr::createFunction(name, nullptr, args); - } - - { - auto name = std::make_shared(ctx->NOT() ? 
"greaterOrEquals" : "lessOrEquals"); - auto args = std::make_shared(); - args->push(visit(ctx->columnExpr(0))); - args->push(visit(ctx->columnExpr(2))); - expr2 = ColumnExpr::createFunction(name, nullptr, args); - } - - auto name = std::make_shared("and"); - auto args = std::make_shared(); - - args->push(expr1); - args->push(expr2); - - return ColumnExpr::createFunction(name, nullptr, args); -} - -antlrcpp::Any ParseTreeVisitor::visitColumnExprCase(ClickHouseParser::ColumnExprCaseContext *ctx) -{ - auto has_case_expr = (ctx->ELSE() && ctx->columnExpr().size() % 2 == 0) || (!ctx->ELSE() && ctx->columnExpr().size() % 2 == 1); - auto name = std::make_shared(has_case_expr ? "caseWithExpression" : "multiIf"); - auto args = std::make_shared(); - - for (auto * expr : ctx->columnExpr()) args->push(visit(expr)); - if (!ctx->ELSE()) args->push(ColumnExpr::createLiteral(Literal::createNull())); - - return ColumnExpr::createFunction(name, nullptr, args); -} - -antlrcpp::Any ParseTreeVisitor::visitColumnExprCast(ClickHouseParser::ColumnExprCastContext *ctx) -{ - auto args = std::make_shared(); - - args->push(visit(ctx->columnExpr())); - args->push(ColumnExpr::createLiteral(Literal::createString(visit(ctx->columnTypeExpr()).as>()->toString()))); - - return ColumnExpr::createFunction(std::make_shared("cast"), nullptr, args); -} - -antlrcpp::Any ParseTreeVisitor::visitColumnExprDate(ClickHouseParser::ColumnExprDateContext *ctx) -{ - auto name = std::make_shared("toDate"); - auto args = std::make_shared(); - - args->push(ColumnExpr::createLiteral(Literal::createString(ctx->STRING_LITERAL()))); - - return ColumnExpr::createFunction(name, nullptr, args); -} - -antlrcpp::Any ParseTreeVisitor::visitColumnExprExtract(ClickHouseParser::ColumnExprExtractContext *ctx) -{ - String name; - auto args = std::make_shared(); - - if (ctx->interval()->SECOND()) name = "toSecond"; - else if (ctx->interval()->MINUTE()) name = "toMinute"; - else if (ctx->interval()->HOUR()) name = "toHour"; - else if (ctx->interval()->DAY()) name = "toDayOfMonth"; - else if (ctx->interval()->WEEK()) - throw Exception( - "The syntax 'EXTRACT(WEEK FROM date)' is not supported, cannot extract the number of a week", ErrorCodes::SYNTAX_ERROR); - else if (ctx->interval()->MONTH()) name = "toMonth"; - else if (ctx->interval()->QUARTER()) name = "toQuarter"; - else if (ctx->interval()->YEAR()) name = "toYear"; - else __builtin_unreachable(); - - args->push(visit(ctx->columnExpr())); - - return ColumnExpr::createFunction(std::make_shared(name), nullptr, args); -} - -antlrcpp::Any ParseTreeVisitor::visitColumnExprFunction(ClickHouseParser::ColumnExprFunctionContext *ctx) -{ - auto name = visit(ctx->identifier()).as>(); - auto params = ctx->columnExprList() ? visit(ctx->columnExprList()).as>() : nullptr; - auto args = ctx->columnArgList() ? 
visit(ctx->columnArgList()).as>() : nullptr; - - if (ctx->DISTINCT()) name = std::make_shared(name->getName() + "Distinct"); - - return ColumnExpr::createFunction(name, params, args); -} - -antlrcpp::Any ParseTreeVisitor::visitColumnExprIdentifier(ClickHouseParser::ColumnExprIdentifierContext *ctx) -{ - return ColumnExpr::createIdentifier(visit(ctx->columnIdentifier())); -} - -antlrcpp::Any ParseTreeVisitor::visitColumnExprInterval(ClickHouseParser::ColumnExprIntervalContext *ctx) -{ - PtrTo name; - auto args = std::make_shared(); - - if (ctx->interval()->SECOND()) name = std::make_shared("toIntervalSecond"); - else if (ctx->interval()->MINUTE()) name = std::make_shared("toIntervalMinute"); - else if (ctx->interval()->HOUR()) name = std::make_shared("toIntervalHour"); - else if (ctx->interval()->DAY()) name = std::make_shared("toIntervalDay"); - else if (ctx->interval()->WEEK()) name = std::make_shared("toIntervalWeek"); - else if (ctx->interval()->MONTH()) name = std::make_shared("toIntervalMonth"); - else if (ctx->interval()->QUARTER()) name = std::make_shared("toIntervalQuarter"); - else if (ctx->interval()->YEAR()) name = std::make_shared("toIntervalYear"); - else __builtin_unreachable(); - - args->push(visit(ctx->columnExpr())); - - return ColumnExpr::createFunction(name, nullptr, args); -} - -antlrcpp::Any ParseTreeVisitor::visitColumnExprIsNull(ClickHouseParser::ColumnExprIsNullContext *ctx) -{ - auto name = std::make_shared(ctx->NOT() ? "isNotNull" : "isNull"); - auto args = std::make_shared(); - - args->push(visit(ctx->columnExpr())); - - return ColumnExpr::createFunction(name, nullptr, args); -} - -antlrcpp::Any ParseTreeVisitor::visitColumnExprList(ClickHouseParser::ColumnExprListContext *ctx) -{ - auto list = std::make_shared(); - for (auto * expr : ctx->columnsExpr()) list->push(visit(expr)); - return list; -} - -antlrcpp::Any ParseTreeVisitor::visitColumnExprLiteral(ClickHouseParser::ColumnExprLiteralContext *ctx) -{ - return ColumnExpr::createLiteral(visit(ctx->literal()).as>()); -} - -antlrcpp::Any ParseTreeVisitor::visitColumnExprNegate(ClickHouseParser::ColumnExprNegateContext *ctx) -{ - auto name = std::make_shared("negate"); - auto args = std::make_shared(); - - args->push(visit(ctx->columnExpr())); - - return ColumnExpr::createFunction(name, nullptr, args); -} - -antlrcpp::Any ParseTreeVisitor::visitColumnExprNot(ClickHouseParser::ColumnExprNotContext *ctx) -{ - auto name = std::make_shared("not"); - auto args = std::make_shared(); - - args->push(visit(ctx->columnExpr())); - - return ColumnExpr::createFunction(name, nullptr, args); -} - -antlrcpp::Any ParseTreeVisitor::visitColumnExprOr(ClickHouseParser::ColumnExprOrContext *ctx) -{ - auto name = std::make_shared("or"); - - auto args = std::make_shared(); - for (auto * expr : ctx->columnExpr()) args->push(visit(expr)); - - return ColumnExpr::createFunction(name, nullptr, args); -} - -antlrcpp::Any ParseTreeVisitor::visitColumnExprParens(ClickHouseParser::ColumnExprParensContext *ctx) -{ - return visit(ctx->columnExpr()); -} - -antlrcpp::Any ParseTreeVisitor::visitColumnExprPrecedence1(ClickHouseParser::ColumnExprPrecedence1Context *ctx) -{ - PtrTo name; - if (ctx->ASTERISK()) name = std::make_shared("multiply"); - else if (ctx->SLASH()) name = std::make_shared("divide"); - else if (ctx->PERCENT()) name = std::make_shared("modulo"); - - auto args = std::make_shared(); - for (auto * expr : ctx->columnExpr()) args->push(visit(expr)); - - return ColumnExpr::createFunction(name, nullptr, args); -} - -antlrcpp::Any 
ParseTreeVisitor::visitColumnExprPrecedence2(ClickHouseParser::ColumnExprPrecedence2Context *ctx) -{ - PtrTo name; - if (ctx->PLUS()) name = std::make_shared("plus"); - else if (ctx->DASH()) name = std::make_shared("minus"); - else if (ctx->CONCAT()) name = std::make_shared("concat"); - - auto args = std::make_shared(); - for (auto * expr : ctx->columnExpr()) args->push(visit(expr)); - - return ColumnExpr::createFunction(name, nullptr, args); -} - -antlrcpp::Any ParseTreeVisitor::visitColumnExprPrecedence3(ClickHouseParser::ColumnExprPrecedence3Context *ctx) -{ - PtrTo name; - if (ctx->EQ_DOUBLE() || ctx->EQ_SINGLE()) name = std::make_shared("equals"); - else if (ctx->NOT_EQ()) name = std::make_shared("notEquals"); - else if (ctx->LE()) name = std::make_shared("lessOrEquals"); - else if (ctx->GE()) name = std::make_shared("greaterOrEquals"); - else if (ctx->LT()) name = std::make_shared("less"); - else if (ctx->GT()) name = std::make_shared("greater"); - else if (ctx->LIKE()) - { - if (ctx->NOT()) name = std::make_shared("notLike"); - else name = std::make_shared("like"); - } - else if (ctx->ILIKE()) - { - if (ctx->NOT()) name = std::make_shared("notILike"); - else name = std::make_shared("ilike"); - } - else if (ctx->IN()) - { - if (ctx->GLOBAL()) - { - if (ctx->NOT()) name = std::make_shared("globalNotIn"); - else name = std::make_shared("globalIn"); - } - else - { - if (ctx->NOT()) name = std::make_shared("notIn"); - else name = std::make_shared("in"); - } - } - - auto args = std::make_shared(); - for (auto * expr : ctx->columnExpr()) args->push(visit(expr)); - - return ColumnExpr::createFunction(name, nullptr, args); -} - -antlrcpp::Any ParseTreeVisitor::visitColumnExprSubquery(ClickHouseParser::ColumnExprSubqueryContext *ctx) -{ - // IN-operator is special since it accepts non-scalar subqueries on the right side. 
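// [Editor's aside — illustration only, not part of the original patch.]
// The check below marks a subquery as scalar unless its direct parent is an
// IN-expression: `SELECT (SELECT 1)` must yield exactly one row and column,
// while `x IN (SELECT y FROM t)` may yield a whole column. A tiny sketch of
// that decision (ParentKind is a hypothetical stand-in for the dynamic_cast
// against the IN-operator parser context):

enum class ParentKind { InOperator, Other };

constexpr bool subqueryMustBeScalar(ParentKind parent)
{
    return parent != ParentKind::InOperator;
}

static_assert(subqueryMustBeScalar(ParentKind::Other));
static_assert(!subqueryMustBeScalar(ParentKind::InOperator));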
- auto * parent = dynamic_cast(ctx->parent); - return ColumnExpr::createSubquery(visit(ctx->selectUnionStmt()), !(parent && parent->IN())); -} - -antlrcpp::Any ParseTreeVisitor::visitColumnExprSubstring(ClickHouseParser::ColumnExprSubstringContext *ctx) -{ - auto name = std::make_shared("substring"); - auto args = std::make_shared(); - - for (auto * expr : ctx->columnExpr()) args->push(visit(expr)); - - return ColumnExpr::createFunction(name, nullptr, args); -} - -antlrcpp::Any ParseTreeVisitor::visitColumnExprTernaryOp(ClickHouseParser::ColumnExprTernaryOpContext *ctx) -{ - auto name = std::make_shared("if"); - auto args = std::make_shared(); - - for (auto * expr : ctx->columnExpr()) args->push(visit(expr)); - - return ColumnExpr::createFunction(name, nullptr, args); -} - -antlrcpp::Any ParseTreeVisitor::visitColumnExprTimestamp(ClickHouseParser::ColumnExprTimestampContext *ctx) -{ - auto name = std::make_shared("toDateTime"); - auto args = std::make_shared(); - - args->push(ColumnExpr::createLiteral(Literal::createString(ctx->STRING_LITERAL()))); - - return ColumnExpr::createFunction(name, nullptr, args); -} - -antlrcpp::Any ParseTreeVisitor::visitColumnExprTrim(ClickHouseParser::ColumnExprTrimContext *ctx) -{ - auto name = std::make_shared("trim"); - auto args = std::make_shared(); - auto params = std::make_shared(); - - args->push(visit(ctx->columnExpr())); - // TODO: params->append(Literal::createString(???)); - params->push(ColumnExpr::createLiteral(Literal::createString(ctx->STRING_LITERAL()))); - - return ColumnExpr::createFunction(name, params, args); -} - -antlrcpp::Any ParseTreeVisitor::visitColumnExprTuple(ClickHouseParser::ColumnExprTupleContext *ctx) -{ - auto name = std::make_shared("tuple"); - auto args = visit(ctx->columnExprList()).as>(); - return ColumnExpr::createFunction(name, nullptr, args); -} - -antlrcpp::Any ParseTreeVisitor::visitColumnExprTupleAccess(ClickHouseParser::ColumnExprTupleAccessContext *ctx) -{ - auto name = std::make_shared("tupleElement"); - auto args = std::make_shared(); - - args->push(visit(ctx->columnExpr())); - args->push(ColumnExpr::createLiteral(Literal::createNumber(ctx->DECIMAL_LITERAL()))); - - return ColumnExpr::createFunction(name, nullptr, args); -} - -antlrcpp::Any ParseTreeVisitor::visitColumnLambdaExpr(ClickHouseParser::ColumnLambdaExprContext *ctx) -{ - auto params = std::make_shared>(); - for (auto * id : ctx->identifier()) params->push(visit(id)); - return ColumnExpr::createLambda(params, visit(ctx->columnExpr())); -} - -antlrcpp::Any ParseTreeVisitor::visitColumnsExprAsterisk(ClickHouseParser::ColumnsExprAsteriskContext *ctx) -{ - auto table = ctx->tableIdentifier() ? 
visit(ctx->tableIdentifier()).as>() : nullptr; - return ColumnExpr::createAsterisk(table, false); -} - -antlrcpp::Any ParseTreeVisitor::visitColumnsExprSubquery(ClickHouseParser::ColumnsExprSubqueryContext *ctx) -{ - return ColumnExpr::createSubquery(visit(ctx->selectUnionStmt()), false); -} - -antlrcpp::Any ParseTreeVisitor::visitColumnsExprColumn(ClickHouseParser::ColumnsExprColumnContext *ctx) -{ - return visit(ctx->columnExpr()); -} - -} diff --git a/src/Parsers/New/AST/ColumnExpr.h b/src/Parsers/New/AST/ColumnExpr.h deleted file mode 100644 index 6de707d6b2d..00000000000 --- a/src/Parsers/New/AST/ColumnExpr.h +++ /dev/null @@ -1,82 +0,0 @@ -#pragma once - -#include -#include - - -namespace DB::AST -{ - -class ColumnExpr : public INode -{ - public: - static PtrTo createAlias(PtrTo expr, PtrTo alias); - static PtrTo createAsterisk(PtrTo identifier, bool single_column); - static PtrTo createFunction(PtrTo name, PtrTo params, PtrTo args); - static PtrTo createIdentifier(PtrTo identifier); - static PtrTo createLambda(PtrTo> params, PtrTo expr); - static PtrTo createLiteral(PtrTo literal); - static PtrTo createSubquery(PtrTo query, bool scalar); - - enum class ExprType - { - ALIAS, - ASTERISK, - FUNCTION, - IDENTIFIER, - LAMBDA, - LITERAL, - SUBQUERY, - }; - - auto getType() const { return expr_type; }; - - // FUNCTION - auto getFunctionName() const { return get(NAME)->getName(); } - auto argumentsBegin() const { return has(ARGS) ? get(ARGS)->begin() : end(); } - auto argumentsEnd() const { return has(ARGS) ? get(ARGS)->end() : end(); } - - // LITERAL - auto getLiteral() const { return std::static_pointer_cast(get(LITERAL)); } - - ASTPtr convertToOld() const override; - String toString() const override; - - private: - enum ChildIndex : UInt8 - { - // ALIAS - EXPR = 0, // ColumnExpr - ALIAS = 1, // Identifier - - // ASTERISK - TABLE = 0, // TableIdentifier (optional) - - // IDENTIFIER - IDENTIFIER = 0, // ColumnIdentifier - - // FUNCTION - NAME = 0, // Identifier - PARAMS = 1, // ColumnParamList (optional) - ARGS = 2, // ColumnExprList (optional) - - // LAMBDA - LAMBDA_ARGS = 0, - LAMBDA_EXPR = 1, - - // LITERAL - LITERAL = 0, - - // SUBQUERY - SUBQUERY = 0, - }; - - const ExprType expr_type; - bool expect_single_column = false; - - ColumnExpr(ExprType type, PtrList exprs); - - String dumpInfo() const override; -}; - -} diff --git a/src/Parsers/New/AST/ColumnTypeExpr.cpp b/src/Parsers/New/AST/ColumnTypeExpr.cpp deleted file mode 100644 index a2947cf0f63..00000000000 --- a/src/Parsers/New/AST/ColumnTypeExpr.cpp +++ /dev/null @@ -1,166 +0,0 @@ -#include - -#include -#include -#include -#include -#include - - -namespace DB::AST -{ - -EnumValue::EnumValue(PtrTo name, PtrTo value) : INode{name, value} -{ -} - -ASTPtr EnumValue::convertToOld() const -{ - auto func = std::make_shared(); - - func->name = "equals"; - func->arguments = std::make_shared(); - func->arguments->children.push_back(get(NAME)->convertToOld()); - func->arguments->children.push_back(get(VALUE)->convertToOld()); - func->children.push_back(func->arguments); - - return func; -} - -String EnumValue::toString() const -{ - return fmt::format("{} = {}", get(NAME)->toString(), get(VALUE)->toString()); -} - -// static -PtrTo ColumnTypeExpr::createSimple(PtrTo identifier) -{ - return PtrTo(new ColumnTypeExpr(ExprType::SIMPLE, {identifier})); -} - -// static -PtrTo ColumnTypeExpr::createNamed(PtrTo identifier, PtrTo type) -{ - return PtrTo(new ColumnTypeExpr(ExprType::NAMED, {identifier, type})); -} - -// static -PtrTo 
ColumnTypeExpr::createComplex(PtrTo identifier, PtrTo list) -{ - return PtrTo(new ColumnTypeExpr(ExprType::COMPLEX, {identifier, list})); -} - -// static -PtrTo ColumnTypeExpr::createEnum(PtrTo identifier, PtrTo list) -{ - return PtrTo(new ColumnTypeExpr(ExprType::ENUM, {identifier, list})); -} - -// static -PtrTo ColumnTypeExpr::createParam(PtrTo identifier, PtrTo list) -{ - return PtrTo(new ColumnTypeExpr(ExprType::PARAM, {identifier, list})); -} - -// static -PtrTo ColumnTypeExpr::createNested(PtrTo identifier, PtrTo list) -{ - // TODO: assert that |list| must contain only expressions of NAMED type - return PtrTo(new ColumnTypeExpr(ExprType::NESTED, {identifier, list})); -} - -ColumnTypeExpr::ColumnTypeExpr(ExprType type, PtrList exprs) : INode(exprs), expr_type(type) -{ -} - -ASTPtr ColumnTypeExpr::convertToOld() const -{ - if (expr_type == ExprType::NAMED) - { - auto pair = std::make_shared(); - - pair->name = get(NAME)->getName(); - pair->type = get(TYPE)->convertToOld(); - pair->children.push_back(pair->type); - - return pair; - } - - auto func = std::make_shared(); - - func->name = get(NAME)->getName(); - func->no_empty_args = true; - if (expr_type != ExprType::SIMPLE && has(LIST)) - { - func->arguments = get(LIST)->convertToOld(); - func->children.push_back(func->arguments); - } - - return func; -} - -String ColumnTypeExpr::toString() const -{ - switch(expr_type) - { - case ExprType::SIMPLE: - return get(NAME)->toString(); - case ExprType::NAMED: - return get(NAME)->toString() + " " + get(TYPE)->toString(); - case ExprType::COMPLEX: - case ExprType::ENUM: - case ExprType::PARAM: - case ExprType::NESTED: - return get(NAME)->toString() + "(" + (has(LIST) ? get(LIST)->toString() : "") + ")"; - } - __builtin_unreachable(); -} - -} - -namespace DB -{ - -using namespace AST; - -antlrcpp::Any ParseTreeVisitor::visitColumnTypeExprSimple(ClickHouseParser::ColumnTypeExprSimpleContext *ctx) -{ - return ColumnTypeExpr::createSimple(visit(ctx->identifier())); -} - -antlrcpp::Any ParseTreeVisitor::visitColumnTypeExprParam(ClickHouseParser::ColumnTypeExprParamContext *ctx) -{ - auto list = ctx->columnExprList() ? 
visit(ctx->columnExprList()).as>() : nullptr; - return ColumnTypeExpr::createParam(visit(ctx->identifier()), list); -} - -antlrcpp::Any ParseTreeVisitor::visitColumnTypeExprEnum(ClickHouseParser::ColumnTypeExprEnumContext *ctx) -{ - auto list = std::make_shared(); - for (auto * value : ctx->enumValue()) list->push(visit(value)); - return ColumnTypeExpr::createEnum(visit(ctx->identifier()), list); -} - -antlrcpp::Any ParseTreeVisitor::visitColumnTypeExprComplex(ClickHouseParser::ColumnTypeExprComplexContext *ctx) -{ - auto list = std::make_shared(); - for (auto * expr : ctx->columnTypeExpr()) list->push(visit(expr)); - return ColumnTypeExpr::createComplex(visit(ctx->identifier()), list); -} - -antlrcpp::Any ParseTreeVisitor::visitColumnTypeExprNested(ClickHouseParser::ColumnTypeExprNestedContext *ctx) -{ - auto list = std::make_shared(); - - for (size_t i = 0; i < ctx->columnTypeExpr().size(); ++i) - list->push(ColumnTypeExpr::createNamed(visit(ctx->identifier(i + 1)), visit(ctx->columnTypeExpr(i)))); - - return ColumnTypeExpr::createNested(visit(ctx->identifier(0)), list); -} - -antlrcpp::Any ParseTreeVisitor::visitEnumValue(ClickHouseParser::EnumValueContext *ctx) -{ - return std::make_shared(Literal::createString(ctx->STRING_LITERAL()), visit(ctx->numberLiteral())); -} - -} diff --git a/src/Parsers/New/AST/ColumnTypeExpr.h b/src/Parsers/New/AST/ColumnTypeExpr.h deleted file mode 100644 index 8c4f3c697e7..00000000000 --- a/src/Parsers/New/AST/ColumnTypeExpr.h +++ /dev/null @@ -1,62 +0,0 @@ -#pragma once - -#include - -#include - - -namespace DB::AST -{ - -class EnumValue : public INode -{ - public: - EnumValue(PtrTo name, PtrTo value); - - ASTPtr convertToOld() const override; - String toString() const override; - - private: - enum ChildIndex : UInt8 - { - NAME = 0, // StringLiteral - VALUE = 1, // NumberLiteral - }; -}; - -class ColumnTypeExpr : public INode -{ - public: - static PtrTo createSimple(PtrTo identifier); - static PtrTo createNamed(PtrTo identifier, PtrTo type); - static PtrTo createComplex(PtrTo identifier, PtrTo list); - static PtrTo createEnum(PtrTo identifier, PtrTo list); - static PtrTo createParam(PtrTo identifier, PtrTo list); - static PtrTo createNested(PtrTo identifier, PtrTo list); - - ASTPtr convertToOld() const override; - String toString() const override; - - private: - enum class ExprType - { - SIMPLE, - NAMED, - COMPLEX, - ENUM, - PARAM, - NESTED, - }; - enum ChildIndex : UInt8 - { - NAME = 0, // Identifier - TYPE = 1, // ColumnTypeExpr - LIST = 1, // depends on |expr_type| - }; - - ExprType expr_type; - - ColumnTypeExpr(ExprType type, PtrList exprs); -}; - -} diff --git a/src/Parsers/New/AST/CreateDatabaseQuery.cpp b/src/Parsers/New/AST/CreateDatabaseQuery.cpp deleted file mode 100644 index 9f6c79d592f..00000000000 --- a/src/Parsers/New/AST/CreateDatabaseQuery.cpp +++ /dev/null @@ -1,51 +0,0 @@ -#include - -#include -#include -#include -#include -#include - - -namespace DB::AST -{ - -CreateDatabaseQuery::CreateDatabaseQuery( - PtrTo cluster, bool if_not_exists_, PtrTo identifier, PtrTo expr) - : DDLQuery(cluster, {identifier, expr}), if_not_exists(if_not_exists_) -{ -} - -ASTPtr CreateDatabaseQuery::convertToOld() const -{ - auto query = std::make_shared(); - - query->if_not_exists = if_not_exists; - query->database = get(NAME)->getName(); - query->cluster = cluster_name; - if (has(ENGINE)) - { - auto engine = std::make_shared(); - engine->set(engine->engine, get(ENGINE)->convertToOld()); - query->set(query->storage, engine); - } - // TODO: query->uuid - - 
return query; -} - -} - -namespace DB -{ - -using namespace AST; - -antlrcpp::Any ParseTreeVisitor::visitCreateDatabaseStmt(ClickHouseParser::CreateDatabaseStmtContext *ctx) -{ - auto engine = ctx->engineExpr() ? visit(ctx->engineExpr()).as>() : nullptr; - auto cluster = ctx->clusterClause() ? visit(ctx->clusterClause()).as>() : nullptr; - return std::make_shared(cluster, !!ctx->IF(), visit(ctx->databaseIdentifier()), engine); -} - -} diff --git a/src/Parsers/New/AST/CreateDatabaseQuery.h b/src/Parsers/New/AST/CreateDatabaseQuery.h deleted file mode 100644 index 3de16c3dc83..00000000000 --- a/src/Parsers/New/AST/CreateDatabaseQuery.h +++ /dev/null @@ -1,26 +0,0 @@ -#pragma once - -#include - - -namespace DB::AST -{ - -class CreateDatabaseQuery: public DDLQuery -{ - public: - CreateDatabaseQuery(PtrTo cluster, bool if_not_exists, PtrTo identifier, PtrTo expr); - - ASTPtr convertToOld() const override; - - private: - enum ChildIndex : UInt8 - { - NAME = 0, // DatabaseIdentifier - ENGINE = 1, // EngineExpr (optional) - }; - - const bool if_not_exists; -}; - -} diff --git a/src/Parsers/New/AST/CreateDictionaryQuery.cpp b/src/Parsers/New/AST/CreateDictionaryQuery.cpp deleted file mode 100644 index 75413df495b..00000000000 --- a/src/Parsers/New/AST/CreateDictionaryQuery.cpp +++ /dev/null @@ -1,361 +0,0 @@ -#include - -#include -#include -#include -#include -#include -#include -#include -#include - -#include - - -namespace DB::ErrorCodes -{ - extern const int SYNTAX_ERROR; -} - -namespace DB::AST -{ - -// DictionaryAttributeExpr - -DictionaryAttributeExpr::DictionaryAttributeExpr(PtrTo identifier, PtrTo type) : INode(MAX_INDEX) -{ - set(NAME, identifier); - set(TYPE, type); -} - -void DictionaryAttributeExpr::setDefaultClause(PtrTo literal) -{ - set(DEFAULT, literal); -} - -void DictionaryAttributeExpr::setExpressionClause(PtrTo expr) -{ - set(EXPRESSION, expr); -} - -ASTPtr DictionaryAttributeExpr::convertToOld() const -{ - auto expr = std::make_shared(); - - expr->name = get(NAME)->getName(); - if (has(TYPE)) - { - expr->type = get(TYPE)->convertToOld(); - expr->children.push_back(expr->type); - } - if (has(DEFAULT)) - { - expr->default_value = get(DEFAULT)->convertToOld(); - expr->children.push_back(expr->default_value); - } - if (has(EXPRESSION)) - { - expr->expression = get(EXPRESSION)->convertToOld(); - expr->children.push_back(expr->expression); - } - expr->hierarchical = hierarchical; - expr->injective = injective; - expr->is_object_id = is_object_id; - - return expr; -} - -// DictionaryArgExpr - -DictionaryArgExpr::DictionaryArgExpr(PtrTo identifier, PtrTo expr) : INode{identifier, expr} -{ - if (expr->getType() != ColumnExpr::ExprType::LITERAL && expr->getType() != ColumnExpr::ExprType::IDENTIFIER - && expr->getType() != ColumnExpr::ExprType::FUNCTION) - throw DB::Exception(ErrorCodes::SYNTAX_ERROR, "Expected literal, identifier or function"); -} - -ASTPtr DictionaryArgExpr::convertToOld() const -{ - auto expr = std::make_shared(false); // FIXME: always true? - - // TODO: probably there are more variants to parse. - - expr->first = Poco::toLower(get(KEY)->getName()); - expr->set(expr->second, get(VALUE)->convertToOld()); - - return expr; -} - -// SourceClause - -SourceClause::SourceClause(PtrTo identifier, PtrTo list) : INode{identifier, list} -{ -} - -ASTPtr SourceClause::convertToOld() const -{ - auto clause = std::make_shared(true); // FIXME: always true? 
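// [Editor's aside — illustration only, not part of the original patch.]
// SourceClause lowers SOURCE(NAME(key1 value1 key2 value2 ...)) into a
// function-like old-AST node whose name and argument keys are lower-cased
// (the surrounding code uses Poco::toLower; this sketch uses only the
// standard library, and the KeyValue type is a hypothetical stand-in):

#include <algorithm>
#include <cctype>
#include <string>
#include <utility>
#include <vector>

using KeyValue = std::pair<std::string, std::string>;

std::string toLowerCopy(std::string s)
{
    std::transform(s.begin(), s.end(), s.begin(),
                   [](unsigned char c) { return static_cast<char>(std::tolower(c)); });
    return s;
}

/// SOURCE(CLICKHOUSE(HOST 'localhost' PORT '9000')) ->
///   name "clickhouse", args {{"host", "'localhost'"}, {"port", "'9000'"}}
std::pair<std::string, std::vector<KeyValue>> lowerSourceClause(
    const std::string & name, std::vector<KeyValue> args)
{
    for (auto & kv : args)
        kv.first = toLowerCopy(kv.first);
    return {toLowerCopy(name), std::move(args)};
}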
- - clause->name = Poco::toLower(get(NAME)->getName()); - if (has(ARGS)) - { - clause->elements = get(ARGS)->convertToOld(); - clause->children.push_back(clause->elements); - } - - return clause; -} - -// LifetimeClause - -LifetimeClause::LifetimeClause(PtrTo max, PtrTo min) : INode{max, min} -{ -} - -ASTPtr LifetimeClause::convertToOld() const -{ - auto clause = std::make_shared(); - - clause->max_sec = get(MAX)->convertToOld()->as()->value.get(); - if (has(MIN)) clause->min_sec = get(MIN)->convertToOld()->as()->value.get(); - - return clause; -} - -// LayoutClause - -LayoutClause::LayoutClause(PtrTo identifier, PtrTo list) : INode{identifier, list} -{ -} - -ASTPtr LayoutClause::convertToOld() const -{ - auto clause = std::make_shared(); - - clause->layout_type = Poco::toLower(get(NAME)->getName()); - clause->has_brackets = true; // FIXME: maybe not? - if (has(ARGS)) clause->set(clause->parameters, get(ARGS)->convertToOld()); - - return clause; -} - -// RangeClause - -RangeClause::RangeClause(PtrTo max, PtrTo min) : INode{max, min} -{ -} - -ASTPtr RangeClause::convertToOld() const -{ - auto clause = std::make_shared(); - - clause->max_attr_name = get(MAX)->getName(); - clause->min_attr_name = get(MIN)->getName(); - - return clause; -} - -// DictionarySettingsClause - -DictionarySettingsClause::DictionarySettingsClause(PtrTo list) : INode{list} -{ -} - -ASTPtr DictionarySettingsClause::convertToOld() const -{ - auto clause = std::make_shared(); - - for (const auto & child : get(LIST)->as()) - { - const auto * setting = child->as(); - clause->changes.emplace_back(setting->getName()->getName(), setting->getValue()->convertToOld()->as()->value); - } - - return clause; -} - -// DictionaryEngineClause - -DictionaryEngineClause::DictionaryEngineClause(PtrTo clause) : INode(MAX_INDEX) -{ - set(PRIMARY_KEY, clause); -} - -void DictionaryEngineClause::setSourceClause(PtrTo clause) -{ - set(SOURCE, clause); -} - -void DictionaryEngineClause::setLifetimeClause(PtrTo clause) -{ - set(LIFETIME, clause); -} - -void DictionaryEngineClause::setLayoutClause(PtrTo clause) -{ - set(LAYOUT, clause); -} - -void DictionaryEngineClause::setRangeClause(PtrTo clause) -{ - set(RANGE, clause); -} - -void DictionaryEngineClause::setSettingsClause(PtrTo clause) -{ - set(SETTINGS, clause); -} - -ASTPtr DictionaryEngineClause::convertToOld() const -{ - auto clause = std::make_shared(); - - if (has(PRIMARY_KEY)) clause->set(clause->primary_key, get(PRIMARY_KEY)->convertToOld()); - if (has(SOURCE)) clause->set(clause->source, get(SOURCE)->convertToOld()); - if (has(LIFETIME)) clause->set(clause->lifetime, get(LIFETIME)->convertToOld()); - if (has(LAYOUT)) clause->set(clause->layout, get(LAYOUT)->convertToOld()); - if (has(RANGE)) clause->set(clause->range, get(RANGE)->convertToOld()); - if (has(SETTINGS)) clause->set(clause->dict_settings, get(SETTINGS)->convertToOld()); - - return clause; -} - -// CreateDictionaryQuery - -CreateDictionaryQuery::CreateDictionaryQuery( - PtrTo cluster, - bool attach_, - bool if_not_exists_, - PtrTo identifier, - PtrTo uuid, - PtrTo schema, - PtrTo engine) - : DDLQuery(cluster, {identifier, uuid, schema, engine}), attach(attach_), if_not_exists(if_not_exists_) -{ -} - -ASTPtr CreateDictionaryQuery::convertToOld() const -{ - auto query = std::make_shared(); - - { - auto table = get(NAME)->convertToOld(); - query->database = table->as()->getDatabaseName(); - query->table = table->as()->shortName(); - query->uuid = has(UUID) ? 
parseFromString(get(UUID)->convertToOld()->as()->value.get()) - : table->as()->uuid; - } - - query->cluster = cluster_name; - - query->is_dictionary = true; - query->attach = attach; - query->if_not_exists = if_not_exists; - - query->set(query->dictionary_attributes_list, get(SCHEMA)->convertToOld()); - query->set(query->dictionary, get(ENGINE)->convertToOld()); - - return query; -} - -} - -namespace DB -{ - -using namespace AST; - -antlrcpp::Any ParseTreeVisitor::visitCreateDictionaryStmt(ClickHouseParser::CreateDictionaryStmtContext *ctx) -{ - auto uuid = ctx->uuidClause() ? visit(ctx->uuidClause()).as>() : nullptr; - auto cluster = ctx->clusterClause() ? visit(ctx->clusterClause()).as>() : nullptr; - auto schema = ctx->dictionarySchemaClause() ? visit(ctx->dictionarySchemaClause()).as>() : nullptr; - auto engine = ctx->dictionaryEngineClause() ? visit(ctx->dictionaryEngineClause()).as>() : nullptr; - return std::make_shared( - cluster, !!ctx->ATTACH(), !!ctx->IF(), visit(ctx->tableIdentifier()), uuid, schema, engine); -} - -antlrcpp::Any ParseTreeVisitor::visitDictionaryArgExpr(ClickHouseParser::DictionaryArgExprContext *ctx) -{ - PtrTo expr; - if (ctx->literal()) expr = ColumnExpr::createLiteral(visit(ctx->literal())); - else if (ctx->LPAREN()) expr = ColumnExpr::createFunction(visit(ctx->identifier(1)), nullptr, nullptr); - else expr = ColumnExpr::createIdentifier(visit(ctx->identifier(1))); - return std::make_shared(visit(ctx->identifier(0)), expr); -} - -antlrcpp::Any ParseTreeVisitor::visitDictionaryAttrDfnt(ClickHouseParser::DictionaryAttrDfntContext *ctx) -{ - auto expr = std::make_shared(visit(ctx->identifier()), visit(ctx->columnTypeExpr())); - if (!ctx->DEFAULT().empty()) expr->setDefaultClause(visit(ctx->literal(0))); - if (!ctx->EXPRESSION().empty()) expr->setExpressionClause(visit(ctx->columnExpr(0))); - if (!ctx->HIERARCHICAL().empty()) expr->setHierarchicalFlag(); - if (!ctx->INJECTIVE().empty()) expr->setInjectiveFlag(); - if (!ctx->IS_OBJECT_ID().empty()) expr->setIsObjectIdFlag(); - return expr; -} - -antlrcpp::Any ParseTreeVisitor::visitDictionaryEngineClause(ClickHouseParser::DictionaryEngineClauseContext *ctx) -{ - auto primary_key - = ctx->dictionaryPrimaryKeyClause() ? 
visit(ctx->dictionaryPrimaryKeyClause()).as>() : nullptr; - auto clause = std::make_shared(primary_key); - if (!ctx->sourceClause().empty()) clause->setSourceClause(visit(ctx->sourceClause(0))); - if (!ctx->lifetimeClause().empty()) clause->setLifetimeClause(visit(ctx->lifetimeClause(0))); - if (!ctx->layoutClause().empty()) clause->setLayoutClause(visit(ctx->layoutClause(0))); - if (!ctx->rangeClause().empty()) clause->setRangeClause(visit(ctx->rangeClause(0))); - if (!ctx->dictionarySettingsClause().empty()) clause->setSettingsClause(visit(ctx->dictionarySettingsClause(0))); - return clause; -} - -antlrcpp::Any ParseTreeVisitor::visitDictionaryPrimaryKeyClause(ClickHouseParser::DictionaryPrimaryKeyClauseContext *ctx) -{ - return std::make_shared(visit(ctx->columnExprList()).as>()); -} - -antlrcpp::Any ParseTreeVisitor::visitDictionarySchemaClause(ClickHouseParser::DictionarySchemaClauseContext *ctx) -{ - auto list = std::make_shared(); - for (auto * attr : ctx->dictionaryAttrDfnt()) list->push(visit(attr)); - return std::make_shared(list); -} - -antlrcpp::Any ParseTreeVisitor::visitDictionarySettingsClause(ClickHouseParser::DictionarySettingsClauseContext *ctx) -{ - return std::make_shared(visit(ctx->settingExprList()).as>()); -} - -antlrcpp::Any ParseTreeVisitor::visitLayoutClause(ClickHouseParser::LayoutClauseContext *ctx) -{ - auto list = ctx->dictionaryArgExpr().empty() ? nullptr : std::make_shared(); - for (auto * arg : ctx->dictionaryArgExpr()) list->push(visit(arg)); - return std::make_shared(visit(ctx->identifier()), list); -} - -antlrcpp::Any ParseTreeVisitor::visitLifetimeClause(ClickHouseParser::LifetimeClauseContext *ctx) -{ - if (ctx->DECIMAL_LITERAL().size() == 1) return std::make_shared(Literal::createNumber(ctx->DECIMAL_LITERAL(0))); - if (ctx->MAX()->getSymbol()->getTokenIndex() < ctx->MIN()->getSymbol()->getTokenIndex()) - return std::make_shared( - Literal::createNumber(ctx->DECIMAL_LITERAL(0)), Literal::createNumber(ctx->DECIMAL_LITERAL(1))); - else - return std::make_shared( - Literal::createNumber(ctx->DECIMAL_LITERAL(1)), Literal::createNumber(ctx->DECIMAL_LITERAL(0))); -} - -antlrcpp::Any ParseTreeVisitor::visitRangeClause(ClickHouseParser::RangeClauseContext *ctx) -{ - if (ctx->MAX()->getSymbol()->getTokenIndex() < ctx->MIN()->getSymbol()->getTokenIndex()) - return std::make_shared(visit(ctx->identifier(0)), visit(ctx->identifier(1))); - else - return std::make_shared(visit(ctx->identifier(1)), visit(ctx->identifier(0))); -} - -antlrcpp::Any ParseTreeVisitor::visitSourceClause(ClickHouseParser::SourceClauseContext *ctx) -{ - auto list = ctx->dictionaryArgExpr().empty() ? 
nullptr : std::make_shared(); - for (auto * arg : ctx->dictionaryArgExpr()) list->push(visit(arg)); - return std::make_shared(visit(ctx->identifier()), list); -} - -} diff --git a/src/Parsers/New/AST/CreateDictionaryQuery.h b/src/Parsers/New/AST/CreateDictionaryQuery.h deleted file mode 100644 index 3c5be3f391c..00000000000 --- a/src/Parsers/New/AST/CreateDictionaryQuery.h +++ /dev/null @@ -1,183 +0,0 @@ -#pragma once - -#include - - -namespace DB::AST -{ - -class DictionaryAttributeExpr : public INode -{ - public: - DictionaryAttributeExpr(PtrTo identifier, PtrTo type); - - void setDefaultClause(PtrTo literal); - void setExpressionClause(PtrTo expr); - - void setHierarchicalFlag() { hierarchical = true; } - void setInjectiveFlag() { injective = true; } - void setIsObjectIdFlag() { is_object_id = true; } - - ASTPtr convertToOld() const override; - - private: - enum ChildIndex : UInt8 - { - NAME = 0, // Identifier - TYPE, // ColumnTypeExpr - DEFAULT, // Literal (optional) - EXPRESSION, // ColumnExpr (optional) - - MAX_INDEX, - }; - - bool hierarchical = false, injective = false, is_object_id = false; -}; - -using DictionaryPrimaryKeyClause = SimpleClause; - -using DictionarySchemaClause = SimpleClause; - -class DictionaryArgExpr : public INode -{ - public: - explicit DictionaryArgExpr(PtrTo identifier, PtrTo expr); - - ASTPtr convertToOld() const override; - - private: - enum ChildIndex : UInt8 - { - KEY = 0, // Identifier - VALUE, // ColumnExpr: literal, identifier or function - }; -}; - -class SourceClause : public INode -{ - public: - SourceClause(PtrTo identifier, PtrTo list); - - ASTPtr convertToOld() const override; - - private: - enum ChildIndex : UInt8 - { - NAME = 0, // Identifier - ARGS = 1, // DictionaryArgList (optional) - }; -}; - -class LifetimeClause : public INode -{ - public: - explicit LifetimeClause(PtrTo max, PtrTo min = nullptr); - - ASTPtr convertToOld() const override; - - private: - enum ChildIndex : UInt8 - { - MAX = 0, // NumberLiteral - MIN, // NumberLiteral (optional) - }; -}; - -class LayoutClause : public INode -{ - public: - LayoutClause(PtrTo identifier, PtrTo list); - - ASTPtr convertToOld() const override; - - private: - enum ChildIndex : UInt8 - { - NAME = 0, // Identifier - ARGS = 1, // DictionaryArgList (optional) - }; -}; - -class RangeClause : public INode -{ - public: - RangeClause(PtrTo max, PtrTo min); - - ASTPtr convertToOld() const override; - - private: - enum ChildIndex : UInt8 - { - MAX = 0, // Identifier - MIN, // Identifier - }; -}; - -class DictionarySettingsClause : public INode -{ - public: - explicit DictionarySettingsClause(PtrTo list); - - ASTPtr convertToOld() const override; - - private: - enum ChildIndex : UInt8 - { - LIST = 0, // SettingExprList - }; -}; - -class DictionaryEngineClause : public INode -{ - public: - explicit DictionaryEngineClause(PtrTo clause); - - void setSourceClause(PtrTo clause); - void setLifetimeClause(PtrTo clause); - void setLayoutClause(PtrTo clause); - void setRangeClause(PtrTo clause); - void setSettingsClause(PtrTo clause); - - ASTPtr convertToOld() const override; - - private: - enum ChildIndex : UInt8 - { - PRIMARY_KEY = 0, // DictionaryPrimaryKeyClause - SOURCE, // SourceClause (optional) - LIFETIME, // LifetimeClause (optional) - LAYOUT, // LayoutClause (optional) - RANGE, // RangeClause (optional) - SETTINGS, // DictionarySettingsClause (optional) - - MAX_INDEX, - }; -}; - -class CreateDictionaryQuery : public DDLQuery -{ - public: - CreateDictionaryQuery( - PtrTo cluster, - bool attach, - bool 
if_not_exists, - PtrTo identifier, - PtrTo uuid, - PtrTo schema, - PtrTo engine); - - ASTPtr convertToOld() const override; - - private: - enum ChildIndex : UInt8 - { - NAME = 0, // TableIdentifier - UUID, // UUIDClause (optional) - SCHEMA, // DictionarySchemaClause - ENGINE, // DictionaryEngineClause - }; - - const bool attach, if_not_exists; -}; - -} diff --git a/src/Parsers/New/AST/CreateLiveViewQuery.cpp b/src/Parsers/New/AST/CreateLiveViewQuery.cpp deleted file mode 100644 index 18501884f02..00000000000 --- a/src/Parsers/New/AST/CreateLiveViewQuery.cpp +++ /dev/null @@ -1,86 +0,0 @@ -#include - -#include -#include -#include -#include -#include - - -namespace DB::AST -{ - -CreateLiveViewQuery::CreateLiveViewQuery( - PtrTo cluster, - bool attach_, - bool if_not_exists_, - PtrTo identifier, - PtrTo uuid, - PtrTo timeout, - PtrTo destination, - PtrTo schema, - PtrTo query) - : DDLQuery(cluster, {identifier, uuid, timeout, destination, schema, query}), attach(attach_), if_not_exists(if_not_exists_) -{ -} - -ASTPtr CreateLiveViewQuery::convertToOld() const -{ - auto query = std::make_shared(); - - { - auto table = std::static_pointer_cast(get(NAME)->convertToOld()); - query->database = table->getDatabaseName(); - query->table = table->shortName(); - query->uuid = has(UUID) ? parseFromString(get(UUID)->convertToOld()->as()->value.get()) : table->uuid; - } - - if (has(TIMEOUT)) - query->live_view_timeout.emplace(get(TIMEOUT)->convertToOld()->as()->value.get()); - - if (has(DESTINATION)) - query->to_table_id = get(DESTINATION)->convertToOld()->as()->getTableId(); - - if (has(SCHEMA)) - { - assert(get(SCHEMA)->getType() == TableSchemaClause::ClauseType::DESCRIPTION); - query->set(query->columns_list, get(SCHEMA)->convertToOld()); - } - - query->attach = attach; - query->if_not_exists = if_not_exists; - query->is_live_view = true; - query->set(query->select, get(SUBQUERY)->convertToOld()); - query->cluster = cluster_name; - - return query; -} - -} - -namespace DB -{ - -using namespace AST; - -antlrcpp::Any ParseTreeVisitor::visitCreateLiveViewStmt(ClickHouseParser::CreateLiveViewStmtContext *ctx) -{ - auto uuid = ctx->uuidClause() ? visit(ctx->uuidClause()).as>() : nullptr; - auto cluster = ctx->clusterClause() ? visit(ctx->clusterClause()).as>() : nullptr; - auto timeout = ctx->DECIMAL_LITERAL() ? Literal::createNumber(ctx->DECIMAL_LITERAL()) : nullptr; - auto destination = ctx->destinationClause() ? visit(ctx->destinationClause()).as>() : nullptr; - auto schema = ctx->tableSchemaClause() ? 
visit(ctx->tableSchemaClause()).as>() : nullptr; - if (ctx->TIMEOUT() && !timeout) timeout = Literal::createNumber(std::to_string(DEFAULT_TEMPORARY_LIVE_VIEW_TIMEOUT_SEC)); - return std::make_shared( - cluster, - !!ctx->ATTACH(), - !!ctx->IF(), - visit(ctx->tableIdentifier()), - uuid, - timeout, - destination, - schema, - visit(ctx->subqueryClause())); -} - -} diff --git a/src/Parsers/New/AST/CreateLiveViewQuery.h b/src/Parsers/New/AST/CreateLiveViewQuery.h deleted file mode 100644 index dd6fe8a2528..00000000000 --- a/src/Parsers/New/AST/CreateLiveViewQuery.h +++ /dev/null @@ -1,39 +0,0 @@ -#pragma once - -#include - - -namespace DB::AST -{ - -class CreateLiveViewQuery : public DDLQuery -{ - public: - CreateLiveViewQuery( - PtrTo cluster, - bool attach, - bool if_not_exists, - PtrTo identifier, - PtrTo uuid, - PtrTo timeout, - PtrTo destination, - PtrTo schema, - PtrTo query); - - ASTPtr convertToOld() const override; - - private: - enum ChildIndex : UInt8 - { - NAME = 0, // TableIdentifier - UUID, // UUIDClause (optional) - TIMEOUT, // NumberLiteral (optional) - DESTINATION, // DestinationClause (optional) - SCHEMA, // TableSchemaClause (optional) - SUBQUERY, // SelectUnionQuery - }; - - const bool attach, if_not_exists; -}; - -} diff --git a/src/Parsers/New/AST/CreateMaterializedViewQuery.cpp b/src/Parsers/New/AST/CreateMaterializedViewQuery.cpp deleted file mode 100644 index 2b8a1b18b5f..00000000000 --- a/src/Parsers/New/AST/CreateMaterializedViewQuery.cpp +++ /dev/null @@ -1,99 +0,0 @@ -#include - -#include -#include -#include -#include -#include -#include -#include - - -namespace DB::AST -{ -CreateMaterializedViewQuery::CreateMaterializedViewQuery( - PtrTo cluster, - bool attach_, - bool if_not_exists_, - bool populate_, - PtrTo identifier, - PtrTo uuid, - PtrTo schema, - PtrTo destination, - PtrTo engine, - PtrTo query) - : DDLQuery(cluster, {identifier, uuid, schema, destination, engine, query}) - , attach(attach_) - , if_not_exists(if_not_exists_) - , populate(populate_) -{ - assert(!destination != !engine); -} - -ASTPtr CreateMaterializedViewQuery::convertToOld() const -{ - auto query = std::make_shared(); - - { - auto table = std::static_pointer_cast(get(NAME)->convertToOld()); - query->database = table->getDatabaseName(); - query->table = table->shortName(); - query->uuid = has(UUID) ? parseFromString(get(UUID)->convertToOld()->as()->value.get()) : table->uuid; - } - - if (has(DESTINATION)) - query->to_table_id = get(DESTINATION)->convertToOld()->as()->getTableId(); - else if (has(ENGINE)) - { - query->set(query->storage, get(ENGINE)->convertToOld()); - query->is_populate = populate; - } - - if (has(SCHEMA)) - { - assert(get(SCHEMA)->getType() == TableSchemaClause::ClauseType::DESCRIPTION); - query->set(query->columns_list, get(SCHEMA)->convertToOld()); - } - - query->attach = attach; - query->if_not_exists = if_not_exists; - query->is_materialized_view = true; - query->set(query->select, get(SUBQUERY)->convertToOld()); - query->cluster = cluster_name; - - return query; -} - -} - -namespace DB -{ - -using namespace AST; - -antlrcpp::Any ParseTreeVisitor::visitCreateMaterializedViewStmt(ClickHouseParser::CreateMaterializedViewStmtContext *ctx) -{ - auto uuid = ctx->uuidClause() ? visit(ctx->uuidClause()).as>() : nullptr; - auto cluster = ctx->clusterClause() ? visit(ctx->clusterClause()).as>() : nullptr; - auto schema = ctx->tableSchemaClause() ? visit(ctx->tableSchemaClause()).as>() : nullptr; - auto engine = ctx->engineClause() ? 
visit(ctx->engineClause()).as>() : nullptr; - auto destination = ctx->destinationClause() ? visit(ctx->destinationClause()).as>() : nullptr; - return std::make_shared( - cluster, - !!ctx->ATTACH(), - !!ctx->IF(), - !!ctx->POPULATE(), - visit(ctx->tableIdentifier()), - uuid, - schema, - destination, - engine, - visit(ctx->subqueryClause())); -} - -antlrcpp::Any ParseTreeVisitor::visitDestinationClause(ClickHouseParser::DestinationClauseContext *ctx) -{ - return std::make_shared(visit(ctx->tableIdentifier()).as>()); -} - -} diff --git a/src/Parsers/New/AST/CreateMaterializedViewQuery.h b/src/Parsers/New/AST/CreateMaterializedViewQuery.h deleted file mode 100644 index 6cd45132371..00000000000 --- a/src/Parsers/New/AST/CreateMaterializedViewQuery.h +++ /dev/null @@ -1,40 +0,0 @@ -#pragma once - -#include - - -namespace DB::AST -{ - -class CreateMaterializedViewQuery : public DDLQuery -{ - public: - CreateMaterializedViewQuery( - PtrTo cluster, - bool attach, - bool if_not_exists, - bool populate, - PtrTo identifier, - PtrTo uuid, - PtrTo schema, - PtrTo destination, - PtrTo engine, - PtrTo query); - - ASTPtr convertToOld() const override; - - private: - enum ChildIndex : UInt8 - { - NAME = 0, // TableIdentifier - UUID, // UUIDClause (optional) - SCHEMA, // TableSchemaClause (optional) - DESTINATION, // DestinationClause (optional) - ENGINE, // EngineClause (optional) - SUBQUERY, // SelectUnionQuery - }; - - const bool attach, if_not_exists, populate; -}; - -} diff --git a/src/Parsers/New/AST/CreateTableQuery.cpp b/src/Parsers/New/AST/CreateTableQuery.cpp deleted file mode 100644 index 1767c08451a..00000000000 --- a/src/Parsers/New/AST/CreateTableQuery.cpp +++ /dev/null @@ -1,224 +0,0 @@ -#include - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - - -namespace DB::AST -{ - -// static -PtrTo TableSchemaClause::createDescription(PtrTo list) -{ - return PtrTo(new TableSchemaClause(ClauseType::DESCRIPTION, {list})); -} - -// static -PtrTo TableSchemaClause::createAsTable(PtrTo identifier) -{ - return PtrTo(new TableSchemaClause(ClauseType::TABLE, {identifier})); -} - -// static -PtrTo TableSchemaClause::createAsFunction(PtrTo expr) -{ - return PtrTo(new TableSchemaClause(ClauseType::FUNCTION, {expr})); -} - -TableSchemaClause::TableSchemaClause(ClauseType type, PtrList exprs) : INode(exprs), clause_type(type) -{ -} - -ASTPtr TableSchemaClause::convertToOld() const -{ - switch(clause_type) - { - case ClauseType::DESCRIPTION: - { - auto columns = std::make_shared(); - - auto column_list = std::make_shared(); - auto constraint_list = std::make_shared(); - auto index_list = std::make_shared(); - auto projection_list = std::make_shared(); - - for (const auto & element : get(ELEMENTS)->as()) - { - switch(element->as()->getType()) - { - case TableElementExpr::ExprType::COLUMN: - column_list->children.push_back(element->convertToOld()); - break; - case TableElementExpr::ExprType::CONSTRAINT: - constraint_list->children.push_back(element->convertToOld()); - break; - case TableElementExpr::ExprType::INDEX: - index_list->children.push_back(element->convertToOld()); - break; - case TableElementExpr::ExprType::PROJECTION: - projection_list->children.push_back(element->convertToOld()); - break; - } - } - - if (!column_list->children.empty()) columns->set(columns->columns, column_list); - if (!constraint_list->children.empty()) columns->set(columns->constraints, constraint_list); - if (!index_list->children.empty()) columns->set(columns->indices, index_list); - 
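/// Same guard for projections: only non-empty element lists are attached, so absent sections stay unset in the old AST. -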
if (!projection_list->children.empty()) columns->set(columns->projections, projection_list); - - return columns; - } - case ClauseType::FUNCTION: - case ClauseType::TABLE: - return get(EXPR)->convertToOld(); - } - __builtin_unreachable(); // FIXME: old gcc compilers complain about reaching end of non-void function -} - -String TableSchemaClause::dumpInfo() const -{ - switch(clause_type) - { - case ClauseType::DESCRIPTION: return "Description"; - case ClauseType::FUNCTION: return "Function"; - case ClauseType::TABLE: return "Table"; - } - __builtin_unreachable(); // FIXME: old gcc compilers complain about reaching end of non-void function -} - -CreateTableQuery::CreateTableQuery( - PtrTo cluster, - bool attach_, - bool temporary_, - bool if_not_exists_, - PtrTo identifier, - PtrTo uuid, - PtrTo schema, - PtrTo engine, - PtrTo query) - : DDLQuery(cluster, {identifier, uuid, schema, engine, query}), attach(attach_), temporary(temporary_), if_not_exists(if_not_exists_) -{ -} - -ASTPtr CreateTableQuery::convertToOld() const -{ - auto query = std::make_shared(); - - { - auto table = get(NAME)->convertToOld(); - query->database = table->as()->getDatabaseName(); - query->table = table->as()->shortName(); - query->uuid = has(UUID) ? parseFromString(get(UUID)->convertToOld()->as()->value.get()) - : table->as()->uuid; - } - - query->cluster = cluster_name; - - query->attach = attach; - query->if_not_exists = if_not_exists; - query->temporary = temporary; - - if (has(SCHEMA)) - { - switch(get(SCHEMA)->getType()) - { - case TableSchemaClause::ClauseType::DESCRIPTION: - { - query->set(query->columns_list, get(SCHEMA)->convertToOld()); - break; - } - case TableSchemaClause::ClauseType::TABLE: - { - auto table = std::static_pointer_cast(get(SCHEMA)->convertToOld()); - query->as_database = table->getDatabaseName(); - query->as_table = table->shortName(); - break; - } - case TableSchemaClause::ClauseType::FUNCTION: - { - query->as_table_function = get(SCHEMA)->convertToOld(); - break; - } - } - } - if (has(ENGINE)) query->set(query->storage, get(ENGINE)->convertToOld()); - if (has(SUBQUERY)) query->set(query->select, get(SUBQUERY)->convertToOld()); - - return query; -} - -String CreateTableQuery::dumpInfo() const -{ - String info; - if (attach) info += "attach=true, "; - else info += "attach=false, "; - if (temporary) info += "temporary=true, "; - else info += "temporary=false, "; - if (if_not_exists) info += "if_not_exists=true"; - else info += "if_not_exists=false"; - return info; -} - -} - -namespace DB -{ - -using namespace AST; - -// TODO: assert(!(ctx->parent->TEMPORARY() ^ ctx->engineClause())) - -antlrcpp::Any ParseTreeVisitor::visitClusterClause(ClickHouseParser::ClusterClauseContext *ctx) -{ - auto literal = ctx->STRING_LITERAL() ? Literal::createString(ctx->STRING_LITERAL()) - : Literal::createString(ctx->identifier()->getText()); - return std::make_shared(literal); -} - -antlrcpp::Any ParseTreeVisitor::visitCreateTableStmt(ClickHouseParser::CreateTableStmtContext *ctx) -{ - auto uuid = ctx->uuidClause() ? visit(ctx->uuidClause()).as>() : nullptr; - auto cluster = ctx->clusterClause() ? visit(ctx->clusterClause()).as>() : nullptr; - auto schema = ctx->tableSchemaClause() ? visit(ctx->tableSchemaClause()).as>() : nullptr; - auto engine = ctx->engineClause() ? visit(ctx->engineClause()).as>() : nullptr; - auto query = ctx->subqueryClause() ? 
visit(ctx->subqueryClause()).as>() : nullptr; - return std::make_shared( - cluster, !!ctx->ATTACH(), !!ctx->TEMPORARY(), !!ctx->IF(), visit(ctx->tableIdentifier()), uuid, schema, engine, query); -} - -antlrcpp::Any ParseTreeVisitor::visitSchemaDescriptionClause(ClickHouseParser::SchemaDescriptionClauseContext *ctx) -{ - auto elems = std::make_shared(); - for (auto * elem : ctx->tableElementExpr()) elems->push(visit(elem)); - return TableSchemaClause::createDescription(elems); -} - -antlrcpp::Any ParseTreeVisitor::visitSchemaAsTableClause(ClickHouseParser::SchemaAsTableClauseContext *ctx) -{ - return TableSchemaClause::createAsTable(visit(ctx->tableIdentifier())); -} - -antlrcpp::Any ParseTreeVisitor::visitSchemaAsFunctionClause(ClickHouseParser::SchemaAsFunctionClauseContext *ctx) -{ - return TableSchemaClause::createAsFunction(visit(ctx->tableFunctionExpr())); -} - -antlrcpp::Any ParseTreeVisitor::visitSubqueryClause(ClickHouseParser::SubqueryClauseContext *ctx) -{ - return visit(ctx->selectUnionStmt()); -} - -antlrcpp::Any ParseTreeVisitor::visitUuidClause(ClickHouseParser::UuidClauseContext *ctx) -{ - return std::make_shared(Literal::createString(ctx->STRING_LITERAL())); -} - -} diff --git a/src/Parsers/New/AST/CreateTableQuery.h b/src/Parsers/New/AST/CreateTableQuery.h deleted file mode 100644 index 4fe19832b1d..00000000000 --- a/src/Parsers/New/AST/CreateTableQuery.h +++ /dev/null @@ -1,76 +0,0 @@ -#pragma once - -#include -#include "Parsers/New/AST/SelectUnionQuery.h" - - -namespace DB::AST -{ - -class TableSchemaClause : public INode -{ - public: - static PtrTo createDescription(PtrTo list); - static PtrTo createAsTable(PtrTo identifier); - static PtrTo createAsFunction(PtrTo expr); - - enum class ClauseType - { - DESCRIPTION, - TABLE, - FUNCTION, - }; - - auto getType() const { return clause_type; } - - ASTPtr convertToOld() const override; - - private: - enum ChildIndex : UInt8 - { - // DESCRIPTION - ELEMENTS = 0, // TableElementList - - // TABLE and FUNCTION - EXPR = 0, // TableIdentifier or TableFunctionExpr - }; - - ClauseType clause_type; - - TableSchemaClause(ClauseType type, PtrList exprs); - - String dumpInfo() const override; -}; - -class CreateTableQuery : public DDLQuery -{ - public: - CreateTableQuery( - PtrTo cluster, - bool attach, - bool temporary, - bool if_not_exists, - PtrTo identifier, - PtrTo uuid, - PtrTo schema, - PtrTo engine, - PtrTo query); - - ASTPtr convertToOld() const override; - - private: - enum ChildIndex : UInt8 - { - NAME = 0, // TableIdentifier - UUID, // UUIDClause (optional) - SCHEMA, // TableSchemaClause - ENGINE, // EngineClause - SUBQUERY, // SelectUnionQuery - }; - - const bool attach, temporary, if_not_exists; - - String dumpInfo() const override; -}; - -} diff --git a/src/Parsers/New/AST/CreateViewQuery.cpp b/src/Parsers/New/AST/CreateViewQuery.cpp deleted file mode 100644 index 97244e82d52..00000000000 --- a/src/Parsers/New/AST/CreateViewQuery.cpp +++ /dev/null @@ -1,62 +0,0 @@ -#include - -#include -#include -#include -#include -#include - - -namespace DB::AST -{ -CreateViewQuery::CreateViewQuery( - PtrTo cluster, - bool attach_, - bool replace_, - bool if_not_exists_, - PtrTo identifier, - PtrTo clause, - PtrTo query) - : DDLQuery(cluster, {identifier, clause, query}), attach(attach_), replace(replace_), if_not_exists(if_not_exists_) -{ -} - -ASTPtr CreateViewQuery::convertToOld() const -{ - auto query = std::make_shared(); - - { - auto table = std::static_pointer_cast(get(NAME)->convertToOld()); - query->database = 
table->getDatabaseName(); - query->table = table->shortName(); - query->uuid = table->uuid; - } - - query->attach = attach; - query->replace_view = replace; - query->if_not_exists = if_not_exists; - query->is_ordinary_view = true; - query->cluster = cluster_name; - - if (has(SCHEMA)) query->set(query->columns_list, get(SCHEMA)->convertToOld()); - query->set(query->select, get(SUBQUERY)->convertToOld()); - - return query; -} - -} - -namespace DB -{ - -using namespace AST; - -antlrcpp::Any ParseTreeVisitor::visitCreateViewStmt(ClickHouseParser::CreateViewStmtContext *ctx) -{ - auto cluster = ctx->clusterClause() ? visit(ctx->clusterClause()).as>() : nullptr; - auto schema = ctx->tableSchemaClause() ? visit(ctx->tableSchemaClause()).as>() : nullptr; - return std::make_shared( - cluster, !!ctx->ATTACH(), !!ctx->REPLACE(), !!ctx->IF(), visit(ctx->tableIdentifier()), schema, visit(ctx->subqueryClause())); -} - -} diff --git a/src/Parsers/New/AST/CreateViewQuery.h b/src/Parsers/New/AST/CreateViewQuery.h deleted file mode 100644 index 41567c30cdc..00000000000 --- a/src/Parsers/New/AST/CreateViewQuery.h +++ /dev/null @@ -1,34 +0,0 @@ -#pragma once - -#include - - -namespace DB::AST -{ - -class CreateViewQuery : public DDLQuery -{ - public: - CreateViewQuery( - PtrTo cluster, - bool attach, - bool replace, - bool if_not_exists, - PtrTo identifier, - PtrTo clause, - PtrTo query); - - ASTPtr convertToOld() const override; - - private: - enum ChildIndex : UInt8 - { - NAME = 0, // TableIdentifier - SCHEMA = 1, // TableSchemaClause (optional) - SUBQUERY = 2, // SelectUnionQuery - }; - - const bool attach, replace, if_not_exists; -}; - -} diff --git a/src/Parsers/New/AST/DDLQuery.cpp b/src/Parsers/New/AST/DDLQuery.cpp deleted file mode 100644 index 0cd06e27abe..00000000000 --- a/src/Parsers/New/AST/DDLQuery.cpp +++ /dev/null @@ -1,6 +0,0 @@ -#include - - -namespace DB::AST -{ -} diff --git a/src/Parsers/New/AST/DDLQuery.h b/src/Parsers/New/AST/DDLQuery.h deleted file mode 100644 index 6aba46d29e3..00000000000 --- a/src/Parsers/New/AST/DDLQuery.h +++ /dev/null @@ -1,29 +0,0 @@ -#pragma once - -#include - -#include -#include -#include - - -namespace DB::AST -{ - -class DDLQuery : public Query -{ - protected: - DDLQuery(PtrTo cluster, std::initializer_list list) - : Query(list), cluster_name(cluster ? cluster->convertToOld()->as()->value.get() : String{}) - { - } - - DDLQuery(PtrTo cluster, PtrList list) - : Query(list), cluster_name(cluster ? 
cluster->convertToOld()->as()->value.get() : String{}) - { - } - - const String cluster_name; -}; - -} diff --git a/src/Parsers/New/AST/DescribeQuery.cpp b/src/Parsers/New/AST/DescribeQuery.cpp deleted file mode 100644 index b924b1b270b..00000000000 --- a/src/Parsers/New/AST/DescribeQuery.cpp +++ /dev/null @@ -1,36 +0,0 @@ -#include - -#include -#include -#include - - -namespace DB::AST -{ - -DescribeQuery::DescribeQuery(PtrTo expr) : Query{expr} -{ -} - -ASTPtr DescribeQuery::convertToOld() const -{ - auto query = std::make_shared(); - - query->table_expression = get(EXPR)->convertToOld(); - - return query; -} - -} - -namespace DB -{ - -using namespace AST; - -antlrcpp::Any ParseTreeVisitor::visitDescribeStmt(ClickHouseParser::DescribeStmtContext *ctx) -{ - return std::make_shared(visit(ctx->tableExpr()).as>()); -} - -} diff --git a/src/Parsers/New/AST/DescribeQuery.h b/src/Parsers/New/AST/DescribeQuery.h deleted file mode 100644 index e7323476a43..00000000000 --- a/src/Parsers/New/AST/DescribeQuery.h +++ /dev/null @@ -1,27 +0,0 @@ -#pragma once - -#include - - -namespace DB::AST -{ - -// TODO: rewrite to -// `SELECT name, type, default_type, default_expression, comment, codec_expression, ttl_expression FROM system.columns -// WHERE database=db AND table=table` - -class DescribeQuery : public Query -{ - public: - explicit DescribeQuery(PtrTo expr); - - ASTPtr convertToOld() const override; - - private: - enum ChildIndex : UInt8 - { - EXPR = 0, - }; -}; - -} diff --git a/src/Parsers/New/AST/DropQuery.cpp b/src/Parsers/New/AST/DropQuery.cpp deleted file mode 100644 index 59a417a6fa9..00000000000 --- a/src/Parsers/New/AST/DropQuery.cpp +++ /dev/null @@ -1,126 +0,0 @@ -#include - -#include - -#include - -#include - - -namespace DB::AST -{ - -// static -PtrTo -DropQuery::createDropDatabase(bool detach, bool if_exists, PtrTo identifier, PtrTo cluster) -{ - auto query = PtrTo(new DropQuery(cluster, QueryType::DATABASE, {identifier})); - query->detach = detach; - query->if_exists = if_exists; - return query; -} - -// static -PtrTo -DropQuery::createDropDictionary(bool detach, bool if_exists, PtrTo identifier, PtrTo cluster) -{ - auto query = PtrTo(new DropQuery(cluster, QueryType::DICTIONARY, {identifier})); - query->detach = detach; - query->if_exists = if_exists; - return query; -} - -// static -PtrTo -DropQuery::createDropTable(bool detach, bool if_exists, bool temporary, PtrTo identifier, PtrTo cluster) -{ - auto query = PtrTo(new DropQuery(cluster, QueryType::TABLE, {identifier})); - query->detach = detach; - query->if_exists = if_exists; - query->temporary = temporary; - return query; -} - -// static -PtrTo -DropQuery::createDropView(bool detach, bool if_exists, PtrTo identifier, PtrTo cluster) -{ - auto query = PtrTo(new DropQuery(cluster, QueryType::VIEW, {identifier})); - query->detach = detach; - query->if_exists = if_exists; - return query; -} - -DropQuery::DropQuery(PtrTo cluster, QueryType type, PtrList exprs) : DDLQuery(cluster, exprs), query_type(type) -{ -} - -ASTPtr DropQuery::convertToOld() const -{ - auto query = std::make_shared(); - - query->kind = detach ? 
ASTDropQuery::Detach : ASTDropQuery::Drop; - query->if_exists = if_exists; - query->temporary = temporary; - query->cluster = cluster_name; - - // TODO: refactor |ASTQueryWithTableAndOutput| to accept |ASTIdentifier| - switch(query_type) - { - case QueryType::DATABASE: - query->database = get(NAME)->getName(); - break; - case QueryType::DICTIONARY: - query->is_dictionary = true; - query->table = get(NAME)->getName(); - if (auto database = get(NAME)->getDatabase()) - query->database = database->getName(); - break; - case QueryType::TABLE: - { - query->table = get(NAME)->getName(); - if (auto database = get(NAME)->getDatabase()) - query->database = database->getName(); - break; - } - case QueryType::VIEW: - { - query->is_view = true; - query->table = get(NAME)->getName(); - if (auto database = get(NAME)->getDatabase()) - query->database = database->getName(); - break; - } - } - - convertToOldPartially(query); - - return query; -} - -} - -namespace DB -{ - -using namespace AST; - -antlrcpp::Any ParseTreeVisitor::visitDropDatabaseStmt(ClickHouseParser::DropDatabaseStmtContext *ctx) -{ - auto cluster = ctx->clusterClause() ? visit(ctx->clusterClause()).as>() : nullptr; - return DropQuery::createDropDatabase(!!ctx->DETACH(), !!ctx->EXISTS(), visit(ctx->databaseIdentifier()), cluster); -} - -antlrcpp::Any ParseTreeVisitor::visitDropTableStmt(ClickHouseParser::DropTableStmtContext *ctx) -{ - auto cluster = ctx->clusterClause() ? visit(ctx->clusterClause()).as>() : nullptr; - if (ctx->TABLE()) - return DropQuery::createDropTable(!!ctx->DETACH(), !!ctx->EXISTS(), !!ctx->TEMPORARY(), visit(ctx->tableIdentifier()), cluster); - if (ctx->DICTIONARY()) - return DropQuery::createDropDictionary(!!ctx->DETACH(), !!ctx->EXISTS(), visit(ctx->tableIdentifier()), cluster); - if (ctx->VIEW()) - return DropQuery::createDropView(!!ctx->DETACH(), !!ctx->EXISTS(), visit(ctx->tableIdentifier()), cluster); - __builtin_unreachable(); -} - -} diff --git a/src/Parsers/New/AST/DropQuery.h b/src/Parsers/New/AST/DropQuery.h deleted file mode 100644 index cc70561e90f..00000000000 --- a/src/Parsers/New/AST/DropQuery.h +++ /dev/null @@ -1,46 +0,0 @@ -#pragma once - -#include - - -namespace DB::AST -{ - -class DropQuery : public DDLQuery -{ - public: - static PtrTo - createDropDatabase(bool detach, bool if_exists, PtrTo identifier, PtrTo cluster); - static PtrTo - createDropTable(bool detach, bool if_exists, bool temporary, PtrTo identifier, PtrTo cluster); - static PtrTo - createDropDictionary(bool detach, bool if_exists, PtrTo identifier, PtrTo cluster); - static PtrTo - createDropView(bool detach, bool if_exists, PtrTo identifier, PtrTo cluster); - - ASTPtr convertToOld() const override; - - private: - enum ChildIndex : UInt8 - { - NAME = 0, - }; - - enum class QueryType - { - DATABASE, - DICTIONARY, - TABLE, - VIEW, - }; - - const QueryType query_type; - - bool detach = false; - bool if_exists = false; - bool temporary = false; - - DropQuery(PtrTo cluster, QueryType type, PtrList exprs); -}; - -} diff --git a/src/Parsers/New/AST/EngineExpr.cpp b/src/Parsers/New/AST/EngineExpr.cpp deleted file mode 100644 index 7127882c49d..00000000000 --- a/src/Parsers/New/AST/EngineExpr.cpp +++ /dev/null @@ -1,199 +0,0 @@ -#include - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - - -namespace DB::ErrorCodes -{ - extern const int UNEXPECTED_AST_STRUCTURE; -} - -namespace DB::AST -{ - -EngineClause::EngineClause(PtrTo expr) : INode(MAX_INDEX) -{ - set(ENGINE, expr); -} - -void 
EngineClause::setOrderByClause(PtrTo clause) -{ - set(ORDER_BY, clause); -} - -void EngineClause::setPartitionByClause(PtrTo clause) -{ - set(PARTITION_BY, clause); -} - -void EngineClause::setPrimaryKeyClause(PtrTo clause) -{ - set(PRIMARY_KEY, clause); -} - -void EngineClause::setSampleByClause(PtrTo clause) -{ - set(SAMPLE_BY, clause); -} - -void EngineClause::setTTLClause(PtrTo clause) -{ - set(TTL, clause); -} - -void EngineClause::setSettingsClause(PtrTo clause) -{ - set(SETTINGS, clause); -} - -ASTPtr EngineClause::convertToOld() const -{ - auto storage = std::make_shared(); - - storage->set(storage->engine, get(ENGINE)->convertToOld()); - if (has(PARTITION_BY)) storage->set(storage->partition_by, get(PARTITION_BY)->convertToOld()); - if (has(PRIMARY_KEY)) storage->set(storage->primary_key, get(PRIMARY_KEY)->convertToOld()); - if (has(ORDER_BY)) - { - /// XXX: old parser used very strange grammar for this case, instead of using OrderByElement's. - auto expr_list = get(ORDER_BY)->convertToOld(); - if (expr_list->children.size() > 1) - throw DB::Exception(ErrorCodes::UNEXPECTED_AST_STRUCTURE, "Cannot convert multiple ORDER expression to old AST"); - storage->set(storage->order_by, expr_list->children[0]->children[0]); - } - if (has(SAMPLE_BY)) storage->set(storage->sample_by, get(SAMPLE_BY)->convertToOld()); - if (has(TTL)) storage->set(storage->ttl_table, get(TTL)->convertToOld()); - if (has(SETTINGS)) - { - storage->set(storage->settings, get(SETTINGS)->convertToOld()); - storage->settings->is_standalone = false; - } - - return storage; -} - -EngineExpr::EngineExpr(PtrTo identifier, PtrTo args) : INode{identifier, args} -{ -} - -ASTPtr EngineExpr::convertToOld() const -{ - auto expr = std::make_shared(); - - expr->name = get(NAME)->getName(); - expr->no_empty_args = true; - if (has(ARGS)) - { - expr->arguments = get(ARGS)->convertToOld(); - expr->children.push_back(expr->arguments); - } - - return expr; -} - -TTLExpr::TTLExpr(PtrTo expr, TTLType type, PtrTo literal) : INode{expr, literal}, ttl_type(type) -{ -} - -ASTPtr TTLExpr::convertToOld() const -{ - TTLMode mode = TTLMode::DELETE; - DataDestinationType destination_type = DataDestinationType::DELETE; - String destination_name; - - switch(ttl_type) - { - case TTLType::DELETE: - mode = TTLMode::DELETE; - destination_type = DataDestinationType::DELETE; - break; - case TTLType::TO_DISK: - mode = TTLMode::MOVE; - destination_type = DataDestinationType::DISK; - destination_name = get(TYPE)->convertToOld()->as()->value.get(); - break; - case TTLType::TO_VOLUME: - mode = TTLMode::MOVE; - destination_type = DataDestinationType::VOLUME; - destination_name = get(TYPE)->convertToOld()->as()->value.get(); - break; - } - - auto expr = std::make_shared(mode, destination_type, destination_name); - expr->setTTL(get(EXPR)->convertToOld()); - return expr; -} - -} - -namespace DB -{ - -using namespace AST; - -antlrcpp::Any ParseTreeVisitor::visitEngineClause(ClickHouseParser::EngineClauseContext *ctx) -{ - auto clause = std::make_shared(visit(ctx->engineExpr()).as>()); - - if (!ctx->orderByClause().empty()) clause->setOrderByClause(visit(ctx->orderByClause(0))); - if (!ctx->partitionByClause().empty()) clause->setPartitionByClause(visit(ctx->partitionByClause(0))); - if (!ctx->primaryKeyClause().empty()) clause->setPrimaryKeyClause(visit(ctx->primaryKeyClause(0))); - if (!ctx->sampleByClause().empty()) clause->setSampleByClause(visit(ctx->sampleByClause(0))); - if (!ctx->ttlClause().empty()) clause->setTTLClause(visit(ctx->ttlClause(0))); - if 
(!ctx->settingsClause().empty()) clause->setSettingsClause(visit(ctx->settingsClause(0))); - - return clause; -} - -antlrcpp::Any ParseTreeVisitor::visitEngineExpr(ClickHouseParser::EngineExprContext *ctx) -{ - auto list = ctx->columnExprList() ? visit(ctx->columnExprList()).as>() : nullptr; - return std::make_shared(visit(ctx->identifierOrNull()), list); -} - -antlrcpp::Any ParseTreeVisitor::visitPartitionByClause(ClickHouseParser::PartitionByClauseContext *ctx) -{ - return std::make_shared(visit(ctx->columnExpr()).as>()); -} - -antlrcpp::Any ParseTreeVisitor::visitPrimaryKeyClause(ClickHouseParser::PrimaryKeyClauseContext *ctx) -{ - return std::make_shared(visit(ctx->columnExpr()).as>()); -} - -antlrcpp::Any ParseTreeVisitor::visitSampleByClause(ClickHouseParser::SampleByClauseContext *ctx) -{ - return std::make_shared(visit(ctx->columnExpr()).as>()); -} - -antlrcpp::Any ParseTreeVisitor::visitTtlClause(ClickHouseParser::TtlClauseContext *ctx) -{ - auto list = std::make_shared(); - for (auto * expr : ctx->ttlExpr()) list->push(visit(expr)); - return std::make_shared(list); -} - -antlrcpp::Any ParseTreeVisitor::visitTtlExpr(ClickHouseParser::TtlExprContext *ctx) -{ - TTLExpr::TTLType type; - PtrTo literal; - - if (ctx->DISK()) type = TTLExpr::TTLType::TO_DISK; - else if (ctx->VOLUME()) type = TTLExpr::TTLType::TO_VOLUME; - else type = TTLExpr::TTLType::DELETE; - - if (ctx->STRING_LITERAL()) literal = Literal::createString(ctx->STRING_LITERAL()); - - return std::make_shared(visit(ctx->columnExpr()), type, literal); -} - -} diff --git a/src/Parsers/New/AST/EngineExpr.h b/src/Parsers/New/AST/EngineExpr.h deleted file mode 100644 index 809b398d834..00000000000 --- a/src/Parsers/New/AST/EngineExpr.h +++ /dev/null @@ -1,85 +0,0 @@ -#pragma once - -#include - - -namespace DB::AST -{ - -// Clauses - -using PartitionByClause = SimpleClause; - -using SampleByClause = SimpleClause; - -class EngineClause : public INode -{ - public: - explicit EngineClause(PtrTo expr); - - void setOrderByClause(PtrTo clause); - void setPartitionByClause(PtrTo clause); - void setPrimaryKeyClause(PtrTo clause); - void setSampleByClause(PtrTo clause); - void setTTLClause(PtrTo clause); - void setSettingsClause(PtrTo clause); - - ASTPtr convertToOld() const override; - - private: - enum ChildIndex : UInt8 - { - ENGINE = 0, // EngineExpr - ORDER_BY, // OrderByClause (optional) - PARTITION_BY, // PartitionByClause (optional) - PRIMARY_KEY, // PrimaryKeyClause (optional) - SAMPLE_BY, // SampleByClause (optional) - TTL, // TTLClause (optional) - SETTINGS, // SettingsClause (optional) - - MAX_INDEX, - }; -}; - -// Expressions - -class EngineExpr : public INode -{ - public: - EngineExpr(PtrTo identifier, PtrTo args); - - ASTPtr convertToOld() const override; - - private: - enum ChildIndex : UInt8 - { - NAME = 0, // Identifier - ARGS, // ColumnExprList (optional) - }; -}; - -class TTLExpr : public INode -{ - public: - enum class TTLType - { - DELETE, - TO_DISK, - TO_VOLUME, - }; - - TTLExpr(PtrTo expr, TTLType type, PtrTo literal); - - ASTPtr convertToOld() const override; - - private: - enum ChildIndex : UInt8 - { - EXPR = 0, // ColumnExpr - TYPE = 1, // StringLiteral (optional) - }; - - TTLType ttl_type; -}; - -} diff --git a/src/Parsers/New/AST/ExistsQuery.cpp b/src/Parsers/New/AST/ExistsQuery.cpp deleted file mode 100644 index d2b77319dcc..00000000000 --- a/src/Parsers/New/AST/ExistsQuery.cpp +++ /dev/null @@ -1,87 +0,0 @@ -#include - -#include -#include -#include -#include -#include - - -namespace DB::AST -{ - 
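-/// EXISTS has four flavours (DATABASE, DICTIONARY, TABLE, VIEW); convertToOld() below maps each one to the corresponding EXISTS query AST of the old parser.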
-ExistsQuery::ExistsQuery(QueryType type, bool temporary_, PtrList exprs) - : Query(exprs), query_type(type), temporary(temporary_) -{ -} - -// static -PtrTo<ExistsQuery> ExistsQuery::createTable(QueryType type, bool temporary, PtrTo<TableIdentifier> identifier) -{ - return PtrTo<ExistsQuery>(new ExistsQuery(type, temporary, {identifier})); -} - -// static -PtrTo<ExistsQuery> ExistsQuery::createDatabase(PtrTo<DatabaseIdentifier> identifier) -{ - return PtrTo<ExistsQuery>(new ExistsQuery(QueryType::DATABASE, false, {identifier})); -} - -ASTPtr ExistsQuery::convertToOld() const -{ - std::shared_ptr<ASTQueryWithTableAndOutput> query; - - switch(query_type) - { - case QueryType::DATABASE: - query = std::make_shared<ASTExistsDatabaseQuery>(); - tryGetIdentifierNameInto(get(IDENTIFIER)->convertToOld(), query->database); - return query; - - case QueryType::DICTIONARY: - query = std::make_shared<ASTExistsDictionaryQuery>(); - break; - case QueryType::TABLE: - query = std::make_shared<ASTExistsTableQuery>(); - break; - case QueryType::VIEW: - query = std::make_shared<ASTExistsViewQuery>(); - break; - } - - // FIXME: this won't work if table doesn't exist - auto table_id = std::static_pointer_cast<ASTTableIdentifier>(get(IDENTIFIER)->convertToOld()); - query->database = table_id->getDatabaseName(); - query->table = table_id->shortName(); - query->uuid = table_id->uuid; - query->temporary = temporary; - - return query; -} - -} - -namespace DB -{ - -using namespace AST; - -antlrcpp::Any ParseTreeVisitor::visitExistsTableStmt(ClickHouseParser::ExistsTableStmtContext *ctx) -{ - ExistsQuery::QueryType type; - if (ctx->DICTIONARY()) - type = ExistsQuery::QueryType::DICTIONARY; - else if (ctx->VIEW()) - type = ExistsQuery::QueryType::VIEW; - else // Query 'EXISTS <name>' is interpreted as 'EXISTS TABLE <name>' - type = ExistsQuery::QueryType::TABLE; - - return ExistsQuery::createTable(type, !!ctx->TEMPORARY(), visit(ctx->tableIdentifier())); -} - -antlrcpp::Any ParseTreeVisitor::visitExistsDatabaseStmt(ClickHouseParser::ExistsDatabaseStmtContext *ctx) -{ - return ExistsQuery::createDatabase(visit(ctx->databaseIdentifier())); -} - -} diff --git a/src/Parsers/New/AST/ExistsQuery.h b/src/Parsers/New/AST/ExistsQuery.h deleted file mode 100644 index 5afe076b580..00000000000 --- a/src/Parsers/New/AST/ExistsQuery.h +++ /dev/null @@ -1,37 +0,0 @@ -#pragma once - -#include - - -namespace DB::AST -{ - -class ExistsQuery : public Query -{ - public: - enum class QueryType - { - DICTIONARY, - TABLE, - VIEW, - DATABASE, - }; - - static PtrTo<ExistsQuery> createTable(QueryType type, bool temporary, PtrTo<TableIdentifier> identifier); - static PtrTo<ExistsQuery> createDatabase(PtrTo<DatabaseIdentifier> identifier); - - ExistsQuery(QueryType type, bool temporary, PtrList exprs); - - ASTPtr convertToOld() const override; - - private: - enum ChildIndex : UInt8 - { - IDENTIFIER = 0, // DatabaseIdentifier or TableIdentifier - }; - - const QueryType query_type; - const bool temporary; -}; - -} diff --git a/src/Parsers/New/AST/ExplainQuery.cpp b/src/Parsers/New/AST/ExplainQuery.cpp deleted file mode 100644 index e6afd480f85..00000000000 --- a/src/Parsers/New/AST/ExplainQuery.cpp +++ /dev/null @@ -1,62 +0,0 @@ -#include - -#include -#include - - -namespace DB::AST -{ - -// static -PtrTo<ExplainQuery> ExplainQuery::createExplainAST(PtrTo<Query> query) -{ - return PtrTo<ExplainQuery>(new ExplainQuery(QueryType::AST, {query})); -} - -// static -PtrTo<ExplainQuery> ExplainQuery::createExplainSyntax(PtrTo<Query> query) -{ - return PtrTo<ExplainQuery>(new ExplainQuery(QueryType::SYNTAX, {query})); -} - -ExplainQuery::ExplainQuery(QueryType type, PtrList exprs) : Query{exprs}, query_type(type) -{ -} - -ASTPtr ExplainQuery::convertToOld() const -{ - ASTPtr query; - - switch (query_type) - { - case QueryType::AST: - query = std::make_shared<ASTExplainQuery>(ASTExplainQuery::ParsedAST); - break; - case QueryType::SYNTAX:
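- /// EXPLAIN SYNTAX maps to the old parser's AnalyzedSyntax explain kind.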
- query = std::make_shared(ASTExplainQuery::AnalyzedSyntax); - break; - } - - query->as()->setExplainedQuery(get(QUERY)->convertToOld()); - - return query; -} - -} - -namespace DB -{ - -using namespace DB::AST; - -antlrcpp::Any ParseTreeVisitor::visitExplainASTStmt(ClickHouseParser::ExplainASTStmtContext *ctx) -{ - return ExplainQuery::createExplainAST(visit(ctx->query()).as>()); -} - -antlrcpp::Any ParseTreeVisitor::visitExplainSyntaxStmt(ClickHouseParser::ExplainSyntaxStmtContext *ctx) -{ - return ExplainQuery::createExplainSyntax(visit(ctx->query()).as>()); -} - -} diff --git a/src/Parsers/New/AST/ExplainQuery.h b/src/Parsers/New/AST/ExplainQuery.h deleted file mode 100644 index 53bc63e7fd5..00000000000 --- a/src/Parsers/New/AST/ExplainQuery.h +++ /dev/null @@ -1,34 +0,0 @@ -#pragma once - -#include - - -namespace DB::AST -{ - -class ExplainQuery : public Query -{ - public: - static PtrTo createExplainAST(PtrTo query); - static PtrTo createExplainSyntax(PtrTo query); - - ASTPtr convertToOld() const override; - - private: - enum ChildIndex : UInt8 - { - QUERY = 0, // Query - }; - - enum class QueryType - { - AST, - SYNTAX, - }; - - const QueryType query_type; - - ExplainQuery(QueryType type, PtrList exprs); -}; - -} diff --git a/src/Parsers/New/AST/INode.h b/src/Parsers/New/AST/INode.h deleted file mode 100644 index 68ad774e218..00000000000 --- a/src/Parsers/New/AST/INode.h +++ /dev/null @@ -1,103 +0,0 @@ -#pragma once - -#include - -#include -#include -#include - -#include -#include - - -namespace DB::AST -{ - -class INode : public TypePromotion -{ - public: - virtual ~INode() = default; - - virtual ASTPtr convertToOld() const { return ASTPtr(); } - virtual String toString() const { return {}; } - - void dump() const { dump(0); } - - protected: - INode() = default; - INode(std::initializer_list list) { children = list; } - explicit INode(PtrList list) { children = list; } - explicit INode(size_t size) { children.resize(size); } - - void push(const Ptr& child) { children.push_back(child); } - void set(size_t i, const Ptr& child) { children[i] = child; } - bool has(size_t i) const { return i < children.size() && children[i]; } - const Ptr & get(size_t i) const { return children[i]; } - - template - bool has(size_t i) const { return has(i) && children[i]->as(); } - - template - ChildType * get(size_t i) const { return children[i]->template as(); } - - auto begin() const { return children.cbegin(); } - auto end() const { return children.cend(); } - auto size() const { return children.size(); } - - private: - PtrList children; // any child potentially may point to |nullptr| - - void dump(int indentation) const - { - for (auto i = 0; i < indentation; ++i) std::cout << " "; - std::cout << "⭸ " << demangle(typeid(*this).name()) << " (" << dumpInfo() << ")" << std::endl; - for (const auto & child : children) if (child) child->dump(indentation + 1); - } - - virtual String dumpInfo() const { return ""; } -}; - -template -class List : public INode { - public: - List() = default; - List(std::initializer_list> list) - { - for (const auto & i : list) push(i); - } - - using INode::begin; - using INode::end; - using INode::size; - - void push(const PtrTo & node) { INode::push(node); } - - ASTPtr convertToOld() const override - { - auto list = std::make_shared(Separator); - for (const auto & child : *this) list->children.emplace_back(child->convertToOld()); - return list; - } - - String toString() const override - { - if (!size()) return {}; - - auto string = (*begin())->toString(); - - for (auto next = 
++begin(); next != end(); ++next) - string += String(1, Separator) + " " + (*next)->toString(); - - return string; - } -}; - -template -class SimpleClause : public INode -{ - public: - explicit SimpleClause(PtrTo expr) : INode{expr} {} - ASTPtr convertToOld() const override { return get(0)->convertToOld(); } -}; - -} diff --git a/src/Parsers/New/AST/Identifier.cpp b/src/Parsers/New/AST/Identifier.cpp deleted file mode 100644 index 17d4cfe2b2f..00000000000 --- a/src/Parsers/New/AST/Identifier.cpp +++ /dev/null @@ -1,174 +0,0 @@ -#include - -#include -#include -#include - - -namespace DB::AST -{ - -Identifier::Identifier(const String & name_) : name(name_) -{ - if (name.front() == '`' || name.front() == '"') - { - String s; - ReadBufferFromMemory in(name.data(), name.size()); - - if (name.front() == '`') - readBackQuotedStringWithSQLStyle(s, in); - else - readDoubleQuotedStringWithSQLStyle(s, in); - - assert(in.count() == name.size()); - name = s; - } -} - -Identifier::Identifier(const String & name_, const String & nested_name) : name(name_ + "." + nested_name) -{ -} - -ASTPtr Identifier::convertToOld() const -{ - return std::make_shared(getQualifiedName()); -} - -String Identifier::toString() const -{ - return getQualifiedName(); -} - -DatabaseIdentifier::DatabaseIdentifier(PtrTo name) : Identifier(*name) -{ -} - -TableIdentifier::TableIdentifier(PtrTo database, PtrTo name) : Identifier(*name), db(database) -{ -} - -void TableIdentifier::makeCompound() const -{ - if (db) - { - name = db->getName(); - db.reset(); - } -} - -ASTPtr TableIdentifier::convertToOld() const -{ - if (db) return std::make_shared(db->getName(), getName()); - else return std::make_shared(getName()); -} - -ColumnIdentifier::ColumnIdentifier(PtrTo table_, PtrTo name) : Identifier(name->getName()), table(table_) -{ -} - -void ColumnIdentifier::makeCompound() const -{ - if (table) - { - name = table->getName() + "." + getName(); - if (table->getDatabase()) table->makeCompound(); - else table.reset(); - } -} - -ASTPtr ColumnIdentifier::convertToOld() const -{ - std::vector parts; - - if (table) - { - if (table->getDatabase()) parts.push_back(table->getDatabase()->getName()); - parts.push_back(table->getName()); - } - parts.push_back(getName()); - - return std::make_shared(std::move(parts)); -} - -} - -namespace DB -{ - -using namespace AST; - -antlrcpp::Any ParseTreeVisitor::visitAlias(ClickHouseParser::AliasContext *ctx) -{ - if (ctx->IDENTIFIER()) return std::make_shared(ctx->IDENTIFIER()->getText()); - if (ctx->keywordForAlias()) return std::make_shared(ctx->keywordForAlias()->getText()); - __builtin_unreachable(); -} - -antlrcpp::Any ParseTreeVisitor::visitColumnIdentifier(ClickHouseParser::ColumnIdentifierContext *ctx) -{ - auto table = ctx->tableIdentifier() ? 
visit(ctx->tableIdentifier()).as>() : nullptr; - return std::make_shared(table, visit(ctx->nestedIdentifier())); -} - -antlrcpp::Any ParseTreeVisitor::visitDatabaseIdentifier(ClickHouseParser::DatabaseIdentifierContext *ctx) -{ - return std::make_shared(visit(ctx->identifier()).as>()); -} - -antlrcpp::Any ParseTreeVisitor::visitIdentifier(ClickHouseParser::IdentifierContext *ctx) -{ - if (ctx->IDENTIFIER()) return std::make_shared(ctx->IDENTIFIER()->getText()); - if (ctx->interval()) return std::make_shared(ctx->interval()->getText()); - if (ctx->keyword()) return std::make_shared(ctx->keyword()->getText()); - __builtin_unreachable(); -} - -antlrcpp::Any ParseTreeVisitor::visitIdentifierOrNull(ClickHouseParser::IdentifierOrNullContext *ctx) -{ - if (ctx->identifier()) return visit(ctx->identifier()); - if (ctx->NULL_SQL()) - { - if (ctx->NULL_SQL()->getSymbol()->getText() == "Null") return std::make_shared("Null"); - else { - // TODO: raise error - } - } - __builtin_unreachable(); -} - -antlrcpp::Any ParseTreeVisitor::visitInterval(ClickHouseParser::IntervalContext *) -{ - asm (""); // prevent symbol removal - __builtin_unreachable(); -} - -antlrcpp::Any ParseTreeVisitor::visitKeyword(ClickHouseParser::KeywordContext *) -{ - asm (""); // prevent symbol removal - __builtin_unreachable(); -} - -antlrcpp::Any ParseTreeVisitor::visitKeywordForAlias(ClickHouseParser::KeywordForAliasContext *) -{ - asm (""); // prevent symbol removal - __builtin_unreachable(); -} - -antlrcpp::Any ParseTreeVisitor::visitNestedIdentifier(ClickHouseParser::NestedIdentifierContext *ctx) -{ - if (ctx->identifier().size() == 2) - { - auto name1 = visit(ctx->identifier(0)).as>()->getName(); - auto name2 = visit(ctx->identifier(1)).as>()->getName(); - return std::make_shared(name1, name2); - } - else return visit(ctx->identifier(0)); -} - -antlrcpp::Any ParseTreeVisitor::visitTableIdentifier(ClickHouseParser::TableIdentifierContext *ctx) -{ - auto database = ctx->databaseIdentifier() ? visit(ctx->databaseIdentifier()).as>() : nullptr; - return std::make_shared(database, visit(ctx->identifier())); -} - -} diff --git a/src/Parsers/New/AST/Identifier.h b/src/Parsers/New/AST/Identifier.h deleted file mode 100644 index 3d3688c30ef..00000000000 --- a/src/Parsers/New/AST/Identifier.h +++ /dev/null @@ -1,66 +0,0 @@ -#pragma once - -#include - - -namespace DB::AST -{ - -class Identifier : public INode -{ - public: - explicit Identifier(const String & name_); - Identifier(const String & name_, const String & nested_name); - - const auto & getName() const { return name; } - - ASTPtr convertToOld() const override; - String toString() const override; - - virtual String getQualifiedName() const { return name; }; - - protected: - mutable String name; // protected and non-const because identifiers may become `column.nested` from `table.column` - - String dumpInfo() const override { return getQualifiedName(); } -}; - -class DatabaseIdentifier : public Identifier -{ - public: - explicit DatabaseIdentifier(PtrTo name); -}; - -class TableIdentifier : public Identifier -{ - public: - TableIdentifier(PtrTo database, PtrTo name); - - auto getDatabase() const { return db; } - void makeCompound() const; - - String getQualifiedName() const override { return (db ? db->getQualifiedName() + "." 
: String()) + getName(); } - - ASTPtr convertToOld() const override; - - private: - mutable PtrTo db; -}; - -class ColumnIdentifier : public Identifier -{ - public: - ColumnIdentifier(PtrTo table, PtrTo name); - - auto getTable() const { return table; } - void makeCompound() const; - - String getQualifiedName() const override { return (table ? table->getQualifiedName() + "." : String()) + getName(); } - - ASTPtr convertToOld() const override; - - private: - mutable PtrTo table; -}; - -} diff --git a/src/Parsers/New/AST/InsertQuery.cpp b/src/Parsers/New/AST/InsertQuery.cpp deleted file mode 100644 index 905748ba441..00000000000 --- a/src/Parsers/New/AST/InsertQuery.cpp +++ /dev/null @@ -1,125 +0,0 @@ -#include - -#include -#include -#include -#include -#include -#include -#include - - -namespace DB::AST -{ - -// static -PtrTo DataClause::createFormat(PtrTo identifier, size_t data_offset) -{ - PtrTo clause(new DataClause(ClauseType::FORMAT, {identifier})); - clause->offset = data_offset; - return clause; -} - -// static -PtrTo DataClause::createSelect(PtrTo query) -{ - return PtrTo(new DataClause(ClauseType::SELECT, {query})); -} - -// static -PtrTo DataClause::createValues(size_t data_offset) -{ - PtrTo clause(new DataClause(ClauseType::VALUES, {})); - clause->offset = data_offset; - return clause; -} - -DataClause::DataClause(ClauseType type, PtrList exprs) : INode(exprs), clause_type(type) -{ -} - -ASTPtr DataClause::convertToOld() const -{ - if (clause_type != ClauseType::SELECT) return {}; - return get(SUBQUERY)->convertToOld(); -} - -// static -PtrTo InsertQuery::createTable(PtrTo identifier, PtrTo list, PtrTo clause) -{ - return PtrTo(new InsertQuery(QueryType::TABLE, {identifier, list, clause})); -} - -// static -PtrTo InsertQuery::createFunction(PtrTo function, PtrTo list, PtrTo clause) -{ - return PtrTo(new InsertQuery(QueryType::FUNCTION, {function, list, clause})); -} - -InsertQuery::InsertQuery(QueryType type, PtrList exprs) : Query(exprs), query_type(type) -{ -} - -ASTPtr InsertQuery::convertToOld() const -{ - auto query = std::make_shared(); - - switch(query_type) - { - case QueryType::FUNCTION: - query->table_function = get(FUNCTION)->convertToOld(); - break; - case QueryType::TABLE: - query->table_id = get(IDENTIFIER)->convertToOld()->as()->getTableId(); - break; - } - - if (has(COLUMNS)) query->columns = get(COLUMNS)->convertToOld(); - if (get(DATA)->getType() == DataClause::ClauseType::SELECT) - { - query->select = get(DATA)->convertToOld(); - query->children.push_back(query->select); - } - - return query; -} - -} - -namespace DB -{ - -using namespace AST; - -antlrcpp::Any ParseTreeVisitor::visitColumnsClause(ClickHouseParser::ColumnsClauseContext *ctx) -{ - auto list = std::make_shared(); - for (auto * name : ctx->nestedIdentifier()) list->push(visit(name)); - return list; -} - -antlrcpp::Any ParseTreeVisitor::visitDataClauseFormat(ClickHouseParser::DataClauseFormatContext *ctx) -{ - return DataClause::createFormat(visit(ctx->identifier()), ctx->getStop()->getStopIndex() + 1); -} - -antlrcpp::Any ParseTreeVisitor::visitDataClauseSelect(ClickHouseParser::DataClauseSelectContext *ctx) -{ - return DataClause::createSelect(visit(ctx->selectUnionStmt())); -} - -antlrcpp::Any ParseTreeVisitor::visitDataClauseValues(ClickHouseParser::DataClauseValuesContext *ctx) -{ - return DataClause::createValues(ctx->getStop()->getStopIndex() + 1); -} - -antlrcpp::Any ParseTreeVisitor::visitInsertStmt(ClickHouseParser::InsertStmtContext *ctx) -{ - auto columns = ctx->columnsClause() ? 
visit(ctx->columnsClause()).as>() : nullptr; - - if (ctx->FUNCTION()) return InsertQuery::createFunction(visit(ctx->tableFunctionExpr()), columns, visit(ctx->dataClause())); - if (ctx->tableIdentifier()) return InsertQuery::createTable(visit(ctx->tableIdentifier()), columns, visit(ctx->dataClause())); - __builtin_unreachable(); -} - -} diff --git a/src/Parsers/New/AST/InsertQuery.h b/src/Parsers/New/AST/InsertQuery.h deleted file mode 100644 index e7543d6e875..00000000000 --- a/src/Parsers/New/AST/InsertQuery.h +++ /dev/null @@ -1,73 +0,0 @@ -#pragma once - -#include - - -namespace DB::AST -{ - -class DataClause : public INode -{ - public: - enum class ClauseType - { - FORMAT, - SELECT, - VALUES, - }; - - static PtrTo createFormat(PtrTo identifier, size_t data_offset); - static PtrTo createSelect(PtrTo query); - static PtrTo createValues(size_t data_offset); - - auto getType() const { return clause_type; } - auto getOffset() const { return offset; } - - ASTPtr convertToOld() const override; - - private: - enum ChildIndex : UInt8 - { - FORMAT = 0, // Identifier - SUBQUERY = 0, // SelectUnionQuery - }; - - ClauseType clause_type; - size_t offset = 0; - - DataClause(ClauseType type, PtrList exprs); -}; - -class InsertQuery : public Query -{ - public: - static PtrTo createFunction(PtrTo function, PtrTo list, PtrTo clause); - static PtrTo createTable(PtrTo identifier, PtrTo list, PtrTo clause); - - bool hasData() const { return get(DATA)->getType() != DataClause::ClauseType::SELECT; } - size_t getDataOffset() const { return get(DATA)->getOffset(); } - - ASTPtr convertToOld() const override; - - private: - enum ChildIndex : UInt8 - { - IDENTIFIER = 0, // TableIdentifier - FUNCTION = 0, // TableFunctionExpr - COLUMNS = 1, // ColumnNameList - DATA = 2, // DataClause - }; - enum class QueryType - { - FUNCTION, - TABLE, - }; - - QueryType query_type; - - InsertQuery(QueryType type, PtrList exprs); - - String dumpInfo() const override { return String("has_data=") + (hasData() ? 
"true" : "false"); } -}; - -} diff --git a/src/Parsers/New/AST/JoinExpr.cpp b/src/Parsers/New/AST/JoinExpr.cpp deleted file mode 100644 index acbeae86897..00000000000 --- a/src/Parsers/New/AST/JoinExpr.cpp +++ /dev/null @@ -1,326 +0,0 @@ -#include - -#include -#include -#include -#include -#include - - -namespace DB::ErrorCodes -{ - extern const int UNEXPECTED_AST_STRUCTURE; -} - -namespace DB::AST -{ - -JoinConstraintClause::JoinConstraintClause(ConstraintType type_, PtrTo list) : SimpleClause{list}, type(type_) -{ -} - -SampleClause::SampleClause(PtrTo ratio, PtrTo offset) : INode{ratio, offset} -{ -} - -ASTPtr SampleClause::convertToOld() const -{ - auto list = std::make_shared(); - - list->children.push_back(get(RATIO)->convertToOld()); - if (has(OFFSET)) list->children.push_back(get(OFFSET)->convertToOld()); - - return list; -} - -// static -PtrTo JoinExpr::createTableExpr(PtrTo expr, PtrTo clause, bool final) -{ - return PtrTo(new JoinExpr(JoinExpr::ExprType::TABLE, final, {expr, clause})); -} - -// static -PtrTo JoinExpr::createJoinOp( - PtrTo left_expr, PtrTo right_expr, JoinOpType op, JoinOpMode mode, PtrTo clause) -{ - return PtrTo(new JoinExpr(ExprType::JOIN_OP, op, mode, {left_expr, right_expr, clause})); -} - -JoinExpr::JoinExpr(JoinExpr::ExprType type, bool final_, PtrList exprs) : INode(exprs), expr_type(type), final(final_) -{ -} - -JoinExpr::JoinExpr(JoinExpr::ExprType type, JoinExpr::JoinOpType op, JoinExpr::JoinOpMode mode, PtrList exprs) - : INode(exprs), expr_type(type), op_type(op), op_mode(mode) -{ -} - -ASTPtr JoinExpr::convertToOld() const -{ - /** The sole convertible chain of Join's may look like: - * - * … FROM table1 JOIN table2 ON SMTH JOIN table3 ON SMTH JOIN … - * - * Since Join is a left-associative operation then the tree will look like: - * - * JoinExpr - * / \ - * JoinExpr … - * / \ - * JoinExpr table3 - * / \ - * table1 table2 - * - * To linearize this tree we have to start from the top-most expression. 
- */ - - auto list = std::make_shared(); - - if (expr_type == ExprType::TABLE) - { - auto element = std::make_shared(); - element->children.emplace_back(get(TABLE)->convertToOld()); - element->table_expression = element->children.back(); - element->table_expression->as()->final = final; - if (has(SAMPLE)) - { - auto old_list = get(SAMPLE)->convertToOld(); - - element->table_expression->as()->sample_size = old_list->children[0]; - element->table_expression->children.push_back(element->table_expression->as()->sample_size); - - if (old_list->children.size() > 1) - { - element->table_expression->as()->sample_offset = old_list->children[1]; - element->table_expression->children.push_back(element->table_expression->as()->sample_offset); - } - } - - list->children.emplace_back(element); - } - else if (expr_type == ExprType::JOIN_OP) - { - if (get(RIGHT_EXPR)->expr_type != ExprType::TABLE) - throw Exception(ErrorCodes::UNEXPECTED_AST_STRUCTURE, "Cannot convert new tree-like JoinExpr to old AST"); - - auto left = get(LEFT_EXPR)->convertToOld(), right = get(RIGHT_EXPR)->convertToOld(); // ASTExpressionList's - list->children.insert(list->children.end(), left->children.begin(), left->children.end()); // Insert all the previously parsed left subtree - list->children.emplace_back(right->children[0]); // Insert only first (single) ASTTablesInSelectQueryElement which should contain only ASTTableExpression - - auto element = std::make_shared(); - switch (op_mode) - { - case JoinOpMode::DEFAULT: - element->locality = ASTTableJoin::Locality::Unspecified; - break; - case JoinOpMode::GLOBAL: - element->locality = ASTTableJoin::Locality::Global; - break; - case JoinOpMode::LOCAL: - element->locality = ASTTableJoin::Locality::Local; - break; - } - switch (op_type) - { - case JoinOpType::CROSS: - element->kind = ASTTableJoin::Kind::Cross; - break; - case JoinOpType::FULL: - element->kind = ASTTableJoin::Kind::Full; - break; - case JoinOpType::FULL_ALL: - element->kind = ASTTableJoin::Kind::Full; - element->strictness = ASTTableJoin::Strictness::All; - break; - case JoinOpType::FULL_ANY: - element->kind = ASTTableJoin::Kind::Full; - element->strictness = ASTTableJoin::Strictness::Any; - break; - case JoinOpType::INNER: - element->kind = ASTTableJoin::Kind::Inner; - break; - case JoinOpType::INNER_ALL: - element->kind = ASTTableJoin::Kind::Inner; - element->strictness = ASTTableJoin::Strictness::All; - break; - case JoinOpType::INNER_ANY: - element->kind = ASTTableJoin::Kind::Inner; - element->strictness = ASTTableJoin::Strictness::Any; - break; - case JoinOpType::INNER_ASOF: - element->kind = ASTTableJoin::Kind::Inner; - element->strictness = ASTTableJoin::Strictness::Asof; - break; - case JoinOpType::LEFT: - element->kind = ASTTableJoin::Kind::Left; - break; - case JoinOpType::LEFT_ALL: - element->kind = ASTTableJoin::Kind::Left; - element->strictness = ASTTableJoin::Strictness::All; - break; - case JoinOpType::LEFT_ANTI: - element->kind = ASTTableJoin::Kind::Left; - element->strictness = ASTTableJoin::Strictness::Anti; - break; - case JoinOpType::LEFT_ANY: - element->kind = ASTTableJoin::Kind::Left; - element->strictness = ASTTableJoin::Strictness::Any; - break; - case JoinOpType::LEFT_ASOF: - element->kind = ASTTableJoin::Kind::Left; - element->strictness = ASTTableJoin::Strictness::Asof; - break; - case JoinOpType::LEFT_SEMI: - element->kind = ASTTableJoin::Kind::Left; - element->strictness = ASTTableJoin::Strictness::Semi; - break; - case JoinOpType::RIGHT: - element->kind = ASTTableJoin::Kind::Right; - 
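/// Plain RIGHT JOIN, like plain LEFT above: only the kind is set, strictness stays unspecified. -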
break; - case JoinOpType::RIGHT_ANTI: - element->kind = ASTTableJoin::Kind::Right; - element->strictness = ASTTableJoin::Strictness::Anti; - break; - case JoinOpType::RIGHT_ALL: - element->kind = ASTTableJoin::Kind::Right; - element->strictness = ASTTableJoin::Strictness::All; - break; - case JoinOpType::RIGHT_ANY: - element->kind = ASTTableJoin::Kind::Right; - element->strictness = ASTTableJoin::Strictness::Any; - break; - case JoinOpType::RIGHT_ASOF: - element->kind = ASTTableJoin::Kind::Right; - element->strictness = ASTTableJoin::Strictness::Asof; - break; - case JoinOpType::RIGHT_SEMI: - element->kind = ASTTableJoin::Kind::Right; - element->strictness = ASTTableJoin::Strictness::Semi; - break; - } - - if (has(CONSTRAINT)) - { - const auto * constraint = get(CONSTRAINT); - switch(constraint->getType()) - { - case JoinConstraintClause::ConstraintType::ON: - element->on_expression = constraint->convertToOld(); - if (element->on_expression->children.size() > 1) - throw Exception(ErrorCodes::UNEXPECTED_AST_STRUCTURE, "Cannot convert JoinExpr with more than one ON expression"); - element->on_expression = element->on_expression->children[0]; - element->children.push_back(element->on_expression); - break; - case JoinConstraintClause::ConstraintType::USING: - element->using_expression_list = constraint->convertToOld(); - element->children.push_back(element->using_expression_list); - break; - } - } - - list->children.back()->children.emplace_back(element); - list->children.back()->as()->table_join = element; - } - - return list; -} - -} - -namespace DB -{ - -using namespace AST; - -antlrcpp::Any ParseTreeVisitor::visitJoinConstraintClause(ClickHouseParser::JoinConstraintClauseContext *ctx) -{ - return std::make_shared( - ctx->ON() ? JoinConstraintClause::ConstraintType::ON : JoinConstraintClause::ConstraintType::USING, - visit(ctx->columnExprList())); -} - -antlrcpp::Any ParseTreeVisitor::visitJoinExprCrossOp(ClickHouseParser::JoinExprCrossOpContext *ctx) -{ - auto [op, mode] = std::pair(visit(ctx->joinOpCross())); - - return JoinExpr::createJoinOp(visit(ctx->joinExpr(0)), visit(ctx->joinExpr(1)), op, mode, nullptr); -} - -antlrcpp::Any ParseTreeVisitor::visitJoinExprOp(ClickHouseParser::JoinExprOpContext *ctx) -{ - auto mode = JoinExpr::JoinOpMode::DEFAULT; - auto op = ctx->joinOp() ? visit(ctx->joinOp()).as() : JoinExpr::JoinOpType::INNER; - - if (ctx->GLOBAL()) mode = JoinExpr::JoinOpMode::GLOBAL; - else if (ctx->LOCAL()) mode = JoinExpr::JoinOpMode::LOCAL; - - return JoinExpr::createJoinOp(visit(ctx->joinExpr(0)), visit(ctx->joinExpr(1)), op, mode, visit(ctx->joinConstraintClause())); -} - -antlrcpp::Any ParseTreeVisitor::visitJoinExprParens(ClickHouseParser::JoinExprParensContext *ctx) -{ - return visit(ctx->joinExpr()); -} - -antlrcpp::Any ParseTreeVisitor::visitJoinExprTable(ClickHouseParser::JoinExprTableContext *ctx) -{ - auto sample = ctx->sampleClause() ? 
visit(ctx->sampleClause()).as>() : nullptr; - return JoinExpr::createTableExpr(visit(ctx->tableExpr()), sample, !!ctx->FINAL()); -} - -antlrcpp::Any ParseTreeVisitor::visitJoinOpCross(ClickHouseParser::JoinOpCrossContext *ctx) -{ - std::pair op{ - JoinExpr::JoinOpType::CROSS, JoinExpr::JoinOpMode::DEFAULT}; - - if (ctx->GLOBAL()) op.second = JoinExpr::JoinOpMode::GLOBAL; - else if (ctx->LOCAL()) op.second = JoinExpr::JoinOpMode::LOCAL; - - return op; -} - -antlrcpp::Any ParseTreeVisitor::visitJoinOpFull(ClickHouseParser::JoinOpFullContext *ctx) -{ - if (ctx->ALL()) return JoinExpr::JoinOpType::FULL_ALL; - if (ctx->ANY()) return JoinExpr::JoinOpType::FULL_ANY; - return JoinExpr::JoinOpType::FULL; -} - -antlrcpp::Any ParseTreeVisitor::visitJoinOpInner(ClickHouseParser::JoinOpInnerContext *ctx) -{ - if (ctx->ALL()) return JoinExpr::JoinOpType::INNER_ALL; - if (ctx->ANY()) return JoinExpr::JoinOpType::INNER_ANY; - if (ctx->ASOF()) return JoinExpr::JoinOpType::INNER_ASOF; - return JoinExpr::JoinOpType::INNER; -} - -antlrcpp::Any ParseTreeVisitor::visitJoinOpLeftRight(ClickHouseParser::JoinOpLeftRightContext *ctx) -{ - if (ctx->LEFT()) - { - if (ctx->SEMI()) return JoinExpr::JoinOpType::LEFT_SEMI; - if (ctx->ALL()) return JoinExpr::JoinOpType::LEFT_ALL; - if (ctx->ANTI()) return JoinExpr::JoinOpType::LEFT_ANTI; - if (ctx->ANY()) return JoinExpr::JoinOpType::LEFT_ANY; - if (ctx->ASOF()) return JoinExpr::JoinOpType::LEFT_ASOF; - return JoinExpr::JoinOpType::LEFT; - } - else if (ctx->RIGHT()) - { - if (ctx->SEMI()) return JoinExpr::JoinOpType::RIGHT_SEMI; - if (ctx->ALL()) return JoinExpr::JoinOpType::RIGHT_ALL; - if (ctx->ANTI()) return JoinExpr::JoinOpType::RIGHT_ANTI; - if (ctx->ANY()) return JoinExpr::JoinOpType::RIGHT_ANY; - if (ctx->ASOF()) return JoinExpr::JoinOpType::RIGHT_ASOF; - return JoinExpr::JoinOpType::RIGHT; - } - __builtin_unreachable(); -} - -antlrcpp::Any ParseTreeVisitor::visitSampleClause(ClickHouseParser::SampleClauseContext *ctx) -{ - auto offset = ctx->ratioExpr().size() == 2 ? 
visit(ctx->ratioExpr(1)).as>() : nullptr; - return std::make_shared(visit(ctx->ratioExpr(0)), offset); -} - -} diff --git a/src/Parsers/New/AST/JoinExpr.h b/src/Parsers/New/AST/JoinExpr.h deleted file mode 100644 index 08117d6b6e8..00000000000 --- a/src/Parsers/New/AST/JoinExpr.h +++ /dev/null @@ -1,103 +0,0 @@ -#pragma once - -#include - - -namespace DB::AST -{ - -class JoinConstraintClause : public SimpleClause -{ - public: - enum class ConstraintType - { - ON, - USING, - }; - - JoinConstraintClause(ConstraintType type, PtrTo list); - - auto getType() const { return type; } - - private: - const ConstraintType type; -}; - -class SampleClause : public INode -{ - public: - SampleClause(PtrTo ratio_, PtrTo offset_); - - ASTPtr convertToOld() const override; - - private: - enum ChildIndex : UInt8 - { - RATIO = 0, // RatioExpr - OFFSET = 1, // RatioExpr (optional) - }; -}; - -class JoinExpr : public INode -{ - public: - enum class JoinOpType - { - INNER, - INNER_ALL, - INNER_ANY, - INNER_ASOF, - LEFT, - LEFT_SEMI, - LEFT_ALL, - LEFT_ANTI, - LEFT_ANY, - LEFT_ASOF, - RIGHT, - RIGHT_SEMI, - RIGHT_ALL, - RIGHT_ANTI, - RIGHT_ANY, - RIGHT_ASOF, - FULL, - FULL_ALL, - FULL_ANY, - CROSS, - }; - enum class JoinOpMode - { - DEFAULT, // actual mode depends on setting's 'distributed_product_mode' value - GLOBAL, - LOCAL, - }; - - static PtrTo createTableExpr(PtrTo expr, PtrTo clause, bool final); - static PtrTo createJoinOp(PtrTo left_expr, PtrTo right_expr, JoinOpType op, JoinOpMode mode, PtrTo clause); - - ASTPtr convertToOld() const override; // returns topologically sorted elements as ASTExpressionList - - private: - enum ChildIndex : UInt8 - { - TABLE = 0, // TableExpr - SAMPLE = 1, // SampleClause (optional) - LEFT_EXPR = 0, // JoinExpr - RIGHT_EXPR = 1, // JoinExpr - CONSTRAINT = 2, // JoinConstraintClause - }; - enum class ExprType - { - TABLE, - JOIN_OP, - }; - - const ExprType expr_type; - const JoinOpType op_type = JoinOpType::INNER; - const JoinOpMode op_mode = JoinOpMode::DEFAULT; - const bool final = false; - - JoinExpr(ExprType type, bool final, PtrList exprs); - JoinExpr(ExprType type, JoinOpType op, JoinOpMode mode, PtrList exprs); -}; - -} diff --git a/src/Parsers/New/AST/KillQuery.cpp b/src/Parsers/New/AST/KillQuery.cpp deleted file mode 100644 index 615b5ec3fe3..00000000000 --- a/src/Parsers/New/AST/KillQuery.cpp +++ /dev/null @@ -1,56 +0,0 @@ -#include - -#include -#include - - -namespace DB::AST -{ - -// static -PtrTo KillQuery::createMutation(PtrTo cluster, bool sync, bool test, PtrTo where) -{ - PtrTo query(new KillQuery(cluster, QueryType::MUTATION, {where})); - query->sync = sync; - query->test = test; - return query; -} - -KillQuery::KillQuery(PtrTo cluster, QueryType type, PtrList exprs) : DDLQuery(cluster, exprs), query_type(type) -{ -} - -ASTPtr KillQuery::convertToOld() const -{ - auto query = std::make_shared(); - - query->cluster = cluster_name; - - switch(query_type) - { - case QueryType::MUTATION: - query->type = ASTKillQueryQuery::Type::Mutation; - query->sync = sync; - query->test = test; - query->where_expression = get(WHERE)->convertToOld(); - query->children.push_back(query->where_expression); - break; - } - - return query; -} - -} - -namespace DB -{ - -using namespace AST; - -antlrcpp::Any ParseTreeVisitor::visitKillMutationStmt(ClickHouseParser::KillMutationStmtContext * ctx) -{ - auto cluster = ctx->clusterClause() ? 
visit(ctx->clusterClause()).as>() : nullptr; - return KillQuery::createMutation(cluster, !!ctx->SYNC(), !!ctx->TEST(), visit(ctx->whereClause())); -} - -} diff --git a/src/Parsers/New/AST/KillQuery.h b/src/Parsers/New/AST/KillQuery.h deleted file mode 100644 index 61a73599cec..00000000000 --- a/src/Parsers/New/AST/KillQuery.h +++ /dev/null @@ -1,33 +0,0 @@ -#pragma once - -#include - - -namespace DB::AST -{ - -class KillQuery : public DDLQuery -{ - public: - static PtrTo createMutation(PtrTo cluster, bool sync, bool test, PtrTo where); - - ASTPtr convertToOld() const override; - - private: - enum ChildIndex : UInt8 - { - WHERE = 0, // WhereClause - }; - - enum class QueryType - { - MUTATION, - }; - - const QueryType query_type; - bool sync = false, test = false; - - KillQuery(PtrTo cluster, QueryType type, PtrList exprs); -}; - -} diff --git a/src/Parsers/New/AST/LimitExpr.cpp b/src/Parsers/New/AST/LimitExpr.cpp deleted file mode 100644 index b41c56d21f9..00000000000 --- a/src/Parsers/New/AST/LimitExpr.cpp +++ /dev/null @@ -1,39 +0,0 @@ -#include - -#include -#include - - -namespace DB::AST -{ - -LimitExpr::LimitExpr(PtrTo limit, PtrTo offset) : INode{limit, offset} -{ -} - -ASTPtr LimitExpr::convertToOld() const -{ - auto list = std::make_shared(); - - if (has(OFFSET)) list->children.push_back(get(OFFSET)->convertToOld()); - list->children.push_back(get(LIMIT)->convertToOld()); - - return list; -} - -} - -namespace DB -{ - -using namespace AST; - -antlrcpp::Any ParseTreeVisitor::visitLimitExpr(ClickHouseParser::LimitExprContext *ctx) -{ - if (ctx->columnExpr().size() == 2) - return std::make_shared(visit(ctx->columnExpr(0)), visit(ctx->columnExpr(1))); - else - return std::make_shared(visit(ctx->columnExpr(0)).as>()); -} - -} diff --git a/src/Parsers/New/AST/LimitExpr.h b/src/Parsers/New/AST/LimitExpr.h deleted file mode 100644 index 986806c2bd9..00000000000 --- a/src/Parsers/New/AST/LimitExpr.h +++ /dev/null @@ -1,24 +0,0 @@ -#pragma once - -#include - - -namespace DB::AST -{ - -class LimitExpr : public INode -{ - public: - explicit LimitExpr(PtrTo limit, PtrTo offset = nullptr); - - ASTPtr convertToOld() const override; - - private: - enum ChildIndex : UInt8 - { - LIMIT = 0, // ColumnExpr - OFFSET = 1, // ColumnExpr (optional) - }; -}; - -} diff --git a/src/Parsers/New/AST/Literal.cpp b/src/Parsers/New/AST/Literal.cpp deleted file mode 100644 index 30aacd3d590..00000000000 --- a/src/Parsers/New/AST/Literal.cpp +++ /dev/null @@ -1,222 +0,0 @@ -#include - -#include -#include -#include -#include -#include - - -namespace DB::AST -{ - -// static -PtrTo Literal::createNull() -{ - return PtrTo(new Literal(LiteralType::NULL_LITERAL, String())); -} - -// static -PtrTo Literal::createNumber(antlr4::tree::TerminalNode * literal, bool negative) -{ - auto number = std::make_shared(literal); - if (negative) number->makeNegative(); - return number; -} - -// static -PtrTo Literal::createNumber(const String & literal) -{ - bool has_minus = literal[0] == '-'; - auto number = std::make_shared(has_minus ? 
literal.substr(1) : literal); - if (has_minus) number->makeNegative(); - return number; -} - -// static -PtrTo Literal::createString(antlr4::tree::TerminalNode * literal) -{ - return std::make_shared(literal); -} - -// static -PtrTo Literal::createString(const String & literal) -{ - return std::make_shared(literal); -} - -Literal::Literal(LiteralType type_, const String & token_) : token(token_), type(type_) -{ -} - -ASTPtr Literal::convertToOld() const -{ - auto as_field = [this] () -> Field - { - switch(type) - { - case LiteralType::NULL_LITERAL: - return Field(Null()); - case LiteralType::NUMBER: - { - const auto * number = this->as(); - - if (!number->isNegative()) - if (auto value = number->as()) return Field(*value); - if (auto value = number->as()) return Field(*value); - if (auto value = number->as()) return Field(*value); - - return Field(); - } - case LiteralType::STRING: - return asString(); - } - __builtin_unreachable(); - }; - - return std::make_shared(as_field()); -} - -String Literal::toString() const -{ - WriteBufferFromOwnString wb; - writeEscapedString(token, wb); - return type == LiteralType::STRING ? "'" + wb.str() + "'" : wb.str(); -} - -NumberLiteral::NumberLiteral(antlr4::tree::TerminalNode * literal) : Literal(LiteralType::NUMBER, literal->getSymbol()->getText()) -{ -} - -NumberLiteral::NumberLiteral(const String & literal) : Literal(LiteralType::NUMBER, literal) -{ -} - -String NumberLiteral::toString() const -{ - return (minus ? String("-") : String()) + Literal::toString(); -} - -ASTSampleRatio::Rational NumberLiteral::convertToOldRational() const -{ - UInt64 num_before = 0; - UInt64 num_after = 0; - Int64 exponent = 0; - - const char * pos = token.data(), * end = token.data() + token.size(); - const char * pos_after_first_num = tryReadIntText(num_before, pos, end); - - bool has_num_before_point [[maybe_unused]] = pos_after_first_num > pos; - pos = pos_after_first_num; - bool has_point = pos < end && *pos == '.'; - - if (has_point) - ++pos; - - assert (has_num_before_point || has_point); - - size_t number_of_digits_after_point = 0; - - if (has_point) - { - const char * pos_after_second_num = tryReadIntText(num_after, pos, end); - number_of_digits_after_point = pos_after_second_num - pos; - pos = pos_after_second_num; - } - - bool has_exponent = pos < end && (*pos == 'e' || *pos == 'E'); - - if (has_exponent) - { - ++pos; - const char * pos_after_exponent [[maybe_unused]] = tryReadIntText(exponent, pos, end); - assert (pos_after_exponent != pos); - } - - ASTSampleRatio::Rational res; - res.numerator = num_before * intExp10(number_of_digits_after_point) + num_after; - res.denominator = intExp10(number_of_digits_after_point); - - if (exponent > 0) - res.numerator *= intExp10(exponent); - if (exponent < 0) - res.denominator *= intExp10(-exponent); - - return res; -} - -StringLiteral::StringLiteral(antlr4::tree::TerminalNode * literal) : Literal(LiteralType::STRING, literal->getSymbol()->getText()) -{ - String s; - ReadBufferFromMemory in(token.data(), token.size()); - - readQuotedStringWithSQLStyle(s, in); - - assert(in.count() == token.size()); - token = s; -} - -} - -namespace DB -{ - -using namespace AST; - -antlrcpp::Any ParseTreeVisitor::visitFloatingLiteral(ClickHouseParser::FloatingLiteralContext * ctx) -{ - if (ctx->FLOATING_LITERAL()) return Literal::createNumber(ctx->FLOATING_LITERAL()); - - const auto * dot = ctx->DOT()->getSymbol(); - - if (!ctx->DECIMAL_LITERAL().empty()) - { - // .1234 - if (dot->getTokenIndex() < 
ctx->DECIMAL_LITERAL(0)->getSymbol()->getTokenIndex()) - return Literal::createNumber(dot->getText() + ctx->DECIMAL_LITERAL(0)->getSymbol()->getText()); - // 1234. - else if (ctx->DECIMAL_LITERAL().size() == 1 && !ctx->OCTAL_LITERAL()) - return Literal::createNumber(ctx->DECIMAL_LITERAL(0)->getSymbol()->getText() + dot->getText()); - // 1234.1234 - else if (ctx->DECIMAL_LITERAL().size() == 2) - return Literal::createNumber( - ctx->DECIMAL_LITERAL(0)->getSymbol()->getText() + dot->getText() + ctx->DECIMAL_LITERAL(1)->getSymbol()->getText()); - // 1234.0123 - else - return Literal::createNumber( - ctx->DECIMAL_LITERAL(0)->getSymbol()->getText() + dot->getText() + ctx->OCTAL_LITERAL()->getSymbol()->getText()); - } - else - // .0123 - return Literal::createNumber(dot->getText() + ctx->OCTAL_LITERAL()->getSymbol()->getText()); - __builtin_unreachable(); -} - -antlrcpp::Any ParseTreeVisitor::visitLiteral(ClickHouseParser::LiteralContext * ctx) -{ - if (ctx->NULL_SQL()) - return Literal::createNull(); - if (ctx->STRING_LITERAL()) - return std::static_pointer_cast(Literal::createString(ctx->STRING_LITERAL())); - if (ctx->numberLiteral()) - return std::static_pointer_cast(visit(ctx->numberLiteral()).as>()); - __builtin_unreachable(); -} - -antlrcpp::Any ParseTreeVisitor::visitNumberLiteral(ClickHouseParser::NumberLiteralContext *ctx) -{ - if (ctx->floatingLiteral()) - { - auto number = visit(ctx->floatingLiteral()).as>(); - if (ctx->DASH()) number->makeNegative(); - return number; - } - if (ctx->OCTAL_LITERAL()) return Literal::createNumber(ctx->OCTAL_LITERAL(), !!ctx->DASH()); - if (ctx->DECIMAL_LITERAL()) return Literal::createNumber(ctx->DECIMAL_LITERAL(), !!ctx->DASH()); - if (ctx->HEXADECIMAL_LITERAL()) return Literal::createNumber(ctx->HEXADECIMAL_LITERAL(), !!ctx->DASH()); - if (ctx->INF()) return Literal::createNumber(ctx->INF(), !!ctx->DASH()); - if (ctx->NAN_SQL()) return Literal::createNumber(ctx->NAN_SQL()); - __builtin_unreachable(); -} - -} diff --git a/src/Parsers/New/AST/Literal.h b/src/Parsers/New/AST/Literal.h deleted file mode 100644 index 75790daed20..00000000000 --- a/src/Parsers/New/AST/Literal.h +++ /dev/null @@ -1,96 +0,0 @@ -#pragma once - -#include - -#include -#include - -#include -#include - -#include - - -namespace DB::AST -{ - -class Literal : public INode -{ - public: - enum class LiteralType - { - NULL_LITERAL, - NUMBER, - STRING, - }; - - static PtrTo createNull(); - static PtrTo createNumber(antlr4::tree::TerminalNode * literal, bool negative = false); - static PtrTo createNumber(const String& literal); // checks first symbol for '-' character - static PtrTo createString(antlr4::tree::TerminalNode * literal); - static PtrTo createString(const String& literal); // without quotes - - ASTPtr convertToOld() const override; - String toString() const override; - - bool is(LiteralType what) const { return type == what; } - - protected: - String token; // STRING is stored without quotes and interpolated with escape-sequences. - - Literal(LiteralType type, const String & token); - - template - std::optional asNumber(bool minus) const - { - T number; - std::stringstream ss(String(minus ? 
"-" : "+") + token); - if (token.size() > 2 && (token[1] == 'x' || token[1] == 'X')) ss >> std::hex >> number; - else if (token.size() > 1 && (token[0] == '0')) ss >> std::oct >> number; - else ss >> number; - if (ss.fail() || !ss.eof()) - return {}; - return number; - } - - auto asString() const { return token; } - - private: - LiteralType type; - - String dumpInfo() const override { return token; } -}; - -class NumberLiteral : public Literal -{ - public: - explicit NumberLiteral(antlr4::tree::TerminalNode * literal); - explicit NumberLiteral(const String & literal); - - String toString() const override; - - void makeNegative() { minus = true; } - bool isNegative() const { return minus; } - - template std::optional as() const { return asNumber(minus); } - - ASTSampleRatio::Rational convertToOldRational() const; - - private: - bool minus = false; -}; - -class StringLiteral : public Literal -{ - public: - explicit StringLiteral(antlr4::tree::TerminalNode * literal); - explicit StringLiteral(const String & literal) : Literal(LiteralType::STRING, literal) {} - - template - T as() const - { - return asString(); - } -}; - -} diff --git a/src/Parsers/New/AST/OptimizeQuery.cpp b/src/Parsers/New/AST/OptimizeQuery.cpp deleted file mode 100644 index 5977a2221b9..00000000000 --- a/src/Parsers/New/AST/OptimizeQuery.cpp +++ /dev/null @@ -1,59 +0,0 @@ -#include - -#include -#include -#include -#include -#include -#include -#include -#include - - -namespace DB::AST -{ - -OptimizeQuery::OptimizeQuery(PtrTo cluster, PtrTo identifier, PtrTo clause, bool final_, bool deduplicate_) - : DDLQuery(cluster, {identifier, clause}), final(final_), deduplicate(deduplicate_) -{ -} - -ASTPtr OptimizeQuery::convertToOld() const -{ - auto query = std::make_shared(); - - { - auto table = std::static_pointer_cast(get(TABLE)->convertToOld()); - query->database = table->getDatabaseName(); - query->table = table->shortName(); - query->uuid = table->uuid; - } - - if (has(PARTITION)) - { - query->partition = get(PARTITION)->convertToOld(); - query->children.push_back(query->partition); - } - - query->final = final; - query->deduplicate = deduplicate; - query->cluster = cluster_name; - - return query; -} - -} - -namespace DB -{ - -using namespace AST; - -antlrcpp::Any ParseTreeVisitor::visitOptimizeStmt(ClickHouseParser::OptimizeStmtContext *ctx) -{ - auto cluster = ctx->clusterClause() ? visit(ctx->clusterClause()).as>() : nullptr; - auto clause = ctx->partitionClause() ? 
visit(ctx->partitionClause()).as>() : nullptr; - return std::make_shared(cluster, visit(ctx->tableIdentifier()), clause, !!ctx->FINAL(), !!ctx->DEDUPLICATE()); -} - -} diff --git a/src/Parsers/New/AST/OptimizeQuery.h b/src/Parsers/New/AST/OptimizeQuery.h deleted file mode 100644 index b94351518a7..00000000000 --- a/src/Parsers/New/AST/OptimizeQuery.h +++ /dev/null @@ -1,27 +0,0 @@ -#pragma once - -#include - - -namespace DB::AST -{ - -class OptimizeQuery : public DDLQuery -{ - public: - OptimizeQuery( - PtrTo cluster, PtrTo identifier, PtrTo clause, bool final, bool deduplicate); - - ASTPtr convertToOld() const override; - - private: - enum ChildIndex : UInt8 - { - TABLE = 0, // TableIdentifier - PARTITION, // PartitionClause - }; - - const bool final, deduplicate; -}; - -} diff --git a/src/Parsers/New/AST/OrderExpr.cpp b/src/Parsers/New/AST/OrderExpr.cpp deleted file mode 100644 index 8511bc23276..00000000000 --- a/src/Parsers/New/AST/OrderExpr.cpp +++ /dev/null @@ -1,62 +0,0 @@ -#include - -#include -#include -#include -#include - - -namespace DB::AST -{ - -OrderExpr::OrderExpr(PtrTo expr, NullsOrder nulls_, PtrTo collate, bool ascending) - : INode{expr, collate}, nulls(nulls_), asc(ascending) -{ -} - -ASTPtr OrderExpr::convertToOld() const -{ - auto expr = std::make_shared(); - - expr->children.push_back(get(EXPR)->convertToOld()); - expr->direction = asc ? 1 : -1; - expr->nulls_direction_was_explicitly_specified = (nulls != NATURAL); - if (nulls == NATURAL) expr->nulls_direction = expr->direction; - else expr->nulls_direction = (nulls == NULLS_LAST) ? expr->direction : -expr->direction; - - if (has(COLLATE)) - { - expr->collation = get(COLLATE)->convertToOld(); - expr->children.push_back(expr->collation); - } - - // TODO: WITH FILL? - - return expr; -} - -} - -namespace DB -{ - -antlrcpp::Any ParseTreeVisitor::visitOrderExprList(ClickHouseParser::OrderExprListContext *ctx) -{ - auto expr_list = std::make_shared(); - for (auto* expr : ctx->orderExpr()) expr_list->push(visit(expr)); - return expr_list; -} - -antlrcpp::Any ParseTreeVisitor::visitOrderExpr(ClickHouseParser::OrderExprContext *ctx) -{ - AST::OrderExpr::NullsOrder nulls = AST::OrderExpr::NATURAL; - if (ctx->FIRST()) nulls = AST::OrderExpr::NULLS_FIRST; - else if (ctx->LAST()) nulls = AST::OrderExpr::NULLS_LAST; - - AST::PtrTo collate; - if (ctx->COLLATE()) collate = AST::Literal::createString(ctx->STRING_LITERAL()); - - return std::make_shared(visit(ctx->columnExpr()), nulls, collate, !ctx->DESCENDING() && !ctx->DESC()); -} - -} diff --git a/src/Parsers/New/AST/OrderExpr.h b/src/Parsers/New/AST/OrderExpr.h deleted file mode 100644 index 2c13e7f5298..00000000000 --- a/src/Parsers/New/AST/OrderExpr.h +++ /dev/null @@ -1,33 +0,0 @@ -#pragma once - -#include - - -namespace DB::AST -{ - -class OrderExpr : public INode -{ - public: - enum NullsOrder { - NATURAL, - NULLS_FIRST, - NULLS_LAST, - }; - - OrderExpr(PtrTo expr, NullsOrder nulls_, PtrTo collate, bool ascending = true); - - ASTPtr convertToOld() const override; - - private: - enum ChildIndex : UInt8 - { - EXPR = 0, // ColumnExpr - COLLATE, // StringLiteral (optional) - }; - - NullsOrder nulls; - bool asc; -}; - -} diff --git a/src/Parsers/New/AST/Query.cpp b/src/Parsers/New/AST/Query.cpp deleted file mode 100644 index 1ef2ee935b6..00000000000 --- a/src/Parsers/New/AST/Query.cpp +++ /dev/null @@ -1,34 +0,0 @@ -#include - -#include -#include - - -namespace DB::AST -{ - -void Query::setOutFile(PtrTo literal) -{ - out_file = literal; -} - -void Query::setFormat(PtrTo id) -{ - 
format = id; -} - -void Query::convertToOldPartially(const std::shared_ptr & query) const -{ - if (out_file) - { - query->out_file = out_file->convertToOld(); - query->children.push_back(query->out_file); - } - if (format) - { - query->format = format->convertToOld(); - query->children.push_back(query->format); - } -} - -} diff --git a/src/Parsers/New/AST/Query.h b/src/Parsers/New/AST/Query.h deleted file mode 100644 index 2998d1f0146..00000000000 --- a/src/Parsers/New/AST/Query.h +++ /dev/null @@ -1,29 +0,0 @@ -#pragma once - -#include - -#include - - -namespace DB::AST -{ - -class Query : public INode { - public: - void setOutFile(PtrTo literal); - void setFormat(PtrTo id); - - protected: - Query() = default; - Query(std::initializer_list list) : INode(list) {} - explicit Query(PtrList list) : INode(list) {} - - void convertToOldPartially(const std::shared_ptr & query) const; - - private: - // TODO: put them to |children| - PtrTo out_file; - PtrTo format; -}; - -} diff --git a/src/Parsers/New/AST/README.md b/src/Parsers/New/AST/README.md deleted file mode 100644 index 4216a8dcfdc..00000000000 --- a/src/Parsers/New/AST/README.md +++ /dev/null @@ -1,32 +0,0 @@ -What is AST? -=== -AST stands for Abstract Syntax Tree, which is opposed to Concrete Syntax Tree (or Parse Tree). Read [this](https://eli.thegreenplace.net/2009/02/16/abstract-vs-concrete-syntax-trees/) post to get a sketchy overview of the difference between two concepts. - -AST **must not** repeat the grammar constructions or follow them. It's convenient to have similar structure but nothing more. -The main purpose of AST is to be easily handled by interpreter - the formatting of the original query is not the purpose of AST. - -Basic principles in code -=== - -- The base class for all AST elements is `INode` (INode.h). -- **All** sub-elements must be stored inside `INode::children` vector in a -**predetermined order** and with **predetermined type**: some elements may be `nullptr` to preserve positions of other elements. -- The order may be defined as a position in vector from the start, the last element, and some pattern of variable number of elements -in between. It's convenient to define `enum ChildIndex : Uint8 {…}` with index numbers for each class. -- If there is more than one variable pack of elements or the order can't be deterministic, then wrap elements into the lists and store the -multi-level structure (see `ColumnExpr::ExprType::FUNCTION` for example). -- Don't do multi-level structure just for nothing or to mimic the parse tree: the less is depth the better. -- The whole grammar separates expressions for databases, tables and columns. That way we already assess the semantics on the parser level. -E.g. don't use `identifier` where you know you should use `tableIdentifier`, etc. - -Name conventions -=== - -**Query**. The top-level element that allows to distinguish different types of SQL queries. The base class is `Query` (Query.h). - -**Statement**. An essential part of a query that describes its structure and possible alternatives. - -**Clause**. A part of the statement designed to differ logical parts for more convenient parsing. I.e. there are many clauses in SELECT statement that are optional and contain `columnExpr` elements. Without clauses it will be hard for visitor to distinguish which `columnExpr` refers to what. - -**Expression**. An element that should be somehow calculated or interpreted and result in some value. 
-** diff --git a/src/Parsers/New/AST/RatioExpr.cpp b/src/Parsers/New/AST/RatioExpr.cpp deleted file mode 100644 index b9f56928227..00000000000 --- a/src/Parsers/New/AST/RatioExpr.cpp +++ /dev/null @@ -1,43 +0,0 @@ -#include - -#include -#include -#include - - -namespace DB::AST -{ - -RatioExpr::RatioExpr(PtrTo num1, PtrTo num2) : INode{num1, num2} -{ -} - -ASTPtr RatioExpr::convertToOld() const -{ - auto numerator = get(NUMERATOR)->convertToOldRational(); - - if (has(DENOMINATOR)) - { - auto denominator = get(DENOMINATOR)->convertToOldRational(); - - numerator.numerator = numerator.numerator * denominator.denominator; - numerator.denominator = numerator.denominator * denominator.numerator; - } - - return std::make_shared(numerator); -} - -} - -namespace DB -{ - -using namespace AST; - -antlrcpp::Any ParseTreeVisitor::visitRatioExpr(ClickHouseParser::RatioExprContext *ctx) -{ - auto denominator = ctx->numberLiteral().size() == 2 ? visit(ctx->numberLiteral(1)).as>() : nullptr; - return std::make_shared(visit(ctx->numberLiteral(0)), denominator); -} - -} diff --git a/src/Parsers/New/AST/RatioExpr.h b/src/Parsers/New/AST/RatioExpr.h deleted file mode 100644 index 8e48edbf6ea..00000000000 --- a/src/Parsers/New/AST/RatioExpr.h +++ /dev/null @@ -1,24 +0,0 @@ -#pragma once - -#include - - -namespace DB::AST -{ - -class RatioExpr : public INode -{ - public: - RatioExpr(PtrTo num1, PtrTo num2); - - ASTPtr convertToOld() const override; - - private: - enum ChildIndex : UInt8 - { - NUMERATOR = 0, // NumberLiteral - DENOMINATOR = 1, // NumberLiteral (optional) - }; -}; - -} diff --git a/src/Parsers/New/AST/RenameQuery.cpp b/src/Parsers/New/AST/RenameQuery.cpp deleted file mode 100644 index 78a4530a20f..00000000000 --- a/src/Parsers/New/AST/RenameQuery.cpp +++ /dev/null @@ -1,58 +0,0 @@ -#include - -#include -#include -#include -#include -#include - - -namespace DB::AST -{ - -RenameQuery::RenameQuery(PtrTo cluster, PtrTo> list) : DDLQuery(cluster, {list}) -{ -} - -ASTPtr RenameQuery::convertToOld() const -{ - auto query = std::make_shared(); - - for (auto table = get>(EXPRS)->begin(), end = get>(EXPRS)->end(); table != end; ++table) - { - ASTRenameQuery::Element element; - - if (auto database = (*table)->as()->getDatabase()) - element.from.database = database->getName(); - element.from.table = (*table)->as()->getName(); - - ++table; - - if (auto database = (*table)->as()->getDatabase()) - element.to.database = database->getName(); - element.to.table = (*table)->as()->getName(); - - query->elements.push_back(element); - } - - query->cluster = cluster_name; - - return query; -} - -} - -namespace DB -{ - -using namespace AST; - -antlrcpp::Any ParseTreeVisitor::visitRenameStmt(ClickHouseParser::RenameStmtContext *ctx) -{ - auto list = std::make_shared>(); - auto cluster = ctx->clusterClause() ? 
visit(ctx->clusterClause()).as>() : nullptr; - for (auto * identifier : ctx->tableIdentifier()) list->push(visit(identifier)); - return std::make_shared(cluster, list); -} - -} diff --git a/src/Parsers/New/AST/RenameQuery.h b/src/Parsers/New/AST/RenameQuery.h deleted file mode 100644 index 74909043d4d..00000000000 --- a/src/Parsers/New/AST/RenameQuery.h +++ /dev/null @@ -1,23 +0,0 @@ -#pragma once - -#include - - -namespace DB::AST -{ - -class RenameQuery : public DDLQuery -{ - public: - explicit RenameQuery(PtrTo cluster, PtrTo> list); - - ASTPtr convertToOld() const override; - - private: - enum ChildIndex : UInt8 - { - EXPRS = 0, // List - }; -}; - -} diff --git a/src/Parsers/New/AST/SelectUnionQuery.cpp b/src/Parsers/New/AST/SelectUnionQuery.cpp deleted file mode 100644 index 35eda09b473..00000000000 --- a/src/Parsers/New/AST/SelectUnionQuery.cpp +++ /dev/null @@ -1,444 +0,0 @@ -#include - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -namespace DB::ErrorCodes -{ - extern const int TOP_AND_LIMIT_TOGETHER; -} - -namespace DB::AST -{ - -// FROM Clause - -FromClause::FromClause(PtrTo expr) : INode{expr} -{ -} - -ASTPtr FromClause::convertToOld() const -{ - auto old_tables = std::make_shared(); - old_tables->children = get(EXPR)->convertToOld()->children; - return old_tables; -} - -// ARRAY JOIN Clause - -ArrayJoinClause::ArrayJoinClause(PtrTo expr_list, bool left_) : INode{expr_list}, left(left_) -{ -} - -ASTPtr ArrayJoinClause::convertToOld() const -{ - auto element = std::make_shared(); - auto array_join = std::make_shared(); - - if (left) array_join->kind = ASTArrayJoin::Kind::Left; - else array_join->kind = ASTArrayJoin::Kind::Inner; - - array_join->expression_list = get(EXPRS)->convertToOld(); - array_join->children.push_back(array_join->expression_list); - - element->array_join = array_join; - element->children.push_back(element->array_join); - - return element; -} - -// LIMIT By Clause - -LimitByClause::LimitByClause(PtrTo expr, PtrTo expr_list) : INode{expr, expr_list} -{ -} - -ASTPtr LimitByClause::convertToOld() const -{ - auto list = std::make_shared(); - - list->children.push_back(get(LIMIT)->convertToOld()); - list->children.push_back(get(EXPRS)->convertToOld()); - - return list; -} - -// LIMIT Clause - -LimitClause::LimitClause(bool with_ties_, PtrTo expr) : INode{expr}, with_ties(with_ties_) -{ -} - -ASTPtr LimitClause::convertToOld() const -{ - return get(EXPR)->convertToOld(); -} - -// SETTINGS Clause - -SettingsClause::SettingsClause(PtrTo expr_list) : INode{expr_list} -{ -} - -ASTPtr SettingsClause::convertToOld() const -{ - auto expr = std::make_shared(); - - for (const auto & child : get(EXPRS)->as()) - { - const auto * setting = child->as(); - expr->changes.emplace_back(setting->getName()->getName(), setting->getValue()->convertToOld()->as()->value); - } - - return expr; -} - -// PROJECTION SELECT Caluse - -ProjectionSelectStmt::ProjectionSelectStmt(PtrTo expr_list) - : INode(MAX_INDEX) -{ - set(COLUMNS, expr_list); -} - -void ProjectionSelectStmt::setWithClause(PtrTo clause) -{ - set(WITH, clause); -} - -void ProjectionSelectStmt::setGroupByClause(PtrTo clause) -{ - set(GROUP_BY, clause); -} - -void ProjectionSelectStmt::setOrderByClause(PtrTo clause) -{ - set(ORDER_BY, clause); -} - -ASTPtr ProjectionSelectStmt::convertToOld() const -{ - auto old_select = std::make_shared(); - - old_select->setExpression(ASTProjectionSelectQuery::Expression::SELECT, get(COLUMNS)->convertToOld()); 
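A side note on the RENAME conversion above: the visitor flattens `RENAME TABLE a TO b, c TO d` into one flat identifier list, and `RenameQuery::convertToOld()` walks it two entries at a time to rebuild the (from, to) pairs. A standalone sketch of that walk (hypothetical helper; assumes an even-sized list, which the RENAME grammar guarantees):

    #include <cassert>
    #include <string>
    #include <utility>
    #include <vector>

    // Pairs up a flat identifier list [a, b, c, d] into [(a, b), (c, d)],
    // mirroring the double increment in RenameQuery::convertToOld().
    static std::vector<std::pair<std::string, std::string>>
    toRenamePairs(const std::vector<std::string> & flat)
    {
        std::vector<std::pair<std::string, std::string>> elements;
        for (auto it = flat.begin(); it != flat.end(); ++it)
        {
            std::string from = *it;
            ++it; // step onto the paired "TO" identifier
            elements.emplace_back(from, *it);
        }
        return elements;
    }

    int main()
    {
        const std::vector<std::pair<std::string, std::string>> expected{{"a", "b"}, {"c", "d"}};
        assert(toRenamePairs({"a", "b", "c", "d"}) == expected);
    }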
- - if (has(WITH)) old_select->setExpression(ASTProjectionSelectQuery::Expression::WITH, get(WITH)->convertToOld()); - if (has(GROUP_BY)) old_select->setExpression(ASTProjectionSelectQuery::Expression::GROUP_BY, get(GROUP_BY)->convertToOld()); - if (has(ORDER_BY)) - { - ASTPtr order_expression; - auto expr_list = get(ORDER_BY)->convertToOld(); - if (expr_list->children.size() == 1) - { - order_expression = expr_list->children.front(); - } - else - { - auto function_node = std::make_shared(); - function_node->name = "tuple"; - function_node->arguments = expr_list; - function_node->children.push_back(expr_list); - order_expression = function_node; - } - old_select->setExpression(ASTProjectionSelectQuery::Expression::ORDER_BY, std::move(order_expression)); - } - - return old_select; -} - -// SELECT Statement - -SelectStmt::SelectStmt(bool distinct_, ModifierType type, bool totals, PtrTo expr_list) - : INode(MAX_INDEX), modifier_type(type), distinct(distinct_), with_totals(totals) -{ - set(COLUMNS, expr_list); -} - -void SelectStmt::setWithClause(PtrTo clause) -{ - set(WITH, clause); -} - -void SelectStmt::setFromClause(PtrTo clause) -{ - set(FROM, clause); -} - -void SelectStmt::setArrayJoinClause(PtrTo clause) -{ - set(ARRAY_JOIN, clause); -} - -void SelectStmt::setPrewhereClause(PtrTo clause) -{ - set(PREWHERE, clause); -} - -void SelectStmt::setWhereClause(PtrTo clause) -{ - set(WHERE, clause); -} - -void SelectStmt::setGroupByClause(PtrTo clause) -{ - set(GROUP_BY, clause); -} - -void SelectStmt::setHavingClause(PtrTo clause) -{ - set(HAVING, clause); -} - -void SelectStmt::setOrderByClause(PtrTo clause) -{ - set(ORDER_BY, clause); -} - -void SelectStmt::setLimitByClause(PtrTo clause) -{ - set(LIMIT_BY, clause); -} - -void SelectStmt::setLimitClause(PtrTo clause) -{ - set(LIMIT, clause); -} - -void SelectStmt::setSettingsClause(PtrTo clause) -{ - set(SETTINGS, clause); -} - -ASTPtr SelectStmt::convertToOld() const -{ - auto old_select = std::make_shared(); - - old_select->setExpression(ASTSelectQuery::Expression::SELECT, get(COLUMNS)->convertToOld()); - old_select->distinct = distinct; - old_select->group_by_with_totals = with_totals; - - switch(modifier_type) - { - case ModifierType::NONE: - break; - case ModifierType::CUBE: - old_select->group_by_with_cube = true; - break; - case ModifierType::ROLLUP: - old_select->group_by_with_rollup = true; - break; - } - - if (has(WITH)) old_select->setExpression(ASTSelectQuery::Expression::WITH, get(WITH)->convertToOld()); - if (has(FROM)) old_select->setExpression(ASTSelectQuery::Expression::TABLES, get(FROM)->convertToOld()); - if (has(ARRAY_JOIN)) old_select->tables()->children.push_back(get(ARRAY_JOIN)->convertToOld()); - if (has(PREWHERE)) old_select->setExpression(ASTSelectQuery::Expression::PREWHERE, get(PREWHERE)->convertToOld()); - if (has(WHERE)) old_select->setExpression(ASTSelectQuery::Expression::WHERE, get(WHERE)->convertToOld()); - if (has(GROUP_BY)) old_select->setExpression(ASTSelectQuery::Expression::GROUP_BY, get(GROUP_BY)->convertToOld()); - if (has(HAVING)) old_select->setExpression(ASTSelectQuery::Expression::HAVING, get(HAVING)->convertToOld()); - if (has(ORDER_BY)) old_select->setExpression(ASTSelectQuery::Expression::ORDER_BY, get(ORDER_BY)->convertToOld()); - if (has(LIMIT_BY)) - { - auto old_list = get(LIMIT_BY)->convertToOld(); - old_select->setExpression(ASTSelectQuery::Expression::LIMIT_BY, std::move(old_list->children[1])); - old_select->setExpression(ASTSelectQuery::Expression::LIMIT_BY_LENGTH, 
std::move(old_list->children[0]->children[0])); - if (old_list->children[0]->children.size() > 1) - old_select->setExpression(ASTSelectQuery::Expression::LIMIT_BY_OFFSET, std::move(old_list->children[0]->children[1])); - } - if (has(LIMIT)) - { - auto old_list = get(LIMIT)->convertToOld(); - old_select->limit_with_ties = get(LIMIT)->with_ties; - old_select->setExpression(ASTSelectQuery::Expression::LIMIT_LENGTH, std::move(old_list->children[0])); - if (old_list->children.size() > 1) - old_select->setExpression(ASTSelectQuery::Expression::LIMIT_OFFSET, std::move(old_list->children[1])); - } - if (has(SETTINGS)) old_select->setExpression(ASTSelectQuery::Expression::SETTINGS, get(SETTINGS)->convertToOld()); - - return old_select; -} - -SelectUnionQuery::SelectUnionQuery(PtrTo> stmts) : Query{stmts} -{ -} - -void SelectUnionQuery::appendSelect(PtrTo stmt) -{ - if (!has(STMTS)) push(std::make_shared>()); - get>(STMTS)->push(stmt); -} - -void SelectUnionQuery::appendSelect(PtrTo query) -{ - for (const auto & stmt : query->get(STMTS)->as &>()) - appendSelect(std::static_pointer_cast(stmt)); -} - -ASTPtr SelectUnionQuery::convertToOld() const -{ - auto query = std::make_shared(); - - query->list_of_selects = std::make_shared(); - query->children.push_back(query->list_of_selects); - - for (const auto & select : get(STMTS)->as &>()) - query->list_of_selects->children.push_back(select->convertToOld()); - - // TODO(ilezhankin): need to parse new UNION DISTINCT - query->list_of_modes - = ASTSelectWithUnionQuery::UnionModes(query->list_of_selects->children.size() - 1, ASTSelectWithUnionQuery::Mode::ALL); - - convertToOldPartially(query); - - return query; -} - -} - -namespace DB -{ - -using namespace AST; - -antlrcpp::Any ParseTreeVisitor::visitWithClause(ClickHouseParser::WithClauseContext *ctx) -{ - return std::make_shared(visit(ctx->columnExprList()).as>()); -} - -antlrcpp::Any ParseTreeVisitor::visitTopClause(ClickHouseParser::TopClauseContext *ctx) -{ - auto limit = std::make_shared(ColumnExpr::createLiteral(Literal::createNumber(ctx->DECIMAL_LITERAL()))); - return std::make_shared(!!ctx->WITH(), limit); -} - -antlrcpp::Any ParseTreeVisitor::visitFromClause(ClickHouseParser::FromClauseContext *ctx) -{ - return std::make_shared(visit(ctx->joinExpr()).as>()); -} - -antlrcpp::Any ParseTreeVisitor::visitArrayJoinClause(ClickHouseParser::ArrayJoinClauseContext *ctx) -{ - return std::make_shared(visit(ctx->columnExprList()), !!ctx->LEFT()); -} - -antlrcpp::Any ParseTreeVisitor::visitPrewhereClause(ClickHouseParser::PrewhereClauseContext *ctx) -{ - return std::make_shared(visit(ctx->columnExpr()).as>()); -} - -antlrcpp::Any ParseTreeVisitor::visitWhereClause(ClickHouseParser::WhereClauseContext *ctx) -{ - return std::make_shared(visit(ctx->columnExpr()).as>()); -} - -antlrcpp::Any ParseTreeVisitor::visitGroupByClause(ClickHouseParser::GroupByClauseContext *ctx) -{ - return std::make_shared(visit(ctx->columnExprList()).as>()); -} - -antlrcpp::Any ParseTreeVisitor::visitHavingClause(ClickHouseParser::HavingClauseContext *ctx) -{ - return std::make_shared(visit(ctx->columnExpr()).as>()); -} - -antlrcpp::Any ParseTreeVisitor::visitOrderByClause(ClickHouseParser::OrderByClauseContext *ctx) -{ - return std::make_shared(visit(ctx->orderExprList()).as>()); -} - -antlrcpp::Any ParseTreeVisitor::visitProjectionOrderByClause(ClickHouseParser::ProjectionOrderByClauseContext *ctx) -{ - return std::make_shared(visit(ctx->columnExprList()).as>()); -} - -antlrcpp::Any 
ParseTreeVisitor::visitLimitByClause(ClickHouseParser::LimitByClauseContext *ctx) -{ - return std::make_shared(visit(ctx->limitExpr()), visit(ctx->columnExprList())); -} - -antlrcpp::Any ParseTreeVisitor::visitLimitClause(ClickHouseParser::LimitClauseContext *ctx) -{ - return std::make_shared(!!ctx->WITH(), visit(ctx->limitExpr()).as>()); -} - -antlrcpp::Any ParseTreeVisitor::visitSettingsClause(ClickHouseParser::SettingsClauseContext *ctx) -{ - return std::make_shared(visit(ctx->settingExprList()).as>()); -} - -antlrcpp::Any ParseTreeVisitor::visitProjectionSelectStmt(ClickHouseParser::ProjectionSelectStmtContext *ctx) -{ - PtrTo column_list = visit(ctx->columnExprList()); - auto select_stmt = std::make_shared(column_list); - - if (ctx->withClause()) select_stmt->setWithClause(visit(ctx->withClause())); - if (ctx->groupByClause()) select_stmt->setGroupByClause(visit(ctx->groupByClause())); - if (ctx->projectionOrderByClause()) select_stmt->setOrderByClause(visit(ctx->projectionOrderByClause())); - - return select_stmt; -} - -antlrcpp::Any ParseTreeVisitor::visitSelectStmt(ClickHouseParser::SelectStmtContext *ctx) -{ - SelectStmt::ModifierType type = SelectStmt::ModifierType::NONE; - - if (ctx->CUBE() || (ctx->groupByClause() && ctx->groupByClause()->CUBE())) type = SelectStmt::ModifierType::CUBE; - else if (ctx->ROLLUP() || (ctx->groupByClause() && ctx->groupByClause()->ROLLUP())) type = SelectStmt::ModifierType::ROLLUP; - - auto select_stmt = std::make_shared(!!ctx->DISTINCT(), type, !!ctx->TOTALS(), visit(ctx->columnExprList())); - - if (ctx->topClause() && ctx->limitClause()) - throw Exception("Can not use TOP and LIMIT together", ErrorCodes::TOP_AND_LIMIT_TOGETHER); - - if (ctx->withClause()) select_stmt->setWithClause(visit(ctx->withClause())); - if (ctx->topClause()) select_stmt->setLimitClause(visit(ctx->topClause())); - if (ctx->fromClause()) select_stmt->setFromClause(visit(ctx->fromClause())); - if (ctx->arrayJoinClause()) select_stmt->setArrayJoinClause(visit(ctx->arrayJoinClause())); - if (ctx->prewhereClause()) select_stmt->setPrewhereClause(visit(ctx->prewhereClause())); - if (ctx->whereClause()) select_stmt->setWhereClause(visit(ctx->whereClause())); - if (ctx->groupByClause()) select_stmt->setGroupByClause(visit(ctx->groupByClause())); - if (ctx->havingClause()) select_stmt->setHavingClause(visit(ctx->havingClause())); - if (ctx->orderByClause()) select_stmt->setOrderByClause(visit(ctx->orderByClause())); - if (ctx->limitByClause()) select_stmt->setLimitByClause(visit(ctx->limitByClause())); - if (ctx->limitClause()) select_stmt->setLimitClause(visit(ctx->limitClause())); - if (ctx->settingsClause()) select_stmt->setSettingsClause(visit(ctx->settingsClause())); - - return select_stmt; -} - -antlrcpp::Any ParseTreeVisitor::visitSelectStmtWithParens(ClickHouseParser::SelectStmtWithParensContext *ctx) -{ - PtrTo query; - - if (ctx->selectStmt()) - { - query = std::make_shared(); - query->appendSelect(visit(ctx->selectStmt()).as>()); - } - else if (ctx->selectUnionStmt()) - { - query = visit(ctx->selectUnionStmt()); - } - - return query; -} - -antlrcpp::Any ParseTreeVisitor::visitSelectUnionStmt(ClickHouseParser::SelectUnionStmtContext *ctx) -{ - auto select_union_query = std::make_shared(); - for (auto * stmt : ctx->selectStmtWithParens()) select_union_query->appendSelect(visit(stmt).as>()); - return select_union_query; -} - -} diff --git a/src/Parsers/New/AST/SelectUnionQuery.h b/src/Parsers/New/AST/SelectUnionQuery.h deleted file mode 100644 index 587da271a78..00000000000 
--- a/src/Parsers/New/AST/SelectUnionQuery.h +++ /dev/null @@ -1,193 +0,0 @@ -#pragma once - -#include - -#include - -#include - - -namespace DB::AST -{ - -// Clauses - -using WithClause = SimpleClause; - -class FromClause : public INode -{ - public: - explicit FromClause(PtrTo join_expr); - - ASTPtr convertToOld() const override; - - private: - enum ChildIndex : UInt8 - { - EXPR = 0, // JoinExpr - }; -}; - -class ArrayJoinClause : public INode -{ - public: - ArrayJoinClause(PtrTo expr_list, bool left); - - ASTPtr convertToOld() const override; - - private: - enum ChildIndex : UInt8 - { - EXPRS = 0, // ColumnExprList - }; - - const bool left; -}; - -using PrewhereClause = SimpleClause; - -using GroupByClause = SimpleClause; - -using HavingClause = SimpleClause; - -class LimitByClause : public INode -{ - public: - LimitByClause(PtrTo expr, PtrTo expr_list); - - ASTPtr convertToOld() const override; - - private: - enum ChildIndex : UInt8 - { - LIMIT = 0, // LimitExpr - EXPRS = 1, // ColumnExprList - }; -}; - -class LimitClause : public INode -{ - public: - LimitClause(bool with_ties, PtrTo expr); - - ASTPtr convertToOld() const override; - - const bool with_ties; // FIXME: bad interface, because old AST stores this inside ASTSelectQuery. - - private: - enum ChildIndex : UInt8 - { - EXPR = 0, // LimitExpr - }; -}; - -class SettingsClause : public INode -{ - public: - explicit SettingsClause(PtrTo expr_list); - - ASTPtr convertToOld() const override; - - private: - enum ChildIndex : UInt8 - { - EXPRS = 0, // SettingExprList - }; -}; - -// Statement - -class ProjectionSelectStmt : public INode -{ - public: - ProjectionSelectStmt(PtrTo expr_list); - - void setWithClause(PtrTo clause); - void setGroupByClause(PtrTo clause); - void setOrderByClause(PtrTo clause); - - ASTPtr convertToOld() const override; - - private: - enum ChildIndex : UInt8 - { - COLUMNS = 0, // ColumnExprList - WITH, // WithClause (optional) - GROUP_BY, // GroupByClause (optional) - ORDER_BY, // OrderByClause (optional) - - MAX_INDEX, - }; -}; - -class SelectStmt : public INode -{ - public: - enum class ModifierType - { - NONE, - CUBE, - ROLLUP, - }; - - SelectStmt(bool distinct_, ModifierType type, bool totals, PtrTo expr_list); - - void setWithClause(PtrTo clause); - void setFromClause(PtrTo clause); - void setArrayJoinClause(PtrTo clause); - void setPrewhereClause(PtrTo clause); - void setWhereClause(PtrTo clause); - void setGroupByClause(PtrTo clause); - void setHavingClause(PtrTo clause); - void setOrderByClause(PtrTo clause); - void setLimitByClause(PtrTo clause); - void setLimitClause(PtrTo clause); - void setSettingsClause(PtrTo clause); - - ASTPtr convertToOld() const override; - - private: - enum ChildIndex : UInt8 - { - COLUMNS = 0, // ColumnExprList - WITH, // WithClause (optional) - FROM, // FromClause (optional) - ARRAY_JOIN, // ArrayJoinClause (optional) - PREWHERE, // PrewhereClause (optional) - WHERE, // WhereClause (optional) - GROUP_BY, // GroupByClause (optional) - HAVING, // HavingClause (optional) - ORDER_BY, // OrderByClause (optional) - LIMIT_BY, // LimitByClause (optional) - LIMIT, // LimitClause (optional) - SETTINGS, // SettingsClause (optional) - - MAX_INDEX, - }; - - const ModifierType modifier_type; - const bool distinct, with_totals; -}; - -class SelectUnionQuery : public Query -{ - public: - SelectUnionQuery() = default; - explicit SelectUnionQuery(PtrTo> stmts); - - void appendSelect(PtrTo stmt); - void appendSelect(PtrTo query); - void shouldBeScalar() { is_scalar = true; } - - ASTPtr 
convertToOld() const override; - - private: - enum ChildIndex : UInt8 - { - STMTS = 0, // List - }; - - bool is_scalar = false; -}; - -} diff --git a/src/Parsers/New/AST/SetQuery.cpp b/src/Parsers/New/AST/SetQuery.cpp deleted file mode 100644 index 1f7087e21e3..00000000000 --- a/src/Parsers/New/AST/SetQuery.cpp +++ /dev/null @@ -1,43 +0,0 @@ -#include - -#include -#include -#include -#include -#include -#include - - -namespace DB::AST -{ - -SetQuery::SetQuery(PtrTo list) : Query{list} -{ -} - -ASTPtr SetQuery::convertToOld() const -{ - auto expr = std::make_shared(); - - for (const auto & child : get(EXPRS)->as()) - { - const auto * setting = child->as(); - expr->changes.emplace_back(setting->getName()->getName(), setting->getValue()->convertToOld()->as()->value); - } - - return expr; -} - -} - -namespace DB -{ - -using namespace AST; - -antlrcpp::Any ParseTreeVisitor::visitSetStmt(ClickHouseParser::SetStmtContext *ctx) -{ - return std::make_shared(visit(ctx->settingExprList()).as>()); -} - -} diff --git a/src/Parsers/New/AST/SetQuery.h b/src/Parsers/New/AST/SetQuery.h deleted file mode 100644 index 451371f6896..00000000000 --- a/src/Parsers/New/AST/SetQuery.h +++ /dev/null @@ -1,23 +0,0 @@ -#pragma once - -#include - - -namespace DB::AST -{ - -class SetQuery : public Query -{ - public: - explicit SetQuery(PtrTo list); - - ASTPtr convertToOld() const override; - - private: - enum ChildIndex : UInt8 - { - EXPRS = 0, // SettingExprList - }; -}; - -} diff --git a/src/Parsers/New/AST/SettingExpr.cpp b/src/Parsers/New/AST/SettingExpr.cpp deleted file mode 100644 index e38b9d57ff8..00000000000 --- a/src/Parsers/New/AST/SettingExpr.cpp +++ /dev/null @@ -1,33 +0,0 @@ -#include - -#include -#include - -#include - - -namespace DB::AST -{ - -SettingExpr::SettingExpr(PtrTo name, PtrTo value) : INode{name, value} -{ -} - -} - -namespace DB -{ - -antlrcpp::Any ParseTreeVisitor::visitSettingExprList(ClickHouseParser::SettingExprListContext *ctx) -{ - auto expr_list = std::make_shared(); - for (auto* expr : ctx->settingExpr()) expr_list->push(visit(expr)); - return expr_list; -} - -antlrcpp::Any ParseTreeVisitor::visitSettingExpr(ClickHouseParser::SettingExprContext *ctx) -{ - return std::make_shared(visit(ctx->identifier()), visit(ctx->literal())); -} - -} diff --git a/src/Parsers/New/AST/SettingExpr.h b/src/Parsers/New/AST/SettingExpr.h deleted file mode 100644 index 8dad6166189..00000000000 --- a/src/Parsers/New/AST/SettingExpr.h +++ /dev/null @@ -1,25 +0,0 @@ -#pragma once - -#include - - -namespace DB::AST -{ - -class SettingExpr : public INode -{ - public: - SettingExpr(PtrTo name, PtrTo value); - - auto getName() const { return std::static_pointer_cast(get(NAME)); } - auto getValue() const { return std::static_pointer_cast(get(VALUE)); } - - private: - enum ChildIndex : UInt8 - { - NAME = 0, - VALUE = 1, - }; -}; - -} diff --git a/src/Parsers/New/AST/ShowCreateQuery.cpp b/src/Parsers/New/AST/ShowCreateQuery.cpp deleted file mode 100644 index 613b5178e62..00000000000 --- a/src/Parsers/New/AST/ShowCreateQuery.cpp +++ /dev/null @@ -1,96 +0,0 @@ -#include - -#include -#include -#include -#include -#include - - -namespace DB::AST -{ - -// static -PtrTo ShowCreateQuery::createDatabase(PtrTo identifier) -{ - return PtrTo(new ShowCreateQuery(QueryType::DATABASE, {identifier})); -} - -// static -PtrTo ShowCreateQuery::createDictionary(PtrTo identifier) -{ - return PtrTo(new ShowCreateQuery(QueryType::DICTIONARY, {identifier})); -} - -// static -PtrTo ShowCreateQuery::createTable(bool temporary, PtrTo 
identifier) -{ - PtrTo query(new ShowCreateQuery(QueryType::TABLE, {identifier})); - query->temporary = temporary; - return query; -} - -ShowCreateQuery::ShowCreateQuery(QueryType type, PtrList exprs) : Query(exprs), query_type(type) -{ -} - -ASTPtr ShowCreateQuery::convertToOld() const -{ - switch(query_type) - { - case QueryType::DATABASE: - { - auto query = std::make_shared(); - query->database = get(IDENTIFIER)->getName(); - return query; - } - case QueryType::DICTIONARY: - { - auto query = std::make_shared(); - - auto table = std::static_pointer_cast(get(IDENTIFIER)->convertToOld()); - query->database = table->getDatabaseName(); - query->table = table->shortName(); - query->uuid = table->uuid; - - return query; - } - case QueryType::TABLE: - { - auto query = std::make_shared(); - - auto table = std::static_pointer_cast(get(IDENTIFIER)->convertToOld()); - query->database = table->getDatabaseName(); - query->table = table->shortName(); - query->uuid = table->uuid; - query->temporary = temporary; - - return query; - } - } - __builtin_unreachable(); -} - -} - -namespace DB -{ - -using namespace AST; - -antlrcpp::Any ParseTreeVisitor::visitShowCreateDatabaseStmt(ClickHouseParser::ShowCreateDatabaseStmtContext *ctx) -{ - return ShowCreateQuery::createDatabase(visit(ctx->databaseIdentifier())); -} - -antlrcpp::Any ParseTreeVisitor::visitShowCreateDictionaryStmt(ClickHouseParser::ShowCreateDictionaryStmtContext * ctx) -{ - return ShowCreateQuery::createDictionary(visit(ctx->tableIdentifier())); -} - -antlrcpp::Any ParseTreeVisitor::visitShowCreateTableStmt(ClickHouseParser::ShowCreateTableStmtContext *ctx) -{ - return ShowCreateQuery::createTable(!!ctx->TEMPORARY(), visit(ctx->tableIdentifier())); -} - -} diff --git a/src/Parsers/New/AST/ShowCreateQuery.h b/src/Parsers/New/AST/ShowCreateQuery.h deleted file mode 100644 index 5f4d31bce60..00000000000 --- a/src/Parsers/New/AST/ShowCreateQuery.h +++ /dev/null @@ -1,36 +0,0 @@ -#pragma once - -#include - - -namespace DB::AST -{ - -class ShowCreateQuery : public Query -{ - public: - static PtrTo createDatabase(PtrTo identifier); - static PtrTo createDictionary(PtrTo identifier); - static PtrTo createTable(bool temporary, PtrTo identifier); - - ASTPtr convertToOld() const override; - - private: - enum ChildIndex : UInt8 - { - IDENTIFIER = 0, // DatabaseIdentifier or TableIdentifier - }; - enum class QueryType - { - DATABASE, - DICTIONARY, - TABLE, - }; - - QueryType query_type; - bool temporary = false; - - ShowCreateQuery(QueryType type, PtrList exprs); -}; - -} diff --git a/src/Parsers/New/AST/ShowQuery.cpp b/src/Parsers/New/AST/ShowQuery.cpp deleted file mode 100644 index e6ea357dd70..00000000000 --- a/src/Parsers/New/AST/ShowQuery.cpp +++ /dev/null @@ -1,49 +0,0 @@ -#include - -#include -#include -#include - - -namespace DB::AST -{ - -// static -PtrTo ShowQuery::createDictionaries(PtrTo from) -{ - return PtrTo(new ShowQuery(QueryType::DICTIONARIES, {from})); -} - -ShowQuery::ShowQuery(QueryType type, PtrList exprs) : Query(exprs), query_type(type) -{ -} - -ASTPtr ShowQuery::convertToOld() const -{ - auto query = std::make_shared(); - - switch(query_type) - { - case QueryType::DICTIONARIES: - query->dictionaries = true; - if (has(FROM)) query->from = get(FROM)->getQualifiedName(); - break; - } - - return query; -} - -} - -namespace DB -{ - -using namespace AST; - -antlrcpp::Any ParseTreeVisitor::visitShowDictionariesStmt(ClickHouseParser::ShowDictionariesStmtContext *ctx) -{ - auto from = ctx->databaseIdentifier() ? 
visit(ctx->databaseIdentifier()).as>() : nullptr; - return ShowQuery::createDictionaries(from); -} - -} diff --git a/src/Parsers/New/AST/ShowQuery.h b/src/Parsers/New/AST/ShowQuery.h deleted file mode 100644 index 93951676bbb..00000000000 --- a/src/Parsers/New/AST/ShowQuery.h +++ /dev/null @@ -1,32 +0,0 @@ -#pragma once - -#include - - -namespace DB::AST -{ - -class ShowQuery : public Query -{ - public: - static PtrTo createDictionaries(PtrTo from); - - ASTPtr convertToOld() const override; - - private: - enum ChildIndex : UInt8 - { - FROM = 0, // DatabaseIdentifier (optional) - }; - - enum class QueryType - { - DICTIONARIES, - }; - - const QueryType query_type; - - ShowQuery(QueryType type, PtrList exprs); -}; - -} diff --git a/src/Parsers/New/AST/SystemQuery.cpp b/src/Parsers/New/AST/SystemQuery.cpp deleted file mode 100644 index d2fda6a3fbc..00000000000 --- a/src/Parsers/New/AST/SystemQuery.cpp +++ /dev/null @@ -1,191 +0,0 @@ -#include - -#include -#include -#include -#include -#include - - -namespace DB::AST -{ - -// static -PtrTo SystemQuery::createDistributedSends(bool stop, PtrTo identifier) -{ - PtrTo query(new SystemQuery(QueryType::DISTRIBUTED_SENDS, {identifier})); - query->stop = stop; - return query; -} - -// static -PtrTo SystemQuery::createFetches(bool stop, PtrTo identifier) -{ - PtrTo query(new SystemQuery(QueryType::FETCHES, {identifier})); - query->stop = stop; - return query; -} - -// static -PtrTo SystemQuery::createFlushDistributed(PtrTo identifier) -{ - return PtrTo(new SystemQuery(QueryType::FLUSH_DISTRIBUTED, {identifier})); -} - -// static -PtrTo SystemQuery::createFlushLogs() -{ - return PtrTo(new SystemQuery(QueryType::FLUSH_LOGS, {})); -} - -// static -PtrTo SystemQuery::createMerges(bool stop, PtrTo identifier) -{ - PtrTo query(new SystemQuery(QueryType::MERGES, {identifier})); - query->stop = stop; - return query; -} - -// static -PtrTo SystemQuery::createReloadDictionaries() -{ - return PtrTo(new SystemQuery(QueryType::RELOAD_DICTIONARIES, {})); -} - -// static -PtrTo SystemQuery::createReloadDictionary(PtrTo identifier) -{ - return PtrTo(new SystemQuery(QueryType::RELOAD_DICTIONARY, {identifier})); -} - -// static -PtrTo SystemQuery::createReplicatedSends(bool stop) -{ - PtrTo query(new SystemQuery(QueryType::REPLICATED_SENDS, {})); - query->stop = stop; - return query; -} - -// static -PtrTo SystemQuery::createSyncReplica(PtrTo identifier) -{ - return PtrTo(new SystemQuery(QueryType::SYNC_REPLICA, {identifier})); -} - -// static -PtrTo SystemQuery::createTTLMerges(bool stop, PtrTo identifier) -{ - PtrTo query(new SystemQuery(QueryType::TTL_MERGES, {identifier})); - query->stop = stop; - return query; -} - -SystemQuery::SystemQuery(QueryType type, PtrList exprs) : Query(exprs), query_type(type) -{ -} - -ASTPtr SystemQuery::convertToOld() const -{ - auto query = std::make_shared(); - - switch(query_type) - { - case QueryType::DISTRIBUTED_SENDS: - query->type = stop ? ASTSystemQuery::Type::STOP_DISTRIBUTED_SENDS : ASTSystemQuery::Type::START_DISTRIBUTED_SENDS; - { - auto table = std::static_pointer_cast(get(TABLE)->convertToOld()); - query->database = table->getDatabaseName(); - query->table = table->shortName(); - } - break; - case QueryType::FETCHES: - query->type = stop ? 
ASTSystemQuery::Type::STOP_FETCHES : ASTSystemQuery::Type::START_FETCHES; - { - auto table = std::static_pointer_cast(get(TABLE)->convertToOld()); - query->database = table->getDatabaseName(); - query->table = table->shortName(); - } - break; - case QueryType::FLUSH_DISTRIBUTED: - query->type = ASTSystemQuery::Type::FLUSH_DISTRIBUTED; - { - auto table = std::static_pointer_cast(get(TABLE)->convertToOld()); - query->database = table->getDatabaseName(); - query->table = table->shortName(); - } - break; - case QueryType::FLUSH_LOGS: - query->type = ASTSystemQuery::Type::FLUSH_LOGS; - break; - case QueryType::MERGES: - query->type = stop ? ASTSystemQuery::Type::STOP_MERGES : ASTSystemQuery::Type::START_MERGES; - { - auto table = std::static_pointer_cast(get(TABLE)->convertToOld()); - query->database = table->getDatabaseName(); - query->table = table->shortName(); - } - break; - case QueryType::RELOAD_DICTIONARIES: - query->type = ASTSystemQuery::Type::RELOAD_DICTIONARIES; - break; - case QueryType::RELOAD_DICTIONARY: - query->type = ASTSystemQuery::Type::RELOAD_DICTIONARY; - { - auto table = std::static_pointer_cast(get(TABLE)->convertToOld()); - query->database = table->getDatabaseName(); - query->table = table->getTableId().table_name; - } - break; - case QueryType::REPLICATED_SENDS: - query->type = stop ? ASTSystemQuery::Type::STOP_REPLICATED_SENDS : ASTSystemQuery::Type::START_REPLICATED_SENDS; - break; - case QueryType::SYNC_REPLICA: - query->type = ASTSystemQuery::Type::SYNC_REPLICA; - { - auto table = std::static_pointer_cast(get(TABLE)->convertToOld()); - query->database = table->getDatabaseName(); - query->table = table->shortName(); - } - break; - case QueryType::TTL_MERGES: - query->type = stop ? ASTSystemQuery::Type::STOP_TTL_MERGES : ASTSystemQuery::Type::START_TTL_MERGES; - { - auto table = std::static_pointer_cast(get(TABLE)->convertToOld()); - query->database = table->getDatabaseName(); - query->table = table->shortName(); - } - break; - } - - return query; -} - -} - -namespace DB -{ - -using namespace AST; - -antlrcpp::Any ParseTreeVisitor::visitSystemStmt(ClickHouseParser::SystemStmtContext *ctx) -{ - if (ctx->FLUSH() && ctx->DISTRIBUTED()) return SystemQuery::createFlushDistributed(visit(ctx->tableIdentifier())); - if (ctx->FLUSH() && ctx->LOGS()) return SystemQuery::createFlushLogs(); - if (ctx->DISTRIBUTED() && ctx->SENDS()) return SystemQuery::createDistributedSends(!!ctx->STOP(), visit(ctx->tableIdentifier())); - if (ctx->FETCHES()) return SystemQuery::createFetches(!!ctx->STOP(), visit(ctx->tableIdentifier())); - if (ctx->MERGES()) - { - if (ctx->TTL()) return SystemQuery::createTTLMerges(!!ctx->STOP(), visit(ctx->tableIdentifier())); - else return SystemQuery::createMerges(!!ctx->STOP(), visit(ctx->tableIdentifier())); - } - if (ctx->RELOAD()) - { - if (ctx->DICTIONARIES()) return SystemQuery::createReloadDictionaries(); - if (ctx->DICTIONARY()) return SystemQuery::createReloadDictionary(visit(ctx->tableIdentifier())); - } - if (ctx->REPLICATED() && ctx->SENDS()) return SystemQuery::createReplicatedSends(!!ctx->STOP()); - if (ctx->SYNC() && ctx->REPLICA()) return SystemQuery::createSyncReplica(visit(ctx->tableIdentifier())); - __builtin_unreachable(); -} - -} diff --git a/src/Parsers/New/AST/SystemQuery.h b/src/Parsers/New/AST/SystemQuery.h deleted file mode 100644 index 98a5cfd0932..00000000000 --- a/src/Parsers/New/AST/SystemQuery.h +++ /dev/null @@ -1,50 +0,0 @@ -#pragma once - -#include - - -namespace DB::AST -{ - -class SystemQuery : public Query -{ - public: - 
static PtrTo createDistributedSends(bool stop, PtrTo identifier); - static PtrTo createFetches(bool stop, PtrTo identifier); - static PtrTo createFlushDistributed(PtrTo identifier); - static PtrTo createFlushLogs(); - static PtrTo createMerges(bool stop, PtrTo identifier); - static PtrTo createReloadDictionaries(); - static PtrTo createReloadDictionary(PtrTo identifier); - static PtrTo createReplicatedSends(bool stop); - static PtrTo createSyncReplica(PtrTo identifier); - static PtrTo createTTLMerges(bool stop, PtrTo identifier); - - ASTPtr convertToOld() const override; - - private: - enum ChildIndex : UInt8 - { - TABLE = 0, - }; - enum class QueryType - { - DISTRIBUTED_SENDS, - FETCHES, - FLUSH_DISTRIBUTED, - FLUSH_LOGS, - MERGES, - RELOAD_DICTIONARIES, - RELOAD_DICTIONARY, - REPLICATED_SENDS, - SYNC_REPLICA, - TTL_MERGES, - }; - - QueryType query_type; - bool stop = false; - - SystemQuery(QueryType type, PtrList exprs); -}; - -} diff --git a/src/Parsers/New/AST/TableElementExpr.cpp b/src/Parsers/New/AST/TableElementExpr.cpp deleted file mode 100644 index 70855fee697..00000000000 --- a/src/Parsers/New/AST/TableElementExpr.cpp +++ /dev/null @@ -1,264 +0,0 @@ -#include - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - - -namespace DB::AST -{ - -CodecArgExpr::CodecArgExpr(PtrTo identifier, PtrTo list) : INode{identifier, list} -{ -} - -ASTPtr CodecArgExpr::convertToOld() const -{ - auto func = std::make_shared(); - - func->name = get(NAME)->getName(); - if (has(ARGS)) - { - func->arguments = get(ARGS)->convertToOld(); - func->children.push_back(func->arguments); - } - - return func; -} - -CodecExpr::CodecExpr(PtrTo list) : INode{list} -{ -} - -ASTPtr CodecExpr::convertToOld() const -{ - auto func = std::make_shared(); - - func->name = "codec"; - func->arguments = get(ARGS)->convertToOld(); - func->children.push_back(func->arguments); - - return func; -} - -TableColumnPropertyExpr::TableColumnPropertyExpr(PropertyType type, PtrTo expr) : INode{expr}, property_type(type) -{ -} - -ASTPtr TableColumnPropertyExpr::convertToOld() const -{ - return get(EXPR)->convertToOld(); -} - -// static -PtrTo TableElementExpr::createColumn( - PtrTo name, - PtrTo type, - PtrTo property, - PtrTo comment, - PtrTo codec, - PtrTo ttl) -{ - return PtrTo(new TableElementExpr(ExprType::COLUMN, {name, type, property, comment, codec, ttl})); -} - -// static -PtrTo TableElementExpr::createConstraint(PtrTo identifier, PtrTo expr) -{ - return PtrTo(new TableElementExpr(ExprType::CONSTRAINT, {identifier, expr})); -} - -// static -PtrTo -TableElementExpr::createIndex(PtrTo name, PtrTo expr, PtrTo type, PtrTo granularity) -{ - return PtrTo(new TableElementExpr(ExprType::INDEX, {name, expr, type, granularity})); -} - -// static -PtrTo -TableElementExpr::createProjection(PtrTo name, PtrTo query) -{ - return PtrTo(new TableElementExpr(ExprType::PROJECTION, {name, query})); -} - -TableElementExpr::TableElementExpr(ExprType type, PtrList exprs) : INode(exprs), expr_type(type) -{ -} - -ASTPtr TableElementExpr::convertToOld() const -{ - switch(expr_type) - { - case ExprType::COLUMN: - { - auto expr = std::make_shared(); - - expr->name = get(NAME)->getName(); // FIXME: do we have correct nested identifier here already? 
- if (has(TYPE)) - { - expr->type = get(TYPE)->convertToOld(); - expr->children.push_back(expr->type); - } - if (has(PROPERTY)) - { - switch(get(PROPERTY)->getType()) - { - case TableColumnPropertyExpr::PropertyType::ALIAS: - expr->default_specifier = "ALIAS"; - break; - case TableColumnPropertyExpr::PropertyType::DEFAULT: - expr->default_specifier = "DEFAULT"; - break; - case TableColumnPropertyExpr::PropertyType::MATERIALIZED: - expr->default_specifier = "MATERIALIZED"; - break; - } - expr->default_expression = get(PROPERTY)->convertToOld(); - expr->children.push_back(expr->default_expression); - } - if (has(COMMENT)) - { - expr->comment = get(COMMENT)->convertToOld(); - expr->children.push_back(expr->comment); - } - if (has(CODEC)) - { - expr->codec = get(CODEC)->convertToOld(); - expr->children.push_back(expr->codec); - } - if (has(TTL)) - { - expr->ttl = get(TTL)->convertToOld(); - expr->children.push_back(expr->ttl); - } - - return expr; - } - case ExprType::CONSTRAINT: - { - auto expr = std::make_shared(); - - expr->name = get(NAME)->getName(); - expr->set(expr->expr, get(EXPR)->convertToOld()); - - return expr; - } - case ExprType::INDEX: - { - auto expr = std::make_shared(); - - expr->name = get(NAME)->getName(); - expr->set(expr->expr, get(EXPR)->convertToOld()); - expr->set(expr->type, get(INDEX_TYPE)->convertToOld()); - expr->granularity = get(GRANULARITY)->as().value_or(0); // FIXME: throw exception instead of default. - - return expr; - } - case ExprType::PROJECTION: - { - auto expr = std::make_shared(); - - expr->name = get(NAME)->getName(); - expr->set(expr->query, get(QUERY)->convertToOld()); - - return expr; - } - } - __builtin_unreachable(); -} - -} - -namespace DB -{ - -using namespace AST; - -antlrcpp::Any ParseTreeVisitor::visitCodecArgExpr(ClickHouseParser::CodecArgExprContext *ctx) -{ - auto list = ctx->columnExprList() ? 
visit(ctx->columnExprList()).as>() : nullptr; - return std::make_shared(visit(ctx->identifier()), list); -} - -antlrcpp::Any ParseTreeVisitor::visitCodecExpr(ClickHouseParser::CodecExprContext *ctx) -{ - auto list = std::make_shared(); - for (auto * arg : ctx->codecArgExpr()) list->push(visit(arg)); - return std::make_shared(list); -} - -antlrcpp::Any ParseTreeVisitor::visitTableColumnDfnt(ClickHouseParser::TableColumnDfntContext *ctx) -{ - PtrTo property; - PtrTo type; - PtrTo comment; - PtrTo codec; - PtrTo ttl; - - if (ctx->tableColumnPropertyExpr()) property = visit(ctx->tableColumnPropertyExpr()); - if (ctx->columnTypeExpr()) type = visit(ctx->columnTypeExpr()); - if (ctx->STRING_LITERAL()) comment = Literal::createString(ctx->STRING_LITERAL()); - if (ctx->codecExpr()) codec = visit(ctx->codecExpr()); - if (ctx->TTL()) ttl = visit(ctx->columnExpr()); - - return TableElementExpr::createColumn(visit(ctx->nestedIdentifier()), type, property, comment, codec, ttl); -} - -antlrcpp::Any ParseTreeVisitor::visitTableColumnPropertyExpr(ClickHouseParser::TableColumnPropertyExprContext *ctx) -{ - TableColumnPropertyExpr::PropertyType type; - - if (ctx->DEFAULT()) type = TableColumnPropertyExpr::PropertyType::DEFAULT; - else if (ctx->MATERIALIZED()) type = TableColumnPropertyExpr::PropertyType::MATERIALIZED; - else if (ctx->ALIAS()) type = TableColumnPropertyExpr::PropertyType::ALIAS; - else __builtin_unreachable(); - - return std::make_shared(type, visit(ctx->columnExpr())); -} - -antlrcpp::Any ParseTreeVisitor::visitTableElementExprColumn(ClickHouseParser::TableElementExprColumnContext *ctx) -{ - return visit(ctx->tableColumnDfnt()); -} - -antlrcpp::Any ParseTreeVisitor::visitTableElementExprConstraint(ClickHouseParser::TableElementExprConstraintContext *ctx) -{ - return TableElementExpr::createConstraint(visit(ctx->identifier()), visit(ctx->columnExpr())); -} - -antlrcpp::Any ParseTreeVisitor::visitTableElementExprIndex(ClickHouseParser::TableElementExprIndexContext *ctx) -{ - return visit(ctx->tableIndexDfnt()); -} - -antlrcpp::Any ParseTreeVisitor::visitTableElementExprProjection(ClickHouseParser::TableElementExprProjectionContext *ctx) -{ - return visit(ctx->tableProjectionDfnt()); -} - -antlrcpp::Any ParseTreeVisitor::visitTableIndexDfnt(ClickHouseParser::TableIndexDfntContext *ctx) -{ - return TableElementExpr::createIndex( - visit(ctx->nestedIdentifier()), - visit(ctx->columnExpr()), - visit(ctx->columnTypeExpr()), - Literal::createNumber(ctx->DECIMAL_LITERAL())); -} - -antlrcpp::Any ParseTreeVisitor::visitTableProjectionDfnt(ClickHouseParser::TableProjectionDfntContext *ctx) -{ - return TableElementExpr::createProjection( - visit(ctx->nestedIdentifier()), - visit(ctx->projectionSelectStmt())); -} - -} diff --git a/src/Parsers/New/AST/TableElementExpr.h b/src/Parsers/New/AST/TableElementExpr.h deleted file mode 100644 index 18d1aa9c456..00000000000 --- a/src/Parsers/New/AST/TableElementExpr.h +++ /dev/null @@ -1,123 +0,0 @@ -#pragma once - -#include - - -namespace DB::AST -{ - -class CodecArgExpr : public INode -{ - public: - CodecArgExpr(PtrTo identifier, PtrTo list); - - ASTPtr convertToOld() const override; - - private: - enum ChildIndex : UInt8 - { - NAME = 0, // Identifier - ARGS = 1, // ColumnExprList (optional) - }; -}; - -class CodecExpr : public INode -{ - public: - explicit CodecExpr(PtrTo list); - - ASTPtr convertToOld() const override; - - private: - enum ChildIndex : UInt8 - { - ARGS = 0, // CodecArgList - }; -}; - -class TableColumnPropertyExpr : public INode -{ - public: - 
enum class PropertyType - { - DEFAULT, - MATERIALIZED, - ALIAS, - }; - - TableColumnPropertyExpr(PropertyType type, PtrTo expr); - - auto getType() const { return property_type; } - - ASTPtr convertToOld() const override; - - private: - enum ChildIndex : UInt8 - { - EXPR = 0, // ColumnExpr - }; - - PropertyType property_type; -}; - -class TableElementExpr : public INode -{ - public: - enum class ExprType - { - COLUMN, - CONSTRAINT, - INDEX, - PROJECTION, - }; - - static PtrTo createColumn( - PtrTo name, - PtrTo type, - PtrTo property, - PtrTo comment, - PtrTo codec, - PtrTo ttl); - - static PtrTo createConstraint(PtrTo identifier, PtrTo expr); - - static PtrTo - createIndex(PtrTo name, PtrTo expr, PtrTo type, PtrTo granularity); - - static PtrTo - createProjection(PtrTo name, PtrTo query); - - auto getType() const { return expr_type; } - - ASTPtr convertToOld() const override; - - private: - enum ChildIndex: UInt8 - { - // COLUMN - NAME = 0, // Identifier - TYPE = 1, // ColumnExprType (optional) - PROPERTY = 2, // TableColumnPropertyExpr - COMMENT = 3, // StringLiteral (optional) - CODEC = 4, // CodecExpr (optional) - TTL = 5, // ColumnExpr (optional) - - // CONSTRAINT - // NAME = 0, - // EXPR = 1, - - // INDEX - EXPR = 1, // ColumnExpr - INDEX_TYPE = 2, // ColumnTypeExpr - GRANULARITY = 3, // NumberLiteral - - // PROJECTION - QUERY = 1, // ColumnExpr - }; - - const ExprType expr_type; - - TableElementExpr(ExprType type, PtrList exprs); -}; - -} diff --git a/src/Parsers/New/AST/TableExpr.cpp b/src/Parsers/New/AST/TableExpr.cpp deleted file mode 100644 index e14493c6bd6..00000000000 --- a/src/Parsers/New/AST/TableExpr.cpp +++ /dev/null @@ -1,190 +0,0 @@ -#include - -#include -#include -#include -#include - -#include -#include -#include - - -namespace DB::AST -{ - -TableArgExpr::TableArgExpr(PtrTo literal) : INode{literal} -{ -} - -TableArgExpr::TableArgExpr(PtrTo function) : INode{function} -{ -} - -TableArgExpr::TableArgExpr(PtrTo identifier) : INode{identifier} -{ -} - -ASTPtr TableArgExpr::convertToOld() const -{ - return get(EXPR)->convertToOld(); -} - -// static -PtrTo TableExpr::createAlias(PtrTo expr, PtrTo alias) -{ - return PtrTo(new TableExpr(ExprType::ALIAS, {expr, alias})); -} - -// static -PtrTo TableExpr::createFunction(PtrTo function) -{ - return PtrTo(new TableExpr(ExprType::FUNCTION, {function})); -} - -// static -PtrTo TableExpr::createIdentifier(PtrTo identifier) -{ - return PtrTo(new TableExpr(ExprType::IDENTIFIER, {identifier})); -} - -// static -PtrTo TableExpr::createSubquery(PtrTo subquery) -{ - return PtrTo(new TableExpr(ExprType::SUBQUERY, {subquery})); -} - -ASTPtr TableExpr::convertToOld() const -{ - // TODO: SAMPLE and RATIO also goes here somehow - - switch (expr_type) - { - case ExprType::ALIAS: - { - auto expr = get(EXPR)->convertToOld(); - auto * table_expr = expr->as(); - - if (table_expr->database_and_table_name) - table_expr->database_and_table_name->setAlias(get(ALIAS)->getName()); - else if (table_expr->table_function) - table_expr->table_function->setAlias(get(ALIAS)->getName()); - else if (table_expr->subquery) - table_expr->subquery->setAlias(get(ALIAS)->getName()); - - return expr; - } - case ExprType::FUNCTION: - { - auto expr = std::make_shared(); - auto func = get(FUNCTION)->convertToOld(); - - expr->table_function = func; - expr->children.push_back(func); - - return expr; - } - case ExprType::IDENTIFIER: - { - auto expr = std::make_shared(); - - expr->database_and_table_name = get(IDENTIFIER)->convertToOld(); - 
expr->children.emplace_back(expr->database_and_table_name); - - return expr; - } - case ExprType::SUBQUERY: - { - auto expr = std::make_shared(); - - expr->subquery = std::make_shared(); - expr->subquery->children.push_back(get(SUBQUERY)->convertToOld()); - expr->children.push_back(expr->subquery); - - return expr; - } - } - __builtin_unreachable(); -} - -TableExpr::TableExpr(TableExpr::ExprType type, PtrList exprs) : INode(exprs), expr_type(type) -{ -} - -String TableExpr::dumpInfo() const -{ - switch(expr_type) - { - case ExprType::ALIAS: return "ALIAS"; - case ExprType::FUNCTION: return "FUNCTION"; - case ExprType::IDENTIFIER: return "IDENTIFIER"; - case ExprType::SUBQUERY: return "SUBQUERY"; - } - __builtin_unreachable(); -} - -TableFunctionExpr::TableFunctionExpr(PtrTo name, PtrTo args) : INode{name, args} -{ -} - -ASTPtr TableFunctionExpr::convertToOld() const -{ - auto func = std::make_shared(); - - func->name = get(NAME)->getName(); - func->arguments = has(ARGS) ? get(ARGS)->convertToOld() : std::make_shared()->convertToOld(); - func->children.push_back(func->arguments); - - return func; -} - -} - -namespace DB -{ - -using namespace AST; - -antlrcpp::Any ParseTreeVisitor::visitTableArgExpr(ClickHouseParser::TableArgExprContext *ctx) -{ - if (ctx->literal()) return std::make_shared(visit(ctx->literal()).as>()); - if (ctx->tableFunctionExpr()) return std::make_shared(visit(ctx->tableFunctionExpr()).as>()); - if (ctx->nestedIdentifier()) return std::make_shared(visit(ctx->nestedIdentifier()).as>()); - __builtin_unreachable(); -} - -antlrcpp::Any ParseTreeVisitor::visitTableArgList(ClickHouseParser::TableArgListContext * ctx) -{ - auto list = std::make_shared(); - for (auto * arg : ctx->tableArgExpr()) list->push(visit(arg)); - return list; -} - -antlrcpp::Any ParseTreeVisitor::visitTableExprAlias(ClickHouseParser::TableExprAliasContext *ctx) -{ - if (ctx->AS()) return TableExpr::createAlias(visit(ctx->tableExpr()), visit(ctx->identifier())); - else return TableExpr::createAlias(visit(ctx->tableExpr()), visit(ctx->alias())); -} - -antlrcpp::Any ParseTreeVisitor::visitTableExprFunction(ClickHouseParser::TableExprFunctionContext *ctx) -{ - return TableExpr::createFunction(visit(ctx->tableFunctionExpr())); -} - -antlrcpp::Any ParseTreeVisitor::visitTableExprIdentifier(ClickHouseParser::TableExprIdentifierContext *ctx) -{ - return TableExpr::createIdentifier(visit(ctx->tableIdentifier()).as>()); -} - -antlrcpp::Any ParseTreeVisitor::visitTableExprSubquery(ClickHouseParser::TableExprSubqueryContext *ctx) -{ - return TableExpr::createSubquery(visit(ctx->selectUnionStmt())); -} - -antlrcpp::Any ParseTreeVisitor::visitTableFunctionExpr(ClickHouseParser::TableFunctionExprContext *ctx) -{ - auto list = ctx->tableArgList() ? 
visit(ctx->tableArgList()).as>() : nullptr; - return std::make_shared(visit(ctx->identifier()), list); -} - -} diff --git a/src/Parsers/New/AST/TableExpr.h b/src/Parsers/New/AST/TableExpr.h deleted file mode 100644 index 1d893753023..00000000000 --- a/src/Parsers/New/AST/TableExpr.h +++ /dev/null @@ -1,81 +0,0 @@ -#pragma once - -#include - - -namespace DB::AST -{ - -class TableArgExpr : public INode -{ - public: - explicit TableArgExpr(PtrTo literal); - explicit TableArgExpr(PtrTo function); - explicit TableArgExpr(PtrTo identifier); - - ASTPtr convertToOld() const override; - - private: - enum ChildIndex : UInt8 - { - EXPR = 0, // Literal or TableFunctionExpr or Identifier - }; -}; - -class TableExpr : public INode -{ - public: - static PtrTo createAlias(PtrTo expr, PtrTo alias); - static PtrTo createFunction(PtrTo function); - static PtrTo createIdentifier(PtrTo identifier); - static PtrTo createSubquery(PtrTo subquery); - - ASTPtr convertToOld() const override; - - private: - enum ChildIndex : UInt8 - { - // ALIAS - EXPR = 0, // TableExpr - ALIAS = 1, // Identifier - - // FUNCTION - FUNCTION = 0, // TableFunctionExpr - - // IDENTIFIER - IDENTIFIER = 0, // TableIdentifier - - // SUBQUERY - SUBQUERY = 0, // SelectUnionSubquery - }; - enum class ExprType - { - ALIAS, - FUNCTION, - IDENTIFIER, - SUBQUERY, - }; - - ExprType expr_type; - - TableExpr(ExprType type, PtrList exprs); - - String dumpInfo() const override; -}; - -class TableFunctionExpr : public INode -{ - public: - TableFunctionExpr(PtrTo name, PtrTo args); - - ASTPtr convertToOld() const override; - - private: - enum ChildIndex : UInt8 - { - NAME = 0, - ARGS = 1, - }; -}; - -} diff --git a/src/Parsers/New/AST/TruncateQuery.cpp b/src/Parsers/New/AST/TruncateQuery.cpp deleted file mode 100644 index 43d7f7ed042..00000000000 --- a/src/Parsers/New/AST/TruncateQuery.cpp +++ /dev/null @@ -1,47 +0,0 @@ -#include - -#include -#include -#include - - -namespace DB::AST -{ - -TruncateQuery::TruncateQuery(PtrTo cluster, bool temporary_, bool if_exists_, PtrTo identifier) - : DDLQuery(cluster, {identifier}), temporary(temporary_), if_exists(if_exists_) -{ -} - -ASTPtr TruncateQuery::convertToOld() const -{ - auto query = std::make_shared(); - - query->kind = ASTDropQuery::Truncate; - query->if_exists = if_exists; - query->temporary = temporary; - query->cluster = cluster_name; - - query->table = get(NAME)->getName(); - if (auto database = get(NAME)->getDatabase()) - query->database = database->getName(); - - convertToOldPartially(query); - - return query; -} - -} - -namespace DB -{ - -using namespace AST; - -antlrcpp::Any ParseTreeVisitor::visitTruncateStmt(ClickHouseParser::TruncateStmtContext *ctx) -{ - auto cluster = ctx->clusterClause() ? 
visit(ctx->clusterClause()).as>() : nullptr; - return std::make_shared(cluster, !!ctx->TEMPORARY(), !!ctx->IF(), visit(ctx->tableIdentifier())); -} - -} diff --git a/src/Parsers/New/AST/TruncateQuery.h b/src/Parsers/New/AST/TruncateQuery.h deleted file mode 100644 index 463e561890f..00000000000 --- a/src/Parsers/New/AST/TruncateQuery.h +++ /dev/null @@ -1,25 +0,0 @@ -#pragma once - -#include - - -namespace DB::AST -{ - -class TruncateQuery : public DDLQuery -{ - public: - TruncateQuery(PtrTo cluster, bool temporary, bool if_exists, PtrTo identifier); - - ASTPtr convertToOld() const override; - - private: - enum ChildIndex : UInt8 - { - NAME = 0, // TableIdentifier - }; - - const bool temporary, if_exists; -}; - -} diff --git a/src/Parsers/New/AST/UseQuery.cpp b/src/Parsers/New/AST/UseQuery.cpp deleted file mode 100644 index 4dd4d564c27..00000000000 --- a/src/Parsers/New/AST/UseQuery.cpp +++ /dev/null @@ -1,37 +0,0 @@ -#include - -#include -#include -#include - - -namespace DB::AST -{ - -UseQuery::UseQuery(PtrTo identifier) -{ - push(identifier); -} - -ASTPtr UseQuery::convertToOld() const -{ - auto query = std::make_shared(); - - query->database = get(DATABASE)->getName(); - - return query; -} - -} - -namespace DB -{ - -using namespace AST; - -antlrcpp::Any ParseTreeVisitor::visitUseStmt(ClickHouseParser::UseStmtContext *ctx) -{ - return std::make_shared(visit(ctx->databaseIdentifier()).as>()); -} - -} diff --git a/src/Parsers/New/AST/UseQuery.h b/src/Parsers/New/AST/UseQuery.h deleted file mode 100644 index c71f271edb5..00000000000 --- a/src/Parsers/New/AST/UseQuery.h +++ /dev/null @@ -1,23 +0,0 @@ -#pragma once - -#include - - -namespace DB::AST -{ - -class UseQuery : public Query -{ - public: - explicit UseQuery(PtrTo identifier); - - ASTPtr convertToOld() const override; - - private: - enum ChildIndex : UInt8 - { - DATABASE = 0, - }; -}; - -} diff --git a/src/Parsers/New/AST/WatchQuery.cpp b/src/Parsers/New/AST/WatchQuery.cpp deleted file mode 100644 index 14d71007232..00000000000 --- a/src/Parsers/New/AST/WatchQuery.cpp +++ /dev/null @@ -1,51 +0,0 @@ -#include - -#include -#include -#include -#include -#include -#include - - -namespace DB::AST -{ - -WatchQuery::WatchQuery(bool events_, PtrTo identifier, PtrTo literal) - : Query{identifier, literal}, events(events_) -{ -} - -ASTPtr WatchQuery::convertToOld() const -{ - auto query = std::make_shared(); - - auto table = std::static_pointer_cast(get(TABLE)->convertToOld()); - query->database = table->getDatabaseName(); - query->table = table->shortName(); - query->uuid = table->uuid; - - query->is_watch_events = events; - - if (has(LIMIT)) - query->limit_length = get(LIMIT)->convertToOld(); - - convertToOldPartially(query); - - return query; -} - -} - -namespace DB -{ - -using namespace AST; - -antlrcpp::Any ParseTreeVisitor::visitWatchStmt(ClickHouseParser::WatchStmtContext *ctx) -{ - auto limit = ctx->DECIMAL_LITERAL() ? 
Literal::createNumber(ctx->DECIMAL_LITERAL()) : nullptr; - return std::make_shared(!!ctx->EVENTS(), visit(ctx->tableIdentifier()), limit); -} - -} diff --git a/src/Parsers/New/AST/WatchQuery.h b/src/Parsers/New/AST/WatchQuery.h deleted file mode 100644 index 041f71b75ff..00000000000 --- a/src/Parsers/New/AST/WatchQuery.h +++ /dev/null @@ -1,26 +0,0 @@ -#pragma once - -#include - - -namespace DB::AST -{ - -class WatchQuery : public Query -{ - public: - WatchQuery(bool events, PtrTo identifier, PtrTo literal); - - ASTPtr convertToOld() const override; - - private: - enum ChildIndex : UInt8 - { - TABLE = 0, // TableIdentifier - LIMIT = 1, // NumberLiteral (optional) - }; - - const bool events; -}; - -} diff --git a/src/Parsers/New/AST/fwd_decl.h b/src/Parsers/New/AST/fwd_decl.h deleted file mode 100644 index 4f9bde4bbbb..00000000000 --- a/src/Parsers/New/AST/fwd_decl.h +++ /dev/null @@ -1,91 +0,0 @@ -#pragma once - -#include -#include - - -namespace DB::AST -{ - -class INode; - -template -class List; - -template -class SimpleClause; - -template -using PtrTo = std::shared_ptr; - -using Ptr = PtrTo<>; -using PtrList = std::vector; - -class AssignmentExpr; -class CodecArgExpr; -class CodecExpr; -class ColumnExpr; -class ColumnFunctionExpr; -class ColumnIdentifier; -class ColumnLambdaExpr; -class ColumnTypeExpr; -class DatabaseIdentifier; -class DictionaryArgExpr; -class DictionaryAttributeExpr; -class EngineClause; -class EngineExpr; -class EnumValue; -class Identifier; -class JoinExpr; -class JsonExpr; -class JsonValue; -class LimitExpr; -class Literal; -class NumberLiteral; -class OrderExpr; -class PartitionClause; -class Query; -class RatioExpr; -class TableSchemaClause; -class ProjectionSelectStmt; -class SelectStmt; -class SelectUnionQuery; -class SettingExpr; -class SettingsClause; -class StringLiteral; -class TableArgExpr; -class TableColumnPropertyExpr; -class TableElementExpr; -class TableExpr; -class TableFunctionExpr; -class TableIdentifier; -class TTLExpr; - -using AssignmentExprList = List; -using CodecArgList = List; -using ColumnExprList = List; -using ColumnNameList = List; -using ColumnParamList = ColumnExprList; -using ColumnTypeExprList = List; -using DictionaryArgList = List; -using DictionaryAttributeList = List; -using EnumValueList = List; -using JsonExprList = List; -using JsonValueList = List; -using OrderExprList = List; -using QueryList = List; -using SettingExprList = List; -using TableArgList = List; -using TableElementList = List; -using TTLExprList = List; - -using ClusterClause = SimpleClause; -using DestinationClause = SimpleClause; -using OrderByClause = SimpleClause; -using ProjectionOrderByClause = SimpleClause; -using PrimaryKeyClause = SimpleClause; -using TTLClause = SimpleClause; -using UUIDClause = SimpleClause; -using WhereClause = SimpleClause; - -} diff --git a/src/Parsers/New/CMakeLists.txt b/src/Parsers/New/CMakeLists.txt deleted file mode 100644 index b045b0cc123..00000000000 --- a/src/Parsers/New/CMakeLists.txt +++ /dev/null @@ -1,93 +0,0 @@ -set (SRCS - AST/AlterTableQuery.cpp - AST/AttachQuery.cpp - AST/CheckQuery.cpp - AST/ColumnExpr.cpp - AST/ColumnTypeExpr.cpp - AST/CreateDatabaseQuery.cpp - AST/CreateDictionaryQuery.cpp - AST/CreateLiveViewQuery.cpp - AST/CreateMaterializedViewQuery.cpp - AST/CreateTableQuery.cpp - AST/CreateViewQuery.cpp - AST/DDLQuery.cpp - AST/DescribeQuery.cpp - AST/DropQuery.cpp - AST/EngineExpr.cpp - AST/ExistsQuery.cpp - AST/ExplainQuery.cpp - AST/Identifier.cpp - AST/InsertQuery.cpp - AST/JoinExpr.cpp - 
AST/KillQuery.cpp
-    AST/LimitExpr.cpp
-    AST/Literal.cpp
-    AST/OptimizeQuery.cpp
-    AST/OrderExpr.cpp
-    AST/Query.cpp
-    AST/RatioExpr.cpp
-    AST/RenameQuery.cpp
-    AST/SelectUnionQuery.cpp
-    AST/SetQuery.cpp
-    AST/SettingExpr.cpp
-    AST/ShowCreateQuery.cpp
-    AST/ShowQuery.cpp
-    AST/SystemQuery.cpp
-    AST/TableElementExpr.cpp
-    AST/TableExpr.cpp
-    AST/TruncateQuery.cpp
-    AST/UseQuery.cpp
-    AST/WatchQuery.cpp
-    CharInputStream.cpp
-    ClickHouseLexer.cpp
-    ClickHouseParser.cpp
-    ClickHouseParserVisitor.cpp
-    LexerErrorListener.cpp
-    parseQuery.cpp
-    ParserErrorListener.cpp
-    ParseTreeVisitor.cpp
-)
-
-add_library (clickhouse_parsers_new ${SRCS})
-
-target_compile_options (clickhouse_parsers_new
-    PRIVATE
-    -Wno-c++2a-compat
-    -Wno-deprecated-this-capture
-    -Wno-documentation-html
-    -Wno-documentation
-    -Wno-documentation-deprecated-sync
-    -Wno-shadow-field
-    -Wno-unused-parameter
-    -Wno-extra-semi
-    -Wno-inconsistent-missing-destructor-override
-)
-
-# XXX: hack for old clang-10!
-if (HAS_SUGGEST_DESTRUCTOR_OVERRIDE)
-    target_compile_options (clickhouse_parsers_new
-        PRIVATE
-        -Wno-suggest-destructor-override
-    )
-endif ()
-
-# XXX: hack for old gcc-10!
-if (HAS_SHADOW)
-    target_compile_options (clickhouse_parsers_new
-        PRIVATE
-        -Wno-shadow
-    )
-endif ()
-
-target_link_libraries (clickhouse_parsers_new PUBLIC antlr4-runtime clickhouse_common_io clickhouse_parsers)
-
-# ANTLR generates u8 string literals, which are incompatible with |std::string| in C++20.
-# See https://github.com/antlr/antlr4/issues/2683
-set_source_files_properties(
-    ClickHouseLexer.cpp
-    ClickHouseParser.cpp
-    PROPERTIES COMPILE_FLAGS -std=c++17
-)
-
-# Disable clang-tidy for whole target.
-set_target_properties(clickhouse_parsers_new PROPERTIES CXX_CLANG_TIDY "")
diff --git a/src/Parsers/New/CharInputStream.cpp b/src/Parsers/New/CharInputStream.cpp
deleted file mode 100644
index 71cccafae50..00000000000
--- a/src/Parsers/New/CharInputStream.cpp
+++ /dev/null
@@ -1,79 +0,0 @@
-#include <Parsers/New/CharInputStream.h>
-
-#include
-
-
-namespace DB
-{
-
-using namespace antlr4;
-
-CharInputStream::CharInputStream(const char * begin, const char * end)
-{
-    d = begin;
-    s = end - begin;
-}
-
-size_t CharInputStream::LA(ssize_t i)
-{
-    if (i == 0) return 0; // undefined
-
-    ssize_t position = static_cast<ssize_t>(p);
-    if (i < 0)
-    {
-        i++; // e.g., translate LA(-1) to use offset i=0; then data[p+0-1]
-        if ((position + i - 1) < 0)
-            return IntStream::EOF; // invalid; no char before first char
-    }
-
-    if ((position + i - 1) >= static_cast<ssize_t>(s))
-        return IntStream::EOF;
-
-    return d[static_cast<size_t>((position + i - 1))];
-}
-
-void CharInputStream::consume()
-{
-    if (p >= s)
-    {
-        assert(LA(1) == IntStream::EOF);
-        throw IllegalStateException("cannot consume EOF");
-    }
-
-    ++p;
-}
-
-void CharInputStream::seek(size_t i)
-{
-    if (i <= p)
-    {
-        p = i; // just jump; don't update stream state (line, ...)
- return; - } - - // seek forward, consume until p hits index or s (whichever comes first) - i = std::min(i, s); - while (p < i) - consume(); -} - -std::string CharInputStream::getText(const antlr4::misc::Interval &interval) -{ - if (interval.a < 0 || interval.b < 0) - return {}; - - size_t start = static_cast(interval.a); - size_t stop = static_cast(interval.b); - - - if (stop >= s) - stop = s - 1; - - size_t count = stop - start + 1; - if (start >= s) - return ""; - - return {d + start, count}; -} - -} diff --git a/src/Parsers/New/CharInputStream.h b/src/Parsers/New/CharInputStream.h deleted file mode 100644 index 735f5c2bc38..00000000000 --- a/src/Parsers/New/CharInputStream.h +++ /dev/null @@ -1,34 +0,0 @@ -#pragma once - -#include - - -namespace DB -{ - -class CharInputStream : public antlr4::CharStream -{ - public: - CharInputStream(const char * begin, const char * end); - - private: - const char * d; - size_t s = 0; - size_t p = 0; - - size_t index() override { return p; } - size_t size() override { return s; } - - size_t LA(ssize_t i) override; - void consume() override; - void seek(size_t i) override; - - ssize_t mark() override { return -1; } - void release(ssize_t marker) override {}; - - std::string getSourceName() const override { return "CharInputStream"; }; - std::string getText(const antlr4::misc::Interval &interval) override; - std::string toString() const override { return {d, s}; } -}; - -} diff --git a/src/Parsers/New/ClickHouseLexer.cpp b/src/Parsers/New/ClickHouseLexer.cpp deleted file mode 100644 index 7fb2a0effaa..00000000000 --- a/src/Parsers/New/ClickHouseLexer.cpp +++ /dev/null @@ -1,1603 +0,0 @@ - -// Generated from ClickHouseLexer.g4 by ANTLR 4.7.2 - - -#include "ClickHouseLexer.h" - - -using namespace antlr4; - -using namespace DB; - -ClickHouseLexer::ClickHouseLexer(CharStream *input) : Lexer(input) { - _interpreter = new atn::LexerATNSimulator(this, _atn, _decisionToDFA, _sharedContextCache); -} - -ClickHouseLexer::~ClickHouseLexer() { - delete _interpreter; -} - -std::string ClickHouseLexer::getGrammarFileName() const { - return "ClickHouseLexer.g4"; -} - -const std::vector& ClickHouseLexer::getRuleNames() const { - return _ruleNames; -} - -const std::vector& ClickHouseLexer::getChannelNames() const { - return _channelNames; -} - -const std::vector& ClickHouseLexer::getModeNames() const { - return _modeNames; -} - -const std::vector& ClickHouseLexer::getTokenNames() const { - return _tokenNames; -} - -dfa::Vocabulary& ClickHouseLexer::getVocabulary() const { - return _vocabulary; -} - -const std::vector ClickHouseLexer::getSerializedATN() const { - return _serializedATN; -} - -const atn::ATN& ClickHouseLexer::getATN() const { - return _atn; -} - - - - -// Static vars and initialization. -std::vector ClickHouseLexer::_decisionToDFA; -atn::PredictionContextCache ClickHouseLexer::_sharedContextCache; - -// We own the ATN which in turn owns the ATN states. 
-atn::ATN ClickHouseLexer::_atn; -std::vector ClickHouseLexer::_serializedATN; - -std::vector ClickHouseLexer::_ruleNames = { - u8"ADD", u8"AFTER", u8"ALIAS", u8"ALL", u8"ALTER", u8"AND", u8"ANTI", - u8"ANY", u8"ARRAY", u8"AS", u8"ASCENDING", u8"ASOF", u8"AST", u8"ASYNC", - u8"ATTACH", u8"BETWEEN", u8"BOTH", u8"BY", u8"CASE", u8"CAST", u8"CHECK", - u8"CLEAR", u8"CLUSTER", u8"CODEC", u8"COLLATE", u8"COLUMN", u8"COMMENT", - u8"CONSTRAINT", u8"CREATE", u8"CROSS", u8"CUBE", u8"DATABASE", u8"DATABASES", - u8"DATE", u8"DAY", u8"DEDUPLICATE", u8"DEFAULT", u8"DELAY", u8"DELETE", - u8"DESC", u8"DESCENDING", u8"DESCRIBE", u8"DETACH", u8"DICTIONARIES", - u8"DICTIONARY", u8"DISK", u8"DISTINCT", u8"DISTRIBUTED", u8"DROP", u8"ELSE", - u8"END", u8"ENGINE", u8"EVENTS", u8"EXISTS", u8"EXPLAIN", u8"EXPRESSION", - u8"EXTRACT", u8"FETCHES", u8"FINAL", u8"FIRST", u8"FLUSH", u8"FOR", u8"FORMAT", - u8"FREEZE", u8"FROM", u8"FULL", u8"FUNCTION", u8"GLOBAL", u8"GRANULARITY", - u8"GROUP", u8"HAVING", u8"HIERARCHICAL", u8"HOUR", u8"ID", u8"IF", u8"ILIKE", - u8"IN", u8"INDEX", u8"INF", u8"INJECTIVE", u8"INNER", u8"INSERT", u8"INTERVAL", - u8"INTO", u8"IS", u8"IS_OBJECT_ID", u8"JOIN", u8"KEY", u8"KILL", u8"LAST", - u8"LAYOUT", u8"LEADING", u8"LEFT", u8"LIFETIME", u8"LIKE", u8"LIMIT", - u8"LIVE", u8"LOCAL", u8"LOGS", u8"MATERIALIZE", u8"MATERIALIZED", u8"MAX", - u8"MERGES", u8"MIN", u8"MINUTE", u8"MODIFY", u8"MONTH", u8"MOVE", u8"MUTATION", - u8"NAN_SQL", u8"NO", u8"NOT", u8"NULL_SQL", u8"NULLS", u8"OFFSET", u8"ON", - u8"OPTIMIZE", u8"OR", u8"ORDER", u8"OUTER", u8"OUTFILE", u8"PARTITION", - u8"POPULATE", u8"PREWHERE", u8"PRIMARY", u8"PROJECTION", u8"QUARTER", - u8"RANGE", u8"RELOAD", u8"REMOVE", u8"RENAME", u8"REPLACE", u8"REPLICA", - u8"REPLICATED", u8"RIGHT", u8"ROLLUP", u8"SAMPLE", u8"SECOND", u8"SELECT", - u8"SEMI", u8"SENDS", u8"SET", u8"SETTINGS", u8"SHOW", u8"SOURCE", u8"START", - u8"STOP", u8"SUBSTRING", u8"SYNC", u8"SYNTAX", u8"SYSTEM", u8"TABLE", - u8"TABLES", u8"TEMPORARY", u8"TEST", u8"THEN", u8"TIES", u8"TIMEOUT", - u8"TIMESTAMP", u8"TO", u8"TOP", u8"TOTALS", u8"TRAILING", u8"TRIM", u8"TRUNCATE", - u8"TTL", u8"TYPE", u8"UNION", u8"UPDATE", u8"USE", u8"USING", u8"UUID", - u8"VALUES", u8"VIEW", u8"VOLUME", u8"WATCH", u8"WEEK", u8"WHEN", u8"WHERE", - u8"WITH", u8"YEAR", u8"JSON_FALSE", u8"JSON_TRUE", u8"IDENTIFIER", u8"FLOATING_LITERAL", - u8"OCTAL_LITERAL", u8"DECIMAL_LITERAL", u8"HEXADECIMAL_LITERAL", u8"STRING_LITERAL", - u8"A", u8"B", u8"C", u8"D", u8"E", u8"F", u8"G", u8"H", u8"I", u8"J", - u8"K", u8"L", u8"M", u8"N", u8"O", u8"P", u8"Q", u8"R", u8"S", u8"T", - u8"U", u8"V", u8"W", u8"X", u8"Y", u8"Z", u8"LETTER", u8"OCT_DIGIT", u8"DEC_DIGIT", - u8"HEX_DIGIT", u8"ARROW", u8"ASTERISK", u8"BACKQUOTE", u8"BACKSLASH", - u8"COLON", u8"COMMA", u8"CONCAT", u8"DASH", u8"DOT", u8"EQ_DOUBLE", u8"EQ_SINGLE", - u8"GE", u8"GT", u8"LBRACE", u8"LBRACKET", u8"LE", u8"LPAREN", u8"LT", - u8"NOT_EQ", u8"PERCENT", u8"PLUS", u8"QUERY", u8"QUOTE_DOUBLE", u8"QUOTE_SINGLE", - u8"RBRACE", u8"RBRACKET", u8"RPAREN", u8"SEMICOLON", u8"SLASH", u8"UNDERSCORE", - u8"MULTI_LINE_COMMENT", u8"SINGLE_LINE_COMMENT", u8"WHITESPACE" -}; - -std::vector ClickHouseLexer::_channelNames = { - "DEFAULT_TOKEN_CHANNEL", "HIDDEN" -}; - -std::vector ClickHouseLexer::_modeNames = { - u8"DEFAULT_MODE" -}; - -std::vector ClickHouseLexer::_literalNames = { - "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", - "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", - "", "", "", "", "", "", "", "", "", "", "", "", "", 
"", "", "", "", "", - "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", - "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", - "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", - "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", - "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", - "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", - "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", - "", "", u8"'false'", u8"'true'", "", "", "", "", "", "", u8"'->'", u8"'*'", - u8"'`'", u8"'\\'", u8"':'", u8"','", u8"'||'", u8"'-'", u8"'.'", u8"'=='", - u8"'='", u8"'>='", u8"'>'", u8"'{'", u8"'['", u8"'<='", u8"'('", u8"'<'", - "", u8"'%'", u8"'+'", u8"'?'", u8"'\"'", u8"'''", u8"'}'", u8"']'", u8"')'", - u8"';'", u8"'/'", u8"'_'" -}; - -std::vector ClickHouseLexer::_symbolicNames = { - "", u8"ADD", u8"AFTER", u8"ALIAS", u8"ALL", u8"ALTER", u8"AND", u8"ANTI", - u8"ANY", u8"ARRAY", u8"AS", u8"ASCENDING", u8"ASOF", u8"AST", u8"ASYNC", - u8"ATTACH", u8"BETWEEN", u8"BOTH", u8"BY", u8"CASE", u8"CAST", u8"CHECK", - u8"CLEAR", u8"CLUSTER", u8"CODEC", u8"COLLATE", u8"COLUMN", u8"COMMENT", - u8"CONSTRAINT", u8"CREATE", u8"CROSS", u8"CUBE", u8"DATABASE", u8"DATABASES", - u8"DATE", u8"DAY", u8"DEDUPLICATE", u8"DEFAULT", u8"DELAY", u8"DELETE", - u8"DESC", u8"DESCENDING", u8"DESCRIBE", u8"DETACH", u8"DICTIONARIES", - u8"DICTIONARY", u8"DISK", u8"DISTINCT", u8"DISTRIBUTED", u8"DROP", u8"ELSE", - u8"END", u8"ENGINE", u8"EVENTS", u8"EXISTS", u8"EXPLAIN", u8"EXPRESSION", - u8"EXTRACT", u8"FETCHES", u8"FINAL", u8"FIRST", u8"FLUSH", u8"FOR", u8"FORMAT", - u8"FREEZE", u8"FROM", u8"FULL", u8"FUNCTION", u8"GLOBAL", u8"GRANULARITY", - u8"GROUP", u8"HAVING", u8"HIERARCHICAL", u8"HOUR", u8"ID", u8"IF", u8"ILIKE", - u8"IN", u8"INDEX", u8"INF", u8"INJECTIVE", u8"INNER", u8"INSERT", u8"INTERVAL", - u8"INTO", u8"IS", u8"IS_OBJECT_ID", u8"JOIN", u8"KEY", u8"KILL", u8"LAST", - u8"LAYOUT", u8"LEADING", u8"LEFT", u8"LIFETIME", u8"LIKE", u8"LIMIT", - u8"LIVE", u8"LOCAL", u8"LOGS", u8"MATERIALIZE", u8"MATERIALIZED", u8"MAX", - u8"MERGES", u8"MIN", u8"MINUTE", u8"MODIFY", u8"MONTH", u8"MOVE", u8"MUTATION", - u8"NAN_SQL", u8"NO", u8"NOT", u8"NULL_SQL", u8"NULLS", u8"OFFSET", u8"ON", - u8"OPTIMIZE", u8"OR", u8"ORDER", u8"OUTER", u8"OUTFILE", u8"PARTITION", - u8"POPULATE", u8"PREWHERE", u8"PRIMARY", u8"PROJECTION", u8"QUARTER", - u8"RANGE", u8"RELOAD", u8"REMOVE", u8"RENAME", u8"REPLACE", u8"REPLICA", - u8"REPLICATED", u8"RIGHT", u8"ROLLUP", u8"SAMPLE", u8"SECOND", u8"SELECT", - u8"SEMI", u8"SENDS", u8"SET", u8"SETTINGS", u8"SHOW", u8"SOURCE", u8"START", - u8"STOP", u8"SUBSTRING", u8"SYNC", u8"SYNTAX", u8"SYSTEM", u8"TABLE", - u8"TABLES", u8"TEMPORARY", u8"TEST", u8"THEN", u8"TIES", u8"TIMEOUT", - u8"TIMESTAMP", u8"TO", u8"TOP", u8"TOTALS", u8"TRAILING", u8"TRIM", u8"TRUNCATE", - u8"TTL", u8"TYPE", u8"UNION", u8"UPDATE", u8"USE", u8"USING", u8"UUID", - u8"VALUES", u8"VIEW", u8"VOLUME", u8"WATCH", u8"WEEK", u8"WHEN", u8"WHERE", - u8"WITH", u8"YEAR", u8"JSON_FALSE", u8"JSON_TRUE", u8"IDENTIFIER", u8"FLOATING_LITERAL", - u8"OCTAL_LITERAL", u8"DECIMAL_LITERAL", u8"HEXADECIMAL_LITERAL", u8"STRING_LITERAL", - u8"ARROW", u8"ASTERISK", u8"BACKQUOTE", u8"BACKSLASH", u8"COLON", u8"COMMA", - u8"CONCAT", u8"DASH", u8"DOT", u8"EQ_DOUBLE", u8"EQ_SINGLE", u8"GE", u8"GT", - u8"LBRACE", u8"LBRACKET", u8"LE", u8"LPAREN", u8"LT", u8"NOT_EQ", u8"PERCENT", - u8"PLUS", u8"QUERY", u8"QUOTE_DOUBLE", u8"QUOTE_SINGLE", 
u8"RBRACE", u8"RBRACKET", - u8"RPAREN", u8"SEMICOLON", u8"SLASH", u8"UNDERSCORE", u8"MULTI_LINE_COMMENT", - u8"SINGLE_LINE_COMMENT", u8"WHITESPACE" -}; - -dfa::Vocabulary ClickHouseLexer::_vocabulary(_literalNames, _symbolicNames); - -std::vector ClickHouseLexer::_tokenNames; - -ClickHouseLexer::Initializer::Initializer() { - // This code could be in a static initializer lambda, but VS doesn't allow access to private class members from there. - for (size_t i = 0; i < _symbolicNames.size(); ++i) { - std::string name = _vocabulary.getLiteralName(i); - if (name.empty()) { - name = _vocabulary.getSymbolicName(i); - } - - if (name.empty()) { - _tokenNames.push_back(""); - } else { - _tokenNames.push_back(name); - } - } - - _serializedATN = { - 0x3, 0x608b, 0xa72a, 0x8133, 0xb9ed, 0x417c, 0x3be7, 0x7786, 0x5964, - 0x2, 0xe0, 0x803, 0x8, 0x1, 0x4, 0x2, 0x9, 0x2, 0x4, 0x3, 0x9, 0x3, - 0x4, 0x4, 0x9, 0x4, 0x4, 0x5, 0x9, 0x5, 0x4, 0x6, 0x9, 0x6, 0x4, 0x7, - 0x9, 0x7, 0x4, 0x8, 0x9, 0x8, 0x4, 0x9, 0x9, 0x9, 0x4, 0xa, 0x9, 0xa, - 0x4, 0xb, 0x9, 0xb, 0x4, 0xc, 0x9, 0xc, 0x4, 0xd, 0x9, 0xd, 0x4, 0xe, - 0x9, 0xe, 0x4, 0xf, 0x9, 0xf, 0x4, 0x10, 0x9, 0x10, 0x4, 0x11, 0x9, - 0x11, 0x4, 0x12, 0x9, 0x12, 0x4, 0x13, 0x9, 0x13, 0x4, 0x14, 0x9, 0x14, - 0x4, 0x15, 0x9, 0x15, 0x4, 0x16, 0x9, 0x16, 0x4, 0x17, 0x9, 0x17, 0x4, - 0x18, 0x9, 0x18, 0x4, 0x19, 0x9, 0x19, 0x4, 0x1a, 0x9, 0x1a, 0x4, 0x1b, - 0x9, 0x1b, 0x4, 0x1c, 0x9, 0x1c, 0x4, 0x1d, 0x9, 0x1d, 0x4, 0x1e, 0x9, - 0x1e, 0x4, 0x1f, 0x9, 0x1f, 0x4, 0x20, 0x9, 0x20, 0x4, 0x21, 0x9, 0x21, - 0x4, 0x22, 0x9, 0x22, 0x4, 0x23, 0x9, 0x23, 0x4, 0x24, 0x9, 0x24, 0x4, - 0x25, 0x9, 0x25, 0x4, 0x26, 0x9, 0x26, 0x4, 0x27, 0x9, 0x27, 0x4, 0x28, - 0x9, 0x28, 0x4, 0x29, 0x9, 0x29, 0x4, 0x2a, 0x9, 0x2a, 0x4, 0x2b, 0x9, - 0x2b, 0x4, 0x2c, 0x9, 0x2c, 0x4, 0x2d, 0x9, 0x2d, 0x4, 0x2e, 0x9, 0x2e, - 0x4, 0x2f, 0x9, 0x2f, 0x4, 0x30, 0x9, 0x30, 0x4, 0x31, 0x9, 0x31, 0x4, - 0x32, 0x9, 0x32, 0x4, 0x33, 0x9, 0x33, 0x4, 0x34, 0x9, 0x34, 0x4, 0x35, - 0x9, 0x35, 0x4, 0x36, 0x9, 0x36, 0x4, 0x37, 0x9, 0x37, 0x4, 0x38, 0x9, - 0x38, 0x4, 0x39, 0x9, 0x39, 0x4, 0x3a, 0x9, 0x3a, 0x4, 0x3b, 0x9, 0x3b, - 0x4, 0x3c, 0x9, 0x3c, 0x4, 0x3d, 0x9, 0x3d, 0x4, 0x3e, 0x9, 0x3e, 0x4, - 0x3f, 0x9, 0x3f, 0x4, 0x40, 0x9, 0x40, 0x4, 0x41, 0x9, 0x41, 0x4, 0x42, - 0x9, 0x42, 0x4, 0x43, 0x9, 0x43, 0x4, 0x44, 0x9, 0x44, 0x4, 0x45, 0x9, - 0x45, 0x4, 0x46, 0x9, 0x46, 0x4, 0x47, 0x9, 0x47, 0x4, 0x48, 0x9, 0x48, - 0x4, 0x49, 0x9, 0x49, 0x4, 0x4a, 0x9, 0x4a, 0x4, 0x4b, 0x9, 0x4b, 0x4, - 0x4c, 0x9, 0x4c, 0x4, 0x4d, 0x9, 0x4d, 0x4, 0x4e, 0x9, 0x4e, 0x4, 0x4f, - 0x9, 0x4f, 0x4, 0x50, 0x9, 0x50, 0x4, 0x51, 0x9, 0x51, 0x4, 0x52, 0x9, - 0x52, 0x4, 0x53, 0x9, 0x53, 0x4, 0x54, 0x9, 0x54, 0x4, 0x55, 0x9, 0x55, - 0x4, 0x56, 0x9, 0x56, 0x4, 0x57, 0x9, 0x57, 0x4, 0x58, 0x9, 0x58, 0x4, - 0x59, 0x9, 0x59, 0x4, 0x5a, 0x9, 0x5a, 0x4, 0x5b, 0x9, 0x5b, 0x4, 0x5c, - 0x9, 0x5c, 0x4, 0x5d, 0x9, 0x5d, 0x4, 0x5e, 0x9, 0x5e, 0x4, 0x5f, 0x9, - 0x5f, 0x4, 0x60, 0x9, 0x60, 0x4, 0x61, 0x9, 0x61, 0x4, 0x62, 0x9, 0x62, - 0x4, 0x63, 0x9, 0x63, 0x4, 0x64, 0x9, 0x64, 0x4, 0x65, 0x9, 0x65, 0x4, - 0x66, 0x9, 0x66, 0x4, 0x67, 0x9, 0x67, 0x4, 0x68, 0x9, 0x68, 0x4, 0x69, - 0x9, 0x69, 0x4, 0x6a, 0x9, 0x6a, 0x4, 0x6b, 0x9, 0x6b, 0x4, 0x6c, 0x9, - 0x6c, 0x4, 0x6d, 0x9, 0x6d, 0x4, 0x6e, 0x9, 0x6e, 0x4, 0x6f, 0x9, 0x6f, - 0x4, 0x70, 0x9, 0x70, 0x4, 0x71, 0x9, 0x71, 0x4, 0x72, 0x9, 0x72, 0x4, - 0x73, 0x9, 0x73, 0x4, 0x74, 0x9, 0x74, 0x4, 0x75, 0x9, 0x75, 0x4, 0x76, - 0x9, 0x76, 0x4, 0x77, 0x9, 0x77, 0x4, 0x78, 0x9, 0x78, 0x4, 0x79, 0x9, - 0x79, 0x4, 0x7a, 0x9, 
0x7a, 0x4, 0x7b, 0x9, 0x7b, 0x4, 0x7c, 0x9, 0x7c, - 0x4, 0x7d, 0x9, 0x7d, 0x4, 0x7e, 0x9, 0x7e, 0x4, 0x7f, 0x9, 0x7f, 0x4, - 0x80, 0x9, 0x80, 0x4, 0x81, 0x9, 0x81, 0x4, 0x82, 0x9, 0x82, 0x4, 0x83, - 0x9, 0x83, 0x4, 0x84, 0x9, 0x84, 0x4, 0x85, 0x9, 0x85, 0x4, 0x86, 0x9, - 0x86, 0x4, 0x87, 0x9, 0x87, 0x4, 0x88, 0x9, 0x88, 0x4, 0x89, 0x9, 0x89, - 0x4, 0x8a, 0x9, 0x8a, 0x4, 0x8b, 0x9, 0x8b, 0x4, 0x8c, 0x9, 0x8c, 0x4, - 0x8d, 0x9, 0x8d, 0x4, 0x8e, 0x9, 0x8e, 0x4, 0x8f, 0x9, 0x8f, 0x4, 0x90, - 0x9, 0x90, 0x4, 0x91, 0x9, 0x91, 0x4, 0x92, 0x9, 0x92, 0x4, 0x93, 0x9, - 0x93, 0x4, 0x94, 0x9, 0x94, 0x4, 0x95, 0x9, 0x95, 0x4, 0x96, 0x9, 0x96, - 0x4, 0x97, 0x9, 0x97, 0x4, 0x98, 0x9, 0x98, 0x4, 0x99, 0x9, 0x99, 0x4, - 0x9a, 0x9, 0x9a, 0x4, 0x9b, 0x9, 0x9b, 0x4, 0x9c, 0x9, 0x9c, 0x4, 0x9d, - 0x9, 0x9d, 0x4, 0x9e, 0x9, 0x9e, 0x4, 0x9f, 0x9, 0x9f, 0x4, 0xa0, 0x9, - 0xa0, 0x4, 0xa1, 0x9, 0xa1, 0x4, 0xa2, 0x9, 0xa2, 0x4, 0xa3, 0x9, 0xa3, - 0x4, 0xa4, 0x9, 0xa4, 0x4, 0xa5, 0x9, 0xa5, 0x4, 0xa6, 0x9, 0xa6, 0x4, - 0xa7, 0x9, 0xa7, 0x4, 0xa8, 0x9, 0xa8, 0x4, 0xa9, 0x9, 0xa9, 0x4, 0xaa, - 0x9, 0xaa, 0x4, 0xab, 0x9, 0xab, 0x4, 0xac, 0x9, 0xac, 0x4, 0xad, 0x9, - 0xad, 0x4, 0xae, 0x9, 0xae, 0x4, 0xaf, 0x9, 0xaf, 0x4, 0xb0, 0x9, 0xb0, - 0x4, 0xb1, 0x9, 0xb1, 0x4, 0xb2, 0x9, 0xb2, 0x4, 0xb3, 0x9, 0xb3, 0x4, - 0xb4, 0x9, 0xb4, 0x4, 0xb5, 0x9, 0xb5, 0x4, 0xb6, 0x9, 0xb6, 0x4, 0xb7, - 0x9, 0xb7, 0x4, 0xb8, 0x9, 0xb8, 0x4, 0xb9, 0x9, 0xb9, 0x4, 0xba, 0x9, - 0xba, 0x4, 0xbb, 0x9, 0xbb, 0x4, 0xbc, 0x9, 0xbc, 0x4, 0xbd, 0x9, 0xbd, - 0x4, 0xbe, 0x9, 0xbe, 0x4, 0xbf, 0x9, 0xbf, 0x4, 0xc0, 0x9, 0xc0, 0x4, - 0xc1, 0x9, 0xc1, 0x4, 0xc2, 0x9, 0xc2, 0x4, 0xc3, 0x9, 0xc3, 0x4, 0xc4, - 0x9, 0xc4, 0x4, 0xc5, 0x9, 0xc5, 0x4, 0xc6, 0x9, 0xc6, 0x4, 0xc7, 0x9, - 0xc7, 0x4, 0xc8, 0x9, 0xc8, 0x4, 0xc9, 0x9, 0xc9, 0x4, 0xca, 0x9, 0xca, - 0x4, 0xcb, 0x9, 0xcb, 0x4, 0xcc, 0x9, 0xcc, 0x4, 0xcd, 0x9, 0xcd, 0x4, - 0xce, 0x9, 0xce, 0x4, 0xcf, 0x9, 0xcf, 0x4, 0xd0, 0x9, 0xd0, 0x4, 0xd1, - 0x9, 0xd1, 0x4, 0xd2, 0x9, 0xd2, 0x4, 0xd3, 0x9, 0xd3, 0x4, 0xd4, 0x9, - 0xd4, 0x4, 0xd5, 0x9, 0xd5, 0x4, 0xd6, 0x9, 0xd6, 0x4, 0xd7, 0x9, 0xd7, - 0x4, 0xd8, 0x9, 0xd8, 0x4, 0xd9, 0x9, 0xd9, 0x4, 0xda, 0x9, 0xda, 0x4, - 0xdb, 0x9, 0xdb, 0x4, 0xdc, 0x9, 0xdc, 0x4, 0xdd, 0x9, 0xdd, 0x4, 0xde, - 0x9, 0xde, 0x4, 0xdf, 0x9, 0xdf, 0x4, 0xe0, 0x9, 0xe0, 0x4, 0xe1, 0x9, - 0xe1, 0x4, 0xe2, 0x9, 0xe2, 0x4, 0xe3, 0x9, 0xe3, 0x4, 0xe4, 0x9, 0xe4, - 0x4, 0xe5, 0x9, 0xe5, 0x4, 0xe6, 0x9, 0xe6, 0x4, 0xe7, 0x9, 0xe7, 0x4, - 0xe8, 0x9, 0xe8, 0x4, 0xe9, 0x9, 0xe9, 0x4, 0xea, 0x9, 0xea, 0x4, 0xeb, - 0x9, 0xeb, 0x4, 0xec, 0x9, 0xec, 0x4, 0xed, 0x9, 0xed, 0x4, 0xee, 0x9, - 0xee, 0x4, 0xef, 0x9, 0xef, 0x4, 0xf0, 0x9, 0xf0, 0x4, 0xf1, 0x9, 0xf1, - 0x4, 0xf2, 0x9, 0xf2, 0x4, 0xf3, 0x9, 0xf3, 0x4, 0xf4, 0x9, 0xf4, 0x4, - 0xf5, 0x9, 0xf5, 0x4, 0xf6, 0x9, 0xf6, 0x4, 0xf7, 0x9, 0xf7, 0x4, 0xf8, - 0x9, 0xf8, 0x4, 0xf9, 0x9, 0xf9, 0x4, 0xfa, 0x9, 0xfa, 0x4, 0xfb, 0x9, - 0xfb, 0x4, 0xfc, 0x9, 0xfc, 0x4, 0xfd, 0x9, 0xfd, 0x3, 0x2, 0x3, 0x2, - 0x3, 0x2, 0x3, 0x2, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, - 0x3, 0x3, 0x3, 0x4, 0x3, 0x4, 0x3, 0x4, 0x3, 0x4, 0x3, 0x4, 0x3, 0x4, - 0x3, 0x5, 0x3, 0x5, 0x3, 0x5, 0x3, 0x5, 0x3, 0x6, 0x3, 0x6, 0x3, 0x6, - 0x3, 0x6, 0x3, 0x6, 0x3, 0x6, 0x3, 0x7, 0x3, 0x7, 0x3, 0x7, 0x3, 0x7, - 0x3, 0x8, 0x3, 0x8, 0x3, 0x8, 0x3, 0x8, 0x3, 0x8, 0x3, 0x9, 0x3, 0x9, - 0x3, 0x9, 0x3, 0x9, 0x3, 0xa, 0x3, 0xa, 0x3, 0xa, 0x3, 0xa, 0x3, 0xa, - 0x3, 0xa, 0x3, 0xb, 0x3, 0xb, 0x3, 0xb, 0x3, 0xc, 0x3, 0xc, 0x3, 0xc, - 0x3, 0xc, 0x3, 0xc, 0x3, 0xc, 0x3, 0xc, 0x3, 0xc, 0x3, 0xc, 
0x3, 0xc, - 0x3, 0xc, 0x3, 0xc, 0x3, 0xc, 0x3, 0xc, 0x5, 0xc, 0x23a, 0xa, 0xc, 0x3, - 0xd, 0x3, 0xd, 0x3, 0xd, 0x3, 0xd, 0x3, 0xd, 0x3, 0xe, 0x3, 0xe, 0x3, - 0xe, 0x3, 0xe, 0x3, 0xf, 0x3, 0xf, 0x3, 0xf, 0x3, 0xf, 0x3, 0xf, 0x3, - 0xf, 0x3, 0x10, 0x3, 0x10, 0x3, 0x10, 0x3, 0x10, 0x3, 0x10, 0x3, 0x10, - 0x3, 0x10, 0x3, 0x11, 0x3, 0x11, 0x3, 0x11, 0x3, 0x11, 0x3, 0x11, 0x3, - 0x11, 0x3, 0x11, 0x3, 0x11, 0x3, 0x12, 0x3, 0x12, 0x3, 0x12, 0x3, 0x12, - 0x3, 0x12, 0x3, 0x13, 0x3, 0x13, 0x3, 0x13, 0x3, 0x14, 0x3, 0x14, 0x3, - 0x14, 0x3, 0x14, 0x3, 0x14, 0x3, 0x15, 0x3, 0x15, 0x3, 0x15, 0x3, 0x15, - 0x3, 0x15, 0x3, 0x16, 0x3, 0x16, 0x3, 0x16, 0x3, 0x16, 0x3, 0x16, 0x3, - 0x16, 0x3, 0x17, 0x3, 0x17, 0x3, 0x17, 0x3, 0x17, 0x3, 0x17, 0x3, 0x17, - 0x3, 0x18, 0x3, 0x18, 0x3, 0x18, 0x3, 0x18, 0x3, 0x18, 0x3, 0x18, 0x3, - 0x18, 0x3, 0x18, 0x3, 0x19, 0x3, 0x19, 0x3, 0x19, 0x3, 0x19, 0x3, 0x19, - 0x3, 0x19, 0x3, 0x1a, 0x3, 0x1a, 0x3, 0x1a, 0x3, 0x1a, 0x3, 0x1a, 0x3, - 0x1a, 0x3, 0x1a, 0x3, 0x1a, 0x3, 0x1b, 0x3, 0x1b, 0x3, 0x1b, 0x3, 0x1b, - 0x3, 0x1b, 0x3, 0x1b, 0x3, 0x1b, 0x3, 0x1c, 0x3, 0x1c, 0x3, 0x1c, 0x3, - 0x1c, 0x3, 0x1c, 0x3, 0x1c, 0x3, 0x1c, 0x3, 0x1c, 0x3, 0x1d, 0x3, 0x1d, - 0x3, 0x1d, 0x3, 0x1d, 0x3, 0x1d, 0x3, 0x1d, 0x3, 0x1d, 0x3, 0x1d, 0x3, - 0x1d, 0x3, 0x1d, 0x3, 0x1d, 0x3, 0x1e, 0x3, 0x1e, 0x3, 0x1e, 0x3, 0x1e, - 0x3, 0x1e, 0x3, 0x1e, 0x3, 0x1e, 0x3, 0x1f, 0x3, 0x1f, 0x3, 0x1f, 0x3, - 0x1f, 0x3, 0x1f, 0x3, 0x1f, 0x3, 0x20, 0x3, 0x20, 0x3, 0x20, 0x3, 0x20, - 0x3, 0x20, 0x3, 0x21, 0x3, 0x21, 0x3, 0x21, 0x3, 0x21, 0x3, 0x21, 0x3, - 0x21, 0x3, 0x21, 0x3, 0x21, 0x3, 0x21, 0x3, 0x22, 0x3, 0x22, 0x3, 0x22, - 0x3, 0x22, 0x3, 0x22, 0x3, 0x22, 0x3, 0x22, 0x3, 0x22, 0x3, 0x22, 0x3, - 0x22, 0x3, 0x23, 0x3, 0x23, 0x3, 0x23, 0x3, 0x23, 0x3, 0x23, 0x3, 0x24, - 0x3, 0x24, 0x3, 0x24, 0x3, 0x24, 0x3, 0x25, 0x3, 0x25, 0x3, 0x25, 0x3, - 0x25, 0x3, 0x25, 0x3, 0x25, 0x3, 0x25, 0x3, 0x25, 0x3, 0x25, 0x3, 0x25, - 0x3, 0x25, 0x3, 0x25, 0x3, 0x26, 0x3, 0x26, 0x3, 0x26, 0x3, 0x26, 0x3, - 0x26, 0x3, 0x26, 0x3, 0x26, 0x3, 0x26, 0x3, 0x27, 0x3, 0x27, 0x3, 0x27, - 0x3, 0x27, 0x3, 0x27, 0x3, 0x27, 0x3, 0x28, 0x3, 0x28, 0x3, 0x28, 0x3, - 0x28, 0x3, 0x28, 0x3, 0x28, 0x3, 0x28, 0x3, 0x29, 0x3, 0x29, 0x3, 0x29, - 0x3, 0x29, 0x3, 0x29, 0x3, 0x2a, 0x3, 0x2a, 0x3, 0x2a, 0x3, 0x2a, 0x3, - 0x2a, 0x3, 0x2a, 0x3, 0x2a, 0x3, 0x2a, 0x3, 0x2a, 0x3, 0x2a, 0x3, 0x2a, - 0x3, 0x2b, 0x3, 0x2b, 0x3, 0x2b, 0x3, 0x2b, 0x3, 0x2b, 0x3, 0x2b, 0x3, - 0x2b, 0x3, 0x2b, 0x3, 0x2b, 0x3, 0x2c, 0x3, 0x2c, 0x3, 0x2c, 0x3, 0x2c, - 0x3, 0x2c, 0x3, 0x2c, 0x3, 0x2c, 0x3, 0x2d, 0x3, 0x2d, 0x3, 0x2d, 0x3, - 0x2d, 0x3, 0x2d, 0x3, 0x2d, 0x3, 0x2d, 0x3, 0x2d, 0x3, 0x2d, 0x3, 0x2d, - 0x3, 0x2d, 0x3, 0x2d, 0x3, 0x2d, 0x3, 0x2e, 0x3, 0x2e, 0x3, 0x2e, 0x3, - 0x2e, 0x3, 0x2e, 0x3, 0x2e, 0x3, 0x2e, 0x3, 0x2e, 0x3, 0x2e, 0x3, 0x2e, - 0x3, 0x2e, 0x3, 0x2f, 0x3, 0x2f, 0x3, 0x2f, 0x3, 0x2f, 0x3, 0x2f, 0x3, - 0x30, 0x3, 0x30, 0x3, 0x30, 0x3, 0x30, 0x3, 0x30, 0x3, 0x30, 0x3, 0x30, - 0x3, 0x30, 0x3, 0x30, 0x3, 0x31, 0x3, 0x31, 0x3, 0x31, 0x3, 0x31, 0x3, - 0x31, 0x3, 0x31, 0x3, 0x31, 0x3, 0x31, 0x3, 0x31, 0x3, 0x31, 0x3, 0x31, - 0x3, 0x31, 0x3, 0x32, 0x3, 0x32, 0x3, 0x32, 0x3, 0x32, 0x3, 0x32, 0x3, - 0x33, 0x3, 0x33, 0x3, 0x33, 0x3, 0x33, 0x3, 0x33, 0x3, 0x34, 0x3, 0x34, - 0x3, 0x34, 0x3, 0x34, 0x3, 0x35, 0x3, 0x35, 0x3, 0x35, 0x3, 0x35, 0x3, - 0x35, 0x3, 0x35, 0x3, 0x35, 0x3, 0x36, 0x3, 0x36, 0x3, 0x36, 0x3, 0x36, - 0x3, 0x36, 0x3, 0x36, 0x3, 0x36, 0x3, 0x37, 0x3, 0x37, 0x3, 0x37, 0x3, - 0x37, 0x3, 0x37, 0x3, 0x37, 0x3, 0x37, 0x3, 0x38, 0x3, 0x38, 0x3, 0x38, - 0x3, 0x38, 0x3, 
0x38, 0x3, 0x38, 0x3, 0x38, 0x3, 0x38, 0x3, 0x39, 0x3, - 0x39, 0x3, 0x39, 0x3, 0x39, 0x3, 0x39, 0x3, 0x39, 0x3, 0x39, 0x3, 0x39, - 0x3, 0x39, 0x3, 0x39, 0x3, 0x39, 0x3, 0x3a, 0x3, 0x3a, 0x3, 0x3a, 0x3, - 0x3a, 0x3, 0x3a, 0x3, 0x3a, 0x3, 0x3a, 0x3, 0x3a, 0x3, 0x3b, 0x3, 0x3b, - 0x3, 0x3b, 0x3, 0x3b, 0x3, 0x3b, 0x3, 0x3b, 0x3, 0x3b, 0x3, 0x3b, 0x3, - 0x3c, 0x3, 0x3c, 0x3, 0x3c, 0x3, 0x3c, 0x3, 0x3c, 0x3, 0x3c, 0x3, 0x3d, - 0x3, 0x3d, 0x3, 0x3d, 0x3, 0x3d, 0x3, 0x3d, 0x3, 0x3d, 0x3, 0x3e, 0x3, - 0x3e, 0x3, 0x3e, 0x3, 0x3e, 0x3, 0x3e, 0x3, 0x3e, 0x3, 0x3f, 0x3, 0x3f, - 0x3, 0x3f, 0x3, 0x3f, 0x3, 0x40, 0x3, 0x40, 0x3, 0x40, 0x3, 0x40, 0x3, - 0x40, 0x3, 0x40, 0x3, 0x40, 0x3, 0x41, 0x3, 0x41, 0x3, 0x41, 0x3, 0x41, - 0x3, 0x41, 0x3, 0x41, 0x3, 0x41, 0x3, 0x42, 0x3, 0x42, 0x3, 0x42, 0x3, - 0x42, 0x3, 0x42, 0x3, 0x43, 0x3, 0x43, 0x3, 0x43, 0x3, 0x43, 0x3, 0x43, - 0x3, 0x44, 0x3, 0x44, 0x3, 0x44, 0x3, 0x44, 0x3, 0x44, 0x3, 0x44, 0x3, - 0x44, 0x3, 0x44, 0x3, 0x44, 0x3, 0x45, 0x3, 0x45, 0x3, 0x45, 0x3, 0x45, - 0x3, 0x45, 0x3, 0x45, 0x3, 0x45, 0x3, 0x46, 0x3, 0x46, 0x3, 0x46, 0x3, - 0x46, 0x3, 0x46, 0x3, 0x46, 0x3, 0x46, 0x3, 0x46, 0x3, 0x46, 0x3, 0x46, - 0x3, 0x46, 0x3, 0x46, 0x3, 0x47, 0x3, 0x47, 0x3, 0x47, 0x3, 0x47, 0x3, - 0x47, 0x3, 0x47, 0x3, 0x48, 0x3, 0x48, 0x3, 0x48, 0x3, 0x48, 0x3, 0x48, - 0x3, 0x48, 0x3, 0x48, 0x3, 0x49, 0x3, 0x49, 0x3, 0x49, 0x3, 0x49, 0x3, - 0x49, 0x3, 0x49, 0x3, 0x49, 0x3, 0x49, 0x3, 0x49, 0x3, 0x49, 0x3, 0x49, - 0x3, 0x49, 0x3, 0x49, 0x3, 0x4a, 0x3, 0x4a, 0x3, 0x4a, 0x3, 0x4a, 0x3, - 0x4a, 0x3, 0x4b, 0x3, 0x4b, 0x3, 0x4b, 0x3, 0x4c, 0x3, 0x4c, 0x3, 0x4c, - 0x3, 0x4d, 0x3, 0x4d, 0x3, 0x4d, 0x3, 0x4d, 0x3, 0x4d, 0x3, 0x4d, 0x3, - 0x4e, 0x3, 0x4e, 0x3, 0x4e, 0x3, 0x4f, 0x3, 0x4f, 0x3, 0x4f, 0x3, 0x4f, - 0x3, 0x4f, 0x3, 0x4f, 0x3, 0x50, 0x3, 0x50, 0x3, 0x50, 0x3, 0x50, 0x3, - 0x50, 0x3, 0x50, 0x3, 0x50, 0x3, 0x50, 0x3, 0x50, 0x3, 0x50, 0x3, 0x50, - 0x3, 0x50, 0x3, 0x50, 0x5, 0x50, 0x41a, 0xa, 0x50, 0x3, 0x51, 0x3, 0x51, - 0x3, 0x51, 0x3, 0x51, 0x3, 0x51, 0x3, 0x51, 0x3, 0x51, 0x3, 0x51, 0x3, - 0x51, 0x3, 0x51, 0x3, 0x52, 0x3, 0x52, 0x3, 0x52, 0x3, 0x52, 0x3, 0x52, - 0x3, 0x52, 0x3, 0x53, 0x3, 0x53, 0x3, 0x53, 0x3, 0x53, 0x3, 0x53, 0x3, - 0x53, 0x3, 0x53, 0x3, 0x54, 0x3, 0x54, 0x3, 0x54, 0x3, 0x54, 0x3, 0x54, - 0x3, 0x54, 0x3, 0x54, 0x3, 0x54, 0x3, 0x54, 0x3, 0x55, 0x3, 0x55, 0x3, - 0x55, 0x3, 0x55, 0x3, 0x55, 0x3, 0x56, 0x3, 0x56, 0x3, 0x56, 0x3, 0x57, - 0x3, 0x57, 0x3, 0x57, 0x3, 0x57, 0x3, 0x57, 0x3, 0x57, 0x3, 0x57, 0x3, - 0x57, 0x3, 0x57, 0x3, 0x57, 0x3, 0x57, 0x3, 0x57, 0x3, 0x57, 0x3, 0x58, - 0x3, 0x58, 0x3, 0x58, 0x3, 0x58, 0x3, 0x58, 0x3, 0x59, 0x3, 0x59, 0x3, - 0x59, 0x3, 0x59, 0x3, 0x5a, 0x3, 0x5a, 0x3, 0x5a, 0x3, 0x5a, 0x3, 0x5a, - 0x3, 0x5b, 0x3, 0x5b, 0x3, 0x5b, 0x3, 0x5b, 0x3, 0x5b, 0x3, 0x5c, 0x3, - 0x5c, 0x3, 0x5c, 0x3, 0x5c, 0x3, 0x5c, 0x3, 0x5c, 0x3, 0x5c, 0x3, 0x5d, - 0x3, 0x5d, 0x3, 0x5d, 0x3, 0x5d, 0x3, 0x5d, 0x3, 0x5d, 0x3, 0x5d, 0x3, - 0x5d, 0x3, 0x5e, 0x3, 0x5e, 0x3, 0x5e, 0x3, 0x5e, 0x3, 0x5e, 0x3, 0x5f, - 0x3, 0x5f, 0x3, 0x5f, 0x3, 0x5f, 0x3, 0x5f, 0x3, 0x5f, 0x3, 0x5f, 0x3, - 0x5f, 0x3, 0x5f, 0x3, 0x60, 0x3, 0x60, 0x3, 0x60, 0x3, 0x60, 0x3, 0x60, - 0x3, 0x61, 0x3, 0x61, 0x3, 0x61, 0x3, 0x61, 0x3, 0x61, 0x3, 0x61, 0x3, - 0x62, 0x3, 0x62, 0x3, 0x62, 0x3, 0x62, 0x3, 0x62, 0x3, 0x63, 0x3, 0x63, - 0x3, 0x63, 0x3, 0x63, 0x3, 0x63, 0x3, 0x63, 0x3, 0x64, 0x3, 0x64, 0x3, - 0x64, 0x3, 0x64, 0x3, 0x64, 0x3, 0x65, 0x3, 0x65, 0x3, 0x65, 0x3, 0x65, - 0x3, 0x65, 0x3, 0x65, 0x3, 0x65, 0x3, 0x65, 0x3, 0x65, 0x3, 0x65, 0x3, - 0x65, 0x3, 0x65, 0x3, 0x66, 0x3, 0x66, 
- [... long run of removed auto-generated hex constants elided: the serialized ATN table that ANTLR embeds in its generated C++ lexer (here, a ClickHouse SQL lexer, with its case-insensitive keyword rules encoded as state transitions); machine-generated data with no hand-written content ...]
0x639, 0x63a, 0x5, 0x199, 0xcd, 0x2, 0x63a, 0x142, - 0x3, 0x2, 0x2, 0x2, 0x63b, 0x63c, 0x5, 0x1a3, 0xd2, 0x2, 0x63c, 0x63d, - 0x5, 0x199, 0xcd, 0x2, 0x63d, 0x63e, 0x5, 0x19b, 0xce, 0x2, 0x63e, 0x144, - 0x3, 0x2, 0x2, 0x2, 0x63f, 0x640, 0x5, 0x1a3, 0xd2, 0x2, 0x640, 0x641, - 0x5, 0x199, 0xcd, 0x2, 0x641, 0x642, 0x5, 0x1a3, 0xd2, 0x2, 0x642, 0x643, - 0x5, 0x17d, 0xbf, 0x2, 0x643, 0x644, 0x5, 0x193, 0xca, 0x2, 0x644, 0x645, - 0x5, 0x1a1, 0xd1, 0x2, 0x645, 0x146, 0x3, 0x2, 0x2, 0x2, 0x646, 0x647, - 0x5, 0x1a3, 0xd2, 0x2, 0x647, 0x648, 0x5, 0x19f, 0xd0, 0x2, 0x648, 0x649, - 0x5, 0x17d, 0xbf, 0x2, 0x649, 0x64a, 0x5, 0x18d, 0xc7, 0x2, 0x64a, 0x64b, - 0x5, 0x193, 0xca, 0x2, 0x64b, 0x64c, 0x5, 0x18d, 0xc7, 0x2, 0x64c, 0x64d, - 0x5, 0x197, 0xcc, 0x2, 0x64d, 0x64e, 0x5, 0x189, 0xc5, 0x2, 0x64e, 0x148, - 0x3, 0x2, 0x2, 0x2, 0x64f, 0x650, 0x5, 0x1a3, 0xd2, 0x2, 0x650, 0x651, - 0x5, 0x19f, 0xd0, 0x2, 0x651, 0x652, 0x5, 0x18d, 0xc7, 0x2, 0x652, 0x653, - 0x5, 0x195, 0xcb, 0x2, 0x653, 0x14a, 0x3, 0x2, 0x2, 0x2, 0x654, 0x655, - 0x5, 0x1a3, 0xd2, 0x2, 0x655, 0x656, 0x5, 0x19f, 0xd0, 0x2, 0x656, 0x657, - 0x5, 0x1a5, 0xd3, 0x2, 0x657, 0x658, 0x5, 0x197, 0xcc, 0x2, 0x658, 0x659, - 0x5, 0x181, 0xc1, 0x2, 0x659, 0x65a, 0x5, 0x17d, 0xbf, 0x2, 0x65a, 0x65b, - 0x5, 0x1a3, 0xd2, 0x2, 0x65b, 0x65c, 0x5, 0x185, 0xc3, 0x2, 0x65c, 0x14c, - 0x3, 0x2, 0x2, 0x2, 0x65d, 0x65e, 0x5, 0x1a3, 0xd2, 0x2, 0x65e, 0x65f, - 0x5, 0x1a3, 0xd2, 0x2, 0x65f, 0x660, 0x5, 0x193, 0xca, 0x2, 0x660, 0x14e, - 0x3, 0x2, 0x2, 0x2, 0x661, 0x662, 0x5, 0x1a3, 0xd2, 0x2, 0x662, 0x663, - 0x5, 0x1ad, 0xd7, 0x2, 0x663, 0x664, 0x5, 0x19b, 0xce, 0x2, 0x664, 0x665, - 0x5, 0x185, 0xc3, 0x2, 0x665, 0x150, 0x3, 0x2, 0x2, 0x2, 0x666, 0x667, - 0x5, 0x1a5, 0xd3, 0x2, 0x667, 0x668, 0x5, 0x197, 0xcc, 0x2, 0x668, 0x669, - 0x5, 0x18d, 0xc7, 0x2, 0x669, 0x66a, 0x5, 0x199, 0xcd, 0x2, 0x66a, 0x66b, - 0x5, 0x197, 0xcc, 0x2, 0x66b, 0x152, 0x3, 0x2, 0x2, 0x2, 0x66c, 0x66d, - 0x5, 0x1a5, 0xd3, 0x2, 0x66d, 0x66e, 0x5, 0x19b, 0xce, 0x2, 0x66e, 0x66f, - 0x5, 0x183, 0xc2, 0x2, 0x66f, 0x670, 0x5, 0x17d, 0xbf, 0x2, 0x670, 0x671, - 0x5, 0x1a3, 0xd2, 0x2, 0x671, 0x672, 0x5, 0x185, 0xc3, 0x2, 0x672, 0x154, - 0x3, 0x2, 0x2, 0x2, 0x673, 0x674, 0x5, 0x1a5, 0xd3, 0x2, 0x674, 0x675, - 0x5, 0x1a1, 0xd1, 0x2, 0x675, 0x676, 0x5, 0x185, 0xc3, 0x2, 0x676, 0x156, - 0x3, 0x2, 0x2, 0x2, 0x677, 0x678, 0x5, 0x1a5, 0xd3, 0x2, 0x678, 0x679, - 0x5, 0x1a1, 0xd1, 0x2, 0x679, 0x67a, 0x5, 0x18d, 0xc7, 0x2, 0x67a, 0x67b, - 0x5, 0x197, 0xcc, 0x2, 0x67b, 0x67c, 0x5, 0x189, 0xc5, 0x2, 0x67c, 0x158, - 0x3, 0x2, 0x2, 0x2, 0x67d, 0x67e, 0x5, 0x1a5, 0xd3, 0x2, 0x67e, 0x67f, - 0x5, 0x1a5, 0xd3, 0x2, 0x67f, 0x680, 0x5, 0x18d, 0xc7, 0x2, 0x680, 0x681, - 0x5, 0x183, 0xc2, 0x2, 0x681, 0x15a, 0x3, 0x2, 0x2, 0x2, 0x682, 0x683, - 0x5, 0x1a7, 0xd4, 0x2, 0x683, 0x684, 0x5, 0x17d, 0xbf, 0x2, 0x684, 0x685, - 0x5, 0x193, 0xca, 0x2, 0x685, 0x686, 0x5, 0x1a5, 0xd3, 0x2, 0x686, 0x687, - 0x5, 0x185, 0xc3, 0x2, 0x687, 0x688, 0x5, 0x1a1, 0xd1, 0x2, 0x688, 0x15c, - 0x3, 0x2, 0x2, 0x2, 0x689, 0x68a, 0x5, 0x1a7, 0xd4, 0x2, 0x68a, 0x68b, - 0x5, 0x18d, 0xc7, 0x2, 0x68b, 0x68c, 0x5, 0x185, 0xc3, 0x2, 0x68c, 0x68d, - 0x5, 0x1a9, 0xd5, 0x2, 0x68d, 0x15e, 0x3, 0x2, 0x2, 0x2, 0x68e, 0x68f, - 0x5, 0x1a7, 0xd4, 0x2, 0x68f, 0x690, 0x5, 0x199, 0xcd, 0x2, 0x690, 0x691, - 0x5, 0x193, 0xca, 0x2, 0x691, 0x692, 0x5, 0x1a5, 0xd3, 0x2, 0x692, 0x693, - 0x5, 0x195, 0xcb, 0x2, 0x693, 0x694, 0x5, 0x185, 0xc3, 0x2, 0x694, 0x160, - 0x3, 0x2, 0x2, 0x2, 0x695, 0x696, 0x5, 0x1a9, 0xd5, 0x2, 0x696, 0x697, - 0x5, 0x17d, 0xbf, 0x2, 0x697, 0x698, 0x5, 0x1a3, 
0xd2, 0x2, 0x698, 0x699, - 0x5, 0x181, 0xc1, 0x2, 0x699, 0x69a, 0x5, 0x18b, 0xc6, 0x2, 0x69a, 0x162, - 0x3, 0x2, 0x2, 0x2, 0x69b, 0x69c, 0x5, 0x1a9, 0xd5, 0x2, 0x69c, 0x69d, - 0x5, 0x185, 0xc3, 0x2, 0x69d, 0x69e, 0x5, 0x185, 0xc3, 0x2, 0x69e, 0x69f, - 0x5, 0x191, 0xc9, 0x2, 0x69f, 0x164, 0x3, 0x2, 0x2, 0x2, 0x6a0, 0x6a1, - 0x5, 0x1a9, 0xd5, 0x2, 0x6a1, 0x6a2, 0x5, 0x18b, 0xc6, 0x2, 0x6a2, 0x6a3, - 0x5, 0x185, 0xc3, 0x2, 0x6a3, 0x6a4, 0x5, 0x197, 0xcc, 0x2, 0x6a4, 0x166, - 0x3, 0x2, 0x2, 0x2, 0x6a5, 0x6a6, 0x5, 0x1a9, 0xd5, 0x2, 0x6a6, 0x6a7, - 0x5, 0x18b, 0xc6, 0x2, 0x6a7, 0x6a8, 0x5, 0x185, 0xc3, 0x2, 0x6a8, 0x6a9, - 0x5, 0x19f, 0xd0, 0x2, 0x6a9, 0x6aa, 0x5, 0x185, 0xc3, 0x2, 0x6aa, 0x168, - 0x3, 0x2, 0x2, 0x2, 0x6ab, 0x6ac, 0x5, 0x1a9, 0xd5, 0x2, 0x6ac, 0x6ad, - 0x5, 0x18d, 0xc7, 0x2, 0x6ad, 0x6ae, 0x5, 0x1a3, 0xd2, 0x2, 0x6ae, 0x6af, - 0x5, 0x18b, 0xc6, 0x2, 0x6af, 0x16a, 0x3, 0x2, 0x2, 0x2, 0x6b0, 0x6b1, - 0x5, 0x1ad, 0xd7, 0x2, 0x6b1, 0x6b2, 0x5, 0x185, 0xc3, 0x2, 0x6b2, 0x6b3, - 0x5, 0x17d, 0xbf, 0x2, 0x6b3, 0x6b4, 0x5, 0x19f, 0xd0, 0x2, 0x6b4, 0x6bb, - 0x3, 0x2, 0x2, 0x2, 0x6b5, 0x6b6, 0x5, 0x1ad, 0xd7, 0x2, 0x6b6, 0x6b7, - 0x5, 0x1ad, 0xd7, 0x2, 0x6b7, 0x6b8, 0x5, 0x1ad, 0xd7, 0x2, 0x6b8, 0x6b9, - 0x5, 0x1ad, 0xd7, 0x2, 0x6b9, 0x6bb, 0x3, 0x2, 0x2, 0x2, 0x6ba, 0x6b0, - 0x3, 0x2, 0x2, 0x2, 0x6ba, 0x6b5, 0x3, 0x2, 0x2, 0x2, 0x6bb, 0x16c, - 0x3, 0x2, 0x2, 0x2, 0x6bc, 0x6bd, 0x7, 0x68, 0x2, 0x2, 0x6bd, 0x6be, - 0x7, 0x63, 0x2, 0x2, 0x6be, 0x6bf, 0x7, 0x6e, 0x2, 0x2, 0x6bf, 0x6c0, - 0x7, 0x75, 0x2, 0x2, 0x6c0, 0x6c1, 0x7, 0x67, 0x2, 0x2, 0x6c1, 0x16e, - 0x3, 0x2, 0x2, 0x2, 0x6c2, 0x6c3, 0x7, 0x76, 0x2, 0x2, 0x6c3, 0x6c4, - 0x7, 0x74, 0x2, 0x2, 0x6c4, 0x6c5, 0x7, 0x77, 0x2, 0x2, 0x6c5, 0x6c6, - 0x7, 0x67, 0x2, 0x2, 0x6c6, 0x170, 0x3, 0x2, 0x2, 0x2, 0x6c7, 0x6ca, - 0x5, 0x1b1, 0xd9, 0x2, 0x6c8, 0x6ca, 0x5, 0x1f3, 0xfa, 0x2, 0x6c9, 0x6c7, - 0x3, 0x2, 0x2, 0x2, 0x6c9, 0x6c8, 0x3, 0x2, 0x2, 0x2, 0x6ca, 0x6d0, - 0x3, 0x2, 0x2, 0x2, 0x6cb, 0x6cf, 0x5, 0x1b1, 0xd9, 0x2, 0x6cc, 0x6cf, - 0x5, 0x1f3, 0xfa, 0x2, 0x6cd, 0x6cf, 0x5, 0x1b5, 0xdb, 0x2, 0x6ce, 0x6cb, - 0x3, 0x2, 0x2, 0x2, 0x6ce, 0x6cc, 0x3, 0x2, 0x2, 0x2, 0x6ce, 0x6cd, - 0x3, 0x2, 0x2, 0x2, 0x6cf, 0x6d2, 0x3, 0x2, 0x2, 0x2, 0x6d0, 0x6ce, - 0x3, 0x2, 0x2, 0x2, 0x6d0, 0x6d1, 0x3, 0x2, 0x2, 0x2, 0x6d1, 0x6f2, - 0x3, 0x2, 0x2, 0x2, 0x6d2, 0x6d0, 0x3, 0x2, 0x2, 0x2, 0x6d3, 0x6dd, - 0x5, 0x1bd, 0xdf, 0x2, 0x6d4, 0x6dc, 0xa, 0x2, 0x2, 0x2, 0x6d5, 0x6d6, - 0x5, 0x1bf, 0xe0, 0x2, 0x6d6, 0x6d7, 0xb, 0x2, 0x2, 0x2, 0x6d7, 0x6dc, - 0x3, 0x2, 0x2, 0x2, 0x6d8, 0x6d9, 0x5, 0x1bd, 0xdf, 0x2, 0x6d9, 0x6da, - 0x5, 0x1bd, 0xdf, 0x2, 0x6da, 0x6dc, 0x3, 0x2, 0x2, 0x2, 0x6db, 0x6d4, - 0x3, 0x2, 0x2, 0x2, 0x6db, 0x6d5, 0x3, 0x2, 0x2, 0x2, 0x6db, 0x6d8, - 0x3, 0x2, 0x2, 0x2, 0x6dc, 0x6df, 0x3, 0x2, 0x2, 0x2, 0x6dd, 0x6db, - 0x3, 0x2, 0x2, 0x2, 0x6dd, 0x6de, 0x3, 0x2, 0x2, 0x2, 0x6de, 0x6e0, - 0x3, 0x2, 0x2, 0x2, 0x6df, 0x6dd, 0x3, 0x2, 0x2, 0x2, 0x6e0, 0x6e1, - 0x5, 0x1bd, 0xdf, 0x2, 0x6e1, 0x6f2, 0x3, 0x2, 0x2, 0x2, 0x6e2, 0x6ec, - 0x5, 0x1e5, 0xf3, 0x2, 0x6e3, 0x6eb, 0xa, 0x3, 0x2, 0x2, 0x6e4, 0x6e5, - 0x5, 0x1bf, 0xe0, 0x2, 0x6e5, 0x6e6, 0xb, 0x2, 0x2, 0x2, 0x6e6, 0x6eb, - 0x3, 0x2, 0x2, 0x2, 0x6e7, 0x6e8, 0x5, 0x1e5, 0xf3, 0x2, 0x6e8, 0x6e9, - 0x5, 0x1e5, 0xf3, 0x2, 0x6e9, 0x6eb, 0x3, 0x2, 0x2, 0x2, 0x6ea, 0x6e3, - 0x3, 0x2, 0x2, 0x2, 0x6ea, 0x6e4, 0x3, 0x2, 0x2, 0x2, 0x6ea, 0x6e7, - 0x3, 0x2, 0x2, 0x2, 0x6eb, 0x6ee, 0x3, 0x2, 0x2, 0x2, 0x6ec, 0x6ea, - 0x3, 0x2, 0x2, 0x2, 0x6ec, 0x6ed, 0x3, 0x2, 0x2, 0x2, 0x6ed, 0x6ef, - 0x3, 0x2, 0x2, 0x2, 0x6ee, 0x6ec, 
0x3, 0x2, 0x2, 0x2, 0x6ef, 0x6f0, - 0x5, 0x1e5, 0xf3, 0x2, 0x6f0, 0x6f2, 0x3, 0x2, 0x2, 0x2, 0x6f1, 0x6c9, - 0x3, 0x2, 0x2, 0x2, 0x6f1, 0x6d3, 0x3, 0x2, 0x2, 0x2, 0x6f1, 0x6e2, - 0x3, 0x2, 0x2, 0x2, 0x6f2, 0x172, 0x3, 0x2, 0x2, 0x2, 0x6f3, 0x6f4, - 0x5, 0x179, 0xbd, 0x2, 0x6f4, 0x6f8, 0x5, 0x1c9, 0xe5, 0x2, 0x6f5, 0x6f7, - 0x5, 0x1b7, 0xdc, 0x2, 0x6f6, 0x6f5, 0x3, 0x2, 0x2, 0x2, 0x6f7, 0x6fa, - 0x3, 0x2, 0x2, 0x2, 0x6f8, 0x6f6, 0x3, 0x2, 0x2, 0x2, 0x6f8, 0x6f9, - 0x3, 0x2, 0x2, 0x2, 0x6f9, 0x6fd, 0x3, 0x2, 0x2, 0x2, 0x6fa, 0x6f8, - 0x3, 0x2, 0x2, 0x2, 0x6fb, 0x6fe, 0x5, 0x19b, 0xce, 0x2, 0x6fc, 0x6fe, - 0x5, 0x185, 0xc3, 0x2, 0x6fd, 0x6fb, 0x3, 0x2, 0x2, 0x2, 0x6fd, 0x6fc, - 0x3, 0x2, 0x2, 0x2, 0x6fe, 0x701, 0x3, 0x2, 0x2, 0x2, 0x6ff, 0x702, - 0x5, 0x1e1, 0xf1, 0x2, 0x700, 0x702, 0x5, 0x1c7, 0xe4, 0x2, 0x701, 0x6ff, - 0x3, 0x2, 0x2, 0x2, 0x701, 0x700, 0x3, 0x2, 0x2, 0x2, 0x701, 0x702, - 0x3, 0x2, 0x2, 0x2, 0x702, 0x704, 0x3, 0x2, 0x2, 0x2, 0x703, 0x705, - 0x5, 0x1b5, 0xdb, 0x2, 0x704, 0x703, 0x3, 0x2, 0x2, 0x2, 0x705, 0x706, - 0x3, 0x2, 0x2, 0x2, 0x706, 0x704, 0x3, 0x2, 0x2, 0x2, 0x706, 0x707, - 0x3, 0x2, 0x2, 0x2, 0x707, 0x740, 0x3, 0x2, 0x2, 0x2, 0x708, 0x70b, - 0x5, 0x179, 0xbd, 0x2, 0x709, 0x70c, 0x5, 0x19b, 0xce, 0x2, 0x70a, 0x70c, - 0x5, 0x185, 0xc3, 0x2, 0x70b, 0x709, 0x3, 0x2, 0x2, 0x2, 0x70b, 0x70a, - 0x3, 0x2, 0x2, 0x2, 0x70c, 0x70f, 0x3, 0x2, 0x2, 0x2, 0x70d, 0x710, - 0x5, 0x1e1, 0xf1, 0x2, 0x70e, 0x710, 0x5, 0x1c7, 0xe4, 0x2, 0x70f, 0x70d, - 0x3, 0x2, 0x2, 0x2, 0x70f, 0x70e, 0x3, 0x2, 0x2, 0x2, 0x70f, 0x710, - 0x3, 0x2, 0x2, 0x2, 0x710, 0x712, 0x3, 0x2, 0x2, 0x2, 0x711, 0x713, - 0x5, 0x1b5, 0xdb, 0x2, 0x712, 0x711, 0x3, 0x2, 0x2, 0x2, 0x713, 0x714, - 0x3, 0x2, 0x2, 0x2, 0x714, 0x712, 0x3, 0x2, 0x2, 0x2, 0x714, 0x715, - 0x3, 0x2, 0x2, 0x2, 0x715, 0x740, 0x3, 0x2, 0x2, 0x2, 0x716, 0x717, - 0x5, 0x177, 0xbc, 0x2, 0x717, 0x71b, 0x5, 0x1c9, 0xe5, 0x2, 0x718, 0x71a, - 0x5, 0x1b5, 0xdb, 0x2, 0x719, 0x718, 0x3, 0x2, 0x2, 0x2, 0x71a, 0x71d, - 0x3, 0x2, 0x2, 0x2, 0x71b, 0x719, 0x3, 0x2, 0x2, 0x2, 0x71b, 0x71c, - 0x3, 0x2, 0x2, 0x2, 0x71c, 0x71e, 0x3, 0x2, 0x2, 0x2, 0x71d, 0x71b, - 0x3, 0x2, 0x2, 0x2, 0x71e, 0x721, 0x5, 0x185, 0xc3, 0x2, 0x71f, 0x722, - 0x5, 0x1e1, 0xf1, 0x2, 0x720, 0x722, 0x5, 0x1c7, 0xe4, 0x2, 0x721, 0x71f, - 0x3, 0x2, 0x2, 0x2, 0x721, 0x720, 0x3, 0x2, 0x2, 0x2, 0x721, 0x722, - 0x3, 0x2, 0x2, 0x2, 0x722, 0x724, 0x3, 0x2, 0x2, 0x2, 0x723, 0x725, - 0x5, 0x1b5, 0xdb, 0x2, 0x724, 0x723, 0x3, 0x2, 0x2, 0x2, 0x725, 0x726, - 0x3, 0x2, 0x2, 0x2, 0x726, 0x724, 0x3, 0x2, 0x2, 0x2, 0x726, 0x727, - 0x3, 0x2, 0x2, 0x2, 0x727, 0x740, 0x3, 0x2, 0x2, 0x2, 0x728, 0x729, - 0x5, 0x1c9, 0xe5, 0x2, 0x729, 0x72a, 0x5, 0x177, 0xbc, 0x2, 0x72a, 0x72d, - 0x5, 0x185, 0xc3, 0x2, 0x72b, 0x72e, 0x5, 0x1e1, 0xf1, 0x2, 0x72c, 0x72e, - 0x5, 0x1c7, 0xe4, 0x2, 0x72d, 0x72b, 0x3, 0x2, 0x2, 0x2, 0x72d, 0x72c, - 0x3, 0x2, 0x2, 0x2, 0x72d, 0x72e, 0x3, 0x2, 0x2, 0x2, 0x72e, 0x730, - 0x3, 0x2, 0x2, 0x2, 0x72f, 0x731, 0x5, 0x1b5, 0xdb, 0x2, 0x730, 0x72f, - 0x3, 0x2, 0x2, 0x2, 0x731, 0x732, 0x3, 0x2, 0x2, 0x2, 0x732, 0x730, - 0x3, 0x2, 0x2, 0x2, 0x732, 0x733, 0x3, 0x2, 0x2, 0x2, 0x733, 0x740, - 0x3, 0x2, 0x2, 0x2, 0x734, 0x735, 0x5, 0x177, 0xbc, 0x2, 0x735, 0x738, - 0x5, 0x185, 0xc3, 0x2, 0x736, 0x739, 0x5, 0x1e1, 0xf1, 0x2, 0x737, 0x739, - 0x5, 0x1c7, 0xe4, 0x2, 0x738, 0x736, 0x3, 0x2, 0x2, 0x2, 0x738, 0x737, - 0x3, 0x2, 0x2, 0x2, 0x738, 0x739, 0x3, 0x2, 0x2, 0x2, 0x739, 0x73b, - 0x3, 0x2, 0x2, 0x2, 0x73a, 0x73c, 0x5, 0x1b5, 0xdb, 0x2, 0x73b, 0x73a, - 0x3, 0x2, 0x2, 0x2, 0x73c, 0x73d, 0x3, 0x2, 0x2, 0x2, 
0x73d, 0x73b, - 0x3, 0x2, 0x2, 0x2, 0x73d, 0x73e, 0x3, 0x2, 0x2, 0x2, 0x73e, 0x740, - 0x3, 0x2, 0x2, 0x2, 0x73f, 0x6f3, 0x3, 0x2, 0x2, 0x2, 0x73f, 0x708, - 0x3, 0x2, 0x2, 0x2, 0x73f, 0x716, 0x3, 0x2, 0x2, 0x2, 0x73f, 0x728, - 0x3, 0x2, 0x2, 0x2, 0x73f, 0x734, 0x3, 0x2, 0x2, 0x2, 0x740, 0x174, - 0x3, 0x2, 0x2, 0x2, 0x741, 0x743, 0x7, 0x32, 0x2, 0x2, 0x742, 0x744, - 0x5, 0x1b3, 0xda, 0x2, 0x743, 0x742, 0x3, 0x2, 0x2, 0x2, 0x744, 0x745, - 0x3, 0x2, 0x2, 0x2, 0x745, 0x743, 0x3, 0x2, 0x2, 0x2, 0x745, 0x746, - 0x3, 0x2, 0x2, 0x2, 0x746, 0x176, 0x3, 0x2, 0x2, 0x2, 0x747, 0x749, - 0x5, 0x1b5, 0xdb, 0x2, 0x748, 0x747, 0x3, 0x2, 0x2, 0x2, 0x749, 0x74a, - 0x3, 0x2, 0x2, 0x2, 0x74a, 0x748, 0x3, 0x2, 0x2, 0x2, 0x74a, 0x74b, - 0x3, 0x2, 0x2, 0x2, 0x74b, 0x178, 0x3, 0x2, 0x2, 0x2, 0x74c, 0x74d, - 0x7, 0x32, 0x2, 0x2, 0x74d, 0x74f, 0x5, 0x1ab, 0xd6, 0x2, 0x74e, 0x750, - 0x5, 0x1b7, 0xdc, 0x2, 0x74f, 0x74e, 0x3, 0x2, 0x2, 0x2, 0x750, 0x751, - 0x3, 0x2, 0x2, 0x2, 0x751, 0x74f, 0x3, 0x2, 0x2, 0x2, 0x751, 0x752, - 0x3, 0x2, 0x2, 0x2, 0x752, 0x17a, 0x3, 0x2, 0x2, 0x2, 0x753, 0x75d, - 0x5, 0x1e7, 0xf4, 0x2, 0x754, 0x75c, 0xa, 0x4, 0x2, 0x2, 0x755, 0x756, - 0x5, 0x1bf, 0xe0, 0x2, 0x756, 0x757, 0xb, 0x2, 0x2, 0x2, 0x757, 0x75c, - 0x3, 0x2, 0x2, 0x2, 0x758, 0x759, 0x5, 0x1e7, 0xf4, 0x2, 0x759, 0x75a, - 0x5, 0x1e7, 0xf4, 0x2, 0x75a, 0x75c, 0x3, 0x2, 0x2, 0x2, 0x75b, 0x754, - 0x3, 0x2, 0x2, 0x2, 0x75b, 0x755, 0x3, 0x2, 0x2, 0x2, 0x75b, 0x758, - 0x3, 0x2, 0x2, 0x2, 0x75c, 0x75f, 0x3, 0x2, 0x2, 0x2, 0x75d, 0x75b, - 0x3, 0x2, 0x2, 0x2, 0x75d, 0x75e, 0x3, 0x2, 0x2, 0x2, 0x75e, 0x760, - 0x3, 0x2, 0x2, 0x2, 0x75f, 0x75d, 0x3, 0x2, 0x2, 0x2, 0x760, 0x761, - 0x5, 0x1e7, 0xf4, 0x2, 0x761, 0x17c, 0x3, 0x2, 0x2, 0x2, 0x762, 0x763, - 0x9, 0x5, 0x2, 0x2, 0x763, 0x17e, 0x3, 0x2, 0x2, 0x2, 0x764, 0x765, - 0x9, 0x6, 0x2, 0x2, 0x765, 0x180, 0x3, 0x2, 0x2, 0x2, 0x766, 0x767, - 0x9, 0x7, 0x2, 0x2, 0x767, 0x182, 0x3, 0x2, 0x2, 0x2, 0x768, 0x769, - 0x9, 0x8, 0x2, 0x2, 0x769, 0x184, 0x3, 0x2, 0x2, 0x2, 0x76a, 0x76b, - 0x9, 0x9, 0x2, 0x2, 0x76b, 0x186, 0x3, 0x2, 0x2, 0x2, 0x76c, 0x76d, - 0x9, 0xa, 0x2, 0x2, 0x76d, 0x188, 0x3, 0x2, 0x2, 0x2, 0x76e, 0x76f, - 0x9, 0xb, 0x2, 0x2, 0x76f, 0x18a, 0x3, 0x2, 0x2, 0x2, 0x770, 0x771, - 0x9, 0xc, 0x2, 0x2, 0x771, 0x18c, 0x3, 0x2, 0x2, 0x2, 0x772, 0x773, - 0x9, 0xd, 0x2, 0x2, 0x773, 0x18e, 0x3, 0x2, 0x2, 0x2, 0x774, 0x775, - 0x9, 0xe, 0x2, 0x2, 0x775, 0x190, 0x3, 0x2, 0x2, 0x2, 0x776, 0x777, - 0x9, 0xf, 0x2, 0x2, 0x777, 0x192, 0x3, 0x2, 0x2, 0x2, 0x778, 0x779, - 0x9, 0x10, 0x2, 0x2, 0x779, 0x194, 0x3, 0x2, 0x2, 0x2, 0x77a, 0x77b, - 0x9, 0x11, 0x2, 0x2, 0x77b, 0x196, 0x3, 0x2, 0x2, 0x2, 0x77c, 0x77d, - 0x9, 0x12, 0x2, 0x2, 0x77d, 0x198, 0x3, 0x2, 0x2, 0x2, 0x77e, 0x77f, - 0x9, 0x13, 0x2, 0x2, 0x77f, 0x19a, 0x3, 0x2, 0x2, 0x2, 0x780, 0x781, - 0x9, 0x14, 0x2, 0x2, 0x781, 0x19c, 0x3, 0x2, 0x2, 0x2, 0x782, 0x783, - 0x9, 0x15, 0x2, 0x2, 0x783, 0x19e, 0x3, 0x2, 0x2, 0x2, 0x784, 0x785, - 0x9, 0x16, 0x2, 0x2, 0x785, 0x1a0, 0x3, 0x2, 0x2, 0x2, 0x786, 0x787, - 0x9, 0x17, 0x2, 0x2, 0x787, 0x1a2, 0x3, 0x2, 0x2, 0x2, 0x788, 0x789, - 0x9, 0x18, 0x2, 0x2, 0x789, 0x1a4, 0x3, 0x2, 0x2, 0x2, 0x78a, 0x78b, - 0x9, 0x19, 0x2, 0x2, 0x78b, 0x1a6, 0x3, 0x2, 0x2, 0x2, 0x78c, 0x78d, - 0x9, 0x1a, 0x2, 0x2, 0x78d, 0x1a8, 0x3, 0x2, 0x2, 0x2, 0x78e, 0x78f, - 0x9, 0x1b, 0x2, 0x2, 0x78f, 0x1aa, 0x3, 0x2, 0x2, 0x2, 0x790, 0x791, - 0x9, 0x1c, 0x2, 0x2, 0x791, 0x1ac, 0x3, 0x2, 0x2, 0x2, 0x792, 0x793, - 0x9, 0x1d, 0x2, 0x2, 0x793, 0x1ae, 0x3, 0x2, 0x2, 0x2, 0x794, 0x795, - 0x9, 0x1e, 0x2, 0x2, 0x795, 0x1b0, 0x3, 0x2, 0x2, 0x2, 0x796, 
0x797, - 0x9, 0x1f, 0x2, 0x2, 0x797, 0x1b2, 0x3, 0x2, 0x2, 0x2, 0x798, 0x799, - 0x9, 0x20, 0x2, 0x2, 0x799, 0x1b4, 0x3, 0x2, 0x2, 0x2, 0x79a, 0x79b, - 0x9, 0x21, 0x2, 0x2, 0x79b, 0x1b6, 0x3, 0x2, 0x2, 0x2, 0x79c, 0x79d, - 0x9, 0x22, 0x2, 0x2, 0x79d, 0x1b8, 0x3, 0x2, 0x2, 0x2, 0x79e, 0x79f, - 0x7, 0x2f, 0x2, 0x2, 0x79f, 0x7a0, 0x7, 0x40, 0x2, 0x2, 0x7a0, 0x1ba, - 0x3, 0x2, 0x2, 0x2, 0x7a1, 0x7a2, 0x7, 0x2c, 0x2, 0x2, 0x7a2, 0x1bc, - 0x3, 0x2, 0x2, 0x2, 0x7a3, 0x7a4, 0x7, 0x62, 0x2, 0x2, 0x7a4, 0x1be, - 0x3, 0x2, 0x2, 0x2, 0x7a5, 0x7a6, 0x7, 0x5e, 0x2, 0x2, 0x7a6, 0x1c0, - 0x3, 0x2, 0x2, 0x2, 0x7a7, 0x7a8, 0x7, 0x3c, 0x2, 0x2, 0x7a8, 0x1c2, - 0x3, 0x2, 0x2, 0x2, 0x7a9, 0x7aa, 0x7, 0x2e, 0x2, 0x2, 0x7aa, 0x1c4, - 0x3, 0x2, 0x2, 0x2, 0x7ab, 0x7ac, 0x7, 0x7e, 0x2, 0x2, 0x7ac, 0x7ad, - 0x7, 0x7e, 0x2, 0x2, 0x7ad, 0x1c6, 0x3, 0x2, 0x2, 0x2, 0x7ae, 0x7af, - 0x7, 0x2f, 0x2, 0x2, 0x7af, 0x1c8, 0x3, 0x2, 0x2, 0x2, 0x7b0, 0x7b1, - 0x7, 0x30, 0x2, 0x2, 0x7b1, 0x1ca, 0x3, 0x2, 0x2, 0x2, 0x7b2, 0x7b3, - 0x7, 0x3f, 0x2, 0x2, 0x7b3, 0x7b4, 0x7, 0x3f, 0x2, 0x2, 0x7b4, 0x1cc, - 0x3, 0x2, 0x2, 0x2, 0x7b5, 0x7b6, 0x7, 0x3f, 0x2, 0x2, 0x7b6, 0x1ce, - 0x3, 0x2, 0x2, 0x2, 0x7b7, 0x7b8, 0x7, 0x40, 0x2, 0x2, 0x7b8, 0x7b9, - 0x7, 0x3f, 0x2, 0x2, 0x7b9, 0x1d0, 0x3, 0x2, 0x2, 0x2, 0x7ba, 0x7bb, - 0x7, 0x40, 0x2, 0x2, 0x7bb, 0x1d2, 0x3, 0x2, 0x2, 0x2, 0x7bc, 0x7bd, - 0x7, 0x7d, 0x2, 0x2, 0x7bd, 0x1d4, 0x3, 0x2, 0x2, 0x2, 0x7be, 0x7bf, - 0x7, 0x5d, 0x2, 0x2, 0x7bf, 0x1d6, 0x3, 0x2, 0x2, 0x2, 0x7c0, 0x7c1, - 0x7, 0x3e, 0x2, 0x2, 0x7c1, 0x7c2, 0x7, 0x3f, 0x2, 0x2, 0x7c2, 0x1d8, - 0x3, 0x2, 0x2, 0x2, 0x7c3, 0x7c4, 0x7, 0x2a, 0x2, 0x2, 0x7c4, 0x1da, - 0x3, 0x2, 0x2, 0x2, 0x7c5, 0x7c6, 0x7, 0x3e, 0x2, 0x2, 0x7c6, 0x1dc, - 0x3, 0x2, 0x2, 0x2, 0x7c7, 0x7c8, 0x7, 0x23, 0x2, 0x2, 0x7c8, 0x7cc, - 0x7, 0x3f, 0x2, 0x2, 0x7c9, 0x7ca, 0x7, 0x3e, 0x2, 0x2, 0x7ca, 0x7cc, - 0x7, 0x40, 0x2, 0x2, 0x7cb, 0x7c7, 0x3, 0x2, 0x2, 0x2, 0x7cb, 0x7c9, - 0x3, 0x2, 0x2, 0x2, 0x7cc, 0x1de, 0x3, 0x2, 0x2, 0x2, 0x7cd, 0x7ce, - 0x7, 0x27, 0x2, 0x2, 0x7ce, 0x1e0, 0x3, 0x2, 0x2, 0x2, 0x7cf, 0x7d0, - 0x7, 0x2d, 0x2, 0x2, 0x7d0, 0x1e2, 0x3, 0x2, 0x2, 0x2, 0x7d1, 0x7d2, - 0x7, 0x41, 0x2, 0x2, 0x7d2, 0x1e4, 0x3, 0x2, 0x2, 0x2, 0x7d3, 0x7d4, - 0x7, 0x24, 0x2, 0x2, 0x7d4, 0x1e6, 0x3, 0x2, 0x2, 0x2, 0x7d5, 0x7d6, - 0x7, 0x29, 0x2, 0x2, 0x7d6, 0x1e8, 0x3, 0x2, 0x2, 0x2, 0x7d7, 0x7d8, - 0x7, 0x7f, 0x2, 0x2, 0x7d8, 0x1ea, 0x3, 0x2, 0x2, 0x2, 0x7d9, 0x7da, - 0x7, 0x5f, 0x2, 0x2, 0x7da, 0x1ec, 0x3, 0x2, 0x2, 0x2, 0x7db, 0x7dc, - 0x7, 0x2b, 0x2, 0x2, 0x7dc, 0x1ee, 0x3, 0x2, 0x2, 0x2, 0x7dd, 0x7de, - 0x7, 0x3d, 0x2, 0x2, 0x7de, 0x1f0, 0x3, 0x2, 0x2, 0x2, 0x7df, 0x7e0, - 0x7, 0x31, 0x2, 0x2, 0x7e0, 0x1f2, 0x3, 0x2, 0x2, 0x2, 0x7e1, 0x7e2, - 0x7, 0x61, 0x2, 0x2, 0x7e2, 0x1f4, 0x3, 0x2, 0x2, 0x2, 0x7e3, 0x7e4, - 0x7, 0x31, 0x2, 0x2, 0x7e4, 0x7e5, 0x7, 0x2c, 0x2, 0x2, 0x7e5, 0x7e9, - 0x3, 0x2, 0x2, 0x2, 0x7e6, 0x7e8, 0xb, 0x2, 0x2, 0x2, 0x7e7, 0x7e6, - 0x3, 0x2, 0x2, 0x2, 0x7e8, 0x7eb, 0x3, 0x2, 0x2, 0x2, 0x7e9, 0x7ea, - 0x3, 0x2, 0x2, 0x2, 0x7e9, 0x7e7, 0x3, 0x2, 0x2, 0x2, 0x7ea, 0x7ec, - 0x3, 0x2, 0x2, 0x2, 0x7eb, 0x7e9, 0x3, 0x2, 0x2, 0x2, 0x7ec, 0x7ed, - 0x7, 0x2c, 0x2, 0x2, 0x7ed, 0x7ee, 0x7, 0x31, 0x2, 0x2, 0x7ee, 0x7ef, - 0x3, 0x2, 0x2, 0x2, 0x7ef, 0x7f0, 0x8, 0xfb, 0x2, 0x2, 0x7f0, 0x1f6, - 0x3, 0x2, 0x2, 0x2, 0x7f1, 0x7f2, 0x7, 0x2f, 0x2, 0x2, 0x7f2, 0x7f3, - 0x7, 0x2f, 0x2, 0x2, 0x7f3, 0x7f7, 0x3, 0x2, 0x2, 0x2, 0x7f4, 0x7f6, - 0xa, 0x23, 0x2, 0x2, 0x7f5, 0x7f4, 0x3, 0x2, 0x2, 0x2, 0x7f6, 0x7f9, - 0x3, 0x2, 0x2, 0x2, 0x7f7, 0x7f5, 0x3, 0x2, 0x2, 0x2, 0x7f7, 
0x7f8, - 0x3, 0x2, 0x2, 0x2, 0x7f8, 0x7fb, 0x3, 0x2, 0x2, 0x2, 0x7f9, 0x7f7, - 0x3, 0x2, 0x2, 0x2, 0x7fa, 0x7fc, 0x9, 0x24, 0x2, 0x2, 0x7fb, 0x7fa, - 0x3, 0x2, 0x2, 0x2, 0x7fc, 0x7fd, 0x3, 0x2, 0x2, 0x2, 0x7fd, 0x7fe, - 0x8, 0xfc, 0x2, 0x2, 0x7fe, 0x1f8, 0x3, 0x2, 0x2, 0x2, 0x7ff, 0x800, - 0x9, 0x25, 0x2, 0x2, 0x800, 0x801, 0x3, 0x2, 0x2, 0x2, 0x801, 0x802, - 0x8, 0xfd, 0x2, 0x2, 0x802, 0x1fa, 0x3, 0x2, 0x2, 0x2, 0x26, 0x2, 0x239, - 0x419, 0x6ba, 0x6c9, 0x6ce, 0x6d0, 0x6db, 0x6dd, 0x6ea, 0x6ec, 0x6f1, - 0x6f8, 0x6fd, 0x701, 0x706, 0x70b, 0x70f, 0x714, 0x71b, 0x721, 0x726, - 0x72d, 0x732, 0x738, 0x73d, 0x73f, 0x745, 0x74a, 0x751, 0x75b, 0x75d, - 0x7cb, 0x7e9, 0x7f7, 0x7fb, 0x3, 0x8, 0x2, 0x2, - }; - - atn::ATNDeserializer deserializer; - _atn = deserializer.deserialize(_serializedATN); - - size_t count = _atn.getNumberOfDecisions(); - _decisionToDFA.reserve(count); - for (size_t i = 0; i < count; i++) { - _decisionToDFA.emplace_back(_atn.getDecisionState(i), i); - } -} - -ClickHouseLexer::Initializer ClickHouseLexer::_init; diff --git a/src/Parsers/New/ClickHouseLexer.h b/src/Parsers/New/ClickHouseLexer.h deleted file mode 100644 index 1cce0ee0bd7..00000000000 --- a/src/Parsers/New/ClickHouseLexer.h +++ /dev/null @@ -1,98 +0,0 @@ - -// Generated from ClickHouseLexer.g4 by ANTLR 4.7.2 - -#pragma once - - -#include "antlr4-runtime.h" - - -namespace DB { - - -class ClickHouseLexer : public antlr4::Lexer { -public: - enum { - ADD = 1, AFTER = 2, ALIAS = 3, ALL = 4, ALTER = 5, AND = 6, ANTI = 7, - ANY = 8, ARRAY = 9, AS = 10, ASCENDING = 11, ASOF = 12, AST = 13, ASYNC = 14, - ATTACH = 15, BETWEEN = 16, BOTH = 17, BY = 18, CASE = 19, CAST = 20, - CHECK = 21, CLEAR = 22, CLUSTER = 23, CODEC = 24, COLLATE = 25, COLUMN = 26, - COMMENT = 27, CONSTRAINT = 28, CREATE = 29, CROSS = 30, CUBE = 31, DATABASE = 32, - DATABASES = 33, DATE = 34, DAY = 35, DEDUPLICATE = 36, DEFAULT = 37, - DELAY = 38, DELETE = 39, DESC = 40, DESCENDING = 41, DESCRIBE = 42, - DETACH = 43, DICTIONARIES = 44, DICTIONARY = 45, DISK = 46, DISTINCT = 47, - DISTRIBUTED = 48, DROP = 49, ELSE = 50, END = 51, ENGINE = 52, EVENTS = 53, - EXISTS = 54, EXPLAIN = 55, EXPRESSION = 56, EXTRACT = 57, FETCHES = 58, - FINAL = 59, FIRST = 60, FLUSH = 61, FOR = 62, FORMAT = 63, FREEZE = 64, - FROM = 65, FULL = 66, FUNCTION = 67, GLOBAL = 68, GRANULARITY = 69, - GROUP = 70, HAVING = 71, HIERARCHICAL = 72, HOUR = 73, ID = 74, IF = 75, - ILIKE = 76, IN = 77, INDEX = 78, INF = 79, INJECTIVE = 80, INNER = 81, - INSERT = 82, INTERVAL = 83, INTO = 84, IS = 85, IS_OBJECT_ID = 86, JOIN = 87, - KEY = 88, KILL = 89, LAST = 90, LAYOUT = 91, LEADING = 92, LEFT = 93, - LIFETIME = 94, LIKE = 95, LIMIT = 96, LIVE = 97, LOCAL = 98, LOGS = 99, - MATERIALIZE = 100, MATERIALIZED = 101, MAX = 102, MERGES = 103, MIN = 104, - MINUTE = 105, MODIFY = 106, MONTH = 107, MOVE = 108, MUTATION = 109, - NAN_SQL = 110, NO = 111, NOT = 112, NULL_SQL = 113, NULLS = 114, OFFSET = 115, - ON = 116, OPTIMIZE = 117, OR = 118, ORDER = 119, OUTER = 120, OUTFILE = 121, - PARTITION = 122, POPULATE = 123, PREWHERE = 124, PRIMARY = 125, PROJECTION = 126, - QUARTER = 127, RANGE = 128, RELOAD = 129, REMOVE = 130, RENAME = 131, - REPLACE = 132, REPLICA = 133, REPLICATED = 134, RIGHT = 135, ROLLUP = 136, - SAMPLE = 137, SECOND = 138, SELECT = 139, SEMI = 140, SENDS = 141, SET = 142, - SETTINGS = 143, SHOW = 144, SOURCE = 145, START = 146, STOP = 147, SUBSTRING = 148, - SYNC = 149, SYNTAX = 150, SYSTEM = 151, TABLE = 152, TABLES = 153, TEMPORARY = 154, - TEST = 155, THEN = 156, TIES = 157, 
TIMEOUT = 158, TIMESTAMP = 159, - TO = 160, TOP = 161, TOTALS = 162, TRAILING = 163, TRIM = 164, TRUNCATE = 165, - TTL = 166, TYPE = 167, UNION = 168, UPDATE = 169, USE = 170, USING = 171, - UUID = 172, VALUES = 173, VIEW = 174, VOLUME = 175, WATCH = 176, WEEK = 177, - WHEN = 178, WHERE = 179, WITH = 180, YEAR = 181, JSON_FALSE = 182, JSON_TRUE = 183, - IDENTIFIER = 184, FLOATING_LITERAL = 185, OCTAL_LITERAL = 186, DECIMAL_LITERAL = 187, - HEXADECIMAL_LITERAL = 188, STRING_LITERAL = 189, ARROW = 190, ASTERISK = 191, - BACKQUOTE = 192, BACKSLASH = 193, COLON = 194, COMMA = 195, CONCAT = 196, - DASH = 197, DOT = 198, EQ_DOUBLE = 199, EQ_SINGLE = 200, GE = 201, GT = 202, - LBRACE = 203, LBRACKET = 204, LE = 205, LPAREN = 206, LT = 207, NOT_EQ = 208, - PERCENT = 209, PLUS = 210, QUERY = 211, QUOTE_DOUBLE = 212, QUOTE_SINGLE = 213, - RBRACE = 214, RBRACKET = 215, RPAREN = 216, SEMICOLON = 217, SLASH = 218, - UNDERSCORE = 219, MULTI_LINE_COMMENT = 220, SINGLE_LINE_COMMENT = 221, - WHITESPACE = 222 - }; - - ClickHouseLexer(antlr4::CharStream *input); - ~ClickHouseLexer(); - - virtual std::string getGrammarFileName() const override; - virtual const std::vector<std::string>& getRuleNames() const override; - - virtual const std::vector<std::string>& getChannelNames() const override; - virtual const std::vector<std::string>& getModeNames() const override; - virtual const std::vector<std::string>& getTokenNames() const override; // deprecated, use vocabulary instead - virtual antlr4::dfa::Vocabulary& getVocabulary() const override; - - virtual const std::vector<uint16_t> getSerializedATN() const override; - virtual const antlr4::atn::ATN& getATN() const override; - -private: - static std::vector<antlr4::dfa::DFA> _decisionToDFA; - static antlr4::atn::PredictionContextCache _sharedContextCache; - static std::vector<std::string> _ruleNames; - static std::vector<std::string> _tokenNames; - static std::vector<std::string> _channelNames; - static std::vector<std::string> _modeNames; - - static std::vector<std::string> _literalNames; - static std::vector<std::string> _symbolicNames; - static antlr4::dfa::Vocabulary _vocabulary; - static antlr4::atn::ATN _atn; - static std::vector<uint16_t> _serializedATN; - - - // Individual action functions triggered by action() above. - - // Individual semantic predicate functions triggered by sempred() above.
- - struct Initializer { - Initializer(); - }; - static Initializer _init; -}; - -} // namespace DB diff --git a/src/Parsers/New/ClickHouseParser.cpp b/src/Parsers/New/ClickHouseParser.cpp deleted file mode 100644 index 174f838f19d..00000000000 --- a/src/Parsers/New/ClickHouseParser.cpp +++ /dev/null @@ -1,20220 +0,0 @@ - -// Generated from ClickHouseParser.g4 by ANTLR 4.7.2 - - -#include "ClickHouseParserVisitor.h" - -#include "ClickHouseParser.h" - - -using namespace antlrcpp; -using namespace DB; -using namespace antlr4; - -ClickHouseParser::ClickHouseParser(TokenStream *input) : Parser(input) { - _interpreter = new atn::ParserATNSimulator(this, _atn, _decisionToDFA, _sharedContextCache); -} - -ClickHouseParser::~ClickHouseParser() { - delete _interpreter; -} - -std::string ClickHouseParser::getGrammarFileName() const { - return "ClickHouseParser.g4"; -} - -const std::vector<std::string>& ClickHouseParser::getRuleNames() const { - return _ruleNames; -} - -dfa::Vocabulary& ClickHouseParser::getVocabulary() const { - return _vocabulary; -} - - -//----------------- QueryStmtContext ------------------------------------------------------------------ - -ClickHouseParser::QueryStmtContext::QueryStmtContext(ParserRuleContext *parent, size_t invokingState) - : ParserRuleContext(parent, invokingState) { -} - -ClickHouseParser::QueryContext* ClickHouseParser::QueryStmtContext::query() { - return getRuleContext<ClickHouseParser::QueryContext>(0); -} - -tree::TerminalNode* ClickHouseParser::QueryStmtContext::INTO() { - return getToken(ClickHouseParser::INTO, 0); -} - -tree::TerminalNode* ClickHouseParser::QueryStmtContext::OUTFILE() { - return getToken(ClickHouseParser::OUTFILE, 0); -} - -tree::TerminalNode* ClickHouseParser::QueryStmtContext::STRING_LITERAL() { - return getToken(ClickHouseParser::STRING_LITERAL, 0); -} - -tree::TerminalNode* ClickHouseParser::QueryStmtContext::FORMAT() { - return getToken(ClickHouseParser::FORMAT, 0); -} - -ClickHouseParser::IdentifierOrNullContext* ClickHouseParser::QueryStmtContext::identifierOrNull() { - return getRuleContext<ClickHouseParser::IdentifierOrNullContext>(0); -} - -tree::TerminalNode* ClickHouseParser::QueryStmtContext::SEMICOLON() { - return getToken(ClickHouseParser::SEMICOLON, 0); -} - -ClickHouseParser::InsertStmtContext* ClickHouseParser::QueryStmtContext::insertStmt() { - return getRuleContext<ClickHouseParser::InsertStmtContext>(0); -} - - -size_t ClickHouseParser::QueryStmtContext::getRuleIndex() const { - return ClickHouseParser::RuleQueryStmt; -} - -antlrcpp::Any ClickHouseParser::QueryStmtContext::accept(tree::ParseTreeVisitor *visitor) { - if (auto parserVisitor = dynamic_cast<ClickHouseParserVisitor*>(visitor)) - return parserVisitor->visitQueryStmt(this); - else - return visitor->visitChildren(this); -} - -ClickHouseParser::QueryStmtContext* ClickHouseParser::queryStmt() { - QueryStmtContext *_localctx = _tracker.createInstance<QueryStmtContext>(_ctx, getState()); - enterRule(_localctx, 0, ClickHouseParser::RuleQueryStmt); - size_t _la = 0; - - auto onExit = finally([=] { - exitRule(); - }); - try { - setState(232); - _errHandler->sync(this); - switch (_input->LA(1)) { - case ClickHouseParser::ALTER: - case ClickHouseParser::ATTACH: - case ClickHouseParser::CHECK: - case ClickHouseParser::CREATE: - case ClickHouseParser::DESC: - case ClickHouseParser::DESCRIBE: - case ClickHouseParser::DETACH: - case ClickHouseParser::DROP: - case ClickHouseParser::EXISTS: - case ClickHouseParser::EXPLAIN: - case ClickHouseParser::KILL: - case ClickHouseParser::OPTIMIZE: - case ClickHouseParser::RENAME: - case ClickHouseParser::SELECT: - case ClickHouseParser::SET: - case ClickHouseParser::SHOW: - case 
ClickHouseParser::SYSTEM: - case ClickHouseParser::TRUNCATE: - case ClickHouseParser::USE: - case ClickHouseParser::WATCH: - case ClickHouseParser::WITH: - case ClickHouseParser::LPAREN: { - enterOuterAlt(_localctx, 1); - setState(218); - query(); - setState(222); - _errHandler->sync(this); - - _la = _input->LA(1); - if (_la == ClickHouseParser::INTO) { - setState(219); - match(ClickHouseParser::INTO); - setState(220); - match(ClickHouseParser::OUTFILE); - setState(221); - match(ClickHouseParser::STRING_LITERAL); - } - setState(226); - _errHandler->sync(this); - - _la = _input->LA(1); - if (_la == ClickHouseParser::FORMAT) { - setState(224); - match(ClickHouseParser::FORMAT); - setState(225); - identifierOrNull(); - } - setState(229); - _errHandler->sync(this); - - _la = _input->LA(1); - if (_la == ClickHouseParser::SEMICOLON) { - setState(228); - match(ClickHouseParser::SEMICOLON); - } - break; - } - - case ClickHouseParser::INSERT: { - enterOuterAlt(_localctx, 2); - setState(231); - insertStmt(); - break; - } - - default: - throw NoViableAltException(this); - } - - } - catch (RecognitionException &e) { - _errHandler->reportError(this, e); - _localctx->exception = std::current_exception(); - _errHandler->recover(this, _localctx->exception); - } - - return _localctx; -} - -//----------------- QueryContext ------------------------------------------------------------------ - -ClickHouseParser::QueryContext::QueryContext(ParserRuleContext *parent, size_t invokingState) - : ParserRuleContext(parent, invokingState) { -} - -ClickHouseParser::AlterStmtContext* ClickHouseParser::QueryContext::alterStmt() { - return getRuleContext<ClickHouseParser::AlterStmtContext>(0); -} - -ClickHouseParser::AttachStmtContext* ClickHouseParser::QueryContext::attachStmt() { - return getRuleContext<ClickHouseParser::AttachStmtContext>(0); -} - -ClickHouseParser::CheckStmtContext* ClickHouseParser::QueryContext::checkStmt() { - return getRuleContext<ClickHouseParser::CheckStmtContext>(0); -} - -ClickHouseParser::CreateStmtContext* ClickHouseParser::QueryContext::createStmt() { - return getRuleContext<ClickHouseParser::CreateStmtContext>(0); -} - -ClickHouseParser::DescribeStmtContext* ClickHouseParser::QueryContext::describeStmt() { - return getRuleContext<ClickHouseParser::DescribeStmtContext>(0); -} - -ClickHouseParser::DropStmtContext* ClickHouseParser::QueryContext::dropStmt() { - return getRuleContext<ClickHouseParser::DropStmtContext>(0); -} - -ClickHouseParser::ExistsStmtContext* ClickHouseParser::QueryContext::existsStmt() { - return getRuleContext<ClickHouseParser::ExistsStmtContext>(0); -} - -ClickHouseParser::ExplainStmtContext* ClickHouseParser::QueryContext::explainStmt() { - return getRuleContext<ClickHouseParser::ExplainStmtContext>(0); -} - -ClickHouseParser::KillStmtContext* ClickHouseParser::QueryContext::killStmt() { - return getRuleContext<ClickHouseParser::KillStmtContext>(0); -} - -ClickHouseParser::OptimizeStmtContext* ClickHouseParser::QueryContext::optimizeStmt() { - return getRuleContext<ClickHouseParser::OptimizeStmtContext>(0); -} - -ClickHouseParser::RenameStmtContext* ClickHouseParser::QueryContext::renameStmt() { - return getRuleContext<ClickHouseParser::RenameStmtContext>(0); -} - -ClickHouseParser::SelectUnionStmtContext* ClickHouseParser::QueryContext::selectUnionStmt() { - return getRuleContext<ClickHouseParser::SelectUnionStmtContext>(0); -} - -ClickHouseParser::SetStmtContext* ClickHouseParser::QueryContext::setStmt() { - return getRuleContext<ClickHouseParser::SetStmtContext>(0); -} - -ClickHouseParser::ShowStmtContext* ClickHouseParser::QueryContext::showStmt() { - return getRuleContext<ClickHouseParser::ShowStmtContext>(0); -} - -ClickHouseParser::SystemStmtContext* ClickHouseParser::QueryContext::systemStmt() { - return getRuleContext<ClickHouseParser::SystemStmtContext>(0); -} - -ClickHouseParser::TruncateStmtContext* ClickHouseParser::QueryContext::truncateStmt() { - return getRuleContext<ClickHouseParser::TruncateStmtContext>(0); -} - -ClickHouseParser::UseStmtContext* ClickHouseParser::QueryContext::useStmt() { - return getRuleContext<ClickHouseParser::UseStmtContext>(0); -} - 
-ClickHouseParser::WatchStmtContext* ClickHouseParser::QueryContext::watchStmt() { - return getRuleContext<ClickHouseParser::WatchStmtContext>(0); -} - - -size_t ClickHouseParser::QueryContext::getRuleIndex() const { - return ClickHouseParser::RuleQuery; -} - -antlrcpp::Any ClickHouseParser::QueryContext::accept(tree::ParseTreeVisitor *visitor) { - if (auto parserVisitor = dynamic_cast<ClickHouseParserVisitor*>(visitor)) - return parserVisitor->visitQuery(this); - else - return visitor->visitChildren(this); -} - -ClickHouseParser::QueryContext* ClickHouseParser::query() { - QueryContext *_localctx = _tracker.createInstance<QueryContext>(_ctx, getState()); - enterRule(_localctx, 2, ClickHouseParser::RuleQuery); - - auto onExit = finally([=] { - exitRule(); - }); - try { - setState(252); - _errHandler->sync(this); - switch (getInterpreter<atn::ParserATNSimulator>()->adaptivePredict(_input, 4, _ctx)) { - case 1: { - enterOuterAlt(_localctx, 1); - setState(234); - alterStmt(); - break; - } - - case 2: { - enterOuterAlt(_localctx, 2); - setState(235); - attachStmt(); - break; - } - - case 3: { - enterOuterAlt(_localctx, 3); - setState(236); - checkStmt(); - break; - } - - case 4: { - enterOuterAlt(_localctx, 4); - setState(237); - createStmt(); - break; - } - - case 5: { - enterOuterAlt(_localctx, 5); - setState(238); - describeStmt(); - break; - } - - case 6: { - enterOuterAlt(_localctx, 6); - setState(239); - dropStmt(); - break; - } - - case 7: { - enterOuterAlt(_localctx, 7); - setState(240); - existsStmt(); - break; - } - - case 8: { - enterOuterAlt(_localctx, 8); - setState(241); - explainStmt(); - break; - } - - case 9: { - enterOuterAlt(_localctx, 9); - setState(242); - killStmt(); - break; - } - - case 10: { - enterOuterAlt(_localctx, 10); - setState(243); - optimizeStmt(); - break; - } - - case 11: { - enterOuterAlt(_localctx, 11); - setState(244); - renameStmt(); - break; - } - - case 12: { - enterOuterAlt(_localctx, 12); - setState(245); - selectUnionStmt(); - break; - } - - case 13: { - enterOuterAlt(_localctx, 13); - setState(246); - setStmt(); - break; - } - - case 14: { - enterOuterAlt(_localctx, 14); - setState(247); - showStmt(); - break; - } - - case 15: { - enterOuterAlt(_localctx, 15); - setState(248); - systemStmt(); - break; - } - - case 16: { - enterOuterAlt(_localctx, 16); - setState(249); - truncateStmt(); - break; - } - - case 17: { - enterOuterAlt(_localctx, 17); - setState(250); - useStmt(); - break; - } - - case 18: { - enterOuterAlt(_localctx, 18); - setState(251); - watchStmt(); - break; - } - - } - - } - catch (RecognitionException &e) { - _errHandler->reportError(this, e); - _localctx->exception = std::current_exception(); - _errHandler->recover(this, _localctx->exception); - } - - return _localctx; -} - -//----------------- AlterStmtContext ------------------------------------------------------------------ - -ClickHouseParser::AlterStmtContext::AlterStmtContext(ParserRuleContext *parent, size_t invokingState) - : ParserRuleContext(parent, invokingState) { -} - - -size_t ClickHouseParser::AlterStmtContext::getRuleIndex() const { - return ClickHouseParser::RuleAlterStmt; -} - -void ClickHouseParser::AlterStmtContext::copyFrom(AlterStmtContext *ctx) { - ParserRuleContext::copyFrom(ctx); -} - -//----------------- AlterTableStmtContext ------------------------------------------------------------------ - -tree::TerminalNode* ClickHouseParser::AlterTableStmtContext::ALTER() { - return getToken(ClickHouseParser::ALTER, 0); -} - -tree::TerminalNode* ClickHouseParser::AlterTableStmtContext::TABLE() { - return getToken(ClickHouseParser::TABLE, 0); -} - 
-ClickHouseParser::TableIdentifierContext* ClickHouseParser::AlterTableStmtContext::tableIdentifier() { - return getRuleContext<ClickHouseParser::TableIdentifierContext>(0); -} - -std::vector<ClickHouseParser::AlterTableClauseContext *> ClickHouseParser::AlterTableStmtContext::alterTableClause() { - return getRuleContexts<ClickHouseParser::AlterTableClauseContext>(); -} - -ClickHouseParser::AlterTableClauseContext* ClickHouseParser::AlterTableStmtContext::alterTableClause(size_t i) { - return getRuleContext<ClickHouseParser::AlterTableClauseContext>(i); -} - -ClickHouseParser::ClusterClauseContext* ClickHouseParser::AlterTableStmtContext::clusterClause() { - return getRuleContext<ClickHouseParser::ClusterClauseContext>(0); -} - -std::vector<tree::TerminalNode *> ClickHouseParser::AlterTableStmtContext::COMMA() { - return getTokens(ClickHouseParser::COMMA); -} - -tree::TerminalNode* ClickHouseParser::AlterTableStmtContext::COMMA(size_t i) { - return getToken(ClickHouseParser::COMMA, i); -} - -ClickHouseParser::AlterTableStmtContext::AlterTableStmtContext(AlterStmtContext *ctx) { copyFrom(ctx); } - -antlrcpp::Any ClickHouseParser::AlterTableStmtContext::accept(tree::ParseTreeVisitor *visitor) { - if (auto parserVisitor = dynamic_cast<ClickHouseParserVisitor*>(visitor)) - return parserVisitor->visitAlterTableStmt(this); - else - return visitor->visitChildren(this); -} -ClickHouseParser::AlterStmtContext* ClickHouseParser::alterStmt() { - AlterStmtContext *_localctx = _tracker.createInstance<AlterStmtContext>(_ctx, getState()); - enterRule(_localctx, 4, ClickHouseParser::RuleAlterStmt); - size_t _la = 0; - - auto onExit = finally([=] { - exitRule(); - }); - try { - _localctx = dynamic_cast<AlterStmtContext *>(_tracker.createInstance<ClickHouseParser::AlterTableStmtContext>(_localctx)); - enterOuterAlt(_localctx, 1); - setState(254); - match(ClickHouseParser::ALTER); - setState(255); - match(ClickHouseParser::TABLE); - setState(256); - tableIdentifier(); - setState(258); - _errHandler->sync(this); - - _la = _input->LA(1); - if (_la == ClickHouseParser::ON) { - setState(257); - clusterClause(); - } - setState(260); - alterTableClause(); - setState(265); - _errHandler->sync(this); - _la = _input->LA(1); - while (_la == ClickHouseParser::COMMA) { - setState(261); - match(ClickHouseParser::COMMA); - setState(262); - alterTableClause(); - setState(267); - _errHandler->sync(this); - _la = _input->LA(1); - } - - } - catch (RecognitionException &e) { - _errHandler->reportError(this, e); - _localctx->exception = std::current_exception(); - _errHandler->recover(this, _localctx->exception); - } - - return _localctx; -} - -//----------------- AlterTableClauseContext ------------------------------------------------------------------ - -ClickHouseParser::AlterTableClauseContext::AlterTableClauseContext(ParserRuleContext *parent, size_t invokingState) - : ParserRuleContext(parent, invokingState) { -} - - -size_t ClickHouseParser::AlterTableClauseContext::getRuleIndex() const { - return ClickHouseParser::RuleAlterTableClause; -} - -void ClickHouseParser::AlterTableClauseContext::copyFrom(AlterTableClauseContext *ctx) { - ParserRuleContext::copyFrom(ctx); -} - -//----------------- AlterTableClauseReplaceContext ------------------------------------------------------------------ - -tree::TerminalNode* ClickHouseParser::AlterTableClauseReplaceContext::REPLACE() { - return getToken(ClickHouseParser::REPLACE, 0); -} - -ClickHouseParser::PartitionClauseContext* ClickHouseParser::AlterTableClauseReplaceContext::partitionClause() { - return getRuleContext<ClickHouseParser::PartitionClauseContext>(0); -} - -tree::TerminalNode* ClickHouseParser::AlterTableClauseReplaceContext::FROM() { - return getToken(ClickHouseParser::FROM, 0); -} - -ClickHouseParser::TableIdentifierContext* ClickHouseParser::AlterTableClauseReplaceContext::tableIdentifier() { - return getRuleContext<ClickHouseParser::TableIdentifierContext>(0); -} - 
-ClickHouseParser::AlterTableClauseReplaceContext::AlterTableClauseReplaceContext(AlterTableClauseContext *ctx) { copyFrom(ctx); } - -antlrcpp::Any ClickHouseParser::AlterTableClauseReplaceContext::accept(tree::ParseTreeVisitor *visitor) { - if (auto parserVisitor = dynamic_cast<ClickHouseParserVisitor*>(visitor)) - return parserVisitor->visitAlterTableClauseReplace(this); - else - return visitor->visitChildren(this); -} -//----------------- AlterTableClauseModifyOrderByContext ------------------------------------------------------------------ - -tree::TerminalNode* ClickHouseParser::AlterTableClauseModifyOrderByContext::MODIFY() { - return getToken(ClickHouseParser::MODIFY, 0); -} - -tree::TerminalNode* ClickHouseParser::AlterTableClauseModifyOrderByContext::ORDER() { - return getToken(ClickHouseParser::ORDER, 0); -} - -tree::TerminalNode* ClickHouseParser::AlterTableClauseModifyOrderByContext::BY() { - return getToken(ClickHouseParser::BY, 0); -} - -ClickHouseParser::ColumnExprContext* ClickHouseParser::AlterTableClauseModifyOrderByContext::columnExpr() { - return getRuleContext<ClickHouseParser::ColumnExprContext>(0); -} - -ClickHouseParser::AlterTableClauseModifyOrderByContext::AlterTableClauseModifyOrderByContext(AlterTableClauseContext *ctx) { copyFrom(ctx); } - -antlrcpp::Any ClickHouseParser::AlterTableClauseModifyOrderByContext::accept(tree::ParseTreeVisitor *visitor) { - if (auto parserVisitor = dynamic_cast<ClickHouseParserVisitor*>(visitor)) - return parserVisitor->visitAlterTableClauseModifyOrderBy(this); - else - return visitor->visitChildren(this); -} -//----------------- AlterTableClauseUpdateContext ------------------------------------------------------------------ - -tree::TerminalNode* ClickHouseParser::AlterTableClauseUpdateContext::UPDATE() { - return getToken(ClickHouseParser::UPDATE, 0); -} - -ClickHouseParser::AssignmentExprListContext* ClickHouseParser::AlterTableClauseUpdateContext::assignmentExprList() { - return getRuleContext<ClickHouseParser::AssignmentExprListContext>(0); -} - -ClickHouseParser::WhereClauseContext* ClickHouseParser::AlterTableClauseUpdateContext::whereClause() { - return getRuleContext<ClickHouseParser::WhereClauseContext>(0); -} - -ClickHouseParser::AlterTableClauseUpdateContext::AlterTableClauseUpdateContext(AlterTableClauseContext *ctx) { copyFrom(ctx); } - -antlrcpp::Any ClickHouseParser::AlterTableClauseUpdateContext::accept(tree::ParseTreeVisitor *visitor) { - if (auto parserVisitor = dynamic_cast<ClickHouseParserVisitor*>(visitor)) - return parserVisitor->visitAlterTableClauseUpdate(this); - else - return visitor->visitChildren(this); -} -//----------------- AlterTableClauseClearProjectionContext ------------------------------------------------------------------ - -tree::TerminalNode* ClickHouseParser::AlterTableClauseClearProjectionContext::CLEAR() { - return getToken(ClickHouseParser::CLEAR, 0); -} - -tree::TerminalNode* ClickHouseParser::AlterTableClauseClearProjectionContext::PROJECTION() { - return getToken(ClickHouseParser::PROJECTION, 0); -} - -ClickHouseParser::NestedIdentifierContext* ClickHouseParser::AlterTableClauseClearProjectionContext::nestedIdentifier() { - return getRuleContext<ClickHouseParser::NestedIdentifierContext>(0); -} - -tree::TerminalNode* ClickHouseParser::AlterTableClauseClearProjectionContext::IF() { - return getToken(ClickHouseParser::IF, 0); -} - -tree::TerminalNode* ClickHouseParser::AlterTableClauseClearProjectionContext::EXISTS() { - return getToken(ClickHouseParser::EXISTS, 0); -} - -tree::TerminalNode* ClickHouseParser::AlterTableClauseClearProjectionContext::IN() { - return getToken(ClickHouseParser::IN, 0); -} - -ClickHouseParser::PartitionClauseContext* ClickHouseParser::AlterTableClauseClearProjectionContext::partitionClause() { - 
return getRuleContext<ClickHouseParser::PartitionClauseContext>(0); -} - -ClickHouseParser::AlterTableClauseClearProjectionContext::AlterTableClauseClearProjectionContext(AlterTableClauseContext *ctx) { copyFrom(ctx); } - -antlrcpp::Any ClickHouseParser::AlterTableClauseClearProjectionContext::accept(tree::ParseTreeVisitor *visitor) { - if (auto parserVisitor = dynamic_cast<ClickHouseParserVisitor*>(visitor)) - return parserVisitor->visitAlterTableClauseClearProjection(this); - else - return visitor->visitChildren(this); -} -//----------------- AlterTableClauseModifyRemoveContext ------------------------------------------------------------------ - -tree::TerminalNode* ClickHouseParser::AlterTableClauseModifyRemoveContext::MODIFY() { - return getToken(ClickHouseParser::MODIFY, 0); -} - -tree::TerminalNode* ClickHouseParser::AlterTableClauseModifyRemoveContext::COLUMN() { - return getToken(ClickHouseParser::COLUMN, 0); -} - -ClickHouseParser::NestedIdentifierContext* ClickHouseParser::AlterTableClauseModifyRemoveContext::nestedIdentifier() { - return getRuleContext<ClickHouseParser::NestedIdentifierContext>(0); -} - -tree::TerminalNode* ClickHouseParser::AlterTableClauseModifyRemoveContext::REMOVE() { - return getToken(ClickHouseParser::REMOVE, 0); -} - -ClickHouseParser::TableColumnPropertyTypeContext* ClickHouseParser::AlterTableClauseModifyRemoveContext::tableColumnPropertyType() { - return getRuleContext<ClickHouseParser::TableColumnPropertyTypeContext>(0); -} - -tree::TerminalNode* ClickHouseParser::AlterTableClauseModifyRemoveContext::IF() { - return getToken(ClickHouseParser::IF, 0); -} - -tree::TerminalNode* ClickHouseParser::AlterTableClauseModifyRemoveContext::EXISTS() { - return getToken(ClickHouseParser::EXISTS, 0); -} - -ClickHouseParser::AlterTableClauseModifyRemoveContext::AlterTableClauseModifyRemoveContext(AlterTableClauseContext *ctx) { copyFrom(ctx); } - -antlrcpp::Any ClickHouseParser::AlterTableClauseModifyRemoveContext::accept(tree::ParseTreeVisitor *visitor) { - if (auto parserVisitor = dynamic_cast<ClickHouseParserVisitor*>(visitor)) - return parserVisitor->visitAlterTableClauseModifyRemove(this); - else - return visitor->visitChildren(this); -} -//----------------- AlterTableClauseDeleteContext ------------------------------------------------------------------ - -tree::TerminalNode* ClickHouseParser::AlterTableClauseDeleteContext::DELETE() { - return getToken(ClickHouseParser::DELETE, 0); -} - -tree::TerminalNode* ClickHouseParser::AlterTableClauseDeleteContext::WHERE() { - return getToken(ClickHouseParser::WHERE, 0); -} - -ClickHouseParser::ColumnExprContext* ClickHouseParser::AlterTableClauseDeleteContext::columnExpr() { - return getRuleContext<ClickHouseParser::ColumnExprContext>(0); -} - -ClickHouseParser::AlterTableClauseDeleteContext::AlterTableClauseDeleteContext(AlterTableClauseContext *ctx) { copyFrom(ctx); } - -antlrcpp::Any ClickHouseParser::AlterTableClauseDeleteContext::accept(tree::ParseTreeVisitor *visitor) { - if (auto parserVisitor = dynamic_cast<ClickHouseParserVisitor*>(visitor)) - return parserVisitor->visitAlterTableClauseDelete(this); - else - return visitor->visitChildren(this); -} -//----------------- AlterTableClauseCommentContext ------------------------------------------------------------------ - -tree::TerminalNode* ClickHouseParser::AlterTableClauseCommentContext::COMMENT() { - return getToken(ClickHouseParser::COMMENT, 0); -} - -tree::TerminalNode* ClickHouseParser::AlterTableClauseCommentContext::COLUMN() { - return getToken(ClickHouseParser::COLUMN, 0); -} - -ClickHouseParser::NestedIdentifierContext* ClickHouseParser::AlterTableClauseCommentContext::nestedIdentifier() { - return getRuleContext<ClickHouseParser::NestedIdentifierContext>(0); -} - -tree::TerminalNode* 
ClickHouseParser::AlterTableClauseCommentContext::STRING_LITERAL() { - return getToken(ClickHouseParser::STRING_LITERAL, 0); -} - -tree::TerminalNode* ClickHouseParser::AlterTableClauseCommentContext::IF() { - return getToken(ClickHouseParser::IF, 0); -} - -tree::TerminalNode* ClickHouseParser::AlterTableClauseCommentContext::EXISTS() { - return getToken(ClickHouseParser::EXISTS, 0); -} - -ClickHouseParser::AlterTableClauseCommentContext::AlterTableClauseCommentContext(AlterTableClauseContext *ctx) { copyFrom(ctx); } - -antlrcpp::Any ClickHouseParser::AlterTableClauseCommentContext::accept(tree::ParseTreeVisitor *visitor) { - if (auto parserVisitor = dynamic_cast<ClickHouseParserVisitor*>(visitor)) - return parserVisitor->visitAlterTableClauseComment(this); - else - return visitor->visitChildren(this); -} -//----------------- AlterTableClauseDropColumnContext ------------------------------------------------------------------ - -tree::TerminalNode* ClickHouseParser::AlterTableClauseDropColumnContext::DROP() { - return getToken(ClickHouseParser::DROP, 0); -} - -tree::TerminalNode* ClickHouseParser::AlterTableClauseDropColumnContext::COLUMN() { - return getToken(ClickHouseParser::COLUMN, 0); -} - -ClickHouseParser::NestedIdentifierContext* ClickHouseParser::AlterTableClauseDropColumnContext::nestedIdentifier() { - return getRuleContext<ClickHouseParser::NestedIdentifierContext>(0); -} - -tree::TerminalNode* ClickHouseParser::AlterTableClauseDropColumnContext::IF() { - return getToken(ClickHouseParser::IF, 0); -} - -tree::TerminalNode* ClickHouseParser::AlterTableClauseDropColumnContext::EXISTS() { - return getToken(ClickHouseParser::EXISTS, 0); -} - -ClickHouseParser::AlterTableClauseDropColumnContext::AlterTableClauseDropColumnContext(AlterTableClauseContext *ctx) { copyFrom(ctx); } - -antlrcpp::Any ClickHouseParser::AlterTableClauseDropColumnContext::accept(tree::ParseTreeVisitor *visitor) { - if (auto parserVisitor = dynamic_cast<ClickHouseParserVisitor*>(visitor)) - return parserVisitor->visitAlterTableClauseDropColumn(this); - else - return visitor->visitChildren(this); -} -//----------------- AlterTableClauseDetachContext ------------------------------------------------------------------ - -tree::TerminalNode* ClickHouseParser::AlterTableClauseDetachContext::DETACH() { - return getToken(ClickHouseParser::DETACH, 0); -} - -ClickHouseParser::PartitionClauseContext* ClickHouseParser::AlterTableClauseDetachContext::partitionClause() { - return getRuleContext<ClickHouseParser::PartitionClauseContext>(0); -} - -ClickHouseParser::AlterTableClauseDetachContext::AlterTableClauseDetachContext(AlterTableClauseContext *ctx) { copyFrom(ctx); } - -antlrcpp::Any ClickHouseParser::AlterTableClauseDetachContext::accept(tree::ParseTreeVisitor *visitor) { - if (auto parserVisitor = dynamic_cast<ClickHouseParserVisitor*>(visitor)) - return parserVisitor->visitAlterTableClauseDetach(this); - else - return visitor->visitChildren(this); -} -//----------------- AlterTableClauseAddIndexContext ------------------------------------------------------------------ - -tree::TerminalNode* ClickHouseParser::AlterTableClauseAddIndexContext::ADD() { - return getToken(ClickHouseParser::ADD, 0); -} - -tree::TerminalNode* ClickHouseParser::AlterTableClauseAddIndexContext::INDEX() { - return getToken(ClickHouseParser::INDEX, 0); -} - -ClickHouseParser::TableIndexDfntContext* ClickHouseParser::AlterTableClauseAddIndexContext::tableIndexDfnt() { - return getRuleContext<ClickHouseParser::TableIndexDfntContext>(0); -} - -tree::TerminalNode* ClickHouseParser::AlterTableClauseAddIndexContext::IF() { - return getToken(ClickHouseParser::IF, 0); -} - -tree::TerminalNode* ClickHouseParser::AlterTableClauseAddIndexContext::NOT() { - 
return getToken(ClickHouseParser::NOT, 0); -} - -tree::TerminalNode* ClickHouseParser::AlterTableClauseAddIndexContext::EXISTS() { - return getToken(ClickHouseParser::EXISTS, 0); -} - -tree::TerminalNode* ClickHouseParser::AlterTableClauseAddIndexContext::AFTER() { - return getToken(ClickHouseParser::AFTER, 0); -} - -ClickHouseParser::NestedIdentifierContext* ClickHouseParser::AlterTableClauseAddIndexContext::nestedIdentifier() { - return getRuleContext<ClickHouseParser::NestedIdentifierContext>(0); -} - -ClickHouseParser::AlterTableClauseAddIndexContext::AlterTableClauseAddIndexContext(AlterTableClauseContext *ctx) { copyFrom(ctx); } - -antlrcpp::Any ClickHouseParser::AlterTableClauseAddIndexContext::accept(tree::ParseTreeVisitor *visitor) { - if (auto parserVisitor = dynamic_cast<ClickHouseParserVisitor*>(visitor)) - return parserVisitor->visitAlterTableClauseAddIndex(this); - else - return visitor->visitChildren(this); -} -//----------------- AlterTableClauseDropPartitionContext ------------------------------------------------------------------ - -tree::TerminalNode* ClickHouseParser::AlterTableClauseDropPartitionContext::DROP() { - return getToken(ClickHouseParser::DROP, 0); -} - -ClickHouseParser::PartitionClauseContext* ClickHouseParser::AlterTableClauseDropPartitionContext::partitionClause() { - return getRuleContext<ClickHouseParser::PartitionClauseContext>(0); -} - -ClickHouseParser::AlterTableClauseDropPartitionContext::AlterTableClauseDropPartitionContext(AlterTableClauseContext *ctx) { copyFrom(ctx); } - -antlrcpp::Any ClickHouseParser::AlterTableClauseDropPartitionContext::accept(tree::ParseTreeVisitor *visitor) { - if (auto parserVisitor = dynamic_cast<ClickHouseParserVisitor*>(visitor)) - return parserVisitor->visitAlterTableClauseDropPartition(this); - else - return visitor->visitChildren(this); -} -//----------------- AlterTableClauseMaterializeIndexContext ------------------------------------------------------------------ - -tree::TerminalNode* ClickHouseParser::AlterTableClauseMaterializeIndexContext::MATERIALIZE() { - return getToken(ClickHouseParser::MATERIALIZE, 0); -} - -tree::TerminalNode* ClickHouseParser::AlterTableClauseMaterializeIndexContext::INDEX() { - return getToken(ClickHouseParser::INDEX, 0); -} - -ClickHouseParser::NestedIdentifierContext* ClickHouseParser::AlterTableClauseMaterializeIndexContext::nestedIdentifier() { - return getRuleContext<ClickHouseParser::NestedIdentifierContext>(0); -} - -tree::TerminalNode* ClickHouseParser::AlterTableClauseMaterializeIndexContext::IF() { - return getToken(ClickHouseParser::IF, 0); -} - -tree::TerminalNode* ClickHouseParser::AlterTableClauseMaterializeIndexContext::EXISTS() { - return getToken(ClickHouseParser::EXISTS, 0); -} - -tree::TerminalNode* ClickHouseParser::AlterTableClauseMaterializeIndexContext::IN() { - return getToken(ClickHouseParser::IN, 0); -} - -ClickHouseParser::PartitionClauseContext* ClickHouseParser::AlterTableClauseMaterializeIndexContext::partitionClause() { - return getRuleContext<ClickHouseParser::PartitionClauseContext>(0); -} - -ClickHouseParser::AlterTableClauseMaterializeIndexContext::AlterTableClauseMaterializeIndexContext(AlterTableClauseContext *ctx) { copyFrom(ctx); } - -antlrcpp::Any ClickHouseParser::AlterTableClauseMaterializeIndexContext::accept(tree::ParseTreeVisitor *visitor) { - if (auto parserVisitor = dynamic_cast<ClickHouseParserVisitor*>(visitor)) - return parserVisitor->visitAlterTableClauseMaterializeIndex(this); - else - return visitor->visitChildren(this); -} -//----------------- AlterTableClauseMaterializeProjectionContext ------------------------------------------------------------------ - -tree::TerminalNode* ClickHouseParser::AlterTableClauseMaterializeProjectionContext::MATERIALIZE() { - return 
getToken(ClickHouseParser::MATERIALIZE, 0); -} - -tree::TerminalNode* ClickHouseParser::AlterTableClauseMaterializeProjectionContext::PROJECTION() { - return getToken(ClickHouseParser::PROJECTION, 0); -} - -ClickHouseParser::NestedIdentifierContext* ClickHouseParser::AlterTableClauseMaterializeProjectionContext::nestedIdentifier() { - return getRuleContext<ClickHouseParser::NestedIdentifierContext>(0); -} - -tree::TerminalNode* ClickHouseParser::AlterTableClauseMaterializeProjectionContext::IF() { - return getToken(ClickHouseParser::IF, 0); -} - -tree::TerminalNode* ClickHouseParser::AlterTableClauseMaterializeProjectionContext::EXISTS() { - return getToken(ClickHouseParser::EXISTS, 0); -} - -tree::TerminalNode* ClickHouseParser::AlterTableClauseMaterializeProjectionContext::IN() { - return getToken(ClickHouseParser::IN, 0); -} - -ClickHouseParser::PartitionClauseContext* ClickHouseParser::AlterTableClauseMaterializeProjectionContext::partitionClause() { - return getRuleContext<ClickHouseParser::PartitionClauseContext>(0); -} - -ClickHouseParser::AlterTableClauseMaterializeProjectionContext::AlterTableClauseMaterializeProjectionContext(AlterTableClauseContext *ctx) { copyFrom(ctx); } - -antlrcpp::Any ClickHouseParser::AlterTableClauseMaterializeProjectionContext::accept(tree::ParseTreeVisitor *visitor) { - if (auto parserVisitor = dynamic_cast<ClickHouseParserVisitor*>(visitor)) - return parserVisitor->visitAlterTableClauseMaterializeProjection(this); - else - return visitor->visitChildren(this); -} -//----------------- AlterTableClauseMovePartitionContext ------------------------------------------------------------------ - -tree::TerminalNode* ClickHouseParser::AlterTableClauseMovePartitionContext::MOVE() { - return getToken(ClickHouseParser::MOVE, 0); -} - -ClickHouseParser::PartitionClauseContext* ClickHouseParser::AlterTableClauseMovePartitionContext::partitionClause() { - return getRuleContext<ClickHouseParser::PartitionClauseContext>(0); -} - -tree::TerminalNode* ClickHouseParser::AlterTableClauseMovePartitionContext::TO() { - return getToken(ClickHouseParser::TO, 0); -} - -tree::TerminalNode* ClickHouseParser::AlterTableClauseMovePartitionContext::DISK() { - return getToken(ClickHouseParser::DISK, 0); -} - -tree::TerminalNode* ClickHouseParser::AlterTableClauseMovePartitionContext::STRING_LITERAL() { - return getToken(ClickHouseParser::STRING_LITERAL, 0); -} - -tree::TerminalNode* ClickHouseParser::AlterTableClauseMovePartitionContext::VOLUME() { - return getToken(ClickHouseParser::VOLUME, 0); -} - -tree::TerminalNode* ClickHouseParser::AlterTableClauseMovePartitionContext::TABLE() { - return getToken(ClickHouseParser::TABLE, 0); -} - -ClickHouseParser::TableIdentifierContext* ClickHouseParser::AlterTableClauseMovePartitionContext::tableIdentifier() { - return getRuleContext<ClickHouseParser::TableIdentifierContext>(0); -} - -ClickHouseParser::AlterTableClauseMovePartitionContext::AlterTableClauseMovePartitionContext(AlterTableClauseContext *ctx) { copyFrom(ctx); } - -antlrcpp::Any ClickHouseParser::AlterTableClauseMovePartitionContext::accept(tree::ParseTreeVisitor *visitor) { - if (auto parserVisitor = dynamic_cast<ClickHouseParserVisitor*>(visitor)) - return parserVisitor->visitAlterTableClauseMovePartition(this); - else - return visitor->visitChildren(this); -} -//----------------- AlterTableClauseRenameContext ------------------------------------------------------------------ - -tree::TerminalNode* ClickHouseParser::AlterTableClauseRenameContext::RENAME() { - return getToken(ClickHouseParser::RENAME, 0); -} - -tree::TerminalNode* ClickHouseParser::AlterTableClauseRenameContext::COLUMN() { - return getToken(ClickHouseParser::COLUMN, 0); -} - -std::vector<ClickHouseParser::NestedIdentifierContext *> 
ClickHouseParser::AlterTableClauseRenameContext::nestedIdentifier() { - return getRuleContexts<ClickHouseParser::NestedIdentifierContext>(); -} - -ClickHouseParser::NestedIdentifierContext* ClickHouseParser::AlterTableClauseRenameContext::nestedIdentifier(size_t i) { - return getRuleContext<ClickHouseParser::NestedIdentifierContext>(i); -} - -tree::TerminalNode* ClickHouseParser::AlterTableClauseRenameContext::TO() { - return getToken(ClickHouseParser::TO, 0); -} - -tree::TerminalNode* ClickHouseParser::AlterTableClauseRenameContext::IF() { - return getToken(ClickHouseParser::IF, 0); -} - -tree::TerminalNode* ClickHouseParser::AlterTableClauseRenameContext::EXISTS() { - return getToken(ClickHouseParser::EXISTS, 0); -} - -ClickHouseParser::AlterTableClauseRenameContext::AlterTableClauseRenameContext(AlterTableClauseContext *ctx) { copyFrom(ctx); } - -antlrcpp::Any ClickHouseParser::AlterTableClauseRenameContext::accept(tree::ParseTreeVisitor *visitor) { - if (auto parserVisitor = dynamic_cast<ClickHouseParserVisitor*>(visitor)) - return parserVisitor->visitAlterTableClauseRename(this); - else - return visitor->visitChildren(this); -} -//----------------- AlterTableClauseFreezePartitionContext ------------------------------------------------------------------ - -tree::TerminalNode* ClickHouseParser::AlterTableClauseFreezePartitionContext::FREEZE() { - return getToken(ClickHouseParser::FREEZE, 0); -} - -ClickHouseParser::PartitionClauseContext* ClickHouseParser::AlterTableClauseFreezePartitionContext::partitionClause() { - return getRuleContext<ClickHouseParser::PartitionClauseContext>(0); -} - -ClickHouseParser::AlterTableClauseFreezePartitionContext::AlterTableClauseFreezePartitionContext(AlterTableClauseContext *ctx) { copyFrom(ctx); } - -antlrcpp::Any ClickHouseParser::AlterTableClauseFreezePartitionContext::accept(tree::ParseTreeVisitor *visitor) { - if (auto parserVisitor = dynamic_cast<ClickHouseParserVisitor*>(visitor)) - return parserVisitor->visitAlterTableClauseFreezePartition(this); - else - return visitor->visitChildren(this); -} -//----------------- AlterTableClauseClearColumnContext ------------------------------------------------------------------ - -tree::TerminalNode* ClickHouseParser::AlterTableClauseClearColumnContext::CLEAR() { - return getToken(ClickHouseParser::CLEAR, 0); -} - -tree::TerminalNode* ClickHouseParser::AlterTableClauseClearColumnContext::COLUMN() { - return getToken(ClickHouseParser::COLUMN, 0); -} - -ClickHouseParser::NestedIdentifierContext* ClickHouseParser::AlterTableClauseClearColumnContext::nestedIdentifier() { - return getRuleContext<ClickHouseParser::NestedIdentifierContext>(0); -} - -tree::TerminalNode* ClickHouseParser::AlterTableClauseClearColumnContext::IF() { - return getToken(ClickHouseParser::IF, 0); -} - -tree::TerminalNode* ClickHouseParser::AlterTableClauseClearColumnContext::EXISTS() { - return getToken(ClickHouseParser::EXISTS, 0); -} - -tree::TerminalNode* ClickHouseParser::AlterTableClauseClearColumnContext::IN() { - return getToken(ClickHouseParser::IN, 0); -} - -ClickHouseParser::PartitionClauseContext* ClickHouseParser::AlterTableClauseClearColumnContext::partitionClause() { - return getRuleContext<ClickHouseParser::PartitionClauseContext>(0); -} - -ClickHouseParser::AlterTableClauseClearColumnContext::AlterTableClauseClearColumnContext(AlterTableClauseContext *ctx) { copyFrom(ctx); } - -antlrcpp::Any ClickHouseParser::AlterTableClauseClearColumnContext::accept(tree::ParseTreeVisitor *visitor) { - if (auto parserVisitor = dynamic_cast<ClickHouseParserVisitor*>(visitor)) - return parserVisitor->visitAlterTableClauseClearColumn(this); - else - return visitor->visitChildren(this); -} -//----------------- AlterTableClauseModifyContext ------------------------------------------------------------------ - 
-tree::TerminalNode* ClickHouseParser::AlterTableClauseModifyContext::MODIFY() { - return getToken(ClickHouseParser::MODIFY, 0); -} - -tree::TerminalNode* ClickHouseParser::AlterTableClauseModifyContext::COLUMN() { - return getToken(ClickHouseParser::COLUMN, 0); -} - -ClickHouseParser::TableColumnDfntContext* ClickHouseParser::AlterTableClauseModifyContext::tableColumnDfnt() { - return getRuleContext(0); -} - -tree::TerminalNode* ClickHouseParser::AlterTableClauseModifyContext::IF() { - return getToken(ClickHouseParser::IF, 0); -} - -tree::TerminalNode* ClickHouseParser::AlterTableClauseModifyContext::EXISTS() { - return getToken(ClickHouseParser::EXISTS, 0); -} - -ClickHouseParser::AlterTableClauseModifyContext::AlterTableClauseModifyContext(AlterTableClauseContext *ctx) { copyFrom(ctx); } - -antlrcpp::Any ClickHouseParser::AlterTableClauseModifyContext::accept(tree::ParseTreeVisitor *visitor) { - if (auto parserVisitor = dynamic_cast(visitor)) - return parserVisitor->visitAlterTableClauseModify(this); - else - return visitor->visitChildren(this); -} -//----------------- AlterTableClauseClearIndexContext ------------------------------------------------------------------ - -tree::TerminalNode* ClickHouseParser::AlterTableClauseClearIndexContext::CLEAR() { - return getToken(ClickHouseParser::CLEAR, 0); -} - -tree::TerminalNode* ClickHouseParser::AlterTableClauseClearIndexContext::INDEX() { - return getToken(ClickHouseParser::INDEX, 0); -} - -ClickHouseParser::NestedIdentifierContext* ClickHouseParser::AlterTableClauseClearIndexContext::nestedIdentifier() { - return getRuleContext(0); -} - -tree::TerminalNode* ClickHouseParser::AlterTableClauseClearIndexContext::IF() { - return getToken(ClickHouseParser::IF, 0); -} - -tree::TerminalNode* ClickHouseParser::AlterTableClauseClearIndexContext::EXISTS() { - return getToken(ClickHouseParser::EXISTS, 0); -} - -tree::TerminalNode* ClickHouseParser::AlterTableClauseClearIndexContext::IN() { - return getToken(ClickHouseParser::IN, 0); -} - -ClickHouseParser::PartitionClauseContext* ClickHouseParser::AlterTableClauseClearIndexContext::partitionClause() { - return getRuleContext(0); -} - -ClickHouseParser::AlterTableClauseClearIndexContext::AlterTableClauseClearIndexContext(AlterTableClauseContext *ctx) { copyFrom(ctx); } - -antlrcpp::Any ClickHouseParser::AlterTableClauseClearIndexContext::accept(tree::ParseTreeVisitor *visitor) { - if (auto parserVisitor = dynamic_cast(visitor)) - return parserVisitor->visitAlterTableClauseClearIndex(this); - else - return visitor->visitChildren(this); -} -//----------------- AlterTableClauseRemoveTTLContext ------------------------------------------------------------------ - -tree::TerminalNode* ClickHouseParser::AlterTableClauseRemoveTTLContext::REMOVE() { - return getToken(ClickHouseParser::REMOVE, 0); -} - -tree::TerminalNode* ClickHouseParser::AlterTableClauseRemoveTTLContext::TTL() { - return getToken(ClickHouseParser::TTL, 0); -} - -ClickHouseParser::AlterTableClauseRemoveTTLContext::AlterTableClauseRemoveTTLContext(AlterTableClauseContext *ctx) { copyFrom(ctx); } - -antlrcpp::Any ClickHouseParser::AlterTableClauseRemoveTTLContext::accept(tree::ParseTreeVisitor *visitor) { - if (auto parserVisitor = dynamic_cast(visitor)) - return parserVisitor->visitAlterTableClauseRemoveTTL(this); - else - return visitor->visitChildren(this); -} -//----------------- AlterTableClauseModifyCodecContext ------------------------------------------------------------------ - -tree::TerminalNode* 
ClickHouseParser::AlterTableClauseModifyCodecContext::MODIFY() { - return getToken(ClickHouseParser::MODIFY, 0); -} - -tree::TerminalNode* ClickHouseParser::AlterTableClauseModifyCodecContext::COLUMN() { - return getToken(ClickHouseParser::COLUMN, 0); -} - -ClickHouseParser::NestedIdentifierContext* ClickHouseParser::AlterTableClauseModifyCodecContext::nestedIdentifier() { - return getRuleContext(0); -} - -ClickHouseParser::CodecExprContext* ClickHouseParser::AlterTableClauseModifyCodecContext::codecExpr() { - return getRuleContext(0); -} - -tree::TerminalNode* ClickHouseParser::AlterTableClauseModifyCodecContext::IF() { - return getToken(ClickHouseParser::IF, 0); -} - -tree::TerminalNode* ClickHouseParser::AlterTableClauseModifyCodecContext::EXISTS() { - return getToken(ClickHouseParser::EXISTS, 0); -} - -ClickHouseParser::AlterTableClauseModifyCodecContext::AlterTableClauseModifyCodecContext(AlterTableClauseContext *ctx) { copyFrom(ctx); } - -antlrcpp::Any ClickHouseParser::AlterTableClauseModifyCodecContext::accept(tree::ParseTreeVisitor *visitor) { - if (auto parserVisitor = dynamic_cast(visitor)) - return parserVisitor->visitAlterTableClauseModifyCodec(this); - else - return visitor->visitChildren(this); -} -//----------------- AlterTableClauseAttachContext ------------------------------------------------------------------ - -tree::TerminalNode* ClickHouseParser::AlterTableClauseAttachContext::ATTACH() { - return getToken(ClickHouseParser::ATTACH, 0); -} - -ClickHouseParser::PartitionClauseContext* ClickHouseParser::AlterTableClauseAttachContext::partitionClause() { - return getRuleContext(0); -} - -tree::TerminalNode* ClickHouseParser::AlterTableClauseAttachContext::FROM() { - return getToken(ClickHouseParser::FROM, 0); -} - -ClickHouseParser::TableIdentifierContext* ClickHouseParser::AlterTableClauseAttachContext::tableIdentifier() { - return getRuleContext(0); -} - -ClickHouseParser::AlterTableClauseAttachContext::AlterTableClauseAttachContext(AlterTableClauseContext *ctx) { copyFrom(ctx); } - -antlrcpp::Any ClickHouseParser::AlterTableClauseAttachContext::accept(tree::ParseTreeVisitor *visitor) { - if (auto parserVisitor = dynamic_cast(visitor)) - return parserVisitor->visitAlterTableClauseAttach(this); - else - return visitor->visitChildren(this); -} -//----------------- AlterTableClauseDropProjectionContext ------------------------------------------------------------------ - -tree::TerminalNode* ClickHouseParser::AlterTableClauseDropProjectionContext::DROP() { - return getToken(ClickHouseParser::DROP, 0); -} - -tree::TerminalNode* ClickHouseParser::AlterTableClauseDropProjectionContext::PROJECTION() { - return getToken(ClickHouseParser::PROJECTION, 0); -} - -ClickHouseParser::NestedIdentifierContext* ClickHouseParser::AlterTableClauseDropProjectionContext::nestedIdentifier() { - return getRuleContext(0); -} - -tree::TerminalNode* ClickHouseParser::AlterTableClauseDropProjectionContext::IF() { - return getToken(ClickHouseParser::IF, 0); -} - -tree::TerminalNode* ClickHouseParser::AlterTableClauseDropProjectionContext::EXISTS() { - return getToken(ClickHouseParser::EXISTS, 0); -} - -ClickHouseParser::AlterTableClauseDropProjectionContext::AlterTableClauseDropProjectionContext(AlterTableClauseContext *ctx) { copyFrom(ctx); } - -antlrcpp::Any ClickHouseParser::AlterTableClauseDropProjectionContext::accept(tree::ParseTreeVisitor *visitor) { - if (auto parserVisitor = dynamic_cast(visitor)) - return parserVisitor->visitAlterTableClauseDropProjection(this); - else - return 
visitor->visitChildren(this); -} -//----------------- AlterTableClauseDropIndexContext ------------------------------------------------------------------ - -tree::TerminalNode* ClickHouseParser::AlterTableClauseDropIndexContext::DROP() { - return getToken(ClickHouseParser::DROP, 0); -} - -tree::TerminalNode* ClickHouseParser::AlterTableClauseDropIndexContext::INDEX() { - return getToken(ClickHouseParser::INDEX, 0); -} - -ClickHouseParser::NestedIdentifierContext* ClickHouseParser::AlterTableClauseDropIndexContext::nestedIdentifier() { - return getRuleContext(0); -} - -tree::TerminalNode* ClickHouseParser::AlterTableClauseDropIndexContext::IF() { - return getToken(ClickHouseParser::IF, 0); -} - -tree::TerminalNode* ClickHouseParser::AlterTableClauseDropIndexContext::EXISTS() { - return getToken(ClickHouseParser::EXISTS, 0); -} - -ClickHouseParser::AlterTableClauseDropIndexContext::AlterTableClauseDropIndexContext(AlterTableClauseContext *ctx) { copyFrom(ctx); } - -antlrcpp::Any ClickHouseParser::AlterTableClauseDropIndexContext::accept(tree::ParseTreeVisitor *visitor) { - if (auto parserVisitor = dynamic_cast(visitor)) - return parserVisitor->visitAlterTableClauseDropIndex(this); - else - return visitor->visitChildren(this); -} -//----------------- AlterTableClauseModifyCommentContext ------------------------------------------------------------------ - -tree::TerminalNode* ClickHouseParser::AlterTableClauseModifyCommentContext::MODIFY() { - return getToken(ClickHouseParser::MODIFY, 0); -} - -tree::TerminalNode* ClickHouseParser::AlterTableClauseModifyCommentContext::COLUMN() { - return getToken(ClickHouseParser::COLUMN, 0); -} - -ClickHouseParser::NestedIdentifierContext* ClickHouseParser::AlterTableClauseModifyCommentContext::nestedIdentifier() { - return getRuleContext(0); -} - -tree::TerminalNode* ClickHouseParser::AlterTableClauseModifyCommentContext::COMMENT() { - return getToken(ClickHouseParser::COMMENT, 0); -} - -tree::TerminalNode* ClickHouseParser::AlterTableClauseModifyCommentContext::STRING_LITERAL() { - return getToken(ClickHouseParser::STRING_LITERAL, 0); -} - -tree::TerminalNode* ClickHouseParser::AlterTableClauseModifyCommentContext::IF() { - return getToken(ClickHouseParser::IF, 0); -} - -tree::TerminalNode* ClickHouseParser::AlterTableClauseModifyCommentContext::EXISTS() { - return getToken(ClickHouseParser::EXISTS, 0); -} - -ClickHouseParser::AlterTableClauseModifyCommentContext::AlterTableClauseModifyCommentContext(AlterTableClauseContext *ctx) { copyFrom(ctx); } - -antlrcpp::Any ClickHouseParser::AlterTableClauseModifyCommentContext::accept(tree::ParseTreeVisitor *visitor) { - if (auto parserVisitor = dynamic_cast(visitor)) - return parserVisitor->visitAlterTableClauseModifyComment(this); - else - return visitor->visitChildren(this); -} -//----------------- AlterTableClauseModifyTTLContext ------------------------------------------------------------------ - -tree::TerminalNode* ClickHouseParser::AlterTableClauseModifyTTLContext::MODIFY() { - return getToken(ClickHouseParser::MODIFY, 0); -} - -ClickHouseParser::TtlClauseContext* ClickHouseParser::AlterTableClauseModifyTTLContext::ttlClause() { - return getRuleContext(0); -} - -ClickHouseParser::AlterTableClauseModifyTTLContext::AlterTableClauseModifyTTLContext(AlterTableClauseContext *ctx) { copyFrom(ctx); } - -antlrcpp::Any ClickHouseParser::AlterTableClauseModifyTTLContext::accept(tree::ParseTreeVisitor *visitor) { - if (auto parserVisitor = dynamic_cast(visitor)) - return 
parserVisitor->visitAlterTableClauseModifyTTL(this); - else - return visitor->visitChildren(this); -} -//----------------- AlterTableClauseAddProjectionContext ------------------------------------------------------------------ - -tree::TerminalNode* ClickHouseParser::AlterTableClauseAddProjectionContext::ADD() { - return getToken(ClickHouseParser::ADD, 0); -} - -tree::TerminalNode* ClickHouseParser::AlterTableClauseAddProjectionContext::PROJECTION() { - return getToken(ClickHouseParser::PROJECTION, 0); -} - -ClickHouseParser::TableProjectionDfntContext* ClickHouseParser::AlterTableClauseAddProjectionContext::tableProjectionDfnt() { - return getRuleContext(0); -} - -tree::TerminalNode* ClickHouseParser::AlterTableClauseAddProjectionContext::IF() { - return getToken(ClickHouseParser::IF, 0); -} - -tree::TerminalNode* ClickHouseParser::AlterTableClauseAddProjectionContext::NOT() { - return getToken(ClickHouseParser::NOT, 0); -} - -tree::TerminalNode* ClickHouseParser::AlterTableClauseAddProjectionContext::EXISTS() { - return getToken(ClickHouseParser::EXISTS, 0); -} - -tree::TerminalNode* ClickHouseParser::AlterTableClauseAddProjectionContext::AFTER() { - return getToken(ClickHouseParser::AFTER, 0); -} - -ClickHouseParser::NestedIdentifierContext* ClickHouseParser::AlterTableClauseAddProjectionContext::nestedIdentifier() { - return getRuleContext(0); -} - -ClickHouseParser::AlterTableClauseAddProjectionContext::AlterTableClauseAddProjectionContext(AlterTableClauseContext *ctx) { copyFrom(ctx); } - -antlrcpp::Any ClickHouseParser::AlterTableClauseAddProjectionContext::accept(tree::ParseTreeVisitor *visitor) { - if (auto parserVisitor = dynamic_cast(visitor)) - return parserVisitor->visitAlterTableClauseAddProjection(this); - else - return visitor->visitChildren(this); -} -//----------------- AlterTableClauseAddColumnContext ------------------------------------------------------------------ - -tree::TerminalNode* ClickHouseParser::AlterTableClauseAddColumnContext::ADD() { - return getToken(ClickHouseParser::ADD, 0); -} - -tree::TerminalNode* ClickHouseParser::AlterTableClauseAddColumnContext::COLUMN() { - return getToken(ClickHouseParser::COLUMN, 0); -} - -ClickHouseParser::TableColumnDfntContext* ClickHouseParser::AlterTableClauseAddColumnContext::tableColumnDfnt() { - return getRuleContext(0); -} - -tree::TerminalNode* ClickHouseParser::AlterTableClauseAddColumnContext::IF() { - return getToken(ClickHouseParser::IF, 0); -} - -tree::TerminalNode* ClickHouseParser::AlterTableClauseAddColumnContext::NOT() { - return getToken(ClickHouseParser::NOT, 0); -} - -tree::TerminalNode* ClickHouseParser::AlterTableClauseAddColumnContext::EXISTS() { - return getToken(ClickHouseParser::EXISTS, 0); -} - -tree::TerminalNode* ClickHouseParser::AlterTableClauseAddColumnContext::AFTER() { - return getToken(ClickHouseParser::AFTER, 0); -} - -ClickHouseParser::NestedIdentifierContext* ClickHouseParser::AlterTableClauseAddColumnContext::nestedIdentifier() { - return getRuleContext(0); -} - -ClickHouseParser::AlterTableClauseAddColumnContext::AlterTableClauseAddColumnContext(AlterTableClauseContext *ctx) { copyFrom(ctx); } - -antlrcpp::Any ClickHouseParser::AlterTableClauseAddColumnContext::accept(tree::ParseTreeVisitor *visitor) { - if (auto parserVisitor = dynamic_cast(visitor)) - return parserVisitor->visitAlterTableClauseAddColumn(this); - else - return visitor->visitChildren(this); -} -ClickHouseParser::AlterTableClauseContext* ClickHouseParser::alterTableClause() { - AlterTableClauseContext *_localctx = 
_tracker.createInstance(_ctx, getState()); - enterRule(_localctx, 6, ClickHouseParser::RuleAlterTableClause); - size_t _la = 0; - - auto onExit = finally([=] { - exitRule(); - }); - try { - setState(482); - _errHandler->sync(this); - switch (getInterpreter()->adaptivePredict(_input, 35, _ctx)) { - case 1: { - _localctx = dynamic_cast(_tracker.createInstance(_localctx)); - enterOuterAlt(_localctx, 1); - setState(268); - match(ClickHouseParser::ADD); - setState(269); - match(ClickHouseParser::COLUMN); - setState(273); - _errHandler->sync(this); - - switch (getInterpreter()->adaptivePredict(_input, 7, _ctx)) { - case 1: { - setState(270); - match(ClickHouseParser::IF); - setState(271); - match(ClickHouseParser::NOT); - setState(272); - match(ClickHouseParser::EXISTS); - break; - } - - } - setState(275); - tableColumnDfnt(); - setState(278); - _errHandler->sync(this); - - _la = _input->LA(1); - if (_la == ClickHouseParser::AFTER) { - setState(276); - match(ClickHouseParser::AFTER); - setState(277); - nestedIdentifier(); - } - break; - } - - case 2: { - _localctx = dynamic_cast(_tracker.createInstance(_localctx)); - enterOuterAlt(_localctx, 2); - setState(280); - match(ClickHouseParser::ADD); - setState(281); - match(ClickHouseParser::INDEX); - setState(285); - _errHandler->sync(this); - - switch (getInterpreter()->adaptivePredict(_input, 9, _ctx)) { - case 1: { - setState(282); - match(ClickHouseParser::IF); - setState(283); - match(ClickHouseParser::NOT); - setState(284); - match(ClickHouseParser::EXISTS); - break; - } - - } - setState(287); - tableIndexDfnt(); - setState(290); - _errHandler->sync(this); - - _la = _input->LA(1); - if (_la == ClickHouseParser::AFTER) { - setState(288); - match(ClickHouseParser::AFTER); - setState(289); - nestedIdentifier(); - } - break; - } - - case 3: { - _localctx = dynamic_cast(_tracker.createInstance(_localctx)); - enterOuterAlt(_localctx, 3); - setState(292); - match(ClickHouseParser::ADD); - setState(293); - match(ClickHouseParser::PROJECTION); - setState(297); - _errHandler->sync(this); - - switch (getInterpreter()->adaptivePredict(_input, 11, _ctx)) { - case 1: { - setState(294); - match(ClickHouseParser::IF); - setState(295); - match(ClickHouseParser::NOT); - setState(296); - match(ClickHouseParser::EXISTS); - break; - } - - } - setState(299); - tableProjectionDfnt(); - setState(302); - _errHandler->sync(this); - - _la = _input->LA(1); - if (_la == ClickHouseParser::AFTER) { - setState(300); - match(ClickHouseParser::AFTER); - setState(301); - nestedIdentifier(); - } - break; - } - - case 4: { - _localctx = dynamic_cast(_tracker.createInstance(_localctx)); - enterOuterAlt(_localctx, 4); - setState(304); - match(ClickHouseParser::ATTACH); - setState(305); - partitionClause(); - setState(308); - _errHandler->sync(this); - - _la = _input->LA(1); - if (_la == ClickHouseParser::FROM) { - setState(306); - match(ClickHouseParser::FROM); - setState(307); - tableIdentifier(); - } - break; - } - - case 5: { - _localctx = dynamic_cast(_tracker.createInstance(_localctx)); - enterOuterAlt(_localctx, 5); - setState(310); - match(ClickHouseParser::CLEAR); - setState(311); - match(ClickHouseParser::COLUMN); - setState(314); - _errHandler->sync(this); - - switch (getInterpreter()->adaptivePredict(_input, 14, _ctx)) { - case 1: { - setState(312); - match(ClickHouseParser::IF); - setState(313); - match(ClickHouseParser::EXISTS); - break; - } - - } - setState(316); - nestedIdentifier(); - setState(319); - _errHandler->sync(this); - - _la = _input->LA(1); - if (_la == 
ClickHouseParser::IN) { - setState(317); - match(ClickHouseParser::IN); - setState(318); - partitionClause(); - } - break; - } - - case 6: { - _localctx = dynamic_cast(_tracker.createInstance(_localctx)); - enterOuterAlt(_localctx, 6); - setState(321); - match(ClickHouseParser::CLEAR); - setState(322); - match(ClickHouseParser::INDEX); - setState(325); - _errHandler->sync(this); - - switch (getInterpreter()->adaptivePredict(_input, 16, _ctx)) { - case 1: { - setState(323); - match(ClickHouseParser::IF); - setState(324); - match(ClickHouseParser::EXISTS); - break; - } - - } - setState(327); - nestedIdentifier(); - setState(330); - _errHandler->sync(this); - - _la = _input->LA(1); - if (_la == ClickHouseParser::IN) { - setState(328); - match(ClickHouseParser::IN); - setState(329); - partitionClause(); - } - break; - } - - case 7: { - _localctx = dynamic_cast(_tracker.createInstance(_localctx)); - enterOuterAlt(_localctx, 7); - setState(332); - match(ClickHouseParser::CLEAR); - setState(333); - match(ClickHouseParser::PROJECTION); - setState(336); - _errHandler->sync(this); - - switch (getInterpreter()->adaptivePredict(_input, 18, _ctx)) { - case 1: { - setState(334); - match(ClickHouseParser::IF); - setState(335); - match(ClickHouseParser::EXISTS); - break; - } - - } - setState(338); - nestedIdentifier(); - setState(341); - _errHandler->sync(this); - - _la = _input->LA(1); - if (_la == ClickHouseParser::IN) { - setState(339); - match(ClickHouseParser::IN); - setState(340); - partitionClause(); - } - break; - } - - case 8: { - _localctx = dynamic_cast(_tracker.createInstance(_localctx)); - enterOuterAlt(_localctx, 8); - setState(343); - match(ClickHouseParser::COMMENT); - setState(344); - match(ClickHouseParser::COLUMN); - setState(347); - _errHandler->sync(this); - - switch (getInterpreter()->adaptivePredict(_input, 20, _ctx)) { - case 1: { - setState(345); - match(ClickHouseParser::IF); - setState(346); - match(ClickHouseParser::EXISTS); - break; - } - - } - setState(349); - nestedIdentifier(); - setState(350); - match(ClickHouseParser::STRING_LITERAL); - break; - } - - case 9: { - _localctx = dynamic_cast(_tracker.createInstance(_localctx)); - enterOuterAlt(_localctx, 9); - setState(352); - match(ClickHouseParser::DELETE); - setState(353); - match(ClickHouseParser::WHERE); - setState(354); - columnExpr(0); - break; - } - - case 10: { - _localctx = dynamic_cast(_tracker.createInstance(_localctx)); - enterOuterAlt(_localctx, 10); - setState(355); - match(ClickHouseParser::DETACH); - setState(356); - partitionClause(); - break; - } - - case 11: { - _localctx = dynamic_cast(_tracker.createInstance(_localctx)); - enterOuterAlt(_localctx, 11); - setState(357); - match(ClickHouseParser::DROP); - setState(358); - match(ClickHouseParser::COLUMN); - setState(361); - _errHandler->sync(this); - - switch (getInterpreter()->adaptivePredict(_input, 21, _ctx)) { - case 1: { - setState(359); - match(ClickHouseParser::IF); - setState(360); - match(ClickHouseParser::EXISTS); - break; - } - - } - setState(363); - nestedIdentifier(); - break; - } - - case 12: { - _localctx = dynamic_cast(_tracker.createInstance(_localctx)); - enterOuterAlt(_localctx, 12); - setState(364); - match(ClickHouseParser::DROP); - setState(365); - match(ClickHouseParser::INDEX); - setState(368); - _errHandler->sync(this); - - switch (getInterpreter()->adaptivePredict(_input, 22, _ctx)) { - case 1: { - setState(366); - match(ClickHouseParser::IF); - setState(367); - match(ClickHouseParser::EXISTS); - break; - } - - } - setState(370); - 
nestedIdentifier(); - break; - } - - case 13: { - _localctx = dynamic_cast(_tracker.createInstance(_localctx)); - enterOuterAlt(_localctx, 13); - setState(371); - match(ClickHouseParser::DROP); - setState(372); - match(ClickHouseParser::PROJECTION); - setState(375); - _errHandler->sync(this); - - switch (getInterpreter()->adaptivePredict(_input, 23, _ctx)) { - case 1: { - setState(373); - match(ClickHouseParser::IF); - setState(374); - match(ClickHouseParser::EXISTS); - break; - } - - } - setState(377); - nestedIdentifier(); - break; - } - - case 14: { - _localctx = dynamic_cast(_tracker.createInstance(_localctx)); - enterOuterAlt(_localctx, 14); - setState(378); - match(ClickHouseParser::DROP); - setState(379); - partitionClause(); - break; - } - - case 15: { - _localctx = dynamic_cast(_tracker.createInstance(_localctx)); - enterOuterAlt(_localctx, 15); - setState(380); - match(ClickHouseParser::FREEZE); - setState(382); - _errHandler->sync(this); - - _la = _input->LA(1); - if (_la == ClickHouseParser::PARTITION) { - setState(381); - partitionClause(); - } - break; - } - - case 16: { - _localctx = dynamic_cast(_tracker.createInstance(_localctx)); - enterOuterAlt(_localctx, 16); - setState(384); - match(ClickHouseParser::MATERIALIZE); - setState(385); - match(ClickHouseParser::INDEX); - setState(388); - _errHandler->sync(this); - - switch (getInterpreter()->adaptivePredict(_input, 25, _ctx)) { - case 1: { - setState(386); - match(ClickHouseParser::IF); - setState(387); - match(ClickHouseParser::EXISTS); - break; - } - - } - setState(390); - nestedIdentifier(); - setState(393); - _errHandler->sync(this); - - _la = _input->LA(1); - if (_la == ClickHouseParser::IN) { - setState(391); - match(ClickHouseParser::IN); - setState(392); - partitionClause(); - } - break; - } - - case 17: { - _localctx = dynamic_cast(_tracker.createInstance(_localctx)); - enterOuterAlt(_localctx, 17); - setState(395); - match(ClickHouseParser::MATERIALIZE); - setState(396); - match(ClickHouseParser::PROJECTION); - setState(399); - _errHandler->sync(this); - - switch (getInterpreter()->adaptivePredict(_input, 27, _ctx)) { - case 1: { - setState(397); - match(ClickHouseParser::IF); - setState(398); - match(ClickHouseParser::EXISTS); - break; - } - - } - setState(401); - nestedIdentifier(); - setState(404); - _errHandler->sync(this); - - _la = _input->LA(1); - if (_la == ClickHouseParser::IN) { - setState(402); - match(ClickHouseParser::IN); - setState(403); - partitionClause(); - } - break; - } - - case 18: { - _localctx = dynamic_cast(_tracker.createInstance(_localctx)); - enterOuterAlt(_localctx, 18); - setState(406); - match(ClickHouseParser::MODIFY); - setState(407); - match(ClickHouseParser::COLUMN); - setState(410); - _errHandler->sync(this); - - switch (getInterpreter()->adaptivePredict(_input, 29, _ctx)) { - case 1: { - setState(408); - match(ClickHouseParser::IF); - setState(409); - match(ClickHouseParser::EXISTS); - break; - } - - } - setState(412); - nestedIdentifier(); - setState(413); - codecExpr(); - break; - } - - case 19: { - _localctx = dynamic_cast(_tracker.createInstance(_localctx)); - enterOuterAlt(_localctx, 19); - setState(415); - match(ClickHouseParser::MODIFY); - setState(416); - match(ClickHouseParser::COLUMN); - setState(419); - _errHandler->sync(this); - - switch (getInterpreter()->adaptivePredict(_input, 30, _ctx)) { - case 1: { - setState(417); - match(ClickHouseParser::IF); - setState(418); - match(ClickHouseParser::EXISTS); - break; - } - - } - setState(421); - nestedIdentifier(); - 
setState(422); - match(ClickHouseParser::COMMENT); - setState(423); - match(ClickHouseParser::STRING_LITERAL); - break; - } - - case 20: { - _localctx = dynamic_cast(_tracker.createInstance(_localctx)); - enterOuterAlt(_localctx, 20); - setState(425); - match(ClickHouseParser::MODIFY); - setState(426); - match(ClickHouseParser::COLUMN); - setState(429); - _errHandler->sync(this); - - switch (getInterpreter()->adaptivePredict(_input, 31, _ctx)) { - case 1: { - setState(427); - match(ClickHouseParser::IF); - setState(428); - match(ClickHouseParser::EXISTS); - break; - } - - } - setState(431); - nestedIdentifier(); - setState(432); - match(ClickHouseParser::REMOVE); - setState(433); - tableColumnPropertyType(); - break; - } - - case 21: { - _localctx = dynamic_cast(_tracker.createInstance(_localctx)); - enterOuterAlt(_localctx, 21); - setState(435); - match(ClickHouseParser::MODIFY); - setState(436); - match(ClickHouseParser::COLUMN); - setState(439); - _errHandler->sync(this); - - switch (getInterpreter()->adaptivePredict(_input, 32, _ctx)) { - case 1: { - setState(437); - match(ClickHouseParser::IF); - setState(438); - match(ClickHouseParser::EXISTS); - break; - } - - } - setState(441); - tableColumnDfnt(); - break; - } - - case 22: { - _localctx = dynamic_cast(_tracker.createInstance(_localctx)); - enterOuterAlt(_localctx, 22); - setState(442); - match(ClickHouseParser::MODIFY); - setState(443); - match(ClickHouseParser::ORDER); - setState(444); - match(ClickHouseParser::BY); - setState(445); - columnExpr(0); - break; - } - - case 23: { - _localctx = dynamic_cast(_tracker.createInstance(_localctx)); - enterOuterAlt(_localctx, 23); - setState(446); - match(ClickHouseParser::MODIFY); - setState(447); - ttlClause(); - break; - } - - case 24: { - _localctx = dynamic_cast(_tracker.createInstance(_localctx)); - enterOuterAlt(_localctx, 24); - setState(448); - match(ClickHouseParser::MOVE); - setState(449); - partitionClause(); - setState(459); - _errHandler->sync(this); - switch (getInterpreter()->adaptivePredict(_input, 33, _ctx)) { - case 1: { - setState(450); - match(ClickHouseParser::TO); - setState(451); - match(ClickHouseParser::DISK); - setState(452); - match(ClickHouseParser::STRING_LITERAL); - break; - } - - case 2: { - setState(453); - match(ClickHouseParser::TO); - setState(454); - match(ClickHouseParser::VOLUME); - setState(455); - match(ClickHouseParser::STRING_LITERAL); - break; - } - - case 3: { - setState(456); - match(ClickHouseParser::TO); - setState(457); - match(ClickHouseParser::TABLE); - setState(458); - tableIdentifier(); - break; - } - - } - break; - } - - case 25: { - _localctx = dynamic_cast(_tracker.createInstance(_localctx)); - enterOuterAlt(_localctx, 25); - setState(461); - match(ClickHouseParser::REMOVE); - setState(462); - match(ClickHouseParser::TTL); - break; - } - - case 26: { - _localctx = dynamic_cast(_tracker.createInstance(_localctx)); - enterOuterAlt(_localctx, 26); - setState(463); - match(ClickHouseParser::RENAME); - setState(464); - match(ClickHouseParser::COLUMN); - setState(467); - _errHandler->sync(this); - - switch (getInterpreter()->adaptivePredict(_input, 34, _ctx)) { - case 1: { - setState(465); - match(ClickHouseParser::IF); - setState(466); - match(ClickHouseParser::EXISTS); - break; - } - - } - setState(469); - nestedIdentifier(); - setState(470); - match(ClickHouseParser::TO); - setState(471); - nestedIdentifier(); - break; - } - - case 27: { - _localctx = dynamic_cast(_tracker.createInstance(_localctx)); - enterOuterAlt(_localctx, 27); - 
setState(473); - match(ClickHouseParser::REPLACE); - setState(474); - partitionClause(); - setState(475); - match(ClickHouseParser::FROM); - setState(476); - tableIdentifier(); - break; - } - - case 28: { - _localctx = dynamic_cast(_tracker.createInstance(_localctx)); - enterOuterAlt(_localctx, 28); - setState(478); - match(ClickHouseParser::UPDATE); - setState(479); - assignmentExprList(); - setState(480); - whereClause(); - break; - } - - } - - } - catch (RecognitionException &e) { - _errHandler->reportError(this, e); - _localctx->exception = std::current_exception(); - _errHandler->recover(this, _localctx->exception); - } - - return _localctx; -} - -//----------------- AssignmentExprListContext ------------------------------------------------------------------ - -ClickHouseParser::AssignmentExprListContext::AssignmentExprListContext(ParserRuleContext *parent, size_t invokingState) - : ParserRuleContext(parent, invokingState) { -} - -std::vector ClickHouseParser::AssignmentExprListContext::assignmentExpr() { - return getRuleContexts(); -} - -ClickHouseParser::AssignmentExprContext* ClickHouseParser::AssignmentExprListContext::assignmentExpr(size_t i) { - return getRuleContext(i); -} - -std::vector ClickHouseParser::AssignmentExprListContext::COMMA() { - return getTokens(ClickHouseParser::COMMA); -} - -tree::TerminalNode* ClickHouseParser::AssignmentExprListContext::COMMA(size_t i) { - return getToken(ClickHouseParser::COMMA, i); -} - - -size_t ClickHouseParser::AssignmentExprListContext::getRuleIndex() const { - return ClickHouseParser::RuleAssignmentExprList; -} - -antlrcpp::Any ClickHouseParser::AssignmentExprListContext::accept(tree::ParseTreeVisitor *visitor) { - if (auto parserVisitor = dynamic_cast(visitor)) - return parserVisitor->visitAssignmentExprList(this); - else - return visitor->visitChildren(this); -} - -ClickHouseParser::AssignmentExprListContext* ClickHouseParser::assignmentExprList() { - AssignmentExprListContext *_localctx = _tracker.createInstance(_ctx, getState()); - enterRule(_localctx, 8, ClickHouseParser::RuleAssignmentExprList); - size_t _la = 0; - - auto onExit = finally([=] { - exitRule(); - }); - try { - enterOuterAlt(_localctx, 1); - setState(484); - assignmentExpr(); - setState(489); - _errHandler->sync(this); - _la = _input->LA(1); - while (_la == ClickHouseParser::COMMA) { - setState(485); - match(ClickHouseParser::COMMA); - setState(486); - assignmentExpr(); - setState(491); - _errHandler->sync(this); - _la = _input->LA(1); - } - - } - catch (RecognitionException &e) { - _errHandler->reportError(this, e); - _localctx->exception = std::current_exception(); - _errHandler->recover(this, _localctx->exception); - } - - return _localctx; -} - -//----------------- AssignmentExprContext ------------------------------------------------------------------ - -ClickHouseParser::AssignmentExprContext::AssignmentExprContext(ParserRuleContext *parent, size_t invokingState) - : ParserRuleContext(parent, invokingState) { -} - -ClickHouseParser::NestedIdentifierContext* ClickHouseParser::AssignmentExprContext::nestedIdentifier() { - return getRuleContext(0); -} - -tree::TerminalNode* ClickHouseParser::AssignmentExprContext::EQ_SINGLE() { - return getToken(ClickHouseParser::EQ_SINGLE, 0); -} - -ClickHouseParser::ColumnExprContext* ClickHouseParser::AssignmentExprContext::columnExpr() { - return getRuleContext(0); -} - - -size_t ClickHouseParser::AssignmentExprContext::getRuleIndex() const { - return ClickHouseParser::RuleAssignmentExpr; -} - -antlrcpp::Any 
ClickHouseParser::AssignmentExprContext::accept(tree::ParseTreeVisitor *visitor) { - if (auto parserVisitor = dynamic_cast(visitor)) - return parserVisitor->visitAssignmentExpr(this); - else - return visitor->visitChildren(this); -} - -ClickHouseParser::AssignmentExprContext* ClickHouseParser::assignmentExpr() { - AssignmentExprContext *_localctx = _tracker.createInstance(_ctx, getState()); - enterRule(_localctx, 10, ClickHouseParser::RuleAssignmentExpr); - - auto onExit = finally([=] { - exitRule(); - }); - try { - enterOuterAlt(_localctx, 1); - setState(492); - nestedIdentifier(); - setState(493); - match(ClickHouseParser::EQ_SINGLE); - setState(494); - columnExpr(0); - - } - catch (RecognitionException &e) { - _errHandler->reportError(this, e); - _localctx->exception = std::current_exception(); - _errHandler->recover(this, _localctx->exception); - } - - return _localctx; -} - -//----------------- TableColumnPropertyTypeContext ------------------------------------------------------------------ - -ClickHouseParser::TableColumnPropertyTypeContext::TableColumnPropertyTypeContext(ParserRuleContext *parent, size_t invokingState) - : ParserRuleContext(parent, invokingState) { -} - -tree::TerminalNode* ClickHouseParser::TableColumnPropertyTypeContext::ALIAS() { - return getToken(ClickHouseParser::ALIAS, 0); -} - -tree::TerminalNode* ClickHouseParser::TableColumnPropertyTypeContext::CODEC() { - return getToken(ClickHouseParser::CODEC, 0); -} - -tree::TerminalNode* ClickHouseParser::TableColumnPropertyTypeContext::COMMENT() { - return getToken(ClickHouseParser::COMMENT, 0); -} - -tree::TerminalNode* ClickHouseParser::TableColumnPropertyTypeContext::DEFAULT() { - return getToken(ClickHouseParser::DEFAULT, 0); -} - -tree::TerminalNode* ClickHouseParser::TableColumnPropertyTypeContext::MATERIALIZED() { - return getToken(ClickHouseParser::MATERIALIZED, 0); -} - -tree::TerminalNode* ClickHouseParser::TableColumnPropertyTypeContext::TTL() { - return getToken(ClickHouseParser::TTL, 0); -} - - -size_t ClickHouseParser::TableColumnPropertyTypeContext::getRuleIndex() const { - return ClickHouseParser::RuleTableColumnPropertyType; -} - -antlrcpp::Any ClickHouseParser::TableColumnPropertyTypeContext::accept(tree::ParseTreeVisitor *visitor) { - if (auto parserVisitor = dynamic_cast(visitor)) - return parserVisitor->visitTableColumnPropertyType(this); - else - return visitor->visitChildren(this); -} - -ClickHouseParser::TableColumnPropertyTypeContext* ClickHouseParser::tableColumnPropertyType() { - TableColumnPropertyTypeContext *_localctx = _tracker.createInstance(_ctx, getState()); - enterRule(_localctx, 12, ClickHouseParser::RuleTableColumnPropertyType); - size_t _la = 0; - - auto onExit = finally([=] { - exitRule(); - }); - try { - enterOuterAlt(_localctx, 1); - setState(496); - _la = _input->LA(1); - if (!((((_la & ~ 0x3fULL) == 0) && - ((1ULL << _la) & ((1ULL << ClickHouseParser::ALIAS) - | (1ULL << ClickHouseParser::CODEC) - | (1ULL << ClickHouseParser::COMMENT) - | (1ULL << ClickHouseParser::DEFAULT))) != 0) || _la == ClickHouseParser::MATERIALIZED || _la == ClickHouseParser::TTL)) { - _errHandler->recoverInline(this); - } - else { - _errHandler->reportMatch(this); - consume(); - } - - } - catch (RecognitionException &e) { - _errHandler->reportError(this, e); - _localctx->exception = std::current_exception(); - _errHandler->recover(this, _localctx->exception); - } - - return _localctx; -} - -//----------------- PartitionClauseContext ------------------------------------------------------------------ - 
-ClickHouseParser::PartitionClauseContext::PartitionClauseContext(ParserRuleContext *parent, size_t invokingState) - : ParserRuleContext(parent, invokingState) { -} - -tree::TerminalNode* ClickHouseParser::PartitionClauseContext::PARTITION() { - return getToken(ClickHouseParser::PARTITION, 0); -} - -ClickHouseParser::ColumnExprContext* ClickHouseParser::PartitionClauseContext::columnExpr() { - return getRuleContext(0); -} - -tree::TerminalNode* ClickHouseParser::PartitionClauseContext::ID() { - return getToken(ClickHouseParser::ID, 0); -} - -tree::TerminalNode* ClickHouseParser::PartitionClauseContext::STRING_LITERAL() { - return getToken(ClickHouseParser::STRING_LITERAL, 0); -} - - -size_t ClickHouseParser::PartitionClauseContext::getRuleIndex() const { - return ClickHouseParser::RulePartitionClause; -} - -antlrcpp::Any ClickHouseParser::PartitionClauseContext::accept(tree::ParseTreeVisitor *visitor) { - if (auto parserVisitor = dynamic_cast(visitor)) - return parserVisitor->visitPartitionClause(this); - else - return visitor->visitChildren(this); -} - -ClickHouseParser::PartitionClauseContext* ClickHouseParser::partitionClause() { - PartitionClauseContext *_localctx = _tracker.createInstance(_ctx, getState()); - enterRule(_localctx, 14, ClickHouseParser::RulePartitionClause); - - auto onExit = finally([=] { - exitRule(); - }); - try { - setState(503); - _errHandler->sync(this); - switch (getInterpreter()->adaptivePredict(_input, 37, _ctx)) { - case 1: { - enterOuterAlt(_localctx, 1); - setState(498); - match(ClickHouseParser::PARTITION); - setState(499); - columnExpr(0); - break; - } - - case 2: { - enterOuterAlt(_localctx, 2); - setState(500); - match(ClickHouseParser::PARTITION); - setState(501); - match(ClickHouseParser::ID); - setState(502); - match(ClickHouseParser::STRING_LITERAL); - break; - } - - } - - } - catch (RecognitionException &e) { - _errHandler->reportError(this, e); - _localctx->exception = std::current_exception(); - _errHandler->recover(this, _localctx->exception); - } - - return _localctx; -} - -//----------------- AttachStmtContext ------------------------------------------------------------------ - -ClickHouseParser::AttachStmtContext::AttachStmtContext(ParserRuleContext *parent, size_t invokingState) - : ParserRuleContext(parent, invokingState) { -} - - -size_t ClickHouseParser::AttachStmtContext::getRuleIndex() const { - return ClickHouseParser::RuleAttachStmt; -} - -void ClickHouseParser::AttachStmtContext::copyFrom(AttachStmtContext *ctx) { - ParserRuleContext::copyFrom(ctx); -} - -//----------------- AttachDictionaryStmtContext ------------------------------------------------------------------ - -tree::TerminalNode* ClickHouseParser::AttachDictionaryStmtContext::ATTACH() { - return getToken(ClickHouseParser::ATTACH, 0); -} - -tree::TerminalNode* ClickHouseParser::AttachDictionaryStmtContext::DICTIONARY() { - return getToken(ClickHouseParser::DICTIONARY, 0); -} - -ClickHouseParser::TableIdentifierContext* ClickHouseParser::AttachDictionaryStmtContext::tableIdentifier() { - return getRuleContext(0); -} - -ClickHouseParser::ClusterClauseContext* ClickHouseParser::AttachDictionaryStmtContext::clusterClause() { - return getRuleContext(0); -} - -ClickHouseParser::AttachDictionaryStmtContext::AttachDictionaryStmtContext(AttachStmtContext *ctx) { copyFrom(ctx); } - -antlrcpp::Any ClickHouseParser::AttachDictionaryStmtContext::accept(tree::ParseTreeVisitor *visitor) { - if (auto parserVisitor = dynamic_cast(visitor)) - return 
parserVisitor->visitAttachDictionaryStmt(this); - else - return visitor->visitChildren(this); -} -ClickHouseParser::AttachStmtContext* ClickHouseParser::attachStmt() { - AttachStmtContext *_localctx = _tracker.createInstance(_ctx, getState()); - enterRule(_localctx, 16, ClickHouseParser::RuleAttachStmt); - size_t _la = 0; - - auto onExit = finally([=] { - exitRule(); - }); - try { - _localctx = dynamic_cast(_tracker.createInstance(_localctx)); - enterOuterAlt(_localctx, 1); - setState(505); - match(ClickHouseParser::ATTACH); - setState(506); - match(ClickHouseParser::DICTIONARY); - setState(507); - tableIdentifier(); - setState(509); - _errHandler->sync(this); - - _la = _input->LA(1); - if (_la == ClickHouseParser::ON) { - setState(508); - clusterClause(); - } - - } - catch (RecognitionException &e) { - _errHandler->reportError(this, e); - _localctx->exception = std::current_exception(); - _errHandler->recover(this, _localctx->exception); - } - - return _localctx; -} - -//----------------- CheckStmtContext ------------------------------------------------------------------ - -ClickHouseParser::CheckStmtContext::CheckStmtContext(ParserRuleContext *parent, size_t invokingState) - : ParserRuleContext(parent, invokingState) { -} - -tree::TerminalNode* ClickHouseParser::CheckStmtContext::CHECK() { - return getToken(ClickHouseParser::CHECK, 0); -} - -tree::TerminalNode* ClickHouseParser::CheckStmtContext::TABLE() { - return getToken(ClickHouseParser::TABLE, 0); -} - -ClickHouseParser::TableIdentifierContext* ClickHouseParser::CheckStmtContext::tableIdentifier() { - return getRuleContext(0); -} - -ClickHouseParser::PartitionClauseContext* ClickHouseParser::CheckStmtContext::partitionClause() { - return getRuleContext(0); -} - - -size_t ClickHouseParser::CheckStmtContext::getRuleIndex() const { - return ClickHouseParser::RuleCheckStmt; -} - -antlrcpp::Any ClickHouseParser::CheckStmtContext::accept(tree::ParseTreeVisitor *visitor) { - if (auto parserVisitor = dynamic_cast(visitor)) - return parserVisitor->visitCheckStmt(this); - else - return visitor->visitChildren(this); -} - -ClickHouseParser::CheckStmtContext* ClickHouseParser::checkStmt() { - CheckStmtContext *_localctx = _tracker.createInstance(_ctx, getState()); - enterRule(_localctx, 18, ClickHouseParser::RuleCheckStmt); - size_t _la = 0; - - auto onExit = finally([=] { - exitRule(); - }); - try { - enterOuterAlt(_localctx, 1); - setState(511); - match(ClickHouseParser::CHECK); - setState(512); - match(ClickHouseParser::TABLE); - setState(513); - tableIdentifier(); - setState(515); - _errHandler->sync(this); - - _la = _input->LA(1); - if (_la == ClickHouseParser::PARTITION) { - setState(514); - partitionClause(); - } - - } - catch (RecognitionException &e) { - _errHandler->reportError(this, e); - _localctx->exception = std::current_exception(); - _errHandler->recover(this, _localctx->exception); - } - - return _localctx; -} - -//----------------- CreateStmtContext ------------------------------------------------------------------ - -ClickHouseParser::CreateStmtContext::CreateStmtContext(ParserRuleContext *parent, size_t invokingState) - : ParserRuleContext(parent, invokingState) { -} - - -size_t ClickHouseParser::CreateStmtContext::getRuleIndex() const { - return ClickHouseParser::RuleCreateStmt; -} - -void ClickHouseParser::CreateStmtContext::copyFrom(CreateStmtContext *ctx) { - ParserRuleContext::copyFrom(ctx); -} - -//----------------- CreateViewStmtContext ------------------------------------------------------------------ - 
-tree::TerminalNode* ClickHouseParser::CreateViewStmtContext::VIEW() { - return getToken(ClickHouseParser::VIEW, 0); -} - -ClickHouseParser::TableIdentifierContext* ClickHouseParser::CreateViewStmtContext::tableIdentifier() { - return getRuleContext(0); -} - -ClickHouseParser::SubqueryClauseContext* ClickHouseParser::CreateViewStmtContext::subqueryClause() { - return getRuleContext(0); -} - -tree::TerminalNode* ClickHouseParser::CreateViewStmtContext::ATTACH() { - return getToken(ClickHouseParser::ATTACH, 0); -} - -tree::TerminalNode* ClickHouseParser::CreateViewStmtContext::CREATE() { - return getToken(ClickHouseParser::CREATE, 0); -} - -tree::TerminalNode* ClickHouseParser::CreateViewStmtContext::OR() { - return getToken(ClickHouseParser::OR, 0); -} - -tree::TerminalNode* ClickHouseParser::CreateViewStmtContext::REPLACE() { - return getToken(ClickHouseParser::REPLACE, 0); -} - -tree::TerminalNode* ClickHouseParser::CreateViewStmtContext::IF() { - return getToken(ClickHouseParser::IF, 0); -} - -tree::TerminalNode* ClickHouseParser::CreateViewStmtContext::NOT() { - return getToken(ClickHouseParser::NOT, 0); -} - -tree::TerminalNode* ClickHouseParser::CreateViewStmtContext::EXISTS() { - return getToken(ClickHouseParser::EXISTS, 0); -} - -ClickHouseParser::UuidClauseContext* ClickHouseParser::CreateViewStmtContext::uuidClause() { - return getRuleContext(0); -} - -ClickHouseParser::ClusterClauseContext* ClickHouseParser::CreateViewStmtContext::clusterClause() { - return getRuleContext(0); -} - -ClickHouseParser::TableSchemaClauseContext* ClickHouseParser::CreateViewStmtContext::tableSchemaClause() { - return getRuleContext(0); -} - -ClickHouseParser::CreateViewStmtContext::CreateViewStmtContext(CreateStmtContext *ctx) { copyFrom(ctx); } - -antlrcpp::Any ClickHouseParser::CreateViewStmtContext::accept(tree::ParseTreeVisitor *visitor) { - if (auto parserVisitor = dynamic_cast(visitor)) - return parserVisitor->visitCreateViewStmt(this); - else - return visitor->visitChildren(this); -} -//----------------- CreateDictionaryStmtContext ------------------------------------------------------------------ - -tree::TerminalNode* ClickHouseParser::CreateDictionaryStmtContext::DICTIONARY() { - return getToken(ClickHouseParser::DICTIONARY, 0); -} - -ClickHouseParser::TableIdentifierContext* ClickHouseParser::CreateDictionaryStmtContext::tableIdentifier() { - return getRuleContext(0); -} - -ClickHouseParser::DictionarySchemaClauseContext* ClickHouseParser::CreateDictionaryStmtContext::dictionarySchemaClause() { - return getRuleContext(0); -} - -ClickHouseParser::DictionaryEngineClauseContext* ClickHouseParser::CreateDictionaryStmtContext::dictionaryEngineClause() { - return getRuleContext(0); -} - -tree::TerminalNode* ClickHouseParser::CreateDictionaryStmtContext::ATTACH() { - return getToken(ClickHouseParser::ATTACH, 0); -} - -tree::TerminalNode* ClickHouseParser::CreateDictionaryStmtContext::CREATE() { - return getToken(ClickHouseParser::CREATE, 0); -} - -tree::TerminalNode* ClickHouseParser::CreateDictionaryStmtContext::IF() { - return getToken(ClickHouseParser::IF, 0); -} - -tree::TerminalNode* ClickHouseParser::CreateDictionaryStmtContext::NOT() { - return getToken(ClickHouseParser::NOT, 0); -} - -tree::TerminalNode* ClickHouseParser::CreateDictionaryStmtContext::EXISTS() { - return getToken(ClickHouseParser::EXISTS, 0); -} - -ClickHouseParser::UuidClauseContext* ClickHouseParser::CreateDictionaryStmtContext::uuidClause() { - return getRuleContext(0); -} - -ClickHouseParser::ClusterClauseContext* 
ClickHouseParser::CreateDictionaryStmtContext::clusterClause() { - return getRuleContext(0); -} - -ClickHouseParser::CreateDictionaryStmtContext::CreateDictionaryStmtContext(CreateStmtContext *ctx) { copyFrom(ctx); } - -antlrcpp::Any ClickHouseParser::CreateDictionaryStmtContext::accept(tree::ParseTreeVisitor *visitor) { - if (auto parserVisitor = dynamic_cast(visitor)) - return parserVisitor->visitCreateDictionaryStmt(this); - else - return visitor->visitChildren(this); -} -//----------------- CreateDatabaseStmtContext ------------------------------------------------------------------ - -tree::TerminalNode* ClickHouseParser::CreateDatabaseStmtContext::DATABASE() { - return getToken(ClickHouseParser::DATABASE, 0); -} - -ClickHouseParser::DatabaseIdentifierContext* ClickHouseParser::CreateDatabaseStmtContext::databaseIdentifier() { - return getRuleContext(0); -} - -tree::TerminalNode* ClickHouseParser::CreateDatabaseStmtContext::ATTACH() { - return getToken(ClickHouseParser::ATTACH, 0); -} - -tree::TerminalNode* ClickHouseParser::CreateDatabaseStmtContext::CREATE() { - return getToken(ClickHouseParser::CREATE, 0); -} - -tree::TerminalNode* ClickHouseParser::CreateDatabaseStmtContext::IF() { - return getToken(ClickHouseParser::IF, 0); -} - -tree::TerminalNode* ClickHouseParser::CreateDatabaseStmtContext::NOT() { - return getToken(ClickHouseParser::NOT, 0); -} - -tree::TerminalNode* ClickHouseParser::CreateDatabaseStmtContext::EXISTS() { - return getToken(ClickHouseParser::EXISTS, 0); -} - -ClickHouseParser::ClusterClauseContext* ClickHouseParser::CreateDatabaseStmtContext::clusterClause() { - return getRuleContext(0); -} - -ClickHouseParser::EngineExprContext* ClickHouseParser::CreateDatabaseStmtContext::engineExpr() { - return getRuleContext(0); -} - -ClickHouseParser::CreateDatabaseStmtContext::CreateDatabaseStmtContext(CreateStmtContext *ctx) { copyFrom(ctx); } - -antlrcpp::Any ClickHouseParser::CreateDatabaseStmtContext::accept(tree::ParseTreeVisitor *visitor) { - if (auto parserVisitor = dynamic_cast(visitor)) - return parserVisitor->visitCreateDatabaseStmt(this); - else - return visitor->visitChildren(this); -} -//----------------- CreateLiveViewStmtContext ------------------------------------------------------------------ - -tree::TerminalNode* ClickHouseParser::CreateLiveViewStmtContext::LIVE() { - return getToken(ClickHouseParser::LIVE, 0); -} - -tree::TerminalNode* ClickHouseParser::CreateLiveViewStmtContext::VIEW() { - return getToken(ClickHouseParser::VIEW, 0); -} - -ClickHouseParser::TableIdentifierContext* ClickHouseParser::CreateLiveViewStmtContext::tableIdentifier() { - return getRuleContext(0); -} - -ClickHouseParser::SubqueryClauseContext* ClickHouseParser::CreateLiveViewStmtContext::subqueryClause() { - return getRuleContext(0); -} - -tree::TerminalNode* ClickHouseParser::CreateLiveViewStmtContext::ATTACH() { - return getToken(ClickHouseParser::ATTACH, 0); -} - -tree::TerminalNode* ClickHouseParser::CreateLiveViewStmtContext::CREATE() { - return getToken(ClickHouseParser::CREATE, 0); -} - -tree::TerminalNode* ClickHouseParser::CreateLiveViewStmtContext::IF() { - return getToken(ClickHouseParser::IF, 0); -} - -tree::TerminalNode* ClickHouseParser::CreateLiveViewStmtContext::NOT() { - return getToken(ClickHouseParser::NOT, 0); -} - -tree::TerminalNode* ClickHouseParser::CreateLiveViewStmtContext::EXISTS() { - return getToken(ClickHouseParser::EXISTS, 0); -} - -ClickHouseParser::UuidClauseContext* ClickHouseParser::CreateLiveViewStmtContext::uuidClause() { - return 
getRuleContext(0); -} - -ClickHouseParser::ClusterClauseContext* ClickHouseParser::CreateLiveViewStmtContext::clusterClause() { - return getRuleContext(0); -} - -tree::TerminalNode* ClickHouseParser::CreateLiveViewStmtContext::WITH() { - return getToken(ClickHouseParser::WITH, 0); -} - -tree::TerminalNode* ClickHouseParser::CreateLiveViewStmtContext::TIMEOUT() { - return getToken(ClickHouseParser::TIMEOUT, 0); -} - -ClickHouseParser::DestinationClauseContext* ClickHouseParser::CreateLiveViewStmtContext::destinationClause() { - return getRuleContext(0); -} - -ClickHouseParser::TableSchemaClauseContext* ClickHouseParser::CreateLiveViewStmtContext::tableSchemaClause() { - return getRuleContext(0); -} - -tree::TerminalNode* ClickHouseParser::CreateLiveViewStmtContext::DECIMAL_LITERAL() { - return getToken(ClickHouseParser::DECIMAL_LITERAL, 0); -} - -ClickHouseParser::CreateLiveViewStmtContext::CreateLiveViewStmtContext(CreateStmtContext *ctx) { copyFrom(ctx); } - -antlrcpp::Any ClickHouseParser::CreateLiveViewStmtContext::accept(tree::ParseTreeVisitor *visitor) { - if (auto parserVisitor = dynamic_cast(visitor)) - return parserVisitor->visitCreateLiveViewStmt(this); - else - return visitor->visitChildren(this); -} -//----------------- CreateMaterializedViewStmtContext ------------------------------------------------------------------ - -tree::TerminalNode* ClickHouseParser::CreateMaterializedViewStmtContext::MATERIALIZED() { - return getToken(ClickHouseParser::MATERIALIZED, 0); -} - -tree::TerminalNode* ClickHouseParser::CreateMaterializedViewStmtContext::VIEW() { - return getToken(ClickHouseParser::VIEW, 0); -} - -ClickHouseParser::TableIdentifierContext* ClickHouseParser::CreateMaterializedViewStmtContext::tableIdentifier() { - return getRuleContext(0); -} - -ClickHouseParser::SubqueryClauseContext* ClickHouseParser::CreateMaterializedViewStmtContext::subqueryClause() { - return getRuleContext(0); -} - -tree::TerminalNode* ClickHouseParser::CreateMaterializedViewStmtContext::ATTACH() { - return getToken(ClickHouseParser::ATTACH, 0); -} - -tree::TerminalNode* ClickHouseParser::CreateMaterializedViewStmtContext::CREATE() { - return getToken(ClickHouseParser::CREATE, 0); -} - -ClickHouseParser::DestinationClauseContext* ClickHouseParser::CreateMaterializedViewStmtContext::destinationClause() { - return getRuleContext(0); -} - -ClickHouseParser::EngineClauseContext* ClickHouseParser::CreateMaterializedViewStmtContext::engineClause() { - return getRuleContext(0); -} - -tree::TerminalNode* ClickHouseParser::CreateMaterializedViewStmtContext::IF() { - return getToken(ClickHouseParser::IF, 0); -} - -tree::TerminalNode* ClickHouseParser::CreateMaterializedViewStmtContext::NOT() { - return getToken(ClickHouseParser::NOT, 0); -} - -tree::TerminalNode* ClickHouseParser::CreateMaterializedViewStmtContext::EXISTS() { - return getToken(ClickHouseParser::EXISTS, 0); -} - -ClickHouseParser::UuidClauseContext* ClickHouseParser::CreateMaterializedViewStmtContext::uuidClause() { - return getRuleContext(0); -} - -ClickHouseParser::ClusterClauseContext* ClickHouseParser::CreateMaterializedViewStmtContext::clusterClause() { - return getRuleContext(0); -} - -ClickHouseParser::TableSchemaClauseContext* ClickHouseParser::CreateMaterializedViewStmtContext::tableSchemaClause() { - return getRuleContext(0); -} - -tree::TerminalNode* ClickHouseParser::CreateMaterializedViewStmtContext::POPULATE() { - return getToken(ClickHouseParser::POPULATE, 0); -} - 
-ClickHouseParser::CreateMaterializedViewStmtContext::CreateMaterializedViewStmtContext(CreateStmtContext *ctx) { copyFrom(ctx); }
-
-antlrcpp::Any ClickHouseParser::CreateMaterializedViewStmtContext::accept(tree::ParseTreeVisitor *visitor) {
-  if (auto parserVisitor = dynamic_cast<ClickHouseParserVisitor*>(visitor))
-    return parserVisitor->visitCreateMaterializedViewStmt(this);
-  else
-    return visitor->visitChildren(this);
-}
-//----------------- CreateTableStmtContext ------------------------------------------------------------------
-
-tree::TerminalNode* ClickHouseParser::CreateTableStmtContext::TABLE() {
-  return getToken(ClickHouseParser::TABLE, 0);
-}
-
-ClickHouseParser::TableIdentifierContext* ClickHouseParser::CreateTableStmtContext::tableIdentifier() {
-  return getRuleContext<ClickHouseParser::TableIdentifierContext>(0);
-}
-
-tree::TerminalNode* ClickHouseParser::CreateTableStmtContext::ATTACH() {
-  return getToken(ClickHouseParser::ATTACH, 0);
-}
-
-tree::TerminalNode* ClickHouseParser::CreateTableStmtContext::CREATE() {
-  return getToken(ClickHouseParser::CREATE, 0);
-}
-
-tree::TerminalNode* ClickHouseParser::CreateTableStmtContext::TEMPORARY() {
-  return getToken(ClickHouseParser::TEMPORARY, 0);
-}
-
-tree::TerminalNode* ClickHouseParser::CreateTableStmtContext::IF() {
-  return getToken(ClickHouseParser::IF, 0);
-}
-
-tree::TerminalNode* ClickHouseParser::CreateTableStmtContext::NOT() {
-  return getToken(ClickHouseParser::NOT, 0);
-}
-
-tree::TerminalNode* ClickHouseParser::CreateTableStmtContext::EXISTS() {
-  return getToken(ClickHouseParser::EXISTS, 0);
-}
-
-ClickHouseParser::UuidClauseContext* ClickHouseParser::CreateTableStmtContext::uuidClause() {
-  return getRuleContext<ClickHouseParser::UuidClauseContext>(0);
-}
-
-ClickHouseParser::ClusterClauseContext* ClickHouseParser::CreateTableStmtContext::clusterClause() {
-  return getRuleContext<ClickHouseParser::ClusterClauseContext>(0);
-}
-
-ClickHouseParser::TableSchemaClauseContext* ClickHouseParser::CreateTableStmtContext::tableSchemaClause() {
-  return getRuleContext<ClickHouseParser::TableSchemaClauseContext>(0);
-}
-
-ClickHouseParser::EngineClauseContext* ClickHouseParser::CreateTableStmtContext::engineClause() {
-  return getRuleContext<ClickHouseParser::EngineClauseContext>(0);
-}
-
-ClickHouseParser::SubqueryClauseContext* ClickHouseParser::CreateTableStmtContext::subqueryClause() {
-  return getRuleContext<ClickHouseParser::SubqueryClauseContext>(0);
-}
-
-ClickHouseParser::CreateTableStmtContext::CreateTableStmtContext(CreateStmtContext *ctx) { copyFrom(ctx); }
-
-antlrcpp::Any ClickHouseParser::CreateTableStmtContext::accept(tree::ParseTreeVisitor *visitor) {
-  if (auto parserVisitor = dynamic_cast<ClickHouseParserVisitor*>(visitor))
-    return parserVisitor->visitCreateTableStmt(this);
-  else
-    return visitor->visitChildren(this);
-}
-ClickHouseParser::CreateStmtContext* ClickHouseParser::createStmt() {
-  CreateStmtContext *_localctx = _tracker.createInstance<CreateStmtContext>(_ctx, getState());
-  enterRule(_localctx, 20, ClickHouseParser::RuleCreateStmt);
-  size_t _la = 0;
-
-  auto onExit = finally([=] {
-    exitRule();
-  });
-  try {
-    setState(654);
-    _errHandler->sync(this);
-    switch (getInterpreter<atn::ParserATNSimulator>()->adaptivePredict(_input, 71, _ctx)) {
-    case 1: {
-      _localctx = dynamic_cast<CreateStmtContext *>(_tracker.createInstance<ClickHouseParser::CreateDatabaseStmtContext>(_localctx));
-      enterOuterAlt(_localctx, 1);
-      setState(517);
-      _la = _input->LA(1);
-      if (!(_la == ClickHouseParser::ATTACH
-
-      || _la == ClickHouseParser::CREATE)) {
-      _errHandler->recoverInline(this);
-      }
-      else {
-        _errHandler->reportMatch(this);
-        consume();
-      }
-      setState(518);
-      match(ClickHouseParser::DATABASE);
-      setState(522);
-      _errHandler->sync(this);
-
-      switch (getInterpreter<atn::ParserATNSimulator>()->adaptivePredict(_input, 40, _ctx)) {
-      case 1: {
-        setState(519);
-        match(ClickHouseParser::IF);
-        setState(520);
-        match(ClickHouseParser::NOT);
-        setState(521);
-        match(ClickHouseParser::EXISTS);
-        break;
-      }
-
-      }
-      setState(524);
-      databaseIdentifier();
-      setState(526);
-      _errHandler->sync(this);
-
-      _la = _input->LA(1);
-      if (_la == ClickHouseParser::ON) {
-        setState(525);
-        clusterClause();
-      }
-      setState(529);
-      _errHandler->sync(this);
-
-      _la = _input->LA(1);
-      if (_la == ClickHouseParser::ENGINE) {
-        setState(528);
-        engineExpr();
-      }
-      break;
-    }
-
-    case 2: {
-      _localctx = dynamic_cast<CreateStmtContext *>(_tracker.createInstance<ClickHouseParser::CreateDictionaryStmtContext>(_localctx));
-      enterOuterAlt(_localctx, 2);
-      setState(531);
-      _la = _input->LA(1);
-      if (!(_la == ClickHouseParser::ATTACH
-
-      || _la == ClickHouseParser::CREATE)) {
-      _errHandler->recoverInline(this);
-      }
-      else {
-        _errHandler->reportMatch(this);
-        consume();
-      }
-      setState(532);
-      match(ClickHouseParser::DICTIONARY);
-      setState(536);
-      _errHandler->sync(this);
-
-      switch (getInterpreter<atn::ParserATNSimulator>()->adaptivePredict(_input, 43, _ctx)) {
-      case 1: {
-        setState(533);
-        match(ClickHouseParser::IF);
-        setState(534);
-        match(ClickHouseParser::NOT);
-        setState(535);
-        match(ClickHouseParser::EXISTS);
-        break;
-      }
-
-      }
-      setState(538);
-      tableIdentifier();
-      setState(540);
-      _errHandler->sync(this);
-
-      _la = _input->LA(1);
-      if (_la == ClickHouseParser::UUID) {
-        setState(539);
-        uuidClause();
-      }
-      setState(543);
-      _errHandler->sync(this);
-
-      _la = _input->LA(1);
-      if (_la == ClickHouseParser::ON) {
-        setState(542);
-        clusterClause();
-      }
-      setState(545);
-      dictionarySchemaClause();
-      setState(546);
-      dictionaryEngineClause();
-      break;
-    }
-
-    case 3: {
-      _localctx = dynamic_cast<CreateStmtContext *>(_tracker.createInstance<ClickHouseParser::CreateLiveViewStmtContext>(_localctx));
-      enterOuterAlt(_localctx, 3);
-      setState(548);
-      _la = _input->LA(1);
-      if (!(_la == ClickHouseParser::ATTACH
-
-      || _la == ClickHouseParser::CREATE)) {
-      _errHandler->recoverInline(this);
-      }
-      else {
-        _errHandler->reportMatch(this);
-        consume();
-      }
-      setState(549);
-      match(ClickHouseParser::LIVE);
-      setState(550);
-      match(ClickHouseParser::VIEW);
-      setState(554);
-      _errHandler->sync(this);
-
-      switch (getInterpreter<atn::ParserATNSimulator>()->adaptivePredict(_input, 46, _ctx)) {
-      case 1: {
-        setState(551);
-        match(ClickHouseParser::IF);
-        setState(552);
-        match(ClickHouseParser::NOT);
-        setState(553);
-        match(ClickHouseParser::EXISTS);
-        break;
-      }
-
-      }
-      setState(556);
-      tableIdentifier();
-      setState(558);
-      _errHandler->sync(this);
-
-      _la = _input->LA(1);
-      if (_la == ClickHouseParser::UUID) {
-        setState(557);
-        uuidClause();
-      }
-      setState(561);
-      _errHandler->sync(this);
-
-      _la = _input->LA(1);
-      if (_la == ClickHouseParser::ON) {
-        setState(560);
-        clusterClause();
-      }
-      setState(568);
-      _errHandler->sync(this);
-
-      _la = _input->LA(1);
-      if (_la == ClickHouseParser::WITH) {
-        setState(563);
-        match(ClickHouseParser::WITH);
-        setState(564);
-        match(ClickHouseParser::TIMEOUT);
-        setState(566);
-        _errHandler->sync(this);
-
-        _la = _input->LA(1);
-        if (_la == ClickHouseParser::DECIMAL_LITERAL) {
-          setState(565);
-          match(ClickHouseParser::DECIMAL_LITERAL);
-        }
-      }
-      setState(571);
-      _errHandler->sync(this);
-
-      _la = _input->LA(1);
-      if (_la == ClickHouseParser::TO) {
-        setState(570);
-        destinationClause();
-      }
-      setState(574);
-      _errHandler->sync(this);
-
-      switch (getInterpreter<atn::ParserATNSimulator>()->adaptivePredict(_input, 52, _ctx)) {
-      case 1: {
-        setState(573);
-        tableSchemaClause();
-        break;
-      }
-
-      }
-      setState(576);
-      subqueryClause();
-      break;
-    }
-
-    case 4: {
-      _localctx = dynamic_cast<CreateStmtContext *>(_tracker.createInstance<ClickHouseParser::CreateMaterializedViewStmtContext>(_localctx));
-      enterOuterAlt(_localctx, 4);
-      setState(578);
-      _la = _input->LA(1);
-      if (!(_la == ClickHouseParser::ATTACH
-
-      || _la == ClickHouseParser::CREATE)) {
-      _errHandler->recoverInline(this);
-      }
-      else {
-        _errHandler->reportMatch(this);
-        consume();
-      }
-      setState(579);
-      match(ClickHouseParser::MATERIALIZED);
-      setState(580);
-      match(ClickHouseParser::VIEW);
-      setState(584);
-      _errHandler->sync(this);
-
-      switch (getInterpreter<atn::ParserATNSimulator>()->adaptivePredict(_input, 53, _ctx)) {
-      case 1: {
-        setState(581);
-        match(ClickHouseParser::IF);
-        setState(582);
-        match(ClickHouseParser::NOT);
-        setState(583);
-        match(ClickHouseParser::EXISTS);
-        break;
-      }
-
-      }
-      setState(586);
-      tableIdentifier();
-      setState(588);
-      _errHandler->sync(this);
-
-      _la = _input->LA(1);
-      if (_la == ClickHouseParser::UUID) {
-        setState(587);
-        uuidClause();
-      }
-      setState(591);
-      _errHandler->sync(this);
-
-      _la = _input->LA(1);
-      if (_la == ClickHouseParser::ON) {
-        setState(590);
-        clusterClause();
-      }
-      setState(594);
-      _errHandler->sync(this);
-
-      _la = _input->LA(1);
-      if (_la == ClickHouseParser::AS || _la == ClickHouseParser::LPAREN) {
-        setState(593);
-        tableSchemaClause();
-      }
-      setState(601);
-      _errHandler->sync(this);
-      switch (_input->LA(1)) {
-        case ClickHouseParser::TO: {
-          setState(596);
-          destinationClause();
-          break;
-        }
-
-        case ClickHouseParser::ENGINE: {
-          setState(597);
-          engineClause();
-          setState(599);
-          _errHandler->sync(this);
-
-          _la = _input->LA(1);
-          if (_la == ClickHouseParser::POPULATE) {
-            setState(598);
-            match(ClickHouseParser::POPULATE);
-          }
-          break;
-        }
-
-      default:
-        throw NoViableAltException(this);
-      }
-      setState(603);
-      subqueryClause();
-      break;
-    }
-
-    case 5: {
-      _localctx = dynamic_cast<CreateStmtContext *>(_tracker.createInstance<ClickHouseParser::CreateTableStmtContext>(_localctx));
-      enterOuterAlt(_localctx, 5);
-      setState(605);
-      _la = _input->LA(1);
-      if (!(_la == ClickHouseParser::ATTACH
-
-      || _la == ClickHouseParser::CREATE)) {
-      _errHandler->recoverInline(this);
-      }
-      else {
-        _errHandler->reportMatch(this);
-        consume();
-      }
-      setState(607);
-      _errHandler->sync(this);
-
-      _la = _input->LA(1);
-      if (_la == ClickHouseParser::TEMPORARY) {
-        setState(606);
-        match(ClickHouseParser::TEMPORARY);
-      }
-      setState(609);
-      match(ClickHouseParser::TABLE);
-      setState(613);
-      _errHandler->sync(this);
-
-      switch (getInterpreter<atn::ParserATNSimulator>()->adaptivePredict(_input, 60, _ctx)) {
-      case 1: {
-        setState(610);
-        match(ClickHouseParser::IF);
-        setState(611);
-        match(ClickHouseParser::NOT);
-        setState(612);
-        match(ClickHouseParser::EXISTS);
-        break;
-      }
-
-      }
-      setState(615);
-      tableIdentifier();
-      setState(617);
-      _errHandler->sync(this);
-
-      _la = _input->LA(1);
-      if (_la == ClickHouseParser::UUID) {
-        setState(616);
-        uuidClause();
-      }
-      setState(620);
-      _errHandler->sync(this);
-
-      _la = _input->LA(1);
-      if (_la == ClickHouseParser::ON) {
-        setState(619);
-        clusterClause();
-      }
-      setState(623);
-      _errHandler->sync(this);
-
-      switch (getInterpreter<atn::ParserATNSimulator>()->adaptivePredict(_input, 63, _ctx)) {
-      case 1: {
-        setState(622);
-        tableSchemaClause();
-        break;
-      }
-
-      }
-      setState(626);
-      _errHandler->sync(this);
-
-      _la = _input->LA(1);
-      if (_la == ClickHouseParser::ENGINE) {
-        setState(625);
-        engineClause();
-      }
-      setState(629);
-      _errHandler->sync(this);
-
-      _la = _input->LA(1);
-      if (_la == ClickHouseParser::AS) {
-        setState(628);
-        subqueryClause();
-      }
-      break;
-    }
-
-    case 6: {
-      _localctx = dynamic_cast<CreateStmtContext *>(_tracker.createInstance<ClickHouseParser::CreateViewStmtContext>(_localctx));
-      enterOuterAlt(_localctx, 6);
-      setState(631);
-      _la = _input->LA(1);
-      if (!(_la == ClickHouseParser::ATTACH
-
-      || _la == ClickHouseParser::CREATE)) {
-      _errHandler->recoverInline(this);
-      }
-      else {
-        _errHandler->reportMatch(this);
-        consume();
-      }
-      setState(634);
-      _errHandler->sync(this);
-
-      _la = _input->LA(1);
-      if (_la == ClickHouseParser::OR) {
-        setState(632);
-        match(ClickHouseParser::OR);
-        setState(633);
-        match(ClickHouseParser::REPLACE);
-      }
-      setState(636);
-      match(ClickHouseParser::VIEW);
-      setState(640);
-      _errHandler->sync(this);
-
-      switch (getInterpreter<atn::ParserATNSimulator>()->adaptivePredict(_input, 67, _ctx)) {
-      case 1: {
-        setState(637);
-        match(ClickHouseParser::IF);
-        setState(638);
-        match(ClickHouseParser::NOT);
-        setState(639);
-        match(ClickHouseParser::EXISTS);
-        break;
-      }
-
-      }
-      setState(642);
-      tableIdentifier();
-      setState(644);
-      _errHandler->sync(this);
-
-      _la = _input->LA(1);
-      if (_la == ClickHouseParser::UUID) {
-        setState(643);
-        uuidClause();
-      }
-      setState(647);
-      _errHandler->sync(this);
-
-      _la = _input->LA(1);
-      if (_la == ClickHouseParser::ON) {
-        setState(646);
-        clusterClause();
-      }
-      setState(650);
-      _errHandler->sync(this);
-
-      switch (getInterpreter<atn::ParserATNSimulator>()->adaptivePredict(_input, 70, _ctx)) {
-      case 1: {
-        setState(649);
-        tableSchemaClause();
-        break;
-      }
-
-      }
-      setState(652);
-      subqueryClause();
-      break;
-    }
-
-    }
-
-  }
-  catch (RecognitionException &e) {
-    _errHandler->reportError(this, e);
-    _localctx->exception = std::current_exception();
-    _errHandler->recover(this, _localctx->exception);
-  }
-
-  return _localctx;
-}
-
-//----------------- DictionarySchemaClauseContext ------------------------------------------------------------------
-
-ClickHouseParser::DictionarySchemaClauseContext::DictionarySchemaClauseContext(ParserRuleContext *parent, size_t invokingState)
-  : ParserRuleContext(parent, invokingState) {
-}
-
-tree::TerminalNode* ClickHouseParser::DictionarySchemaClauseContext::LPAREN() {
-  return getToken(ClickHouseParser::LPAREN, 0);
-}
-
-std::vector<ClickHouseParser::DictionaryAttrDfntContext *> ClickHouseParser::DictionarySchemaClauseContext::dictionaryAttrDfnt() {
-  return getRuleContexts<ClickHouseParser::DictionaryAttrDfntContext>();
-}
-
-ClickHouseParser::DictionaryAttrDfntContext* ClickHouseParser::DictionarySchemaClauseContext::dictionaryAttrDfnt(size_t i) {
-  return getRuleContext<ClickHouseParser::DictionaryAttrDfntContext>(i);
-}
-
-tree::TerminalNode* ClickHouseParser::DictionarySchemaClauseContext::RPAREN() {
-  return getToken(ClickHouseParser::RPAREN, 0);
-}
-
-std::vector<tree::TerminalNode *> ClickHouseParser::DictionarySchemaClauseContext::COMMA() {
-  return getTokens(ClickHouseParser::COMMA);
-}
-
-tree::TerminalNode* ClickHouseParser::DictionarySchemaClauseContext::COMMA(size_t i) {
-  return getToken(ClickHouseParser::COMMA, i);
-}
-
-
-size_t ClickHouseParser::DictionarySchemaClauseContext::getRuleIndex() const {
-  return ClickHouseParser::RuleDictionarySchemaClause;
-}
-
-antlrcpp::Any ClickHouseParser::DictionarySchemaClauseContext::accept(tree::ParseTreeVisitor *visitor) {
-  if (auto parserVisitor = dynamic_cast<ClickHouseParserVisitor*>(visitor))
-    return parserVisitor->visitDictionarySchemaClause(this);
-  else
-    return visitor->visitChildren(this);
-}
-
-ClickHouseParser::DictionarySchemaClauseContext* ClickHouseParser::dictionarySchemaClause() {
-  DictionarySchemaClauseContext *_localctx = _tracker.createInstance<DictionarySchemaClauseContext>(_ctx, getState());
-  enterRule(_localctx, 22, ClickHouseParser::RuleDictionarySchemaClause);
-  size_t _la = 0;
-
-  auto onExit = finally([=] {
-    exitRule();
-  });
-  try {
-    enterOuterAlt(_localctx, 1);
-    setState(656);
-    match(ClickHouseParser::LPAREN);
-    setState(657);
-    dictionaryAttrDfnt();
-    setState(662);
-    _errHandler->sync(this);
-    _la = _input->LA(1);
-    while (_la == ClickHouseParser::COMMA) {
-      setState(658);
-      match(ClickHouseParser::COMMA);
-      setState(659);
-      dictionaryAttrDfnt();
-      setState(664);
-      _errHandler->sync(this);
-      _la = _input->LA(1);
-    }
-    setState(665);
-    match(ClickHouseParser::RPAREN);
-
-  }
-  catch (RecognitionException &e) {
-    _errHandler->reportError(this, e);
-    _localctx->exception = std::current_exception();
-    _errHandler->recover(this, _localctx->exception);
-  }
-
-  return _localctx;
-}
-
-//----------------- DictionaryAttrDfntContext ------------------------------------------------------------------
-
-ClickHouseParser::DictionaryAttrDfntContext::DictionaryAttrDfntContext(ParserRuleContext *parent, size_t invokingState)
-  : ParserRuleContext(parent, invokingState) {
-}
-
-ClickHouseParser::IdentifierContext* ClickHouseParser::DictionaryAttrDfntContext::identifier() {
-  return getRuleContext<ClickHouseParser::IdentifierContext>(0);
-}
-
-ClickHouseParser::ColumnTypeExprContext* ClickHouseParser::DictionaryAttrDfntContext::columnTypeExpr() {
-  return getRuleContext<ClickHouseParser::ColumnTypeExprContext>(0);
-}
-
-std::vector<tree::TerminalNode *> ClickHouseParser::DictionaryAttrDfntContext::DEFAULT() {
-  return getTokens(ClickHouseParser::DEFAULT);
-}
-
-tree::TerminalNode* ClickHouseParser::DictionaryAttrDfntContext::DEFAULT(size_t i) {
-  return getToken(ClickHouseParser::DEFAULT, i);
-}
-
-std::vector<ClickHouseParser::LiteralContext *> ClickHouseParser::DictionaryAttrDfntContext::literal() {
-  return getRuleContexts<ClickHouseParser::LiteralContext>();
-}
-
-ClickHouseParser::LiteralContext* ClickHouseParser::DictionaryAttrDfntContext::literal(size_t i) {
-  return getRuleContext<ClickHouseParser::LiteralContext>(i);
-}
-
-std::vector<tree::TerminalNode *> ClickHouseParser::DictionaryAttrDfntContext::EXPRESSION() {
-  return getTokens(ClickHouseParser::EXPRESSION);
-}
-
-tree::TerminalNode* ClickHouseParser::DictionaryAttrDfntContext::EXPRESSION(size_t i) {
-  return getToken(ClickHouseParser::EXPRESSION, i);
-}
-
-std::vector<ClickHouseParser::ColumnExprContext *> ClickHouseParser::DictionaryAttrDfntContext::columnExpr() {
-  return getRuleContexts<ClickHouseParser::ColumnExprContext>();
-}
-
-ClickHouseParser::ColumnExprContext* ClickHouseParser::DictionaryAttrDfntContext::columnExpr(size_t i) {
-  return getRuleContext<ClickHouseParser::ColumnExprContext>(i);
-}
-
-std::vector<tree::TerminalNode *> ClickHouseParser::DictionaryAttrDfntContext::HIERARCHICAL() {
-  return getTokens(ClickHouseParser::HIERARCHICAL);
-}
-
-tree::TerminalNode* ClickHouseParser::DictionaryAttrDfntContext::HIERARCHICAL(size_t i) {
-  return getToken(ClickHouseParser::HIERARCHICAL, i);
-}
-
-std::vector<tree::TerminalNode *> ClickHouseParser::DictionaryAttrDfntContext::INJECTIVE() {
-  return getTokens(ClickHouseParser::INJECTIVE);
-}
-
-tree::TerminalNode* ClickHouseParser::DictionaryAttrDfntContext::INJECTIVE(size_t i) {
-  return getToken(ClickHouseParser::INJECTIVE, i);
-}
-
-std::vector<tree::TerminalNode *> ClickHouseParser::DictionaryAttrDfntContext::IS_OBJECT_ID() {
-  return getTokens(ClickHouseParser::IS_OBJECT_ID);
-}
-
-tree::TerminalNode* ClickHouseParser::DictionaryAttrDfntContext::IS_OBJECT_ID(size_t i) {
-  return getToken(ClickHouseParser::IS_OBJECT_ID, i);
-}
-
-
-size_t ClickHouseParser::DictionaryAttrDfntContext::getRuleIndex() const {
-  return ClickHouseParser::RuleDictionaryAttrDfnt;
-}
-
-antlrcpp::Any ClickHouseParser::DictionaryAttrDfntContext::accept(tree::ParseTreeVisitor *visitor) {
-  if (auto parserVisitor = dynamic_cast<ClickHouseParserVisitor*>(visitor))
-    return parserVisitor->visitDictionaryAttrDfnt(this);
-  else
-    return visitor->visitChildren(this);
-}
-
-ClickHouseParser::DictionaryAttrDfntContext* ClickHouseParser::dictionaryAttrDfnt() {
-  DictionaryAttrDfntContext *_localctx = _tracker.createInstance<DictionaryAttrDfntContext>(_ctx, getState());
-  enterRule(_localctx, 24, ClickHouseParser::RuleDictionaryAttrDfnt);
-
-  auto onExit = finally([=] {
-    exitRule();
-  });
-  try {
-    size_t alt;
-    enterOuterAlt(_localctx, 1);
-    setState(667);
-    identifier();
-    setState(668);
-    columnTypeExpr();
-    setState(690);
-    _errHandler->sync(this);
-    alt = getInterpreter<atn::ParserATNSimulator>()->adaptivePredict(_input, 74, _ctx);
-    while (alt != 2 && alt != atn::ATN::INVALID_ALT_NUMBER) {
-      if (alt == 1) {
-        setState(688);
-        _errHandler->sync(this);
-        switch (getInterpreter<atn::ParserATNSimulator>()->adaptivePredict(_input, 73, _ctx)) {
-        case 1: {
-          setState(669);
-
-          if (!(!_localctx->attrs.count("default"))) throw FailedPredicateException(this, "!$attrs.count(\"default\")");
-          setState(670);
-          match(ClickHouseParser::DEFAULT);
-          setState(671);
-          literal();
-          _localctx->attrs.insert("default");
-          break;
-        }
-
-        case 2: {
-          setState(674);
-
-          if (!(!_localctx->attrs.count("expression"))) throw FailedPredicateException(this, "!$attrs.count(\"expression\")");
-          setState(675);
-          match(ClickHouseParser::EXPRESSION);
-          setState(676);
-          columnExpr(0);
-          _localctx->attrs.insert("expression");
-          break;
-        }
-
-        case 3: {
-          setState(679);
-
-          if (!(!_localctx->attrs.count("hierarchical"))) throw FailedPredicateException(this, "!$attrs.count(\"hierarchical\")");
-          setState(680);
-          match(ClickHouseParser::HIERARCHICAL);
-          _localctx->attrs.insert("hierarchical");
-          break;
-        }
-
-        case 4: {
-          setState(682);
-
-          if (!(!_localctx->attrs.count("injective"))) throw FailedPredicateException(this, "!$attrs.count(\"injective\")");
-          setState(683);
-          match(ClickHouseParser::INJECTIVE);
-          _localctx->attrs.insert("injective");
-          break;
-        }
-
-        case 5: {
-          setState(685);
-
-          if (!(!_localctx->attrs.count("is_object_id"))) throw FailedPredicateException(this, "!$attrs.count(\"is_object_id\")");
-          setState(686);
-          match(ClickHouseParser::IS_OBJECT_ID);
-          _localctx->attrs.insert("is_object_id");
-          break;
-        }
-
-        }
-      }
-      setState(692);
-      _errHandler->sync(this);
-      alt = getInterpreter<atn::ParserATNSimulator>()->adaptivePredict(_input, 74, _ctx);
-    }
-
-  }
-  catch (RecognitionException &e) {
-    _errHandler->reportError(this, e);
-    _localctx->exception = std::current_exception();
-    _errHandler->recover(this, _localctx->exception);
-  }
-
-  return _localctx;
-}
-
-//----------------- DictionaryEngineClauseContext ------------------------------------------------------------------
-
-ClickHouseParser::DictionaryEngineClauseContext::DictionaryEngineClauseContext(ParserRuleContext *parent, size_t invokingState)
-  : ParserRuleContext(parent, invokingState) {
-}
-
-ClickHouseParser::DictionaryPrimaryKeyClauseContext* ClickHouseParser::DictionaryEngineClauseContext::dictionaryPrimaryKeyClause() {
-  return getRuleContext<ClickHouseParser::DictionaryPrimaryKeyClauseContext>(0);
-}
-
-std::vector<ClickHouseParser::SourceClauseContext *> ClickHouseParser::DictionaryEngineClauseContext::sourceClause() {
-  return getRuleContexts<ClickHouseParser::SourceClauseContext>();
-}
-
-ClickHouseParser::SourceClauseContext* ClickHouseParser::DictionaryEngineClauseContext::sourceClause(size_t i) {
-  return getRuleContext<ClickHouseParser::SourceClauseContext>(i);
-}
-
-std::vector<ClickHouseParser::LifetimeClauseContext *> ClickHouseParser::DictionaryEngineClauseContext::lifetimeClause() {
-  return getRuleContexts<ClickHouseParser::LifetimeClauseContext>();
-}
-
-ClickHouseParser::LifetimeClauseContext* ClickHouseParser::DictionaryEngineClauseContext::lifetimeClause(size_t i) {
-  return getRuleContext<ClickHouseParser::LifetimeClauseContext>(i);
-}
-
-std::vector<ClickHouseParser::LayoutClauseContext *> ClickHouseParser::DictionaryEngineClauseContext::layoutClause() {
-  return getRuleContexts<ClickHouseParser::LayoutClauseContext>();
-}
-
-ClickHouseParser::LayoutClauseContext* ClickHouseParser::DictionaryEngineClauseContext::layoutClause(size_t i) {
-  return getRuleContext<ClickHouseParser::LayoutClauseContext>(i);
-}
-
-std::vector<ClickHouseParser::RangeClauseContext *> ClickHouseParser::DictionaryEngineClauseContext::rangeClause() {
-  return getRuleContexts<ClickHouseParser::RangeClauseContext>();
-}
-
-ClickHouseParser::RangeClauseContext* ClickHouseParser::DictionaryEngineClauseContext::rangeClause(size_t i) {
-  return getRuleContext<ClickHouseParser::RangeClauseContext>(i);
-}
-
-std::vector<ClickHouseParser::DictionarySettingsClauseContext *> ClickHouseParser::DictionaryEngineClauseContext::dictionarySettingsClause() {
-  return getRuleContexts<ClickHouseParser::DictionarySettingsClauseContext>();
-}
-
-ClickHouseParser::DictionarySettingsClauseContext* ClickHouseParser::DictionaryEngineClauseContext::dictionarySettingsClause(size_t i) {
-  return getRuleContext<ClickHouseParser::DictionarySettingsClauseContext>(i);
-}
-
-
-size_t ClickHouseParser::DictionaryEngineClauseContext::getRuleIndex() const {
-  return ClickHouseParser::RuleDictionaryEngineClause;
-}
-
-antlrcpp::Any ClickHouseParser::DictionaryEngineClauseContext::accept(tree::ParseTreeVisitor *visitor) {
-  if (auto parserVisitor = dynamic_cast<ClickHouseParserVisitor*>(visitor))
-    return parserVisitor->visitDictionaryEngineClause(this);
-  else
-    return visitor->visitChildren(this);
-}
-
-ClickHouseParser::DictionaryEngineClauseContext* ClickHouseParser::dictionaryEngineClause() {
-  DictionaryEngineClauseContext *_localctx = _tracker.createInstance<DictionaryEngineClauseContext>(_ctx, getState());
-  enterRule(_localctx, 26, ClickHouseParser::RuleDictionaryEngineClause);
-
-  auto onExit = finally([=] {
-    exitRule();
-  });
-  try {
-    size_t alt;
-    enterOuterAlt(_localctx, 1);
-    setState(694);
-    _errHandler->sync(this);
-
-    switch (getInterpreter<atn::ParserATNSimulator>()->adaptivePredict(_input, 75, _ctx)) {
-    case 1: {
-      setState(693);
-      dictionaryPrimaryKeyClause();
-      break;
-    }
-
-    }
-    setState(718);
-    _errHandler->sync(this);
-    alt = getInterpreter<atn::ParserATNSimulator>()->adaptivePredict(_input, 77, _ctx);
-    while (alt != 2 && alt != atn::ATN::INVALID_ALT_NUMBER) {
-      if (alt == 1) {
-        setState(716);
-        _errHandler->sync(this);
-        switch (getInterpreter<atn::ParserATNSimulator>()->adaptivePredict(_input, 76, _ctx)) {
-        case 1: {
-          setState(696);
-
-          if (!(!_localctx->clauses.count("source"))) throw FailedPredicateException(this, "!$clauses.count(\"source\")");
-          setState(697);
-          sourceClause();
-          _localctx->clauses.insert("source");
-          break;
-        }
-
-        case 2: {
-          setState(700);
-
-          if (!(!_localctx->clauses.count("lifetime"))) throw FailedPredicateException(this, "!$clauses.count(\"lifetime\")");
-          setState(701);
-          lifetimeClause();
-          _localctx->clauses.insert("lifetime");
-          break;
-        }
-
-        case 3: {
-          setState(704);
-
-          if (!(!_localctx->clauses.count("layout"))) throw FailedPredicateException(this, "!$clauses.count(\"layout\")");
-          setState(705);
-          layoutClause();
-          _localctx->clauses.insert("layout");
-          break;
-        }
-
-        case 4: {
-          setState(708);
-
-          if (!(!_localctx->clauses.count("range"))) throw FailedPredicateException(this, "!$clauses.count(\"range\")");
-          setState(709);
-          rangeClause();
-          _localctx->clauses.insert("range");
-          break;
-        }
-
-        case 5: {
-          setState(712);
-
-          if (!(!_localctx->clauses.count("settings"))) throw FailedPredicateException(this, "!$clauses.count(\"settings\")");
-          setState(713);
-          dictionarySettingsClause();
-          _localctx->clauses.insert("settings");
-          break;
-        }
-
-        }
-      }
-      setState(720);
-      _errHandler->sync(this);
-      alt = getInterpreter<atn::ParserATNSimulator>()->adaptivePredict(_input, 77, _ctx);
-    }
-
-  }
-  catch (RecognitionException &e) {
-    _errHandler->reportError(this, e);
-    _localctx->exception = std::current_exception();
-    _errHandler->recover(this, _localctx->exception);
-  }
-
-  return _localctx;
-}
-
-//----------------- DictionaryPrimaryKeyClauseContext ------------------------------------------------------------------
-
-ClickHouseParser::DictionaryPrimaryKeyClauseContext::DictionaryPrimaryKeyClauseContext(ParserRuleContext *parent, size_t invokingState)
-  : ParserRuleContext(parent, invokingState) {
-}
-
-tree::TerminalNode* ClickHouseParser::DictionaryPrimaryKeyClauseContext::PRIMARY() {
-  return getToken(ClickHouseParser::PRIMARY, 0);
-}
-
-tree::TerminalNode* ClickHouseParser::DictionaryPrimaryKeyClauseContext::KEY() {
-  return getToken(ClickHouseParser::KEY, 0);
-}
-
-ClickHouseParser::ColumnExprListContext* ClickHouseParser::DictionaryPrimaryKeyClauseContext::columnExprList() {
-  return getRuleContext<ClickHouseParser::ColumnExprListContext>(0);
-}
-
-
-size_t ClickHouseParser::DictionaryPrimaryKeyClauseContext::getRuleIndex() const {
-  return ClickHouseParser::RuleDictionaryPrimaryKeyClause;
-}
-
-antlrcpp::Any ClickHouseParser::DictionaryPrimaryKeyClauseContext::accept(tree::ParseTreeVisitor *visitor) {
-  if (auto parserVisitor = dynamic_cast<ClickHouseParserVisitor*>(visitor))
-    return parserVisitor->visitDictionaryPrimaryKeyClause(this);
-  else
-    return visitor->visitChildren(this);
-}
-
-ClickHouseParser::DictionaryPrimaryKeyClauseContext* ClickHouseParser::dictionaryPrimaryKeyClause() {
-  DictionaryPrimaryKeyClauseContext *_localctx = _tracker.createInstance<DictionaryPrimaryKeyClauseContext>(_ctx, getState());
-  enterRule(_localctx, 28, ClickHouseParser::RuleDictionaryPrimaryKeyClause);
-
-  auto onExit = finally([=] {
-    exitRule();
-  });
-  try {
-    enterOuterAlt(_localctx, 1);
-    setState(721);
-    match(ClickHouseParser::PRIMARY);
-    setState(722);
-    match(ClickHouseParser::KEY);
-    setState(723);
-    columnExprList();
-
-  }
-  catch (RecognitionException &e) {
-    _errHandler->reportError(this, e);
-    _localctx->exception = std::current_exception();
-    _errHandler->recover(this, _localctx->exception);
-  }
-
-  return _localctx;
-}
-
-//----------------- DictionaryArgExprContext ------------------------------------------------------------------
-
-ClickHouseParser::DictionaryArgExprContext::DictionaryArgExprContext(ParserRuleContext *parent, size_t invokingState)
-  : ParserRuleContext(parent, invokingState) {
-}
-
-std::vector<ClickHouseParser::IdentifierContext *> ClickHouseParser::DictionaryArgExprContext::identifier() {
-  return getRuleContexts<ClickHouseParser::IdentifierContext>();
-}
-
-ClickHouseParser::IdentifierContext* ClickHouseParser::DictionaryArgExprContext::identifier(size_t i) {
-  return getRuleContext<ClickHouseParser::IdentifierContext>(i);
-}
-
-ClickHouseParser::LiteralContext* ClickHouseParser::DictionaryArgExprContext::literal() {
-  return getRuleContext<ClickHouseParser::LiteralContext>(0);
-}
-
-tree::TerminalNode* ClickHouseParser::DictionaryArgExprContext::LPAREN() {
-  return getToken(ClickHouseParser::LPAREN, 0);
-}
-
-tree::TerminalNode* ClickHouseParser::DictionaryArgExprContext::RPAREN() {
-  return getToken(ClickHouseParser::RPAREN, 0);
-}
-
-
-size_t ClickHouseParser::DictionaryArgExprContext::getRuleIndex() const {
-  return ClickHouseParser::RuleDictionaryArgExpr;
-}
-
-antlrcpp::Any ClickHouseParser::DictionaryArgExprContext::accept(tree::ParseTreeVisitor *visitor) {
-  if (auto parserVisitor = dynamic_cast<ClickHouseParserVisitor*>(visitor))
-    return parserVisitor->visitDictionaryArgExpr(this);
-  else
-    return visitor->visitChildren(this);
-}
-
-ClickHouseParser::DictionaryArgExprContext* ClickHouseParser::dictionaryArgExpr() {
-  DictionaryArgExprContext *_localctx = _tracker.createInstance<DictionaryArgExprContext>(_ctx, getState());
-  enterRule(_localctx, 30, ClickHouseParser::RuleDictionaryArgExpr);
-  size_t _la = 0;
-
-  auto onExit = finally([=] {
-    exitRule();
-  });
-  try {
-    enterOuterAlt(_localctx, 1);
-    setState(725);
-    identifier();
-    setState(732);
-    _errHandler->sync(this);
-    switch (_input->LA(1)) {
-      case ClickHouseParser::AFTER:
-      case ClickHouseParser::ALIAS:
-      case ClickHouseParser::ALL:
-      case ClickHouseParser::ALTER:
-      case ClickHouseParser::AND:
-      case ClickHouseParser::ANTI:
-      case ClickHouseParser::ANY:
-      case ClickHouseParser::ARRAY:
-      case ClickHouseParser::AS:
-      case ClickHouseParser::ASCENDING:
-      case ClickHouseParser::ASOF:
-      case ClickHouseParser::AST:
-      case ClickHouseParser::ASYNC:
-      case ClickHouseParser::ATTACH:
-      case ClickHouseParser::BETWEEN:
-      case ClickHouseParser::BOTH:
-      case ClickHouseParser::BY:
-      case ClickHouseParser::CASE:
-      case ClickHouseParser::CAST:
-      case ClickHouseParser::CHECK:
-      case ClickHouseParser::CLEAR:
-      case ClickHouseParser::CLUSTER:
-      case ClickHouseParser::CODEC:
-      case ClickHouseParser::COLLATE:
-      case ClickHouseParser::COLUMN:
-      case ClickHouseParser::COMMENT:
-      case ClickHouseParser::CONSTRAINT:
-      case ClickHouseParser::CREATE:
-      case ClickHouseParser::CROSS:
-      case ClickHouseParser::CUBE:
-      case ClickHouseParser::DATABASE:
-      case ClickHouseParser::DATABASES:
-      case ClickHouseParser::DATE:
-      case ClickHouseParser::DAY:
-      case ClickHouseParser::DEDUPLICATE:
-      case ClickHouseParser::DEFAULT:
-      case ClickHouseParser::DELAY:
-      case ClickHouseParser::DELETE:
-      case ClickHouseParser::DESC:
-      case ClickHouseParser::DESCENDING:
-      case ClickHouseParser::DESCRIBE:
-      case ClickHouseParser::DETACH:
-      case ClickHouseParser::DICTIONARIES:
-      case ClickHouseParser::DICTIONARY:
-      case ClickHouseParser::DISK:
-      case ClickHouseParser::DISTINCT:
-      case ClickHouseParser::DISTRIBUTED:
-      case ClickHouseParser::DROP:
-      case ClickHouseParser::ELSE:
-      case ClickHouseParser::END:
-      case ClickHouseParser::ENGINE:
-      case ClickHouseParser::EVENTS:
-      case ClickHouseParser::EXISTS:
-      case ClickHouseParser::EXPLAIN:
-      case ClickHouseParser::EXPRESSION:
-      case ClickHouseParser::EXTRACT:
-      case ClickHouseParser::FETCHES:
-      case ClickHouseParser::FINAL:
-      case ClickHouseParser::FIRST:
-      case ClickHouseParser::FLUSH:
-      case ClickHouseParser::FOR:
-      case ClickHouseParser::FORMAT:
-      case ClickHouseParser::FREEZE:
-      case ClickHouseParser::FROM:
-      case ClickHouseParser::FULL:
-      case ClickHouseParser::FUNCTION:
-      case ClickHouseParser::GLOBAL:
-      case ClickHouseParser::GRANULARITY:
-      case ClickHouseParser::GROUP:
-      case ClickHouseParser::HAVING:
-      case ClickHouseParser::HIERARCHICAL:
-      case ClickHouseParser::HOUR:
-      case ClickHouseParser::ID:
-      case ClickHouseParser::IF:
-      case ClickHouseParser::ILIKE:
-      case ClickHouseParser::IN:
-      case ClickHouseParser::INDEX:
-      case ClickHouseParser::INJECTIVE:
-      case ClickHouseParser::INNER:
-      case ClickHouseParser::INSERT:
-      case ClickHouseParser::INTERVAL:
-      case ClickHouseParser::INTO:
-      case ClickHouseParser::IS:
-      case ClickHouseParser::IS_OBJECT_ID:
-      case ClickHouseParser::JOIN:
-      case ClickHouseParser::KEY:
-      case ClickHouseParser::KILL:
-      case ClickHouseParser::LAST:
-      case ClickHouseParser::LAYOUT:
-      case ClickHouseParser::LEADING:
-      case ClickHouseParser::LEFT:
-      case ClickHouseParser::LIFETIME:
-      case ClickHouseParser::LIKE:
-      case ClickHouseParser::LIMIT:
-      case ClickHouseParser::LIVE:
-      case ClickHouseParser::LOCAL:
-      case ClickHouseParser::LOGS:
-      case ClickHouseParser::MATERIALIZE:
-      case ClickHouseParser::MATERIALIZED:
-      case ClickHouseParser::MAX:
-      case ClickHouseParser::MERGES:
-      case ClickHouseParser::MIN:
-      case ClickHouseParser::MINUTE:
-      case ClickHouseParser::MODIFY:
-      case ClickHouseParser::MONTH:
-      case ClickHouseParser::MOVE:
-      case ClickHouseParser::MUTATION:
-      case ClickHouseParser::NO:
-      case ClickHouseParser::NOT:
-      case ClickHouseParser::NULLS:
-      case ClickHouseParser::OFFSET:
-      case ClickHouseParser::ON:
-      case ClickHouseParser::OPTIMIZE:
-      case ClickHouseParser::OR:
-      case ClickHouseParser::ORDER:
-      case ClickHouseParser::OUTER:
-      case ClickHouseParser::OUTFILE:
-      case ClickHouseParser::PARTITION:
-      case ClickHouseParser::POPULATE:
-      case ClickHouseParser::PREWHERE:
-      case ClickHouseParser::PRIMARY:
-      case ClickHouseParser::QUARTER:
-      case ClickHouseParser::RANGE:
-      case ClickHouseParser::RELOAD:
-      case ClickHouseParser::REMOVE:
-      case ClickHouseParser::RENAME:
-      case ClickHouseParser::REPLACE:
-      case ClickHouseParser::REPLICA:
-      case ClickHouseParser::REPLICATED:
-      case ClickHouseParser::RIGHT:
-      case ClickHouseParser::ROLLUP:
-      case ClickHouseParser::SAMPLE:
-      case ClickHouseParser::SECOND:
-      case ClickHouseParser::SELECT:
-      case ClickHouseParser::SEMI:
-      case ClickHouseParser::SENDS:
-      case ClickHouseParser::SET:
-      case ClickHouseParser::SETTINGS:
-      case ClickHouseParser::SHOW:
-      case ClickHouseParser::SOURCE:
-      case ClickHouseParser::START:
-      case ClickHouseParser::STOP:
-      case ClickHouseParser::SUBSTRING:
-      case ClickHouseParser::SYNC:
-      case ClickHouseParser::SYNTAX:
-      case ClickHouseParser::SYSTEM:
-      case ClickHouseParser::TABLE:
-      case ClickHouseParser::TABLES:
-      case ClickHouseParser::TEMPORARY:
-      case ClickHouseParser::TEST:
-      case ClickHouseParser::THEN:
-      case ClickHouseParser::TIES:
-      case ClickHouseParser::TIMEOUT:
-      case ClickHouseParser::TIMESTAMP:
-      case ClickHouseParser::TO:
-      case ClickHouseParser::TOP:
-      case ClickHouseParser::TOTALS:
-      case ClickHouseParser::TRAILING:
-      case ClickHouseParser::TRIM:
-      case ClickHouseParser::TRUNCATE:
-      case ClickHouseParser::TTL:
-      case ClickHouseParser::TYPE:
-      case ClickHouseParser::UNION:
-      case ClickHouseParser::UPDATE:
-      case ClickHouseParser::USE:
-      case ClickHouseParser::USING:
-      case ClickHouseParser::UUID:
-      case ClickHouseParser::VALUES:
-      case ClickHouseParser::VIEW:
-      case ClickHouseParser::VOLUME:
-      case ClickHouseParser::WATCH:
-      case ClickHouseParser::WEEK:
-      case ClickHouseParser::WHEN:
-      case ClickHouseParser::WHERE:
-      case ClickHouseParser::WITH:
-      case ClickHouseParser::YEAR:
-      case ClickHouseParser::JSON_FALSE:
-      case ClickHouseParser::JSON_TRUE:
-      case ClickHouseParser::IDENTIFIER: {
-        setState(726);
-        identifier();
-        setState(729);
-        _errHandler->sync(this);
-
-        _la = _input->LA(1);
-        if (_la == ClickHouseParser::LPAREN) {
-          setState(727);
-          match(ClickHouseParser::LPAREN);
-          setState(728);
-          match(ClickHouseParser::RPAREN);
-        }
-        break;
-      }
-
-      case ClickHouseParser::INF:
-      case ClickHouseParser::NAN_SQL:
-      case ClickHouseParser::NULL_SQL:
-      case ClickHouseParser::FLOATING_LITERAL:
-      case ClickHouseParser::OCTAL_LITERAL:
-      case ClickHouseParser::DECIMAL_LITERAL:
-      case ClickHouseParser::HEXADECIMAL_LITERAL:
-      case ClickHouseParser::STRING_LITERAL:
-      case ClickHouseParser::DASH:
-      case ClickHouseParser::DOT:
-      case ClickHouseParser::PLUS: {
-        setState(731);
-        literal();
-        break;
-      }
-
-    default:
-      throw NoViableAltException(this);
-    }
-
-  }
-  catch (RecognitionException &e) {
-    _errHandler->reportError(this, e);
-    _localctx->exception = std::current_exception();
-    _errHandler->recover(this, _localctx->exception);
-  }
-
-  return _localctx;
-}
-
-//----------------- SourceClauseContext ------------------------------------------------------------------
-
-ClickHouseParser::SourceClauseContext::SourceClauseContext(ParserRuleContext *parent, size_t invokingState)
-  : ParserRuleContext(parent, invokingState) {
-}
-
-tree::TerminalNode* ClickHouseParser::SourceClauseContext::SOURCE() {
-  return getToken(ClickHouseParser::SOURCE, 0);
-}
-
-std::vector<tree::TerminalNode *> ClickHouseParser::SourceClauseContext::LPAREN() {
-  return getTokens(ClickHouseParser::LPAREN);
-}
-
-tree::TerminalNode* ClickHouseParser::SourceClauseContext::LPAREN(size_t i) {
-  return getToken(ClickHouseParser::LPAREN, i);
-}
-
-ClickHouseParser::IdentifierContext* ClickHouseParser::SourceClauseContext::identifier() {
-  return getRuleContext<ClickHouseParser::IdentifierContext>(0);
-}
-
-std::vector<tree::TerminalNode *> ClickHouseParser::SourceClauseContext::RPAREN() {
-  return getTokens(ClickHouseParser::RPAREN);
-}
-
-tree::TerminalNode* ClickHouseParser::SourceClauseContext::RPAREN(size_t i) {
-  return getToken(ClickHouseParser::RPAREN, i);
-}
-
-std::vector<ClickHouseParser::DictionaryArgExprContext *> ClickHouseParser::SourceClauseContext::dictionaryArgExpr() {
-  return getRuleContexts<ClickHouseParser::DictionaryArgExprContext>();
-}
-
-ClickHouseParser::DictionaryArgExprContext* ClickHouseParser::SourceClauseContext::dictionaryArgExpr(size_t i) {
-  return getRuleContext<ClickHouseParser::DictionaryArgExprContext>(i);
-}
-
-
-size_t ClickHouseParser::SourceClauseContext::getRuleIndex() const {
-  return ClickHouseParser::RuleSourceClause;
-}
-
-antlrcpp::Any ClickHouseParser::SourceClauseContext::accept(tree::ParseTreeVisitor *visitor) {
-  if (auto parserVisitor = dynamic_cast<ClickHouseParserVisitor*>(visitor))
-    return parserVisitor->visitSourceClause(this);
-  else
-    return visitor->visitChildren(this);
-}
-
-ClickHouseParser::SourceClauseContext* ClickHouseParser::sourceClause() {
-  SourceClauseContext *_localctx = _tracker.createInstance<SourceClauseContext>(_ctx, getState());
-  enterRule(_localctx, 32, ClickHouseParser::RuleSourceClause);
-  size_t _la = 0;
-
-  auto onExit = finally([=] {
-    exitRule();
-  });
-  try {
-    enterOuterAlt(_localctx, 1);
-    setState(734);
-    match(ClickHouseParser::SOURCE);
-    setState(735);
-    match(ClickHouseParser::LPAREN);
-    setState(736);
-    identifier();
-    setState(737);
-    match(ClickHouseParser::LPAREN);
-    setState(741);
-    _errHandler->sync(this);
-    _la = _input->LA(1);
-    while ((((_la & ~ 0x3fULL) == 0) &&
-      ((1ULL << _la) & ((1ULL << ClickHouseParser::AFTER)
-      | (1ULL << ClickHouseParser::ALIAS)
-      | (1ULL << ClickHouseParser::ALL)
-      | (1ULL << ClickHouseParser::ALTER)
-      | (1ULL << ClickHouseParser::AND)
-      | (1ULL << ClickHouseParser::ANTI)
-      | (1ULL << ClickHouseParser::ANY)
-      | (1ULL << ClickHouseParser::ARRAY)
-      | (1ULL << ClickHouseParser::AS)
-      | (1ULL << ClickHouseParser::ASCENDING)
-      | (1ULL << ClickHouseParser::ASOF)
-      | (1ULL << ClickHouseParser::AST)
-      | (1ULL << ClickHouseParser::ASYNC)
-      | (1ULL << ClickHouseParser::ATTACH)
-      | (1ULL << ClickHouseParser::BETWEEN)
-      | (1ULL << ClickHouseParser::BOTH)
-      | (1ULL << ClickHouseParser::BY)
-      | (1ULL << ClickHouseParser::CASE)
-      | (1ULL << ClickHouseParser::CAST)
-      | (1ULL << ClickHouseParser::CHECK)
-      | (1ULL << ClickHouseParser::CLEAR)
-      | (1ULL << ClickHouseParser::CLUSTER)
-      | (1ULL << ClickHouseParser::CODEC)
-      | (1ULL << ClickHouseParser::COLLATE)
-      | (1ULL << ClickHouseParser::COLUMN)
-      | (1ULL << ClickHouseParser::COMMENT)
-      | (1ULL << ClickHouseParser::CONSTRAINT)
-      | (1ULL << ClickHouseParser::CREATE)
-      | (1ULL << ClickHouseParser::CROSS)
-      | (1ULL << ClickHouseParser::CUBE)
-      | (1ULL << ClickHouseParser::DATABASE)
-      | (1ULL << ClickHouseParser::DATABASES)
-      | (1ULL << ClickHouseParser::DATE)
-      | (1ULL << ClickHouseParser::DAY)
-      | (1ULL << ClickHouseParser::DEDUPLICATE)
-      | (1ULL << ClickHouseParser::DEFAULT)
-      | (1ULL << ClickHouseParser::DELAY)
-      | (1ULL << ClickHouseParser::DELETE)
-      | (1ULL << ClickHouseParser::DESC)
-      | (1ULL << ClickHouseParser::DESCENDING)
-      | (1ULL << ClickHouseParser::DESCRIBE)
-      | (1ULL << ClickHouseParser::DETACH)
-      | (1ULL << ClickHouseParser::DICTIONARIES)
-      | (1ULL << ClickHouseParser::DICTIONARY)
-      | (1ULL << ClickHouseParser::DISK)
-      | (1ULL << ClickHouseParser::DISTINCT)
-      | (1ULL << ClickHouseParser::DISTRIBUTED)
-      | (1ULL << ClickHouseParser::DROP)
-      | (1ULL << ClickHouseParser::ELSE)
-      | (1ULL << ClickHouseParser::END)
-      | (1ULL << ClickHouseParser::ENGINE)
-      | (1ULL << ClickHouseParser::EVENTS)
-      | (1ULL << ClickHouseParser::EXISTS)
-      | (1ULL << ClickHouseParser::EXPLAIN)
-      | (1ULL << ClickHouseParser::EXPRESSION)
-      | (1ULL << ClickHouseParser::EXTRACT)
-      | (1ULL << ClickHouseParser::FETCHES)
-      | (1ULL << ClickHouseParser::FINAL)
-      | (1ULL << ClickHouseParser::FIRST)
-      | (1ULL << ClickHouseParser::FLUSH)
-      | (1ULL << ClickHouseParser::FOR)
-      | (1ULL << ClickHouseParser::FORMAT))) != 0) || ((((_la - 64) & ~ 0x3fULL) == 0) &&
-      ((1ULL << (_la - 64)) & ((1ULL << (ClickHouseParser::FREEZE - 64))
-      | (1ULL << (ClickHouseParser::FROM - 64))
-      | (1ULL << (ClickHouseParser::FULL - 64))
-      | (1ULL << (ClickHouseParser::FUNCTION - 64))
-      | (1ULL << (ClickHouseParser::GLOBAL - 64))
-      | (1ULL << (ClickHouseParser::GRANULARITY - 64))
-      | (1ULL << (ClickHouseParser::GROUP - 64))
-      | (1ULL << (ClickHouseParser::HAVING - 64))
-      | (1ULL << (ClickHouseParser::HIERARCHICAL - 64))
-      | (1ULL << (ClickHouseParser::HOUR - 64))
-      | (1ULL << (ClickHouseParser::ID - 64))
-      | (1ULL << (ClickHouseParser::IF - 64))
-      | (1ULL << (ClickHouseParser::ILIKE - 64))
-      | (1ULL << (ClickHouseParser::IN - 64))
-      | (1ULL << (ClickHouseParser::INDEX - 64))
-      | (1ULL << (ClickHouseParser::INJECTIVE - 64))
-      | (1ULL << (ClickHouseParser::INNER - 64))
-      | (1ULL << (ClickHouseParser::INSERT - 64))
-      | (1ULL << (ClickHouseParser::INTERVAL - 64))
-      | (1ULL << (ClickHouseParser::INTO - 64))
-      | (1ULL << (ClickHouseParser::IS - 64))
-      | (1ULL << (ClickHouseParser::IS_OBJECT_ID - 64))
-      | (1ULL << (ClickHouseParser::JOIN - 64))
-      | (1ULL << (ClickHouseParser::KEY - 64))
-      | (1ULL << (ClickHouseParser::KILL - 64))
-      | (1ULL << (ClickHouseParser::LAST - 64))
-      | (1ULL << (ClickHouseParser::LAYOUT - 64))
-      | (1ULL << (ClickHouseParser::LEADING - 64))
-      | (1ULL << (ClickHouseParser::LEFT - 64))
-      | (1ULL << (ClickHouseParser::LIFETIME - 64))
-      | (1ULL << (ClickHouseParser::LIKE - 64))
-      | (1ULL << (ClickHouseParser::LIMIT - 64))
-      | (1ULL << (ClickHouseParser::LIVE - 64))
-      | (1ULL << (ClickHouseParser::LOCAL - 64))
-      | (1ULL << (ClickHouseParser::LOGS - 64))
-      | (1ULL << (ClickHouseParser::MATERIALIZE - 64))
-      | (1ULL << (ClickHouseParser::MATERIALIZED - 64))
-      | (1ULL << (ClickHouseParser::MAX - 64))
-      | (1ULL << (ClickHouseParser::MERGES - 64))
-      | (1ULL << (ClickHouseParser::MIN - 64))
-      | (1ULL << (ClickHouseParser::MINUTE - 64))
-      | (1ULL << (ClickHouseParser::MODIFY - 64))
-      | (1ULL << (ClickHouseParser::MONTH - 64))
-      | (1ULL << (ClickHouseParser::MOVE - 64))
-      | (1ULL << (ClickHouseParser::MUTATION - 64))
-      | (1ULL << (ClickHouseParser::NO - 64))
-      | (1ULL << (ClickHouseParser::NOT - 64))
-      | (1ULL << (ClickHouseParser::NULLS - 64))
-      | (1ULL << (ClickHouseParser::OFFSET - 64))
-      | (1ULL << (ClickHouseParser::ON - 64))
-      | (1ULL << (ClickHouseParser::OPTIMIZE - 64))
-      | (1ULL << (ClickHouseParser::OR - 64))
-      | (1ULL << (ClickHouseParser::ORDER - 64))
-      | (1ULL << (ClickHouseParser::OUTER - 64))
-      | (1ULL << (ClickHouseParser::OUTFILE - 64))
-      | (1ULL << (ClickHouseParser::PARTITION - 64))
-      | (1ULL << (ClickHouseParser::POPULATE - 64))
-      | (1ULL << (ClickHouseParser::PREWHERE - 64))
-      | (1ULL << (ClickHouseParser::PRIMARY - 64))
-      | (1ULL << (ClickHouseParser::QUARTER - 64)))) != 0) || ((((_la - 128) & ~ 0x3fULL) == 0) &&
-      ((1ULL << (_la - 128)) & ((1ULL << (ClickHouseParser::RANGE - 128))
-      | (1ULL << (ClickHouseParser::RELOAD - 128))
-      | (1ULL << (ClickHouseParser::REMOVE - 128))
-      | (1ULL << (ClickHouseParser::RENAME - 128))
-      | (1ULL << (ClickHouseParser::REPLACE - 128))
-      | (1ULL << (ClickHouseParser::REPLICA - 128))
-      | (1ULL << (ClickHouseParser::REPLICATED - 128))
-      | (1ULL << (ClickHouseParser::RIGHT - 128))
-      | (1ULL << (ClickHouseParser::ROLLUP - 128))
-      | (1ULL << (ClickHouseParser::SAMPLE - 128))
-      | (1ULL << (ClickHouseParser::SECOND - 128))
-      | (1ULL << (ClickHouseParser::SELECT - 128))
-      | (1ULL << (ClickHouseParser::SEMI - 128))
-      | (1ULL << (ClickHouseParser::SENDS - 128))
-      | (1ULL << (ClickHouseParser::SET - 128))
-      | (1ULL << (ClickHouseParser::SETTINGS - 128))
-      | (1ULL << (ClickHouseParser::SHOW - 128))
-      | (1ULL << (ClickHouseParser::SOURCE - 128))
-      | (1ULL << (ClickHouseParser::START - 128))
-      | (1ULL << (ClickHouseParser::STOP - 128))
-      | (1ULL << (ClickHouseParser::SUBSTRING - 128))
-      | (1ULL << (ClickHouseParser::SYNC - 128))
-      | (1ULL << (ClickHouseParser::SYNTAX - 128))
-      | (1ULL << (ClickHouseParser::SYSTEM - 128))
-      | (1ULL << (ClickHouseParser::TABLE - 128))
-      | (1ULL << (ClickHouseParser::TABLES - 128))
-      | (1ULL << (ClickHouseParser::TEMPORARY - 128))
-      | (1ULL << (ClickHouseParser::TEST - 128))
-      | (1ULL << (ClickHouseParser::THEN - 128))
-      | (1ULL << (ClickHouseParser::TIES - 128))
-      | (1ULL << (ClickHouseParser::TIMEOUT - 128))
-      | (1ULL << (ClickHouseParser::TIMESTAMP - 128))
-      | (1ULL << (ClickHouseParser::TO - 128))
-      | (1ULL << (ClickHouseParser::TOP - 128))
-      | (1ULL << (ClickHouseParser::TOTALS - 128))
-      | (1ULL << (ClickHouseParser::TRAILING - 128))
-      | (1ULL << (ClickHouseParser::TRIM - 128))
-      | (1ULL << (ClickHouseParser::TRUNCATE - 128))
-      | (1ULL << (ClickHouseParser::TTL - 128))
-      | (1ULL << (ClickHouseParser::TYPE - 128))
-      | (1ULL << (ClickHouseParser::UNION - 128))
-      | (1ULL << (ClickHouseParser::UPDATE - 128))
-      | (1ULL << (ClickHouseParser::USE - 128))
-      | (1ULL << (ClickHouseParser::USING - 128))
-      | (1ULL << (ClickHouseParser::UUID - 128))
-      | (1ULL << (ClickHouseParser::VALUES - 128))
-      | (1ULL << (ClickHouseParser::VIEW - 128))
-      | (1ULL << (ClickHouseParser::VOLUME - 128))
-      | (1ULL << (ClickHouseParser::WATCH - 128))
-      | (1ULL << (ClickHouseParser::WEEK - 128))
-      | (1ULL << (ClickHouseParser::WHEN - 128))
-      | (1ULL << (ClickHouseParser::WHERE - 128))
-      | (1ULL << (ClickHouseParser::WITH - 128))
-      | (1ULL << (ClickHouseParser::YEAR - 128))
-      | (1ULL << (ClickHouseParser::JSON_FALSE - 128))
-      | (1ULL << (ClickHouseParser::JSON_TRUE - 128))
-      | (1ULL << (ClickHouseParser::IDENTIFIER - 128)))) != 0)) {
-      setState(738);
-      dictionaryArgExpr();
-      setState(743);
-      _errHandler->sync(this);
-      _la = _input->LA(1);
-    }
-    setState(744);
-    match(ClickHouseParser::RPAREN);
-    setState(745);
-    match(ClickHouseParser::RPAREN);
-
-  }
-  catch (RecognitionException &e) {
-    _errHandler->reportError(this, e);
-    _localctx->exception = std::current_exception();
-    _errHandler->recover(this, _localctx->exception);
-  }
-
-  return _localctx;
-}
-
-//----------------- LifetimeClauseContext ------------------------------------------------------------------
-
-ClickHouseParser::LifetimeClauseContext::LifetimeClauseContext(ParserRuleContext *parent, size_t invokingState)
-  : ParserRuleContext(parent, invokingState) {
-}
-
-tree::TerminalNode* ClickHouseParser::LifetimeClauseContext::LIFETIME() {
-  return getToken(ClickHouseParser::LIFETIME, 0);
-}
-
-tree::TerminalNode* ClickHouseParser::LifetimeClauseContext::LPAREN() {
-  return getToken(ClickHouseParser::LPAREN, 0);
-}
-
-tree::TerminalNode* ClickHouseParser::LifetimeClauseContext::RPAREN() {
-  return getToken(ClickHouseParser::RPAREN, 0);
-}
-
-std::vector<tree::TerminalNode *> ClickHouseParser::LifetimeClauseContext::DECIMAL_LITERAL() {
-  return getTokens(ClickHouseParser::DECIMAL_LITERAL);
-}
-
-tree::TerminalNode* ClickHouseParser::LifetimeClauseContext::DECIMAL_LITERAL(size_t i) {
-  return getToken(ClickHouseParser::DECIMAL_LITERAL, i);
-}
-
-tree::TerminalNode* ClickHouseParser::LifetimeClauseContext::MIN() {
-  return getToken(ClickHouseParser::MIN, 0);
-}
-
-tree::TerminalNode* ClickHouseParser::LifetimeClauseContext::MAX() {
-  return getToken(ClickHouseParser::MAX, 0);
-}
-
-
-size_t ClickHouseParser::LifetimeClauseContext::getRuleIndex() const {
-  return ClickHouseParser::RuleLifetimeClause;
-}
-
-antlrcpp::Any ClickHouseParser::LifetimeClauseContext::accept(tree::ParseTreeVisitor *visitor) {
-  if (auto parserVisitor = dynamic_cast<ClickHouseParserVisitor*>(visitor))
-    return parserVisitor->visitLifetimeClause(this);
-  else
-    return visitor->visitChildren(this);
-}
-
-ClickHouseParser::LifetimeClauseContext* ClickHouseParser::lifetimeClause() {
-  LifetimeClauseContext *_localctx = _tracker.createInstance<LifetimeClauseContext>(_ctx, getState());
-  enterRule(_localctx, 34, ClickHouseParser::RuleLifetimeClause);
-
-  auto onExit = finally([=] {
-    exitRule();
-  });
-  try {
-    enterOuterAlt(_localctx, 1);
-    setState(747);
-    match(ClickHouseParser::LIFETIME);
-    setState(748);
-    match(ClickHouseParser::LPAREN);
-    setState(758);
-    _errHandler->sync(this);
-    switch (_input->LA(1)) {
-      case ClickHouseParser::DECIMAL_LITERAL: {
-        setState(749);
-        match(ClickHouseParser::DECIMAL_LITERAL);
-        break;
-      }
-
-      case ClickHouseParser::MIN: {
-        setState(750);
-        match(ClickHouseParser::MIN);
-        setState(751);
-        match(ClickHouseParser::DECIMAL_LITERAL);
-        setState(752);
-        match(ClickHouseParser::MAX);
-        setState(753);
-        match(ClickHouseParser::DECIMAL_LITERAL);
-        break;
-      }
-
-      case ClickHouseParser::MAX: {
-        setState(754);
-        match(ClickHouseParser::MAX);
-        setState(755);
-        match(ClickHouseParser::DECIMAL_LITERAL);
-        setState(756);
-        match(ClickHouseParser::MIN);
-        setState(757);
-        match(ClickHouseParser::DECIMAL_LITERAL);
-        break;
-      }
-
-    default:
-      throw NoViableAltException(this);
-    }
-    setState(760);
-    match(ClickHouseParser::RPAREN);
-
-  }
-  catch (RecognitionException &e) {
-    _errHandler->reportError(this, e);
-    _localctx->exception = std::current_exception();
-    _errHandler->recover(this, _localctx->exception);
-  }
-
-  return _localctx;
-}
-
-//----------------- LayoutClauseContext ------------------------------------------------------------------
-
-ClickHouseParser::LayoutClauseContext::LayoutClauseContext(ParserRuleContext *parent, size_t invokingState)
-  : ParserRuleContext(parent, invokingState) {
-}
-
-tree::TerminalNode* ClickHouseParser::LayoutClauseContext::LAYOUT() {
-  return getToken(ClickHouseParser::LAYOUT, 0);
-}
-
-std::vector<tree::TerminalNode *> ClickHouseParser::LayoutClauseContext::LPAREN() {
-  return getTokens(ClickHouseParser::LPAREN);
-}
-
-tree::TerminalNode* ClickHouseParser::LayoutClauseContext::LPAREN(size_t i) {
-  return getToken(ClickHouseParser::LPAREN, i);
-}
-
-ClickHouseParser::IdentifierContext* ClickHouseParser::LayoutClauseContext::identifier() {
-  return getRuleContext<ClickHouseParser::IdentifierContext>(0);
-}
-
-std::vector<tree::TerminalNode *> ClickHouseParser::LayoutClauseContext::RPAREN() {
-  return getTokens(ClickHouseParser::RPAREN);
-}
-
-tree::TerminalNode* ClickHouseParser::LayoutClauseContext::RPAREN(size_t i) {
-  return getToken(ClickHouseParser::RPAREN, i);
-}
-
-std::vector<ClickHouseParser::DictionaryArgExprContext *> ClickHouseParser::LayoutClauseContext::dictionaryArgExpr() {
-  return getRuleContexts<ClickHouseParser::DictionaryArgExprContext>();
-}
-
-ClickHouseParser::DictionaryArgExprContext* ClickHouseParser::LayoutClauseContext::dictionaryArgExpr(size_t i) {
-  return getRuleContext<ClickHouseParser::DictionaryArgExprContext>(i);
-}
-
-
-size_t ClickHouseParser::LayoutClauseContext::getRuleIndex() const {
-  return ClickHouseParser::RuleLayoutClause;
-}
-
-antlrcpp::Any ClickHouseParser::LayoutClauseContext::accept(tree::ParseTreeVisitor *visitor) {
-  if (auto parserVisitor = dynamic_cast<ClickHouseParserVisitor*>(visitor))
-    return parserVisitor->visitLayoutClause(this);
-  else
-    return visitor->visitChildren(this);
-}
-
-ClickHouseParser::LayoutClauseContext* ClickHouseParser::layoutClause() {
-  LayoutClauseContext *_localctx = _tracker.createInstance<LayoutClauseContext>(_ctx, getState());
-  enterRule(_localctx, 36, ClickHouseParser::RuleLayoutClause);
-  size_t _la = 0;
-
-  auto onExit = finally([=] {
-    exitRule();
-  });
-  try {
-    enterOuterAlt(_localctx, 1);
-    setState(762);
-    match(ClickHouseParser::LAYOUT);
-    setState(763);
-    match(ClickHouseParser::LPAREN);
-    setState(764);
-    identifier();
-    setState(765);
-    match(ClickHouseParser::LPAREN);
-    setState(769);
-    _errHandler->sync(this);
-    _la = _input->LA(1);
-    while ((((_la & ~ 0x3fULL) == 0) &&
-      ((1ULL << _la) & ((1ULL << ClickHouseParser::AFTER)
-      | (1ULL << ClickHouseParser::ALIAS)
-      | (1ULL << ClickHouseParser::ALL)
-      | (1ULL << ClickHouseParser::ALTER)
-      | (1ULL << ClickHouseParser::AND)
-      | (1ULL << ClickHouseParser::ANTI)
-      | (1ULL << ClickHouseParser::ANY)
-      | (1ULL << ClickHouseParser::ARRAY)
-      | (1ULL << ClickHouseParser::AS)
-      | (1ULL << ClickHouseParser::ASCENDING)
-      | (1ULL << ClickHouseParser::ASOF)
-      | (1ULL << ClickHouseParser::AST)
-      | (1ULL << ClickHouseParser::ASYNC)
-      | (1ULL << ClickHouseParser::ATTACH)
-      | (1ULL << ClickHouseParser::BETWEEN)
-      | (1ULL << ClickHouseParser::BOTH)
-      | (1ULL << ClickHouseParser::BY)
-      | (1ULL << ClickHouseParser::CASE)
-      | (1ULL << ClickHouseParser::CAST)
-      | (1ULL << ClickHouseParser::CHECK)
-      | (1ULL << ClickHouseParser::CLEAR)
-      | (1ULL << ClickHouseParser::CLUSTER)
-      | (1ULL << ClickHouseParser::CODEC)
-      | (1ULL << ClickHouseParser::COLLATE)
-      | (1ULL << ClickHouseParser::COLUMN)
-      | (1ULL << ClickHouseParser::COMMENT)
-      | (1ULL << ClickHouseParser::CONSTRAINT)
-      | (1ULL << ClickHouseParser::CREATE)
-      | (1ULL << ClickHouseParser::CROSS)
-      | (1ULL << ClickHouseParser::CUBE)
-      | (1ULL << ClickHouseParser::DATABASE)
-      | (1ULL << ClickHouseParser::DATABASES)
-      | (1ULL << ClickHouseParser::DATE)
-      | (1ULL << ClickHouseParser::DAY)
-      | (1ULL << ClickHouseParser::DEDUPLICATE)
-      | (1ULL << ClickHouseParser::DEFAULT)
-      | (1ULL << ClickHouseParser::DELAY)
-      | (1ULL << ClickHouseParser::DELETE)
-      | (1ULL << ClickHouseParser::DESC)
-      | (1ULL << ClickHouseParser::DESCENDING)
-      | (1ULL << ClickHouseParser::DESCRIBE)
-      | (1ULL << ClickHouseParser::DETACH)
-      | (1ULL << ClickHouseParser::DICTIONARIES)
-      | (1ULL << ClickHouseParser::DICTIONARY)
-      | (1ULL << ClickHouseParser::DISK)
-      | (1ULL << ClickHouseParser::DISTINCT)
-      | (1ULL << ClickHouseParser::DISTRIBUTED)
-      | (1ULL << ClickHouseParser::DROP)
-      | (1ULL << ClickHouseParser::ELSE)
-      | (1ULL << ClickHouseParser::END)
-      | (1ULL << ClickHouseParser::ENGINE)
-      | (1ULL << ClickHouseParser::EVENTS)
-      | (1ULL << ClickHouseParser::EXISTS)
-      | (1ULL << ClickHouseParser::EXPLAIN)
-      | (1ULL << ClickHouseParser::EXPRESSION)
-      | (1ULL << ClickHouseParser::EXTRACT)
-      | (1ULL << ClickHouseParser::FETCHES)
-      | (1ULL << ClickHouseParser::FINAL)
-      | (1ULL << ClickHouseParser::FIRST)
-      | (1ULL << ClickHouseParser::FLUSH)
-      | (1ULL << ClickHouseParser::FOR)
-      | (1ULL << ClickHouseParser::FORMAT))) != 0) || ((((_la - 64) & ~ 0x3fULL) == 0) &&
-      ((1ULL << (_la - 64)) & ((1ULL << (ClickHouseParser::FREEZE - 64))
-      | (1ULL << (ClickHouseParser::FROM - 64))
-      | (1ULL << (ClickHouseParser::FULL - 64))
-      | (1ULL << (ClickHouseParser::FUNCTION - 64))
-      | (1ULL << (ClickHouseParser::GLOBAL - 64))
-      | (1ULL << (ClickHouseParser::GRANULARITY - 64))
-      | (1ULL << (ClickHouseParser::GROUP - 64))
-      | (1ULL << (ClickHouseParser::HAVING - 64))
-      | (1ULL << (ClickHouseParser::HIERARCHICAL - 64))
-      | (1ULL << (ClickHouseParser::HOUR - 64))
-      | (1ULL << (ClickHouseParser::ID - 64))
-      | (1ULL << (ClickHouseParser::IF - 64))
-      | (1ULL << (ClickHouseParser::ILIKE - 64))
-      | (1ULL << (ClickHouseParser::IN - 64))
-      | (1ULL << (ClickHouseParser::INDEX - 64))
-      | (1ULL << (ClickHouseParser::INJECTIVE - 64))
-      | (1ULL << (ClickHouseParser::INNER - 64))
-      | (1ULL << (ClickHouseParser::INSERT - 64))
-      | (1ULL << (ClickHouseParser::INTERVAL - 64))
-      | (1ULL << (ClickHouseParser::INTO - 64))
-      | (1ULL << (ClickHouseParser::IS - 64))
-      | (1ULL << (ClickHouseParser::IS_OBJECT_ID - 64))
-      | (1ULL << (ClickHouseParser::JOIN - 64))
-      | (1ULL << (ClickHouseParser::KEY - 64))
-      | (1ULL << (ClickHouseParser::KILL - 64))
-      | (1ULL << (ClickHouseParser::LAST - 64))
-      | (1ULL << (ClickHouseParser::LAYOUT - 64))
-      | (1ULL << (ClickHouseParser::LEADING - 64))
-      | (1ULL << (ClickHouseParser::LEFT - 64))
-      | (1ULL << (ClickHouseParser::LIFETIME - 64))
-      | (1ULL << (ClickHouseParser::LIKE - 64))
-      | (1ULL << (ClickHouseParser::LIMIT - 64))
-      | (1ULL << (ClickHouseParser::LIVE - 64))
-      | (1ULL << (ClickHouseParser::LOCAL - 64))
-      | (1ULL << (ClickHouseParser::LOGS - 64))
-      | (1ULL << (ClickHouseParser::MATERIALIZE - 64))
-      | (1ULL << (ClickHouseParser::MATERIALIZED - 64))
-      | (1ULL << (ClickHouseParser::MAX - 64))
-      | (1ULL << (ClickHouseParser::MERGES - 64))
-      | (1ULL << (ClickHouseParser::MIN - 64))
-      | (1ULL << (ClickHouseParser::MINUTE - 64))
-      | (1ULL << (ClickHouseParser::MODIFY - 64))
-      | (1ULL << (ClickHouseParser::MONTH - 64))
-      | (1ULL << (ClickHouseParser::MOVE - 64))
-      | (1ULL << (ClickHouseParser::MUTATION - 64))
-      | (1ULL << (ClickHouseParser::NO - 64))
-      | (1ULL << (ClickHouseParser::NOT - 64))
-      | (1ULL << (ClickHouseParser::NULLS - 64))
-      | (1ULL << (ClickHouseParser::OFFSET - 64))
-      | (1ULL << (ClickHouseParser::ON - 64))
-      | (1ULL << (ClickHouseParser::OPTIMIZE - 64))
-      | (1ULL << (ClickHouseParser::OR - 64))
-      | (1ULL << (ClickHouseParser::ORDER - 64))
-      | (1ULL << (ClickHouseParser::OUTER - 64))
-      | (1ULL << (ClickHouseParser::OUTFILE - 64))
-      | (1ULL << (ClickHouseParser::PARTITION - 64))
-      | (1ULL << (ClickHouseParser::POPULATE - 64))
-      | (1ULL << (ClickHouseParser::PREWHERE - 64))
-      | (1ULL << (ClickHouseParser::PRIMARY - 64))
-      | (1ULL << (ClickHouseParser::QUARTER - 64)))) != 0) || ((((_la - 128) & ~ 0x3fULL) == 0) &&
-      ((1ULL << (_la - 128)) & ((1ULL << (ClickHouseParser::RANGE - 128))
-      | (1ULL << (ClickHouseParser::RELOAD - 128))
-      | (1ULL << (ClickHouseParser::REMOVE - 128))
-      | (1ULL << (ClickHouseParser::RENAME - 128))
-      | (1ULL << (ClickHouseParser::REPLACE - 128))
-      | (1ULL << (ClickHouseParser::REPLICA - 128))
-      | (1ULL << (ClickHouseParser::REPLICATED - 128))
-      | (1ULL << (ClickHouseParser::RIGHT - 128))
-      | (1ULL << (ClickHouseParser::ROLLUP - 128))
-      | (1ULL << (ClickHouseParser::SAMPLE - 128))
-      | (1ULL << (ClickHouseParser::SECOND - 128))
-      | (1ULL << (ClickHouseParser::SELECT - 128))
-      | (1ULL << (ClickHouseParser::SEMI - 128))
-      | (1ULL << (ClickHouseParser::SENDS - 128))
-      | (1ULL << (ClickHouseParser::SET - 128))
-      | (1ULL << (ClickHouseParser::SETTINGS - 128))
-      | (1ULL << (ClickHouseParser::SHOW - 128))
-      | (1ULL << (ClickHouseParser::SOURCE - 128))
-      | (1ULL << (ClickHouseParser::START - 128))
-      | (1ULL << (ClickHouseParser::STOP - 128))
-      | (1ULL << (ClickHouseParser::SUBSTRING - 128))
-      | (1ULL << (ClickHouseParser::SYNC - 128))
-      | (1ULL << (ClickHouseParser::SYNTAX - 128))
-      | (1ULL << (ClickHouseParser::SYSTEM - 128))
-      | (1ULL << (ClickHouseParser::TABLE - 128))
-      | (1ULL << (ClickHouseParser::TABLES - 128))
-      | (1ULL << (ClickHouseParser::TEMPORARY - 128))
-      | (1ULL << (ClickHouseParser::TEST - 128))
-      | (1ULL << (ClickHouseParser::THEN - 128))
-      | (1ULL << (ClickHouseParser::TIES - 128))
-      | (1ULL << (ClickHouseParser::TIMEOUT - 128))
-      | (1ULL << (ClickHouseParser::TIMESTAMP - 128))
-      | (1ULL << (ClickHouseParser::TO - 128))
-      | (1ULL << (ClickHouseParser::TOP - 128))
-      | (1ULL << (ClickHouseParser::TOTALS - 128))
-      | (1ULL << (ClickHouseParser::TRAILING - 128))
-      | (1ULL << (ClickHouseParser::TRIM - 128))
-      | (1ULL << (ClickHouseParser::TRUNCATE - 128))
-      | (1ULL << (ClickHouseParser::TTL - 128))
-      | (1ULL << (ClickHouseParser::TYPE - 128))
-      | (1ULL << (ClickHouseParser::UNION - 128))
-      | (1ULL << (ClickHouseParser::UPDATE - 128))
-      | (1ULL << (ClickHouseParser::USE - 128))
-      | (1ULL << (ClickHouseParser::USING - 128))
-      | (1ULL << (ClickHouseParser::UUID - 128))
-      | (1ULL << (ClickHouseParser::VALUES - 128))
-      | (1ULL << (ClickHouseParser::VIEW - 128))
-      | (1ULL << (ClickHouseParser::VOLUME - 128))
-      | (1ULL << (ClickHouseParser::WATCH - 128))
-      | (1ULL << (ClickHouseParser::WEEK - 128))
-      | (1ULL << (ClickHouseParser::WHEN - 128))
-      | (1ULL << (ClickHouseParser::WHERE - 128))
-      | (1ULL << (ClickHouseParser::WITH - 128))
-      | (1ULL << (ClickHouseParser::YEAR - 128))
-      | (1ULL << (ClickHouseParser::JSON_FALSE - 128))
-      | (1ULL << (ClickHouseParser::JSON_TRUE - 128))
-      | (1ULL << (ClickHouseParser::IDENTIFIER - 128)))) != 0)) {
-      setState(766);
-      dictionaryArgExpr();
-      setState(771);
-      _errHandler->sync(this);
-      _la = _input->LA(1);
-    }
-    setState(772);
-    match(ClickHouseParser::RPAREN);
-    setState(773);
-    match(ClickHouseParser::RPAREN);
-
-  }
-  catch (RecognitionException &e) {
-    _errHandler->reportError(this, e);
-    _localctx->exception = std::current_exception();
-    _errHandler->recover(this, _localctx->exception);
-  }
-
-  return _localctx;
-}
-
-//----------------- RangeClauseContext ------------------------------------------------------------------
-
-ClickHouseParser::RangeClauseContext::RangeClauseContext(ParserRuleContext *parent, size_t invokingState)
-  : ParserRuleContext(parent, invokingState) {
-}
-
-tree::TerminalNode* ClickHouseParser::RangeClauseContext::RANGE() {
-  return getToken(ClickHouseParser::RANGE, 0);
-}
-
-tree::TerminalNode* ClickHouseParser::RangeClauseContext::LPAREN() {
-  return getToken(ClickHouseParser::LPAREN, 0);
-}
-
-tree::TerminalNode* ClickHouseParser::RangeClauseContext::RPAREN() {
-  return getToken(ClickHouseParser::RPAREN, 0);
-}
-
-tree::TerminalNode* ClickHouseParser::RangeClauseContext::MIN() {
-  return getToken(ClickHouseParser::MIN, 0);
-}
-
-std::vector<ClickHouseParser::IdentifierContext *> ClickHouseParser::RangeClauseContext::identifier() {
-  return getRuleContexts<ClickHouseParser::IdentifierContext>();
-}
-
-ClickHouseParser::IdentifierContext* ClickHouseParser::RangeClauseContext::identifier(size_t i) {
-  return getRuleContext<ClickHouseParser::IdentifierContext>(i);
-}
-
-tree::TerminalNode* ClickHouseParser::RangeClauseContext::MAX() {
-  return getToken(ClickHouseParser::MAX, 0);
-}
-
-
-size_t ClickHouseParser::RangeClauseContext::getRuleIndex() const {
return ClickHouseParser::RuleRangeClause; -} - -antlrcpp::Any ClickHouseParser::RangeClauseContext::accept(tree::ParseTreeVisitor *visitor) { - if (auto parserVisitor = dynamic_cast(visitor)) - return parserVisitor->visitRangeClause(this); - else - return visitor->visitChildren(this); -} - -ClickHouseParser::RangeClauseContext* ClickHouseParser::rangeClause() { - RangeClauseContext *_localctx = _tracker.createInstance(_ctx, getState()); - enterRule(_localctx, 38, ClickHouseParser::RuleRangeClause); - - auto onExit = finally([=] { - exitRule(); - }); - try { - enterOuterAlt(_localctx, 1); - setState(775); - match(ClickHouseParser::RANGE); - setState(776); - match(ClickHouseParser::LPAREN); - setState(787); - _errHandler->sync(this); - switch (_input->LA(1)) { - case ClickHouseParser::MIN: { - setState(777); - match(ClickHouseParser::MIN); - setState(778); - identifier(); - setState(779); - match(ClickHouseParser::MAX); - setState(780); - identifier(); - break; - } - - case ClickHouseParser::MAX: { - setState(782); - match(ClickHouseParser::MAX); - setState(783); - identifier(); - setState(784); - match(ClickHouseParser::MIN); - setState(785); - identifier(); - break; - } - - default: - throw NoViableAltException(this); - } - setState(789); - match(ClickHouseParser::RPAREN); - - } - catch (RecognitionException &e) { - _errHandler->reportError(this, e); - _localctx->exception = std::current_exception(); - _errHandler->recover(this, _localctx->exception); - } - - return _localctx; -} - -//----------------- DictionarySettingsClauseContext ------------------------------------------------------------------ - -ClickHouseParser::DictionarySettingsClauseContext::DictionarySettingsClauseContext(ParserRuleContext *parent, size_t invokingState) - : ParserRuleContext(parent, invokingState) { -} - -tree::TerminalNode* ClickHouseParser::DictionarySettingsClauseContext::SETTINGS() { - return getToken(ClickHouseParser::SETTINGS, 0); -} - -tree::TerminalNode* ClickHouseParser::DictionarySettingsClauseContext::LPAREN() { - return getToken(ClickHouseParser::LPAREN, 0); -} - -ClickHouseParser::SettingExprListContext* ClickHouseParser::DictionarySettingsClauseContext::settingExprList() { - return getRuleContext(0); -} - -tree::TerminalNode* ClickHouseParser::DictionarySettingsClauseContext::RPAREN() { - return getToken(ClickHouseParser::RPAREN, 0); -} - - -size_t ClickHouseParser::DictionarySettingsClauseContext::getRuleIndex() const { - return ClickHouseParser::RuleDictionarySettingsClause; -} - -antlrcpp::Any ClickHouseParser::DictionarySettingsClauseContext::accept(tree::ParseTreeVisitor *visitor) { - if (auto parserVisitor = dynamic_cast(visitor)) - return parserVisitor->visitDictionarySettingsClause(this); - else - return visitor->visitChildren(this); -} - -ClickHouseParser::DictionarySettingsClauseContext* ClickHouseParser::dictionarySettingsClause() { - DictionarySettingsClauseContext *_localctx = _tracker.createInstance(_ctx, getState()); - enterRule(_localctx, 40, ClickHouseParser::RuleDictionarySettingsClause); - - auto onExit = finally([=] { - exitRule(); - }); - try { - enterOuterAlt(_localctx, 1); - setState(791); - match(ClickHouseParser::SETTINGS); - setState(792); - match(ClickHouseParser::LPAREN); - setState(793); - settingExprList(); - setState(794); - match(ClickHouseParser::RPAREN); - - } - catch (RecognitionException &e) { - _errHandler->reportError(this, e); - _localctx->exception = std::current_exception(); - _errHandler->recover(this, _localctx->exception); - } - - return _localctx; -} - 
-//----------------- ClusterClauseContext ------------------------------------------------------------------ - -ClickHouseParser::ClusterClauseContext::ClusterClauseContext(ParserRuleContext *parent, size_t invokingState) - : ParserRuleContext(parent, invokingState) { -} - -tree::TerminalNode* ClickHouseParser::ClusterClauseContext::ON() { - return getToken(ClickHouseParser::ON, 0); -} - -tree::TerminalNode* ClickHouseParser::ClusterClauseContext::CLUSTER() { - return getToken(ClickHouseParser::CLUSTER, 0); -} - -ClickHouseParser::IdentifierContext* ClickHouseParser::ClusterClauseContext::identifier() { - return getRuleContext(0); -} - -tree::TerminalNode* ClickHouseParser::ClusterClauseContext::STRING_LITERAL() { - return getToken(ClickHouseParser::STRING_LITERAL, 0); -} - - -size_t ClickHouseParser::ClusterClauseContext::getRuleIndex() const { - return ClickHouseParser::RuleClusterClause; -} - -antlrcpp::Any ClickHouseParser::ClusterClauseContext::accept(tree::ParseTreeVisitor *visitor) { - if (auto parserVisitor = dynamic_cast(visitor)) - return parserVisitor->visitClusterClause(this); - else - return visitor->visitChildren(this); -} - -ClickHouseParser::ClusterClauseContext* ClickHouseParser::clusterClause() { - ClusterClauseContext *_localctx = _tracker.createInstance(_ctx, getState()); - enterRule(_localctx, 42, ClickHouseParser::RuleClusterClause); - - auto onExit = finally([=] { - exitRule(); - }); - try { - enterOuterAlt(_localctx, 1); - setState(796); - match(ClickHouseParser::ON); - setState(797); - match(ClickHouseParser::CLUSTER); - setState(800); - _errHandler->sync(this); - switch (_input->LA(1)) { - case ClickHouseParser::AFTER: - case ClickHouseParser::ALIAS: - case ClickHouseParser::ALL: - case ClickHouseParser::ALTER: - case ClickHouseParser::AND: - case ClickHouseParser::ANTI: - case ClickHouseParser::ANY: - case ClickHouseParser::ARRAY: - case ClickHouseParser::AS: - case ClickHouseParser::ASCENDING: - case ClickHouseParser::ASOF: - case ClickHouseParser::AST: - case ClickHouseParser::ASYNC: - case ClickHouseParser::ATTACH: - case ClickHouseParser::BETWEEN: - case ClickHouseParser::BOTH: - case ClickHouseParser::BY: - case ClickHouseParser::CASE: - case ClickHouseParser::CAST: - case ClickHouseParser::CHECK: - case ClickHouseParser::CLEAR: - case ClickHouseParser::CLUSTER: - case ClickHouseParser::CODEC: - case ClickHouseParser::COLLATE: - case ClickHouseParser::COLUMN: - case ClickHouseParser::COMMENT: - case ClickHouseParser::CONSTRAINT: - case ClickHouseParser::CREATE: - case ClickHouseParser::CROSS: - case ClickHouseParser::CUBE: - case ClickHouseParser::DATABASE: - case ClickHouseParser::DATABASES: - case ClickHouseParser::DATE: - case ClickHouseParser::DAY: - case ClickHouseParser::DEDUPLICATE: - case ClickHouseParser::DEFAULT: - case ClickHouseParser::DELAY: - case ClickHouseParser::DELETE: - case ClickHouseParser::DESC: - case ClickHouseParser::DESCENDING: - case ClickHouseParser::DESCRIBE: - case ClickHouseParser::DETACH: - case ClickHouseParser::DICTIONARIES: - case ClickHouseParser::DICTIONARY: - case ClickHouseParser::DISK: - case ClickHouseParser::DISTINCT: - case ClickHouseParser::DISTRIBUTED: - case ClickHouseParser::DROP: - case ClickHouseParser::ELSE: - case ClickHouseParser::END: - case ClickHouseParser::ENGINE: - case ClickHouseParser::EVENTS: - case ClickHouseParser::EXISTS: - case ClickHouseParser::EXPLAIN: - case ClickHouseParser::EXPRESSION: - case ClickHouseParser::EXTRACT: - case ClickHouseParser::FETCHES: - case ClickHouseParser::FINAL: - case 
ClickHouseParser::FIRST: - case ClickHouseParser::FLUSH: - case ClickHouseParser::FOR: - case ClickHouseParser::FORMAT: - case ClickHouseParser::FREEZE: - case ClickHouseParser::FROM: - case ClickHouseParser::FULL: - case ClickHouseParser::FUNCTION: - case ClickHouseParser::GLOBAL: - case ClickHouseParser::GRANULARITY: - case ClickHouseParser::GROUP: - case ClickHouseParser::HAVING: - case ClickHouseParser::HIERARCHICAL: - case ClickHouseParser::HOUR: - case ClickHouseParser::ID: - case ClickHouseParser::IF: - case ClickHouseParser::ILIKE: - case ClickHouseParser::IN: - case ClickHouseParser::INDEX: - case ClickHouseParser::INJECTIVE: - case ClickHouseParser::INNER: - case ClickHouseParser::INSERT: - case ClickHouseParser::INTERVAL: - case ClickHouseParser::INTO: - case ClickHouseParser::IS: - case ClickHouseParser::IS_OBJECT_ID: - case ClickHouseParser::JOIN: - case ClickHouseParser::KEY: - case ClickHouseParser::KILL: - case ClickHouseParser::LAST: - case ClickHouseParser::LAYOUT: - case ClickHouseParser::LEADING: - case ClickHouseParser::LEFT: - case ClickHouseParser::LIFETIME: - case ClickHouseParser::LIKE: - case ClickHouseParser::LIMIT: - case ClickHouseParser::LIVE: - case ClickHouseParser::LOCAL: - case ClickHouseParser::LOGS: - case ClickHouseParser::MATERIALIZE: - case ClickHouseParser::MATERIALIZED: - case ClickHouseParser::MAX: - case ClickHouseParser::MERGES: - case ClickHouseParser::MIN: - case ClickHouseParser::MINUTE: - case ClickHouseParser::MODIFY: - case ClickHouseParser::MONTH: - case ClickHouseParser::MOVE: - case ClickHouseParser::MUTATION: - case ClickHouseParser::NO: - case ClickHouseParser::NOT: - case ClickHouseParser::NULLS: - case ClickHouseParser::OFFSET: - case ClickHouseParser::ON: - case ClickHouseParser::OPTIMIZE: - case ClickHouseParser::OR: - case ClickHouseParser::ORDER: - case ClickHouseParser::OUTER: - case ClickHouseParser::OUTFILE: - case ClickHouseParser::PARTITION: - case ClickHouseParser::POPULATE: - case ClickHouseParser::PREWHERE: - case ClickHouseParser::PRIMARY: - case ClickHouseParser::QUARTER: - case ClickHouseParser::RANGE: - case ClickHouseParser::RELOAD: - case ClickHouseParser::REMOVE: - case ClickHouseParser::RENAME: - case ClickHouseParser::REPLACE: - case ClickHouseParser::REPLICA: - case ClickHouseParser::REPLICATED: - case ClickHouseParser::RIGHT: - case ClickHouseParser::ROLLUP: - case ClickHouseParser::SAMPLE: - case ClickHouseParser::SECOND: - case ClickHouseParser::SELECT: - case ClickHouseParser::SEMI: - case ClickHouseParser::SENDS: - case ClickHouseParser::SET: - case ClickHouseParser::SETTINGS: - case ClickHouseParser::SHOW: - case ClickHouseParser::SOURCE: - case ClickHouseParser::START: - case ClickHouseParser::STOP: - case ClickHouseParser::SUBSTRING: - case ClickHouseParser::SYNC: - case ClickHouseParser::SYNTAX: - case ClickHouseParser::SYSTEM: - case ClickHouseParser::TABLE: - case ClickHouseParser::TABLES: - case ClickHouseParser::TEMPORARY: - case ClickHouseParser::TEST: - case ClickHouseParser::THEN: - case ClickHouseParser::TIES: - case ClickHouseParser::TIMEOUT: - case ClickHouseParser::TIMESTAMP: - case ClickHouseParser::TO: - case ClickHouseParser::TOP: - case ClickHouseParser::TOTALS: - case ClickHouseParser::TRAILING: - case ClickHouseParser::TRIM: - case ClickHouseParser::TRUNCATE: - case ClickHouseParser::TTL: - case ClickHouseParser::TYPE: - case ClickHouseParser::UNION: - case ClickHouseParser::UPDATE: - case ClickHouseParser::USE: - case ClickHouseParser::USING: - case ClickHouseParser::UUID: - case 
ClickHouseParser::VALUES: - case ClickHouseParser::VIEW: - case ClickHouseParser::VOLUME: - case ClickHouseParser::WATCH: - case ClickHouseParser::WEEK: - case ClickHouseParser::WHEN: - case ClickHouseParser::WHERE: - case ClickHouseParser::WITH: - case ClickHouseParser::YEAR: - case ClickHouseParser::JSON_FALSE: - case ClickHouseParser::JSON_TRUE: - case ClickHouseParser::IDENTIFIER: { - setState(798); - identifier(); - break; - } - - case ClickHouseParser::STRING_LITERAL: { - setState(799); - match(ClickHouseParser::STRING_LITERAL); - break; - } - - default: - throw NoViableAltException(this); - } - - } - catch (RecognitionException &e) { - _errHandler->reportError(this, e); - _localctx->exception = std::current_exception(); - _errHandler->recover(this, _localctx->exception); - } - - return _localctx; -} - -//----------------- UuidClauseContext ------------------------------------------------------------------ - -ClickHouseParser::UuidClauseContext::UuidClauseContext(ParserRuleContext *parent, size_t invokingState) - : ParserRuleContext(parent, invokingState) { -} - -tree::TerminalNode* ClickHouseParser::UuidClauseContext::UUID() { - return getToken(ClickHouseParser::UUID, 0); -} - -tree::TerminalNode* ClickHouseParser::UuidClauseContext::STRING_LITERAL() { - return getToken(ClickHouseParser::STRING_LITERAL, 0); -} - - -size_t ClickHouseParser::UuidClauseContext::getRuleIndex() const { - return ClickHouseParser::RuleUuidClause; -} - -antlrcpp::Any ClickHouseParser::UuidClauseContext::accept(tree::ParseTreeVisitor *visitor) { - if (auto parserVisitor = dynamic_cast(visitor)) - return parserVisitor->visitUuidClause(this); - else - return visitor->visitChildren(this); -} - -ClickHouseParser::UuidClauseContext* ClickHouseParser::uuidClause() { - UuidClauseContext *_localctx = _tracker.createInstance(_ctx, getState()); - enterRule(_localctx, 44, ClickHouseParser::RuleUuidClause); - - auto onExit = finally([=] { - exitRule(); - }); - try { - enterOuterAlt(_localctx, 1); - setState(802); - match(ClickHouseParser::UUID); - setState(803); - match(ClickHouseParser::STRING_LITERAL); - - } - catch (RecognitionException &e) { - _errHandler->reportError(this, e); - _localctx->exception = std::current_exception(); - _errHandler->recover(this, _localctx->exception); - } - - return _localctx; -} - -//----------------- DestinationClauseContext ------------------------------------------------------------------ - -ClickHouseParser::DestinationClauseContext::DestinationClauseContext(ParserRuleContext *parent, size_t invokingState) - : ParserRuleContext(parent, invokingState) { -} - -tree::TerminalNode* ClickHouseParser::DestinationClauseContext::TO() { - return getToken(ClickHouseParser::TO, 0); -} - -ClickHouseParser::TableIdentifierContext* ClickHouseParser::DestinationClauseContext::tableIdentifier() { - return getRuleContext(0); -} - - -size_t ClickHouseParser::DestinationClauseContext::getRuleIndex() const { - return ClickHouseParser::RuleDestinationClause; -} - -antlrcpp::Any ClickHouseParser::DestinationClauseContext::accept(tree::ParseTreeVisitor *visitor) { - if (auto parserVisitor = dynamic_cast(visitor)) - return parserVisitor->visitDestinationClause(this); - else - return visitor->visitChildren(this); -} - -ClickHouseParser::DestinationClauseContext* ClickHouseParser::destinationClause() { - DestinationClauseContext *_localctx = _tracker.createInstance(_ctx, getState()); - enterRule(_localctx, 46, ClickHouseParser::RuleDestinationClause); - - auto onExit = finally([=] { - exitRule(); - }); - try 
{ - enterOuterAlt(_localctx, 1); - setState(805); - match(ClickHouseParser::TO); - setState(806); - tableIdentifier(); - - } - catch (RecognitionException &e) { - _errHandler->reportError(this, e); - _localctx->exception = std::current_exception(); - _errHandler->recover(this, _localctx->exception); - } - - return _localctx; -} - -//----------------- SubqueryClauseContext ------------------------------------------------------------------ - -ClickHouseParser::SubqueryClauseContext::SubqueryClauseContext(ParserRuleContext *parent, size_t invokingState) - : ParserRuleContext(parent, invokingState) { -} - -tree::TerminalNode* ClickHouseParser::SubqueryClauseContext::AS() { - return getToken(ClickHouseParser::AS, 0); -} - -ClickHouseParser::SelectUnionStmtContext* ClickHouseParser::SubqueryClauseContext::selectUnionStmt() { - return getRuleContext(0); -} - - -size_t ClickHouseParser::SubqueryClauseContext::getRuleIndex() const { - return ClickHouseParser::RuleSubqueryClause; -} - -antlrcpp::Any ClickHouseParser::SubqueryClauseContext::accept(tree::ParseTreeVisitor *visitor) { - if (auto parserVisitor = dynamic_cast(visitor)) - return parserVisitor->visitSubqueryClause(this); - else - return visitor->visitChildren(this); -} - -ClickHouseParser::SubqueryClauseContext* ClickHouseParser::subqueryClause() { - SubqueryClauseContext *_localctx = _tracker.createInstance(_ctx, getState()); - enterRule(_localctx, 48, ClickHouseParser::RuleSubqueryClause); - - auto onExit = finally([=] { - exitRule(); - }); - try { - enterOuterAlt(_localctx, 1); - setState(808); - match(ClickHouseParser::AS); - setState(809); - selectUnionStmt(); - - } - catch (RecognitionException &e) { - _errHandler->reportError(this, e); - _localctx->exception = std::current_exception(); - _errHandler->recover(this, _localctx->exception); - } - - return _localctx; -} - -//----------------- TableSchemaClauseContext ------------------------------------------------------------------ - -ClickHouseParser::TableSchemaClauseContext::TableSchemaClauseContext(ParserRuleContext *parent, size_t invokingState) - : ParserRuleContext(parent, invokingState) { -} - - -size_t ClickHouseParser::TableSchemaClauseContext::getRuleIndex() const { - return ClickHouseParser::RuleTableSchemaClause; -} - -void ClickHouseParser::TableSchemaClauseContext::copyFrom(TableSchemaClauseContext *ctx) { - ParserRuleContext::copyFrom(ctx); -} - -//----------------- SchemaAsTableClauseContext ------------------------------------------------------------------ - -tree::TerminalNode* ClickHouseParser::SchemaAsTableClauseContext::AS() { - return getToken(ClickHouseParser::AS, 0); -} - -ClickHouseParser::TableIdentifierContext* ClickHouseParser::SchemaAsTableClauseContext::tableIdentifier() { - return getRuleContext(0); -} - -ClickHouseParser::SchemaAsTableClauseContext::SchemaAsTableClauseContext(TableSchemaClauseContext *ctx) { copyFrom(ctx); } - -antlrcpp::Any ClickHouseParser::SchemaAsTableClauseContext::accept(tree::ParseTreeVisitor *visitor) { - if (auto parserVisitor = dynamic_cast(visitor)) - return parserVisitor->visitSchemaAsTableClause(this); - else - return visitor->visitChildren(this); -} -//----------------- SchemaAsFunctionClauseContext ------------------------------------------------------------------ - -tree::TerminalNode* ClickHouseParser::SchemaAsFunctionClauseContext::AS() { - return getToken(ClickHouseParser::AS, 0); -} - -ClickHouseParser::TableFunctionExprContext* ClickHouseParser::SchemaAsFunctionClauseContext::tableFunctionExpr() { - return 
getRuleContext(0); -} - -ClickHouseParser::SchemaAsFunctionClauseContext::SchemaAsFunctionClauseContext(TableSchemaClauseContext *ctx) { copyFrom(ctx); } - -antlrcpp::Any ClickHouseParser::SchemaAsFunctionClauseContext::accept(tree::ParseTreeVisitor *visitor) { - if (auto parserVisitor = dynamic_cast(visitor)) - return parserVisitor->visitSchemaAsFunctionClause(this); - else - return visitor->visitChildren(this); -} -//----------------- SchemaDescriptionClauseContext ------------------------------------------------------------------ - -tree::TerminalNode* ClickHouseParser::SchemaDescriptionClauseContext::LPAREN() { - return getToken(ClickHouseParser::LPAREN, 0); -} - -std::vector ClickHouseParser::SchemaDescriptionClauseContext::tableElementExpr() { - return getRuleContexts(); -} - -ClickHouseParser::TableElementExprContext* ClickHouseParser::SchemaDescriptionClauseContext::tableElementExpr(size_t i) { - return getRuleContext(i); -} - -tree::TerminalNode* ClickHouseParser::SchemaDescriptionClauseContext::RPAREN() { - return getToken(ClickHouseParser::RPAREN, 0); -} - -std::vector ClickHouseParser::SchemaDescriptionClauseContext::COMMA() { - return getTokens(ClickHouseParser::COMMA); -} - -tree::TerminalNode* ClickHouseParser::SchemaDescriptionClauseContext::COMMA(size_t i) { - return getToken(ClickHouseParser::COMMA, i); -} - -ClickHouseParser::SchemaDescriptionClauseContext::SchemaDescriptionClauseContext(TableSchemaClauseContext *ctx) { copyFrom(ctx); } - -antlrcpp::Any ClickHouseParser::SchemaDescriptionClauseContext::accept(tree::ParseTreeVisitor *visitor) { - if (auto parserVisitor = dynamic_cast(visitor)) - return parserVisitor->visitSchemaDescriptionClause(this); - else - return visitor->visitChildren(this); -} -ClickHouseParser::TableSchemaClauseContext* ClickHouseParser::tableSchemaClause() { - TableSchemaClauseContext *_localctx = _tracker.createInstance(_ctx, getState()); - enterRule(_localctx, 50, ClickHouseParser::RuleTableSchemaClause); - size_t _la = 0; - - auto onExit = finally([=] { - exitRule(); - }); - try { - setState(826); - _errHandler->sync(this); - switch (getInterpreter()->adaptivePredict(_input, 86, _ctx)) { - case 1: { - _localctx = dynamic_cast(_tracker.createInstance(_localctx)); - enterOuterAlt(_localctx, 1); - setState(811); - match(ClickHouseParser::LPAREN); - setState(812); - tableElementExpr(); - setState(817); - _errHandler->sync(this); - _la = _input->LA(1); - while (_la == ClickHouseParser::COMMA) { - setState(813); - match(ClickHouseParser::COMMA); - setState(814); - tableElementExpr(); - setState(819); - _errHandler->sync(this); - _la = _input->LA(1); - } - setState(820); - match(ClickHouseParser::RPAREN); - break; - } - - case 2: { - _localctx = dynamic_cast(_tracker.createInstance(_localctx)); - enterOuterAlt(_localctx, 2); - setState(822); - match(ClickHouseParser::AS); - setState(823); - tableIdentifier(); - break; - } - - case 3: { - _localctx = dynamic_cast(_tracker.createInstance(_localctx)); - enterOuterAlt(_localctx, 3); - setState(824); - match(ClickHouseParser::AS); - setState(825); - tableFunctionExpr(); - break; - } - - } - - } - catch (RecognitionException &e) { - _errHandler->reportError(this, e); - _localctx->exception = std::current_exception(); - _errHandler->recover(this, _localctx->exception); - } - - return _localctx; -} - -//----------------- EngineClauseContext ------------------------------------------------------------------ - -ClickHouseParser::EngineClauseContext::EngineClauseContext(ParserRuleContext *parent, size_t 
invokingState) - : ParserRuleContext(parent, invokingState) { -} - -ClickHouseParser::EngineExprContext* ClickHouseParser::EngineClauseContext::engineExpr() { - return getRuleContext(0); -} - -std::vector ClickHouseParser::EngineClauseContext::orderByClause() { - return getRuleContexts(); -} - -ClickHouseParser::OrderByClauseContext* ClickHouseParser::EngineClauseContext::orderByClause(size_t i) { - return getRuleContext(i); -} - -std::vector ClickHouseParser::EngineClauseContext::partitionByClause() { - return getRuleContexts(); -} - -ClickHouseParser::PartitionByClauseContext* ClickHouseParser::EngineClauseContext::partitionByClause(size_t i) { - return getRuleContext(i); -} - -std::vector ClickHouseParser::EngineClauseContext::primaryKeyClause() { - return getRuleContexts(); -} - -ClickHouseParser::PrimaryKeyClauseContext* ClickHouseParser::EngineClauseContext::primaryKeyClause(size_t i) { - return getRuleContext(i); -} - -std::vector ClickHouseParser::EngineClauseContext::sampleByClause() { - return getRuleContexts(); -} - -ClickHouseParser::SampleByClauseContext* ClickHouseParser::EngineClauseContext::sampleByClause(size_t i) { - return getRuleContext(i); -} - -std::vector ClickHouseParser::EngineClauseContext::ttlClause() { - return getRuleContexts(); -} - -ClickHouseParser::TtlClauseContext* ClickHouseParser::EngineClauseContext::ttlClause(size_t i) { - return getRuleContext(i); -} - -std::vector ClickHouseParser::EngineClauseContext::settingsClause() { - return getRuleContexts(); -} - -ClickHouseParser::SettingsClauseContext* ClickHouseParser::EngineClauseContext::settingsClause(size_t i) { - return getRuleContext(i); -} - - -size_t ClickHouseParser::EngineClauseContext::getRuleIndex() const { - return ClickHouseParser::RuleEngineClause; -} - -antlrcpp::Any ClickHouseParser::EngineClauseContext::accept(tree::ParseTreeVisitor *visitor) { - if (auto parserVisitor = dynamic_cast(visitor)) - return parserVisitor->visitEngineClause(this); - else - return visitor->visitChildren(this); -} - -ClickHouseParser::EngineClauseContext* ClickHouseParser::engineClause() { - EngineClauseContext *_localctx = _tracker.createInstance(_ctx, getState()); - enterRule(_localctx, 52, ClickHouseParser::RuleEngineClause); - - auto onExit = finally([=] { - exitRule(); - }); - try { - size_t alt; - enterOuterAlt(_localctx, 1); - setState(828); - engineExpr(); - setState(855); - _errHandler->sync(this); - alt = getInterpreter()->adaptivePredict(_input, 88, _ctx); - while (alt != 2 && alt != atn::ATN::INVALID_ALT_NUMBER) { - if (alt == 1) { - setState(853); - _errHandler->sync(this); - switch (getInterpreter()->adaptivePredict(_input, 87, _ctx)) { - case 1: { - setState(829); - - if (!(!_localctx->clauses.count("orderByClause"))) throw FailedPredicateException(this, "!$clauses.count(\"orderByClause\")"); - setState(830); - orderByClause(); - _localctx->clauses.insert("orderByClause"); - break; - } - - case 2: { - setState(833); - - if (!(!_localctx->clauses.count("partitionByClause"))) throw FailedPredicateException(this, "!$clauses.count(\"partitionByClause\")"); - setState(834); - partitionByClause(); - _localctx->clauses.insert("partitionByClause"); - break; - } - - case 3: { - setState(837); - - if (!(!_localctx->clauses.count("primaryKeyClause"))) throw FailedPredicateException(this, "!$clauses.count(\"primaryKeyClause\")"); - setState(838); - primaryKeyClause(); - _localctx->clauses.insert("primaryKeyClause"); - break; - } - - case 4: { - setState(841); - - if 
(!(!_localctx->clauses.count("sampleByClause"))) throw FailedPredicateException(this, "!$clauses.count(\"sampleByClause\")"); - setState(842); - sampleByClause(); - _localctx->clauses.insert("sampleByClause"); - break; - } - - case 5: { - setState(845); - - if (!(!_localctx->clauses.count("ttlClause"))) throw FailedPredicateException(this, "!$clauses.count(\"ttlClause\")"); - setState(846); - ttlClause(); - _localctx->clauses.insert("ttlClause"); - break; - } - - case 6: { - setState(849); - - if (!(!_localctx->clauses.count("settingsClause"))) throw FailedPredicateException(this, "!$clauses.count(\"settingsClause\")"); - setState(850); - settingsClause(); - _localctx->clauses.insert("settingsClause"); - break; - } - - } - } - setState(857); - _errHandler->sync(this); - alt = getInterpreter()->adaptivePredict(_input, 88, _ctx); - } - - } - catch (RecognitionException &e) { - _errHandler->reportError(this, e); - _localctx->exception = std::current_exception(); - _errHandler->recover(this, _localctx->exception); - } - - return _localctx; -} - -//----------------- PartitionByClauseContext ------------------------------------------------------------------ - -ClickHouseParser::PartitionByClauseContext::PartitionByClauseContext(ParserRuleContext *parent, size_t invokingState) - : ParserRuleContext(parent, invokingState) { -} - -tree::TerminalNode* ClickHouseParser::PartitionByClauseContext::PARTITION() { - return getToken(ClickHouseParser::PARTITION, 0); -} - -tree::TerminalNode* ClickHouseParser::PartitionByClauseContext::BY() { - return getToken(ClickHouseParser::BY, 0); -} - -ClickHouseParser::ColumnExprContext* ClickHouseParser::PartitionByClauseContext::columnExpr() { - return getRuleContext(0); -} - - -size_t ClickHouseParser::PartitionByClauseContext::getRuleIndex() const { - return ClickHouseParser::RulePartitionByClause; -} - -antlrcpp::Any ClickHouseParser::PartitionByClauseContext::accept(tree::ParseTreeVisitor *visitor) { - if (auto parserVisitor = dynamic_cast(visitor)) - return parserVisitor->visitPartitionByClause(this); - else - return visitor->visitChildren(this); -} - -ClickHouseParser::PartitionByClauseContext* ClickHouseParser::partitionByClause() { - PartitionByClauseContext *_localctx = _tracker.createInstance(_ctx, getState()); - enterRule(_localctx, 54, ClickHouseParser::RulePartitionByClause); - - auto onExit = finally([=] { - exitRule(); - }); - try { - enterOuterAlt(_localctx, 1); - setState(858); - match(ClickHouseParser::PARTITION); - setState(859); - match(ClickHouseParser::BY); - setState(860); - columnExpr(0); - - } - catch (RecognitionException &e) { - _errHandler->reportError(this, e); - _localctx->exception = std::current_exception(); - _errHandler->recover(this, _localctx->exception); - } - - return _localctx; -} - -//----------------- PrimaryKeyClauseContext ------------------------------------------------------------------ - -ClickHouseParser::PrimaryKeyClauseContext::PrimaryKeyClauseContext(ParserRuleContext *parent, size_t invokingState) - : ParserRuleContext(parent, invokingState) { -} - -tree::TerminalNode* ClickHouseParser::PrimaryKeyClauseContext::PRIMARY() { - return getToken(ClickHouseParser::PRIMARY, 0); -} - -tree::TerminalNode* ClickHouseParser::PrimaryKeyClauseContext::KEY() { - return getToken(ClickHouseParser::KEY, 0); -} - -ClickHouseParser::ColumnExprContext* ClickHouseParser::PrimaryKeyClauseContext::columnExpr() { - return getRuleContext(0); -} - - -size_t ClickHouseParser::PrimaryKeyClauseContext::getRuleIndex() const { - return 
ClickHouseParser::RulePrimaryKeyClause; -} - -antlrcpp::Any ClickHouseParser::PrimaryKeyClauseContext::accept(tree::ParseTreeVisitor *visitor) { - if (auto parserVisitor = dynamic_cast(visitor)) - return parserVisitor->visitPrimaryKeyClause(this); - else - return visitor->visitChildren(this); -} - -ClickHouseParser::PrimaryKeyClauseContext* ClickHouseParser::primaryKeyClause() { - PrimaryKeyClauseContext *_localctx = _tracker.createInstance(_ctx, getState()); - enterRule(_localctx, 56, ClickHouseParser::RulePrimaryKeyClause); - - auto onExit = finally([=] { - exitRule(); - }); - try { - enterOuterAlt(_localctx, 1); - setState(862); - match(ClickHouseParser::PRIMARY); - setState(863); - match(ClickHouseParser::KEY); - setState(864); - columnExpr(0); - - } - catch (RecognitionException &e) { - _errHandler->reportError(this, e); - _localctx->exception = std::current_exception(); - _errHandler->recover(this, _localctx->exception); - } - - return _localctx; -} - -//----------------- SampleByClauseContext ------------------------------------------------------------------ - -ClickHouseParser::SampleByClauseContext::SampleByClauseContext(ParserRuleContext *parent, size_t invokingState) - : ParserRuleContext(parent, invokingState) { -} - -tree::TerminalNode* ClickHouseParser::SampleByClauseContext::SAMPLE() { - return getToken(ClickHouseParser::SAMPLE, 0); -} - -tree::TerminalNode* ClickHouseParser::SampleByClauseContext::BY() { - return getToken(ClickHouseParser::BY, 0); -} - -ClickHouseParser::ColumnExprContext* ClickHouseParser::SampleByClauseContext::columnExpr() { - return getRuleContext(0); -} - - -size_t ClickHouseParser::SampleByClauseContext::getRuleIndex() const { - return ClickHouseParser::RuleSampleByClause; -} - -antlrcpp::Any ClickHouseParser::SampleByClauseContext::accept(tree::ParseTreeVisitor *visitor) { - if (auto parserVisitor = dynamic_cast(visitor)) - return parserVisitor->visitSampleByClause(this); - else - return visitor->visitChildren(this); -} - -ClickHouseParser::SampleByClauseContext* ClickHouseParser::sampleByClause() { - SampleByClauseContext *_localctx = _tracker.createInstance(_ctx, getState()); - enterRule(_localctx, 58, ClickHouseParser::RuleSampleByClause); - - auto onExit = finally([=] { - exitRule(); - }); - try { - enterOuterAlt(_localctx, 1); - setState(866); - match(ClickHouseParser::SAMPLE); - setState(867); - match(ClickHouseParser::BY); - setState(868); - columnExpr(0); - - } - catch (RecognitionException &e) { - _errHandler->reportError(this, e); - _localctx->exception = std::current_exception(); - _errHandler->recover(this, _localctx->exception); - } - - return _localctx; -} - -//----------------- TtlClauseContext ------------------------------------------------------------------ - -ClickHouseParser::TtlClauseContext::TtlClauseContext(ParserRuleContext *parent, size_t invokingState) - : ParserRuleContext(parent, invokingState) { -} - -tree::TerminalNode* ClickHouseParser::TtlClauseContext::TTL() { - return getToken(ClickHouseParser::TTL, 0); -} - -std::vector ClickHouseParser::TtlClauseContext::ttlExpr() { - return getRuleContexts(); -} - -ClickHouseParser::TtlExprContext* ClickHouseParser::TtlClauseContext::ttlExpr(size_t i) { - return getRuleContext(i); -} - -std::vector ClickHouseParser::TtlClauseContext::COMMA() { - return getTokens(ClickHouseParser::COMMA); -} - -tree::TerminalNode* ClickHouseParser::TtlClauseContext::COMMA(size_t i) { - return getToken(ClickHouseParser::COMMA, i); -} - - -size_t ClickHouseParser::TtlClauseContext::getRuleIndex() 
const { - return ClickHouseParser::RuleTtlClause; -} - -antlrcpp::Any ClickHouseParser::TtlClauseContext::accept(tree::ParseTreeVisitor *visitor) { - if (auto parserVisitor = dynamic_cast(visitor)) - return parserVisitor->visitTtlClause(this); - else - return visitor->visitChildren(this); -} - -ClickHouseParser::TtlClauseContext* ClickHouseParser::ttlClause() { - TtlClauseContext *_localctx = _tracker.createInstance(_ctx, getState()); - enterRule(_localctx, 60, ClickHouseParser::RuleTtlClause); - - auto onExit = finally([=] { - exitRule(); - }); - try { - size_t alt; - enterOuterAlt(_localctx, 1); - setState(870); - match(ClickHouseParser::TTL); - setState(871); - ttlExpr(); - setState(876); - _errHandler->sync(this); - alt = getInterpreter()->adaptivePredict(_input, 89, _ctx); - while (alt != 2 && alt != atn::ATN::INVALID_ALT_NUMBER) { - if (alt == 1) { - setState(872); - match(ClickHouseParser::COMMA); - setState(873); - ttlExpr(); - } - setState(878); - _errHandler->sync(this); - alt = getInterpreter()->adaptivePredict(_input, 89, _ctx); - } - - } - catch (RecognitionException &e) { - _errHandler->reportError(this, e); - _localctx->exception = std::current_exception(); - _errHandler->recover(this, _localctx->exception); - } - - return _localctx; -} - -//----------------- EngineExprContext ------------------------------------------------------------------ - -ClickHouseParser::EngineExprContext::EngineExprContext(ParserRuleContext *parent, size_t invokingState) - : ParserRuleContext(parent, invokingState) { -} - -tree::TerminalNode* ClickHouseParser::EngineExprContext::ENGINE() { - return getToken(ClickHouseParser::ENGINE, 0); -} - -ClickHouseParser::IdentifierOrNullContext* ClickHouseParser::EngineExprContext::identifierOrNull() { - return getRuleContext(0); -} - -tree::TerminalNode* ClickHouseParser::EngineExprContext::EQ_SINGLE() { - return getToken(ClickHouseParser::EQ_SINGLE, 0); -} - -tree::TerminalNode* ClickHouseParser::EngineExprContext::LPAREN() { - return getToken(ClickHouseParser::LPAREN, 0); -} - -tree::TerminalNode* ClickHouseParser::EngineExprContext::RPAREN() { - return getToken(ClickHouseParser::RPAREN, 0); -} - -ClickHouseParser::ColumnExprListContext* ClickHouseParser::EngineExprContext::columnExprList() { - return getRuleContext(0); -} - - -size_t ClickHouseParser::EngineExprContext::getRuleIndex() const { - return ClickHouseParser::RuleEngineExpr; -} - -antlrcpp::Any ClickHouseParser::EngineExprContext::accept(tree::ParseTreeVisitor *visitor) { - if (auto parserVisitor = dynamic_cast(visitor)) - return parserVisitor->visitEngineExpr(this); - else - return visitor->visitChildren(this); -} - -ClickHouseParser::EngineExprContext* ClickHouseParser::engineExpr() { - EngineExprContext *_localctx = _tracker.createInstance(_ctx, getState()); - enterRule(_localctx, 62, ClickHouseParser::RuleEngineExpr); - size_t _la = 0; - - auto onExit = finally([=] { - exitRule(); - }); - try { - enterOuterAlt(_localctx, 1); - setState(879); - match(ClickHouseParser::ENGINE); - setState(881); - _errHandler->sync(this); - - _la = _input->LA(1); - if (_la == ClickHouseParser::EQ_SINGLE) { - setState(880); - match(ClickHouseParser::EQ_SINGLE); - } - setState(883); - identifierOrNull(); - setState(889); - _errHandler->sync(this); - - switch (getInterpreter()->adaptivePredict(_input, 92, _ctx)) { - case 1: { - setState(884); - match(ClickHouseParser::LPAREN); - setState(886); - _errHandler->sync(this); - - _la = _input->LA(1); - if ((((_la & ~ 0x3fULL) == 0) && - ((1ULL << _la) & ((1ULL << 
ClickHouseParser::AFTER) - | (1ULL << ClickHouseParser::ALIAS) - | (1ULL << ClickHouseParser::ALL) - | (1ULL << ClickHouseParser::ALTER) - | (1ULL << ClickHouseParser::AND) - | (1ULL << ClickHouseParser::ANTI) - | (1ULL << ClickHouseParser::ANY) - | (1ULL << ClickHouseParser::ARRAY) - | (1ULL << ClickHouseParser::AS) - | (1ULL << ClickHouseParser::ASCENDING) - | (1ULL << ClickHouseParser::ASOF) - | (1ULL << ClickHouseParser::AST) - | (1ULL << ClickHouseParser::ASYNC) - | (1ULL << ClickHouseParser::ATTACH) - | (1ULL << ClickHouseParser::BETWEEN) - | (1ULL << ClickHouseParser::BOTH) - | (1ULL << ClickHouseParser::BY) - | (1ULL << ClickHouseParser::CASE) - | (1ULL << ClickHouseParser::CAST) - | (1ULL << ClickHouseParser::CHECK) - | (1ULL << ClickHouseParser::CLEAR) - | (1ULL << ClickHouseParser::CLUSTER) - | (1ULL << ClickHouseParser::CODEC) - | (1ULL << ClickHouseParser::COLLATE) - | (1ULL << ClickHouseParser::COLUMN) - | (1ULL << ClickHouseParser::COMMENT) - | (1ULL << ClickHouseParser::CONSTRAINT) - | (1ULL << ClickHouseParser::CREATE) - | (1ULL << ClickHouseParser::CROSS) - | (1ULL << ClickHouseParser::CUBE) - | (1ULL << ClickHouseParser::DATABASE) - | (1ULL << ClickHouseParser::DATABASES) - | (1ULL << ClickHouseParser::DATE) - | (1ULL << ClickHouseParser::DAY) - | (1ULL << ClickHouseParser::DEDUPLICATE) - | (1ULL << ClickHouseParser::DEFAULT) - | (1ULL << ClickHouseParser::DELAY) - | (1ULL << ClickHouseParser::DELETE) - | (1ULL << ClickHouseParser::DESC) - | (1ULL << ClickHouseParser::DESCENDING) - | (1ULL << ClickHouseParser::DESCRIBE) - | (1ULL << ClickHouseParser::DETACH) - | (1ULL << ClickHouseParser::DICTIONARIES) - | (1ULL << ClickHouseParser::DICTIONARY) - | (1ULL << ClickHouseParser::DISK) - | (1ULL << ClickHouseParser::DISTINCT) - | (1ULL << ClickHouseParser::DISTRIBUTED) - | (1ULL << ClickHouseParser::DROP) - | (1ULL << ClickHouseParser::ELSE) - | (1ULL << ClickHouseParser::END) - | (1ULL << ClickHouseParser::ENGINE) - | (1ULL << ClickHouseParser::EVENTS) - | (1ULL << ClickHouseParser::EXISTS) - | (1ULL << ClickHouseParser::EXPLAIN) - | (1ULL << ClickHouseParser::EXPRESSION) - | (1ULL << ClickHouseParser::EXTRACT) - | (1ULL << ClickHouseParser::FETCHES) - | (1ULL << ClickHouseParser::FINAL) - | (1ULL << ClickHouseParser::FIRST) - | (1ULL << ClickHouseParser::FLUSH) - | (1ULL << ClickHouseParser::FOR) - | (1ULL << ClickHouseParser::FORMAT))) != 0) || ((((_la - 64) & ~ 0x3fULL) == 0) && - ((1ULL << (_la - 64)) & ((1ULL << (ClickHouseParser::FREEZE - 64)) - | (1ULL << (ClickHouseParser::FROM - 64)) - | (1ULL << (ClickHouseParser::FULL - 64)) - | (1ULL << (ClickHouseParser::FUNCTION - 64)) - | (1ULL << (ClickHouseParser::GLOBAL - 64)) - | (1ULL << (ClickHouseParser::GRANULARITY - 64)) - | (1ULL << (ClickHouseParser::GROUP - 64)) - | (1ULL << (ClickHouseParser::HAVING - 64)) - | (1ULL << (ClickHouseParser::HIERARCHICAL - 64)) - | (1ULL << (ClickHouseParser::HOUR - 64)) - | (1ULL << (ClickHouseParser::ID - 64)) - | (1ULL << (ClickHouseParser::IF - 64)) - | (1ULL << (ClickHouseParser::ILIKE - 64)) - | (1ULL << (ClickHouseParser::IN - 64)) - | (1ULL << (ClickHouseParser::INDEX - 64)) - | (1ULL << (ClickHouseParser::INF - 64)) - | (1ULL << (ClickHouseParser::INJECTIVE - 64)) - | (1ULL << (ClickHouseParser::INNER - 64)) - | (1ULL << (ClickHouseParser::INSERT - 64)) - | (1ULL << (ClickHouseParser::INTERVAL - 64)) - | (1ULL << (ClickHouseParser::INTO - 64)) - | (1ULL << (ClickHouseParser::IS - 64)) - | (1ULL << (ClickHouseParser::IS_OBJECT_ID - 64)) - | (1ULL << (ClickHouseParser::JOIN - 
64)) - | (1ULL << (ClickHouseParser::KEY - 64)) - | (1ULL << (ClickHouseParser::KILL - 64)) - | (1ULL << (ClickHouseParser::LAST - 64)) - | (1ULL << (ClickHouseParser::LAYOUT - 64)) - | (1ULL << (ClickHouseParser::LEADING - 64)) - | (1ULL << (ClickHouseParser::LEFT - 64)) - | (1ULL << (ClickHouseParser::LIFETIME - 64)) - | (1ULL << (ClickHouseParser::LIKE - 64)) - | (1ULL << (ClickHouseParser::LIMIT - 64)) - | (1ULL << (ClickHouseParser::LIVE - 64)) - | (1ULL << (ClickHouseParser::LOCAL - 64)) - | (1ULL << (ClickHouseParser::LOGS - 64)) - | (1ULL << (ClickHouseParser::MATERIALIZE - 64)) - | (1ULL << (ClickHouseParser::MATERIALIZED - 64)) - | (1ULL << (ClickHouseParser::MAX - 64)) - | (1ULL << (ClickHouseParser::MERGES - 64)) - | (1ULL << (ClickHouseParser::MIN - 64)) - | (1ULL << (ClickHouseParser::MINUTE - 64)) - | (1ULL << (ClickHouseParser::MODIFY - 64)) - | (1ULL << (ClickHouseParser::MONTH - 64)) - | (1ULL << (ClickHouseParser::MOVE - 64)) - | (1ULL << (ClickHouseParser::MUTATION - 64)) - | (1ULL << (ClickHouseParser::NAN_SQL - 64)) - | (1ULL << (ClickHouseParser::NO - 64)) - | (1ULL << (ClickHouseParser::NOT - 64)) - | (1ULL << (ClickHouseParser::NULL_SQL - 64)) - | (1ULL << (ClickHouseParser::NULLS - 64)) - | (1ULL << (ClickHouseParser::OFFSET - 64)) - | (1ULL << (ClickHouseParser::ON - 64)) - | (1ULL << (ClickHouseParser::OPTIMIZE - 64)) - | (1ULL << (ClickHouseParser::OR - 64)) - | (1ULL << (ClickHouseParser::ORDER - 64)) - | (1ULL << (ClickHouseParser::OUTER - 64)) - | (1ULL << (ClickHouseParser::OUTFILE - 64)) - | (1ULL << (ClickHouseParser::PARTITION - 64)) - | (1ULL << (ClickHouseParser::POPULATE - 64)) - | (1ULL << (ClickHouseParser::PREWHERE - 64)) - | (1ULL << (ClickHouseParser::PRIMARY - 64)) - | (1ULL << (ClickHouseParser::QUARTER - 64)))) != 0) || ((((_la - 128) & ~ 0x3fULL) == 0) && - ((1ULL << (_la - 128)) & ((1ULL << (ClickHouseParser::RANGE - 128)) - | (1ULL << (ClickHouseParser::RELOAD - 128)) - | (1ULL << (ClickHouseParser::REMOVE - 128)) - | (1ULL << (ClickHouseParser::RENAME - 128)) - | (1ULL << (ClickHouseParser::REPLACE - 128)) - | (1ULL << (ClickHouseParser::REPLICA - 128)) - | (1ULL << (ClickHouseParser::REPLICATED - 128)) - | (1ULL << (ClickHouseParser::RIGHT - 128)) - | (1ULL << (ClickHouseParser::ROLLUP - 128)) - | (1ULL << (ClickHouseParser::SAMPLE - 128)) - | (1ULL << (ClickHouseParser::SECOND - 128)) - | (1ULL << (ClickHouseParser::SELECT - 128)) - | (1ULL << (ClickHouseParser::SEMI - 128)) - | (1ULL << (ClickHouseParser::SENDS - 128)) - | (1ULL << (ClickHouseParser::SET - 128)) - | (1ULL << (ClickHouseParser::SETTINGS - 128)) - | (1ULL << (ClickHouseParser::SHOW - 128)) - | (1ULL << (ClickHouseParser::SOURCE - 128)) - | (1ULL << (ClickHouseParser::START - 128)) - | (1ULL << (ClickHouseParser::STOP - 128)) - | (1ULL << (ClickHouseParser::SUBSTRING - 128)) - | (1ULL << (ClickHouseParser::SYNC - 128)) - | (1ULL << (ClickHouseParser::SYNTAX - 128)) - | (1ULL << (ClickHouseParser::SYSTEM - 128)) - | (1ULL << (ClickHouseParser::TABLE - 128)) - | (1ULL << (ClickHouseParser::TABLES - 128)) - | (1ULL << (ClickHouseParser::TEMPORARY - 128)) - | (1ULL << (ClickHouseParser::TEST - 128)) - | (1ULL << (ClickHouseParser::THEN - 128)) - | (1ULL << (ClickHouseParser::TIES - 128)) - | (1ULL << (ClickHouseParser::TIMEOUT - 128)) - | (1ULL << (ClickHouseParser::TIMESTAMP - 128)) - | (1ULL << (ClickHouseParser::TO - 128)) - | (1ULL << (ClickHouseParser::TOP - 128)) - | (1ULL << (ClickHouseParser::TOTALS - 128)) - | (1ULL << (ClickHouseParser::TRAILING - 128)) - | (1ULL << 
(ClickHouseParser::TRIM - 128)) - | (1ULL << (ClickHouseParser::TRUNCATE - 128)) - | (1ULL << (ClickHouseParser::TTL - 128)) - | (1ULL << (ClickHouseParser::TYPE - 128)) - | (1ULL << (ClickHouseParser::UNION - 128)) - | (1ULL << (ClickHouseParser::UPDATE - 128)) - | (1ULL << (ClickHouseParser::USE - 128)) - | (1ULL << (ClickHouseParser::USING - 128)) - | (1ULL << (ClickHouseParser::UUID - 128)) - | (1ULL << (ClickHouseParser::VALUES - 128)) - | (1ULL << (ClickHouseParser::VIEW - 128)) - | (1ULL << (ClickHouseParser::VOLUME - 128)) - | (1ULL << (ClickHouseParser::WATCH - 128)) - | (1ULL << (ClickHouseParser::WEEK - 128)) - | (1ULL << (ClickHouseParser::WHEN - 128)) - | (1ULL << (ClickHouseParser::WHERE - 128)) - | (1ULL << (ClickHouseParser::WITH - 128)) - | (1ULL << (ClickHouseParser::YEAR - 128)) - | (1ULL << (ClickHouseParser::JSON_FALSE - 128)) - | (1ULL << (ClickHouseParser::JSON_TRUE - 128)) - | (1ULL << (ClickHouseParser::IDENTIFIER - 128)) - | (1ULL << (ClickHouseParser::FLOATING_LITERAL - 128)) - | (1ULL << (ClickHouseParser::OCTAL_LITERAL - 128)) - | (1ULL << (ClickHouseParser::DECIMAL_LITERAL - 128)) - | (1ULL << (ClickHouseParser::HEXADECIMAL_LITERAL - 128)) - | (1ULL << (ClickHouseParser::STRING_LITERAL - 128)) - | (1ULL << (ClickHouseParser::ASTERISK - 128)))) != 0) || ((((_la - 197) & ~ 0x3fULL) == 0) && - ((1ULL << (_la - 197)) & ((1ULL << (ClickHouseParser::DASH - 197)) - | (1ULL << (ClickHouseParser::DOT - 197)) - | (1ULL << (ClickHouseParser::LBRACKET - 197)) - | (1ULL << (ClickHouseParser::LPAREN - 197)) - | (1ULL << (ClickHouseParser::PLUS - 197)))) != 0)) { - setState(885); - columnExprList(); - } - setState(888); - match(ClickHouseParser::RPAREN); - break; - } - - } - - } - catch (RecognitionException &e) { - _errHandler->reportError(this, e); - _localctx->exception = std::current_exception(); - _errHandler->recover(this, _localctx->exception); - } - - return _localctx; -} - -//----------------- TableElementExprContext ------------------------------------------------------------------ - -ClickHouseParser::TableElementExprContext::TableElementExprContext(ParserRuleContext *parent, size_t invokingState) - : ParserRuleContext(parent, invokingState) { -} - - -size_t ClickHouseParser::TableElementExprContext::getRuleIndex() const { - return ClickHouseParser::RuleTableElementExpr; -} - -void ClickHouseParser::TableElementExprContext::copyFrom(TableElementExprContext *ctx) { - ParserRuleContext::copyFrom(ctx); -} - -//----------------- TableElementExprProjectionContext ------------------------------------------------------------------ - -tree::TerminalNode* ClickHouseParser::TableElementExprProjectionContext::PROJECTION() { - return getToken(ClickHouseParser::PROJECTION, 0); -} - -ClickHouseParser::TableProjectionDfntContext* ClickHouseParser::TableElementExprProjectionContext::tableProjectionDfnt() { - return getRuleContext(0); -} - -ClickHouseParser::TableElementExprProjectionContext::TableElementExprProjectionContext(TableElementExprContext *ctx) { copyFrom(ctx); } - -antlrcpp::Any ClickHouseParser::TableElementExprProjectionContext::accept(tree::ParseTreeVisitor *visitor) { - if (auto parserVisitor = dynamic_cast(visitor)) - return parserVisitor->visitTableElementExprProjection(this); - else - return visitor->visitChildren(this); -} -//----------------- TableElementExprConstraintContext ------------------------------------------------------------------ - -tree::TerminalNode* ClickHouseParser::TableElementExprConstraintContext::CONSTRAINT() { - return 
getToken(ClickHouseParser::CONSTRAINT, 0); -} - -ClickHouseParser::IdentifierContext* ClickHouseParser::TableElementExprConstraintContext::identifier() { - return getRuleContext(0); -} - -tree::TerminalNode* ClickHouseParser::TableElementExprConstraintContext::CHECK() { - return getToken(ClickHouseParser::CHECK, 0); -} - -ClickHouseParser::ColumnExprContext* ClickHouseParser::TableElementExprConstraintContext::columnExpr() { - return getRuleContext(0); -} - -ClickHouseParser::TableElementExprConstraintContext::TableElementExprConstraintContext(TableElementExprContext *ctx) { copyFrom(ctx); } - -antlrcpp::Any ClickHouseParser::TableElementExprConstraintContext::accept(tree::ParseTreeVisitor *visitor) { - if (auto parserVisitor = dynamic_cast(visitor)) - return parserVisitor->visitTableElementExprConstraint(this); - else - return visitor->visitChildren(this); -} -//----------------- TableElementExprColumnContext ------------------------------------------------------------------ - -ClickHouseParser::TableColumnDfntContext* ClickHouseParser::TableElementExprColumnContext::tableColumnDfnt() { - return getRuleContext(0); -} - -ClickHouseParser::TableElementExprColumnContext::TableElementExprColumnContext(TableElementExprContext *ctx) { copyFrom(ctx); } - -antlrcpp::Any ClickHouseParser::TableElementExprColumnContext::accept(tree::ParseTreeVisitor *visitor) { - if (auto parserVisitor = dynamic_cast(visitor)) - return parserVisitor->visitTableElementExprColumn(this); - else - return visitor->visitChildren(this); -} -//----------------- TableElementExprIndexContext ------------------------------------------------------------------ - -tree::TerminalNode* ClickHouseParser::TableElementExprIndexContext::INDEX() { - return getToken(ClickHouseParser::INDEX, 0); -} - -ClickHouseParser::TableIndexDfntContext* ClickHouseParser::TableElementExprIndexContext::tableIndexDfnt() { - return getRuleContext(0); -} - -ClickHouseParser::TableElementExprIndexContext::TableElementExprIndexContext(TableElementExprContext *ctx) { copyFrom(ctx); } - -antlrcpp::Any ClickHouseParser::TableElementExprIndexContext::accept(tree::ParseTreeVisitor *visitor) { - if (auto parserVisitor = dynamic_cast(visitor)) - return parserVisitor->visitTableElementExprIndex(this); - else - return visitor->visitChildren(this); -} -ClickHouseParser::TableElementExprContext* ClickHouseParser::tableElementExpr() { - TableElementExprContext *_localctx = _tracker.createInstance(_ctx, getState()); - enterRule(_localctx, 64, ClickHouseParser::RuleTableElementExpr); - - auto onExit = finally([=] { - exitRule(); - }); - try { - setState(901); - _errHandler->sync(this); - switch (getInterpreter()->adaptivePredict(_input, 93, _ctx)) { - case 1: { - _localctx = dynamic_cast(_tracker.createInstance(_localctx)); - enterOuterAlt(_localctx, 1); - setState(891); - tableColumnDfnt(); - break; - } - - case 2: { - _localctx = dynamic_cast(_tracker.createInstance(_localctx)); - enterOuterAlt(_localctx, 2); - setState(892); - match(ClickHouseParser::CONSTRAINT); - setState(893); - identifier(); - setState(894); - match(ClickHouseParser::CHECK); - setState(895); - columnExpr(0); - break; - } - - case 3: { - _localctx = dynamic_cast(_tracker.createInstance(_localctx)); - enterOuterAlt(_localctx, 3); - setState(897); - match(ClickHouseParser::INDEX); - setState(898); - tableIndexDfnt(); - break; - } - - case 4: { - _localctx = dynamic_cast(_tracker.createInstance(_localctx)); - enterOuterAlt(_localctx, 4); - setState(899); - match(ClickHouseParser::PROJECTION); - 
setState(900); - tableProjectionDfnt(); - break; - } - - } - - } - catch (RecognitionException &e) { - _errHandler->reportError(this, e); - _localctx->exception = std::current_exception(); - _errHandler->recover(this, _localctx->exception); - } - - return _localctx; -} - -//----------------- TableColumnDfntContext ------------------------------------------------------------------ - -ClickHouseParser::TableColumnDfntContext::TableColumnDfntContext(ParserRuleContext *parent, size_t invokingState) - : ParserRuleContext(parent, invokingState) { -} - -ClickHouseParser::NestedIdentifierContext* ClickHouseParser::TableColumnDfntContext::nestedIdentifier() { - return getRuleContext(0); -} - -ClickHouseParser::ColumnTypeExprContext* ClickHouseParser::TableColumnDfntContext::columnTypeExpr() { - return getRuleContext(0); -} - -ClickHouseParser::TableColumnPropertyExprContext* ClickHouseParser::TableColumnDfntContext::tableColumnPropertyExpr() { - return getRuleContext(0); -} - -tree::TerminalNode* ClickHouseParser::TableColumnDfntContext::COMMENT() { - return getToken(ClickHouseParser::COMMENT, 0); -} - -tree::TerminalNode* ClickHouseParser::TableColumnDfntContext::STRING_LITERAL() { - return getToken(ClickHouseParser::STRING_LITERAL, 0); -} - -ClickHouseParser::CodecExprContext* ClickHouseParser::TableColumnDfntContext::codecExpr() { - return getRuleContext(0); -} - -tree::TerminalNode* ClickHouseParser::TableColumnDfntContext::TTL() { - return getToken(ClickHouseParser::TTL, 0); -} - -ClickHouseParser::ColumnExprContext* ClickHouseParser::TableColumnDfntContext::columnExpr() { - return getRuleContext(0); -} - - -size_t ClickHouseParser::TableColumnDfntContext::getRuleIndex() const { - return ClickHouseParser::RuleTableColumnDfnt; -} - -antlrcpp::Any ClickHouseParser::TableColumnDfntContext::accept(tree::ParseTreeVisitor *visitor) { - if (auto parserVisitor = dynamic_cast(visitor)) - return parserVisitor->visitTableColumnDfnt(this); - else - return visitor->visitChildren(this); -} - -ClickHouseParser::TableColumnDfntContext* ClickHouseParser::tableColumnDfnt() { - TableColumnDfntContext *_localctx = _tracker.createInstance(_ctx, getState()); - enterRule(_localctx, 66, ClickHouseParser::RuleTableColumnDfnt); - size_t _la = 0; - - auto onExit = finally([=] { - exitRule(); - }); - try { - setState(935); - _errHandler->sync(this); - switch (getInterpreter()->adaptivePredict(_input, 102, _ctx)) { - case 1: { - enterOuterAlt(_localctx, 1); - setState(903); - nestedIdentifier(); - setState(904); - columnTypeExpr(); - setState(906); - _errHandler->sync(this); - - _la = _input->LA(1); - if (_la == ClickHouseParser::ALIAS - - || _la == ClickHouseParser::DEFAULT || _la == ClickHouseParser::MATERIALIZED) { - setState(905); - tableColumnPropertyExpr(); - } - setState(910); - _errHandler->sync(this); - - _la = _input->LA(1); - if (_la == ClickHouseParser::COMMENT) { - setState(908); - match(ClickHouseParser::COMMENT); - setState(909); - match(ClickHouseParser::STRING_LITERAL); - } - setState(913); - _errHandler->sync(this); - - _la = _input->LA(1); - if (_la == ClickHouseParser::CODEC) { - setState(912); - codecExpr(); - } - setState(917); - _errHandler->sync(this); - - _la = _input->LA(1); - if (_la == ClickHouseParser::TTL) { - setState(915); - match(ClickHouseParser::TTL); - setState(916); - columnExpr(0); - } - break; - } - - case 2: { - enterOuterAlt(_localctx, 2); - setState(919); - nestedIdentifier(); - setState(921); - _errHandler->sync(this); - - switch (getInterpreter()->adaptivePredict(_input, 98, 
_ctx)) {
-      case 1: {
-        setState(920);
-        columnTypeExpr();
-        break;
-      }
-
-      }
-      setState(923);
-      tableColumnPropertyExpr();
-      setState(926);
-      _errHandler->sync(this);
-
-      _la = _input->LA(1);
-      if (_la == ClickHouseParser::COMMENT) {
-        setState(924);
-        match(ClickHouseParser::COMMENT);
-        setState(925);
-        match(ClickHouseParser::STRING_LITERAL);
-      }
-      setState(929);
-      _errHandler->sync(this);
-
-      _la = _input->LA(1);
-      if (_la == ClickHouseParser::CODEC) {
-        setState(928);
-        codecExpr();
-      }
-      setState(933);
-      _errHandler->sync(this);
-
-      _la = _input->LA(1);
-      if (_la == ClickHouseParser::TTL) {
-        setState(931);
-        match(ClickHouseParser::TTL);
-        setState(932);
-        columnExpr(0);
-      }
-      break;
-    }
-
-    }
-
-  }
-  catch (RecognitionException &e) {
-    _errHandler->reportError(this, e);
-    _localctx->exception = std::current_exception();
-    _errHandler->recover(this, _localctx->exception);
-  }
-
-  return _localctx;
-}
-
-//----------------- TableColumnPropertyExprContext ------------------------------------------------------------------
-
-ClickHouseParser::TableColumnPropertyExprContext::TableColumnPropertyExprContext(ParserRuleContext *parent, size_t invokingState)
-  : ParserRuleContext(parent, invokingState) {
-}
-
-ClickHouseParser::ColumnExprContext* ClickHouseParser::TableColumnPropertyExprContext::columnExpr() {
-  return getRuleContext<ClickHouseParser::ColumnExprContext>(0);
-}
-
-tree::TerminalNode* ClickHouseParser::TableColumnPropertyExprContext::DEFAULT() {
-  return getToken(ClickHouseParser::DEFAULT, 0);
-}
-
-tree::TerminalNode* ClickHouseParser::TableColumnPropertyExprContext::MATERIALIZED() {
-  return getToken(ClickHouseParser::MATERIALIZED, 0);
-}
-
-tree::TerminalNode* ClickHouseParser::TableColumnPropertyExprContext::ALIAS() {
-  return getToken(ClickHouseParser::ALIAS, 0);
-}
-
-
-size_t ClickHouseParser::TableColumnPropertyExprContext::getRuleIndex() const {
-  return ClickHouseParser::RuleTableColumnPropertyExpr;
-}
-
-antlrcpp::Any ClickHouseParser::TableColumnPropertyExprContext::accept(tree::ParseTreeVisitor *visitor) {
-  if (auto parserVisitor = dynamic_cast<ClickHouseParserVisitor*>(visitor))
-    return parserVisitor->visitTableColumnPropertyExpr(this);
-  else
-    return visitor->visitChildren(this);
-}
-
-ClickHouseParser::TableColumnPropertyExprContext* ClickHouseParser::tableColumnPropertyExpr() {
-  TableColumnPropertyExprContext *_localctx = _tracker.createInstance<TableColumnPropertyExprContext>(_ctx, getState());
-  enterRule(_localctx, 68, ClickHouseParser::RuleTableColumnPropertyExpr);
-  size_t _la = 0;
-
-  auto onExit = finally([=] {
-    exitRule();
-  });
-  try {
-    enterOuterAlt(_localctx, 1);
-    setState(937);
-    _la = _input->LA(1);
-    if (!(_la == ClickHouseParser::ALIAS
-
-    || _la == ClickHouseParser::DEFAULT || _la == ClickHouseParser::MATERIALIZED)) {
-    _errHandler->recoverInline(this);
-    }
-    else {
-      _errHandler->reportMatch(this);
-      consume();
-    }
-    setState(938);
-    columnExpr(0);
-
-  }
-  catch (RecognitionException &e) {
-    _errHandler->reportError(this, e);
-    _localctx->exception = std::current_exception();
-    _errHandler->recover(this, _localctx->exception);
-  }
-
-  return _localctx;
-}
-
-//----------------- TableIndexDfntContext ------------------------------------------------------------------
-
-ClickHouseParser::TableIndexDfntContext::TableIndexDfntContext(ParserRuleContext *parent, size_t invokingState)
-  : ParserRuleContext(parent, invokingState) {
-}
-
-ClickHouseParser::NestedIdentifierContext* ClickHouseParser::TableIndexDfntContext::nestedIdentifier() {
-  return getRuleContext<ClickHouseParser::NestedIdentifierContext>(0);
-}
-
-ClickHouseParser::ColumnExprContext* ClickHouseParser::TableIndexDfntContext::columnExpr() {
-  return getRuleContext<ClickHouseParser::ColumnExprContext>(0);
-}
-
-tree::TerminalNode* ClickHouseParser::TableIndexDfntContext::TYPE() {
-  return getToken(ClickHouseParser::TYPE, 0);
-}
-
-ClickHouseParser::ColumnTypeExprContext* ClickHouseParser::TableIndexDfntContext::columnTypeExpr() {
-  return getRuleContext<ClickHouseParser::ColumnTypeExprContext>(0);
-}
-
-tree::TerminalNode* ClickHouseParser::TableIndexDfntContext::GRANULARITY() {
-  return getToken(ClickHouseParser::GRANULARITY, 0);
-}
-
-tree::TerminalNode* ClickHouseParser::TableIndexDfntContext::DECIMAL_LITERAL() {
-  return getToken(ClickHouseParser::DECIMAL_LITERAL, 0);
-}
-
-
-size_t ClickHouseParser::TableIndexDfntContext::getRuleIndex() const {
-  return ClickHouseParser::RuleTableIndexDfnt;
-}
-
-antlrcpp::Any ClickHouseParser::TableIndexDfntContext::accept(tree::ParseTreeVisitor *visitor) {
-  if (auto parserVisitor = dynamic_cast<ClickHouseParserVisitor*>(visitor))
-    return parserVisitor->visitTableIndexDfnt(this);
-  else
-    return visitor->visitChildren(this);
-}
-
-ClickHouseParser::TableIndexDfntContext* ClickHouseParser::tableIndexDfnt() {
-  TableIndexDfntContext *_localctx = _tracker.createInstance<TableIndexDfntContext>(_ctx, getState());
-  enterRule(_localctx, 70, ClickHouseParser::RuleTableIndexDfnt);
-
-  auto onExit = finally([=] {
-    exitRule();
-  });
-  try {
-    enterOuterAlt(_localctx, 1);
-    setState(940);
-    nestedIdentifier();
-    setState(941);
-    columnExpr(0);
-    setState(942);
-    match(ClickHouseParser::TYPE);
-    setState(943);
-    columnTypeExpr();
-    setState(944);
-    match(ClickHouseParser::GRANULARITY);
-    setState(945);
-    match(ClickHouseParser::DECIMAL_LITERAL);
-
-  }
-  catch (RecognitionException &e) {
-    _errHandler->reportError(this, e);
-    _localctx->exception = std::current_exception();
-    _errHandler->recover(this, _localctx->exception);
-  }
-
-  return _localctx;
-}
-
-//----------------- TableProjectionDfntContext ------------------------------------------------------------------
-
-ClickHouseParser::TableProjectionDfntContext::TableProjectionDfntContext(ParserRuleContext *parent, size_t invokingState)
-  : ParserRuleContext(parent, invokingState) {
-}
-
-ClickHouseParser::NestedIdentifierContext* ClickHouseParser::TableProjectionDfntContext::nestedIdentifier() {
-  return getRuleContext<ClickHouseParser::NestedIdentifierContext>(0);
-}
-
-ClickHouseParser::ProjectionSelectStmtContext* ClickHouseParser::TableProjectionDfntContext::projectionSelectStmt() {
-  return getRuleContext<ClickHouseParser::ProjectionSelectStmtContext>(0);
-}
-
-
-size_t ClickHouseParser::TableProjectionDfntContext::getRuleIndex() const {
-  return ClickHouseParser::RuleTableProjectionDfnt;
-}
-
-antlrcpp::Any ClickHouseParser::TableProjectionDfntContext::accept(tree::ParseTreeVisitor *visitor) {
-  if (auto parserVisitor = dynamic_cast<ClickHouseParserVisitor*>(visitor))
-    return parserVisitor->visitTableProjectionDfnt(this);
-  else
-    return visitor->visitChildren(this);
-}
-
-ClickHouseParser::TableProjectionDfntContext* ClickHouseParser::tableProjectionDfnt() {
-  TableProjectionDfntContext *_localctx = _tracker.createInstance<TableProjectionDfntContext>(_ctx, getState());
-  enterRule(_localctx, 72, ClickHouseParser::RuleTableProjectionDfnt);
-
-  auto onExit = finally([=] {
-    exitRule();
-  });
-  try {
-    enterOuterAlt(_localctx, 1);
-    setState(947);
-    nestedIdentifier();
-    setState(948);
-    projectionSelectStmt();
-
-  }
-  catch (RecognitionException &e) {
-    _errHandler->reportError(this, e);
-    _localctx->exception = std::current_exception();
-    _errHandler->recover(this, _localctx->exception);
-  }
-
-  return _localctx;
-}
-
-//----------------- CodecExprContext ------------------------------------------------------------------
-
-ClickHouseParser::CodecExprContext::CodecExprContext(ParserRuleContext *parent, size_t invokingState)
-  : ParserRuleContext(parent, invokingState) {
-}
-
-tree::TerminalNode* ClickHouseParser::CodecExprContext::CODEC() {
-  return getToken(ClickHouseParser::CODEC, 0);
-}
-
-tree::TerminalNode* ClickHouseParser::CodecExprContext::LPAREN() {
-  return getToken(ClickHouseParser::LPAREN, 0);
-}
-
-std::vector<ClickHouseParser::CodecArgExprContext *> ClickHouseParser::CodecExprContext::codecArgExpr() {
-  return getRuleContexts<ClickHouseParser::CodecArgExprContext>();
-}
-
-ClickHouseParser::CodecArgExprContext* ClickHouseParser::CodecExprContext::codecArgExpr(size_t i) {
-  return getRuleContext<ClickHouseParser::CodecArgExprContext>(i);
-}
-
-tree::TerminalNode* ClickHouseParser::CodecExprContext::RPAREN() {
-  return getToken(ClickHouseParser::RPAREN, 0);
-}
-
-std::vector<tree::TerminalNode *> ClickHouseParser::CodecExprContext::COMMA() {
-  return getTokens(ClickHouseParser::COMMA);
-}
-
-tree::TerminalNode* ClickHouseParser::CodecExprContext::COMMA(size_t i) {
-  return getToken(ClickHouseParser::COMMA, i);
-}
-
-
-size_t ClickHouseParser::CodecExprContext::getRuleIndex() const {
-  return ClickHouseParser::RuleCodecExpr;
-}
-
-antlrcpp::Any ClickHouseParser::CodecExprContext::accept(tree::ParseTreeVisitor *visitor) {
-  if (auto parserVisitor = dynamic_cast<ClickHouseParserVisitor*>(visitor))
-    return parserVisitor->visitCodecExpr(this);
-  else
-    return visitor->visitChildren(this);
-}
-
-ClickHouseParser::CodecExprContext* ClickHouseParser::codecExpr() {
-  CodecExprContext *_localctx = _tracker.createInstance<CodecExprContext>(_ctx, getState());
-  enterRule(_localctx, 74, ClickHouseParser::RuleCodecExpr);
-  size_t _la = 0;
-
-  auto onExit = finally([=] {
-    exitRule();
-  });
-  try {
-    enterOuterAlt(_localctx, 1);
-    setState(950);
-    match(ClickHouseParser::CODEC);
-    setState(951);
-    match(ClickHouseParser::LPAREN);
-    setState(952);
-    codecArgExpr();
-    setState(957);
-    _errHandler->sync(this);
-    _la = _input->LA(1);
-    while (_la == ClickHouseParser::COMMA) {
-      setState(953);
-      match(ClickHouseParser::COMMA);
-      setState(954);
-      codecArgExpr();
-      setState(959);
-      _errHandler->sync(this);
-      _la = _input->LA(1);
-    }
-    setState(960);
-    match(ClickHouseParser::RPAREN);
-
-  }
-  catch (RecognitionException &e) {
-    _errHandler->reportError(this, e);
-    _localctx->exception = std::current_exception();
-    _errHandler->recover(this, _localctx->exception);
-  }
-
-  return _localctx;
-}
-
-//----------------- CodecArgExprContext ------------------------------------------------------------------
-
-ClickHouseParser::CodecArgExprContext::CodecArgExprContext(ParserRuleContext *parent, size_t invokingState)
-  : ParserRuleContext(parent, invokingState) {
-}
-
-ClickHouseParser::IdentifierContext* ClickHouseParser::CodecArgExprContext::identifier() {
-  return getRuleContext<ClickHouseParser::IdentifierContext>(0);
-}
-
-tree::TerminalNode* ClickHouseParser::CodecArgExprContext::LPAREN() {
-  return getToken(ClickHouseParser::LPAREN, 0);
-}
-
-tree::TerminalNode* ClickHouseParser::CodecArgExprContext::RPAREN() {
-  return getToken(ClickHouseParser::RPAREN, 0);
-}
-
-ClickHouseParser::ColumnExprListContext* ClickHouseParser::CodecArgExprContext::columnExprList() {
-  return getRuleContext<ClickHouseParser::ColumnExprListContext>(0);
-}
-
-
-size_t ClickHouseParser::CodecArgExprContext::getRuleIndex() const {
-  return ClickHouseParser::RuleCodecArgExpr;
-}
-
-antlrcpp::Any ClickHouseParser::CodecArgExprContext::accept(tree::ParseTreeVisitor *visitor) {
-  if (auto parserVisitor = dynamic_cast<ClickHouseParserVisitor*>(visitor))
-    return parserVisitor->visitCodecArgExpr(this);
-  else
-    return visitor->visitChildren(this);
-}
-
-ClickHouseParser::CodecArgExprContext* ClickHouseParser::codecArgExpr() {
-  CodecArgExprContext *_localctx = _tracker.createInstance<CodecArgExprContext>(_ctx, getState());
-  enterRule(_localctx, 76, ClickHouseParser::RuleCodecArgExpr);
-  size_t _la = 0;
-
-  auto onExit = finally([=] {
-    exitRule();
-  });
-  try {
-    enterOuterAlt(_localctx, 1);
-    setState(962);
-    identifier();
-    setState(968);
-    _errHandler->sync(this);
-
-    _la = _input->LA(1);
-    if (_la == ClickHouseParser::LPAREN) {
-      setState(963);
-      match(ClickHouseParser::LPAREN);
-      setState(965);
-      _errHandler->sync(this);
-
-      _la = _input->LA(1);
-      if ((((_la & ~ 0x3fULL) == 0) &&
-        ((1ULL << _la) & ((1ULL << ClickHouseParser::AFTER)
-        | (1ULL << ClickHouseParser::ALIAS)
-        | (1ULL << ClickHouseParser::ALL)
-        | (1ULL << ClickHouseParser::ALTER)
-        | (1ULL << ClickHouseParser::AND)
-        | (1ULL << ClickHouseParser::ANTI)
-        | (1ULL << ClickHouseParser::ANY)
-        | (1ULL << ClickHouseParser::ARRAY)
-        | (1ULL << ClickHouseParser::AS)
-        | (1ULL << ClickHouseParser::ASCENDING)
-        | (1ULL << ClickHouseParser::ASOF)
-        | (1ULL << ClickHouseParser::AST)
-        | (1ULL << ClickHouseParser::ASYNC)
-        | (1ULL << ClickHouseParser::ATTACH)
-        | (1ULL << ClickHouseParser::BETWEEN)
-        | (1ULL << ClickHouseParser::BOTH)
-        | (1ULL << ClickHouseParser::BY)
-        | (1ULL << ClickHouseParser::CASE)
-        | (1ULL << ClickHouseParser::CAST)
-        | (1ULL << ClickHouseParser::CHECK)
-        | (1ULL << ClickHouseParser::CLEAR)
-        | (1ULL << ClickHouseParser::CLUSTER)
-        | (1ULL << ClickHouseParser::CODEC)
-        | (1ULL << ClickHouseParser::COLLATE)
-        | (1ULL << ClickHouseParser::COLUMN)
-        | (1ULL << ClickHouseParser::COMMENT)
-        | (1ULL << ClickHouseParser::CONSTRAINT)
-        | (1ULL << ClickHouseParser::CREATE)
-        | (1ULL << ClickHouseParser::CROSS)
-        | (1ULL << ClickHouseParser::CUBE)
-        | (1ULL << ClickHouseParser::DATABASE)
-        | (1ULL << ClickHouseParser::DATABASES)
-        | (1ULL << ClickHouseParser::DATE)
-        | (1ULL << ClickHouseParser::DAY)
-        | (1ULL << ClickHouseParser::DEDUPLICATE)
-        | (1ULL << ClickHouseParser::DEFAULT)
-        | (1ULL << ClickHouseParser::DELAY)
-        | (1ULL << ClickHouseParser::DELETE)
-        | (1ULL << ClickHouseParser::DESC)
-        | (1ULL << ClickHouseParser::DESCENDING)
-        | (1ULL << ClickHouseParser::DESCRIBE)
-        | (1ULL << ClickHouseParser::DETACH)
-        | (1ULL << ClickHouseParser::DICTIONARIES)
-        | (1ULL << ClickHouseParser::DICTIONARY)
-        | (1ULL << ClickHouseParser::DISK)
-        | (1ULL << ClickHouseParser::DISTINCT)
-        | (1ULL << ClickHouseParser::DISTRIBUTED)
-        | (1ULL << ClickHouseParser::DROP)
-        | (1ULL << ClickHouseParser::ELSE)
-        | (1ULL << ClickHouseParser::END)
-        | (1ULL << ClickHouseParser::ENGINE)
-        | (1ULL << ClickHouseParser::EVENTS)
-        | (1ULL << ClickHouseParser::EXISTS)
-        | (1ULL << ClickHouseParser::EXPLAIN)
-        | (1ULL << ClickHouseParser::EXPRESSION)
-        | (1ULL << ClickHouseParser::EXTRACT)
-        | (1ULL << ClickHouseParser::FETCHES)
-        | (1ULL << ClickHouseParser::FINAL)
-        | (1ULL << ClickHouseParser::FIRST)
-        | (1ULL << ClickHouseParser::FLUSH)
-        | (1ULL << ClickHouseParser::FOR)
-        | (1ULL << ClickHouseParser::FORMAT))) != 0) || ((((_la - 64) & ~ 0x3fULL) == 0) &&
-        ((1ULL << (_la - 64)) & ((1ULL << (ClickHouseParser::FREEZE - 64))
-        | (1ULL << (ClickHouseParser::FROM - 64))
-        | (1ULL << (ClickHouseParser::FULL - 64))
-        | (1ULL << (ClickHouseParser::FUNCTION - 64))
-        | (1ULL << (ClickHouseParser::GLOBAL - 64))
-        | (1ULL << (ClickHouseParser::GRANULARITY - 64))
-        | (1ULL << (ClickHouseParser::GROUP - 64))
-        | (1ULL << (ClickHouseParser::HAVING - 64))
-        | (1ULL << (ClickHouseParser::HIERARCHICAL - 64))
-        | (1ULL << (ClickHouseParser::HOUR - 64))
-        | (1ULL << (ClickHouseParser::ID - 64))
-        | (1ULL << (ClickHouseParser::IF - 64))
-        | (1ULL << (ClickHouseParser::ILIKE - 64))
-        | (1ULL << (ClickHouseParser::IN - 64))
-        | (1ULL << (ClickHouseParser::INDEX - 64))
-        | (1ULL << (ClickHouseParser::INF - 64))
-        | (1ULL << (ClickHouseParser::INJECTIVE - 64))
-        | (1ULL << (ClickHouseParser::INNER - 64))
-        | (1ULL << (ClickHouseParser::INSERT - 64))
-        | (1ULL << (ClickHouseParser::INTERVAL - 64))
-        | (1ULL << (ClickHouseParser::INTO - 64))
-        | (1ULL << (ClickHouseParser::IS - 64))
-        | (1ULL << (ClickHouseParser::IS_OBJECT_ID - 64))
-        | (1ULL << (ClickHouseParser::JOIN - 64))
-        | (1ULL << (ClickHouseParser::KEY - 64))
-        | (1ULL << (ClickHouseParser::KILL - 64))
-        | (1ULL << (ClickHouseParser::LAST - 64))
-        | (1ULL << (ClickHouseParser::LAYOUT - 64))
-        | (1ULL << (ClickHouseParser::LEADING - 64))
-        | (1ULL << (ClickHouseParser::LEFT - 64))
-        | (1ULL << (ClickHouseParser::LIFETIME - 64))
-        | (1ULL << (ClickHouseParser::LIKE - 64))
-        | (1ULL << (ClickHouseParser::LIMIT - 64))
-        | (1ULL << (ClickHouseParser::LIVE - 64))
-        | (1ULL << (ClickHouseParser::LOCAL - 64))
-        | (1ULL << (ClickHouseParser::LOGS - 64))
-        | (1ULL << (ClickHouseParser::MATERIALIZE - 64))
-        | (1ULL << (ClickHouseParser::MATERIALIZED - 64))
-        | (1ULL << (ClickHouseParser::MAX - 64))
-        | (1ULL << (ClickHouseParser::MERGES - 64))
-        | (1ULL << (ClickHouseParser::MIN - 64))
-        | (1ULL << (ClickHouseParser::MINUTE - 64))
-        | (1ULL << (ClickHouseParser::MODIFY - 64))
-        | (1ULL << (ClickHouseParser::MONTH - 64))
-        | (1ULL << (ClickHouseParser::MOVE - 64))
-        | (1ULL << (ClickHouseParser::MUTATION - 64))
-        | (1ULL << (ClickHouseParser::NAN_SQL - 64))
-        | (1ULL << (ClickHouseParser::NO - 64))
-        | (1ULL << (ClickHouseParser::NOT - 64))
-        | (1ULL << (ClickHouseParser::NULL_SQL - 64))
-        | (1ULL << (ClickHouseParser::NULLS - 64))
-        | (1ULL << (ClickHouseParser::OFFSET - 64))
-        | (1ULL << (ClickHouseParser::ON - 64))
-        | (1ULL << (ClickHouseParser::OPTIMIZE - 64))
-        | (1ULL << (ClickHouseParser::OR - 64))
-        | (1ULL << (ClickHouseParser::ORDER - 64))
-        | (1ULL << (ClickHouseParser::OUTER - 64))
-        | (1ULL << (ClickHouseParser::OUTFILE - 64))
-        | (1ULL << (ClickHouseParser::PARTITION - 64))
-        | (1ULL << (ClickHouseParser::POPULATE - 64))
-        | (1ULL << (ClickHouseParser::PREWHERE - 64))
-        | (1ULL << (ClickHouseParser::PRIMARY - 64))
-        | (1ULL << (ClickHouseParser::QUARTER - 64)))) != 0) || ((((_la - 128) & ~ 0x3fULL) == 0) &&
-        ((1ULL << (_la - 128)) & ((1ULL << (ClickHouseParser::RANGE - 128))
-        | (1ULL << (ClickHouseParser::RELOAD - 128))
-        | (1ULL << (ClickHouseParser::REMOVE - 128))
-        | (1ULL << (ClickHouseParser::RENAME - 128))
-        | (1ULL << (ClickHouseParser::REPLACE - 128))
-        | (1ULL << (ClickHouseParser::REPLICA - 128))
-        | (1ULL << (ClickHouseParser::REPLICATED - 128))
-        | (1ULL << (ClickHouseParser::RIGHT - 128))
-        | (1ULL << (ClickHouseParser::ROLLUP - 128))
-        | (1ULL << (ClickHouseParser::SAMPLE - 128))
-        | (1ULL << (ClickHouseParser::SECOND - 128))
-        | (1ULL << (ClickHouseParser::SELECT - 128))
-        | (1ULL << (ClickHouseParser::SEMI - 128))
-        | (1ULL << (ClickHouseParser::SENDS - 128))
-        | (1ULL << (ClickHouseParser::SET - 128))
-        | (1ULL << (ClickHouseParser::SETTINGS - 128))
-        | (1ULL << (ClickHouseParser::SHOW - 128))
-        | (1ULL << (ClickHouseParser::SOURCE - 128))
-        | (1ULL << (ClickHouseParser::START - 128))
-        | (1ULL << (ClickHouseParser::STOP - 128))
-        | (1ULL << (ClickHouseParser::SUBSTRING - 128))
-        | (1ULL << (ClickHouseParser::SYNC - 128))
-        | (1ULL << (ClickHouseParser::SYNTAX - 128))
-        | (1ULL << (ClickHouseParser::SYSTEM - 128))
-        | (1ULL << (ClickHouseParser::TABLE - 128))
-        | (1ULL << (ClickHouseParser::TABLES - 128))
-        | (1ULL << (ClickHouseParser::TEMPORARY - 128))
-        | (1ULL << (ClickHouseParser::TEST - 128))
-        | (1ULL << (ClickHouseParser::THEN - 128))
-        | (1ULL << (ClickHouseParser::TIES - 128))
-        | (1ULL << (ClickHouseParser::TIMEOUT - 128))
-        | (1ULL << (ClickHouseParser::TIMESTAMP - 128))
-        | (1ULL << (ClickHouseParser::TO - 128))
-        | (1ULL << (ClickHouseParser::TOP - 128))
-        | (1ULL << (ClickHouseParser::TOTALS - 128))
-        | (1ULL << (ClickHouseParser::TRAILING - 128))
-        | (1ULL << (ClickHouseParser::TRIM - 128))
-        | (1ULL << (ClickHouseParser::TRUNCATE - 128))
-        | (1ULL << (ClickHouseParser::TTL - 128))
-        | (1ULL << (ClickHouseParser::TYPE - 128))
-        | (1ULL << (ClickHouseParser::UNION - 128))
-        | (1ULL << (ClickHouseParser::UPDATE - 128))
-        | (1ULL << (ClickHouseParser::USE - 128))
-        | (1ULL << (ClickHouseParser::USING - 128))
-        | (1ULL << (ClickHouseParser::UUID - 128))
-        | (1ULL << (ClickHouseParser::VALUES - 128))
-        | (1ULL << (ClickHouseParser::VIEW - 128))
-        | (1ULL << (ClickHouseParser::VOLUME - 128))
-        | (1ULL << (ClickHouseParser::WATCH - 128))
-        | (1ULL << (ClickHouseParser::WEEK - 128))
-        | (1ULL << (ClickHouseParser::WHEN - 128))
-        | (1ULL << (ClickHouseParser::WHERE - 128))
-        | (1ULL << (ClickHouseParser::WITH - 128))
-        | (1ULL << (ClickHouseParser::YEAR - 128))
-        | (1ULL << (ClickHouseParser::JSON_FALSE - 128))
-        | (1ULL << (ClickHouseParser::JSON_TRUE - 128))
-        | (1ULL << (ClickHouseParser::IDENTIFIER - 128))
-        | (1ULL << (ClickHouseParser::FLOATING_LITERAL - 128))
-        | (1ULL << (ClickHouseParser::OCTAL_LITERAL - 128))
-        | (1ULL << (ClickHouseParser::DECIMAL_LITERAL - 128))
-        | (1ULL << (ClickHouseParser::HEXADECIMAL_LITERAL - 128))
-        | (1ULL << (ClickHouseParser::STRING_LITERAL - 128))
-        | (1ULL << (ClickHouseParser::ASTERISK - 128)))) != 0) || ((((_la - 197) & ~ 0x3fULL) == 0) &&
-        ((1ULL << (_la - 197)) & ((1ULL << (ClickHouseParser::DASH - 197))
-        | (1ULL << (ClickHouseParser::DOT - 197))
-        | (1ULL << (ClickHouseParser::LBRACKET - 197))
-        | (1ULL << (ClickHouseParser::LPAREN - 197))
-        | (1ULL << (ClickHouseParser::PLUS - 197)))) != 0)) {
-        setState(964);
-        columnExprList();
-      }
-      setState(967);
-      match(ClickHouseParser::RPAREN);
-    }
-
-  }
-  catch (RecognitionException &e) {
-    _errHandler->reportError(this, e);
-    _localctx->exception = std::current_exception();
-    _errHandler->recover(this, _localctx->exception);
-  }
-
-  return _localctx;
-}
-
-//----------------- TtlExprContext ------------------------------------------------------------------
-
-ClickHouseParser::TtlExprContext::TtlExprContext(ParserRuleContext *parent, size_t invokingState)
-  : ParserRuleContext(parent, invokingState) {
-}
-
-ClickHouseParser::ColumnExprContext* ClickHouseParser::TtlExprContext::columnExpr() {
-  return getRuleContext<ClickHouseParser::ColumnExprContext>(0);
-}
-
-tree::TerminalNode* ClickHouseParser::TtlExprContext::DELETE() {
-  return getToken(ClickHouseParser::DELETE, 0);
-}
-
-tree::TerminalNode* ClickHouseParser::TtlExprContext::TO() {
-  return getToken(ClickHouseParser::TO, 0);
-}
-
-tree::TerminalNode* ClickHouseParser::TtlExprContext::DISK() {
-  return getToken(ClickHouseParser::DISK, 0);
-}
-
-tree::TerminalNode* ClickHouseParser::TtlExprContext::STRING_LITERAL() {
-  return getToken(ClickHouseParser::STRING_LITERAL, 0);
-}
-
-tree::TerminalNode* ClickHouseParser::TtlExprContext::VOLUME() {
-  return getToken(ClickHouseParser::VOLUME, 0);
-}
-
-
-size_t ClickHouseParser::TtlExprContext::getRuleIndex() const {
-  return ClickHouseParser::RuleTtlExpr;
-}
-
-antlrcpp::Any ClickHouseParser::TtlExprContext::accept(tree::ParseTreeVisitor *visitor) {
-  if (auto parserVisitor = dynamic_cast<ClickHouseParserVisitor*>(visitor))
-    return parserVisitor->visitTtlExpr(this);
-  else
-    return visitor->visitChildren(this);
-}
-
-ClickHouseParser::TtlExprContext* ClickHouseParser::ttlExpr() {
-  TtlExprContext *_localctx = _tracker.createInstance<TtlExprContext>(_ctx, getState());
-  enterRule(_localctx, 78, ClickHouseParser::RuleTtlExpr);
-
-  auto onExit = finally([=] {
-    exitRule();
-  });
-  try {
-    enterOuterAlt(_localctx, 1);
-    setState(970);
-    columnExpr(0);
-    setState(978);
-    _errHandler->sync(this);
-
-    switch (getInterpreter()->adaptivePredict(_input, 106, _ctx)) {
-    case 1: {
-      setState(971);
-      match(ClickHouseParser::DELETE);
-      break;
-    }
-
-    case 2: {
-      setState(972);
-      match(ClickHouseParser::TO);
-      setState(973);
-      match(ClickHouseParser::DISK);
-      setState(974);
-      match(ClickHouseParser::STRING_LITERAL);
-      break;
-    }
-
-    case 3: {
-      setState(975);
-      match(ClickHouseParser::TO);
-      setState(976);
-      match(ClickHouseParser::VOLUME);
-      setState(977);
-      match(ClickHouseParser::STRING_LITERAL);
-      break;
-    }
-
-    }
-
-  }
-  catch (RecognitionException &e) {
-    _errHandler->reportError(this, e);
-    _localctx->exception = std::current_exception();
-    _errHandler->recover(this, _localctx->exception);
-  }
-
-  return _localctx;
-}
-
-//----------------- DescribeStmtContext ------------------------------------------------------------------
-
-ClickHouseParser::DescribeStmtContext::DescribeStmtContext(ParserRuleContext *parent, size_t invokingState)
-  : ParserRuleContext(parent, invokingState) {
-}
-
-ClickHouseParser::TableExprContext* ClickHouseParser::DescribeStmtContext::tableExpr() {
-  return getRuleContext<ClickHouseParser::TableExprContext>(0);
-}
-
-tree::TerminalNode* ClickHouseParser::DescribeStmtContext::DESCRIBE() {
-  return getToken(ClickHouseParser::DESCRIBE, 0);
-}
-
-tree::TerminalNode* ClickHouseParser::DescribeStmtContext::DESC() {
-  return getToken(ClickHouseParser::DESC, 0);
-}
-
-tree::TerminalNode* ClickHouseParser::DescribeStmtContext::TABLE() {
-  return getToken(ClickHouseParser::TABLE, 0);
-}
-
-
-size_t ClickHouseParser::DescribeStmtContext::getRuleIndex() const {
-  return ClickHouseParser::RuleDescribeStmt;
-}
-
-antlrcpp::Any ClickHouseParser::DescribeStmtContext::accept(tree::ParseTreeVisitor *visitor) {
-  if (auto parserVisitor = dynamic_cast<ClickHouseParserVisitor*>(visitor))
-    return parserVisitor->visitDescribeStmt(this);
-  else
-    return visitor->visitChildren(this);
-}
-
-ClickHouseParser::DescribeStmtContext* ClickHouseParser::describeStmt() {
-  DescribeStmtContext *_localctx = _tracker.createInstance<DescribeStmtContext>(_ctx, getState());
-  enterRule(_localctx, 80, ClickHouseParser::RuleDescribeStmt);
-  size_t _la = 0;
-
-  auto onExit = finally([=] {
-    exitRule();
-  });
-  try {
-    enterOuterAlt(_localctx, 1);
-    setState(980);
-    _la = _input->LA(1);
-    if (!(_la == ClickHouseParser::DESC
-
-    || _la == ClickHouseParser::DESCRIBE)) {
-    _errHandler->recoverInline(this);
-    }
-    else {
-      _errHandler->reportMatch(this);
-      consume();
-    }
-    setState(982);
-    _errHandler->sync(this);
-
-    switch (getInterpreter()->adaptivePredict(_input, 107, _ctx)) {
-    case 1: {
-      setState(981);
-      match(ClickHouseParser::TABLE);
-      break;
-    }
-
-    }
-    setState(984);
-    tableExpr(0);
-
-  }
-  catch (RecognitionException &e) {
-    _errHandler->reportError(this, e);
-    _localctx->exception = std::current_exception();
-    _errHandler->recover(this, _localctx->exception);
-  }
-
-  return _localctx;
-}
-
-//----------------- DropStmtContext ------------------------------------------------------------------
-
-ClickHouseParser::DropStmtContext::DropStmtContext(ParserRuleContext *parent, size_t invokingState)
-  : ParserRuleContext(parent, invokingState) {
-}
-
-
-size_t ClickHouseParser::DropStmtContext::getRuleIndex() const {
-  return ClickHouseParser::RuleDropStmt;
-}
-
-void ClickHouseParser::DropStmtContext::copyFrom(DropStmtContext *ctx) {
-  ParserRuleContext::copyFrom(ctx);
-}
-
-//----------------- DropDatabaseStmtContext ------------------------------------------------------------------
-
-tree::TerminalNode* ClickHouseParser::DropDatabaseStmtContext::DATABASE() {
-  return getToken(ClickHouseParser::DATABASE, 0);
-}
-
-ClickHouseParser::DatabaseIdentifierContext* ClickHouseParser::DropDatabaseStmtContext::databaseIdentifier() {
-  return getRuleContext<ClickHouseParser::DatabaseIdentifierContext>(0);
-}
-
-tree::TerminalNode* ClickHouseParser::DropDatabaseStmtContext::DETACH() {
-  return getToken(ClickHouseParser::DETACH, 0);
-}
-
-tree::TerminalNode* ClickHouseParser::DropDatabaseStmtContext::DROP() {
-  return getToken(ClickHouseParser::DROP, 0);
-}
-
-tree::TerminalNode* ClickHouseParser::DropDatabaseStmtContext::IF() {
-  return getToken(ClickHouseParser::IF, 0);
-}
-
-tree::TerminalNode* ClickHouseParser::DropDatabaseStmtContext::EXISTS() {
-  return getToken(ClickHouseParser::EXISTS, 0);
-}
-
-ClickHouseParser::ClusterClauseContext* ClickHouseParser::DropDatabaseStmtContext::clusterClause() {
-  return getRuleContext<ClickHouseParser::ClusterClauseContext>(0);
-}
-
-ClickHouseParser::DropDatabaseStmtContext::DropDatabaseStmtContext(DropStmtContext *ctx) { copyFrom(ctx); }
-
-antlrcpp::Any ClickHouseParser::DropDatabaseStmtContext::accept(tree::ParseTreeVisitor *visitor) {
-  if (auto parserVisitor = dynamic_cast<ClickHouseParserVisitor*>(visitor))
-    return parserVisitor->visitDropDatabaseStmt(this);
-  else
-    return visitor->visitChildren(this);
-}
-//----------------- DropTableStmtContext ------------------------------------------------------------------
-
-ClickHouseParser::TableIdentifierContext* ClickHouseParser::DropTableStmtContext::tableIdentifier() {
-  return getRuleContext<ClickHouseParser::TableIdentifierContext>(0);
-}
-
-tree::TerminalNode* ClickHouseParser::DropTableStmtContext::DETACH() {
-  return getToken(ClickHouseParser::DETACH, 0);
-}
-
-tree::TerminalNode* ClickHouseParser::DropTableStmtContext::DROP() {
-  return getToken(ClickHouseParser::DROP, 0);
-}
-
-tree::TerminalNode* ClickHouseParser::DropTableStmtContext::DICTIONARY() {
-  return getToken(ClickHouseParser::DICTIONARY, 0);
-}
-
-tree::TerminalNode* ClickHouseParser::DropTableStmtContext::TABLE() {
-  return getToken(ClickHouseParser::TABLE, 0);
-}
-
-tree::TerminalNode* ClickHouseParser::DropTableStmtContext::VIEW() {
-  return getToken(ClickHouseParser::VIEW, 0);
-}
-
-tree::TerminalNode* ClickHouseParser::DropTableStmtContext::IF() {
-  return getToken(ClickHouseParser::IF, 0);
-}
-
-tree::TerminalNode* ClickHouseParser::DropTableStmtContext::EXISTS() {
-  return getToken(ClickHouseParser::EXISTS, 0);
-}
-
-ClickHouseParser::ClusterClauseContext* ClickHouseParser::DropTableStmtContext::clusterClause() {
-  return getRuleContext<ClickHouseParser::ClusterClauseContext>(0);
-}
-
-tree::TerminalNode* ClickHouseParser::DropTableStmtContext::NO() {
-  return getToken(ClickHouseParser::NO, 0);
-}
-
-tree::TerminalNode* ClickHouseParser::DropTableStmtContext::DELAY() {
-  return getToken(ClickHouseParser::DELAY, 0);
-}
-
-tree::TerminalNode* ClickHouseParser::DropTableStmtContext::TEMPORARY() {
-  return getToken(ClickHouseParser::TEMPORARY, 0);
-}
-
-ClickHouseParser::DropTableStmtContext::DropTableStmtContext(DropStmtContext *ctx) { copyFrom(ctx); }
-
-antlrcpp::Any ClickHouseParser::DropTableStmtContext::accept(tree::ParseTreeVisitor *visitor) {
-  if (auto parserVisitor = dynamic_cast<ClickHouseParserVisitor*>(visitor))
-    return parserVisitor->visitDropTableStmt(this);
-  else
-    return visitor->visitChildren(this);
-}
-ClickHouseParser::DropStmtContext* ClickHouseParser::dropStmt() {
-  DropStmtContext *_localctx = _tracker.createInstance<DropStmtContext>(_ctx, getState());
-  enterRule(_localctx, 82, ClickHouseParser::RuleDropStmt);
-  size_t _la = 0;
-
-  auto onExit = finally([=] {
-    exitRule();
-  });
-  try {
-    setState(1017);
-    _errHandler->sync(this);
-    switch (getInterpreter()->adaptivePredict(_input, 115, _ctx)) {
-    case 1: {
-      _localctx = dynamic_cast<DropStmtContext *>(_tracker.createInstance<ClickHouseParser::DropDatabaseStmtContext>(_localctx));
-      enterOuterAlt(_localctx, 1);
-      setState(986);
-      _la = _input->LA(1);
-      if (!(_la == ClickHouseParser::DETACH
-
-      || _la == ClickHouseParser::DROP)) {
-      _errHandler->recoverInline(this);
-      }
-      else {
-        _errHandler->reportMatch(this);
-        consume();
-      }
-      setState(987);
-      match(ClickHouseParser::DATABASE);
-      setState(990);
-      _errHandler->sync(this);
-
-      switch (getInterpreter()->adaptivePredict(_input, 108, _ctx)) {
-      case 1: {
-        setState(988);
-        match(ClickHouseParser::IF);
-        setState(989);
-        match(ClickHouseParser::EXISTS);
-        break;
-      }
-
-      }
-      setState(992);
-      databaseIdentifier();
-      setState(994);
-      _errHandler->sync(this);
-
-      _la = _input->LA(1);
-      if (_la == ClickHouseParser::ON) {
-        setState(993);
-        clusterClause();
-      }
-      break;
-    }
-
-    case 2: {
-      _localctx = dynamic_cast<DropStmtContext *>(_tracker.createInstance<ClickHouseParser::DropTableStmtContext>(_localctx));
-      enterOuterAlt(_localctx, 2);
-      setState(996);
-      _la = _input->LA(1);
-      if (!(_la == ClickHouseParser::DETACH
-
-      || _la == ClickHouseParser::DROP)) {
-      _errHandler->recoverInline(this);
-      }
-      else {
-        _errHandler->reportMatch(this);
-        consume();
-      }
-      setState(1003);
-      _errHandler->sync(this);
-      switch (_input->LA(1)) {
-        case ClickHouseParser::DICTIONARY: {
-          setState(997);
-          match(ClickHouseParser::DICTIONARY);
-          break;
-        }
-
-        case ClickHouseParser::TABLE:
-        case ClickHouseParser::TEMPORARY: {
-          setState(999);
-          _errHandler->sync(this);
-
-          _la = _input->LA(1);
-          if (_la == ClickHouseParser::TEMPORARY) {
-            setState(998);
-            match(ClickHouseParser::TEMPORARY);
-          }
-          setState(1001);
-          match(ClickHouseParser::TABLE);
-          break;
-        }
-
-        case ClickHouseParser::VIEW: {
-          setState(1002);
-          match(ClickHouseParser::VIEW);
-          break;
-        }
-
-      default:
-        throw NoViableAltException(this);
-      }
-      setState(1007);
-      _errHandler->sync(this);
-
-      switch (getInterpreter()->adaptivePredict(_input, 112, _ctx)) {
-      case 1: {
-        setState(1005);
-        match(ClickHouseParser::IF);
-        setState(1006);
-        match(ClickHouseParser::EXISTS);
-        break;
-      }
-
-      }
-      setState(1009);
-      tableIdentifier();
-      setState(1011);
-      _errHandler->sync(this);
-
-      _la = _input->LA(1);
-      if (_la == ClickHouseParser::ON) {
-        setState(1010);
-        clusterClause();
-      }
-      setState(1015);
-      _errHandler->sync(this);
-
-      _la = _input->LA(1);
-      if (_la == ClickHouseParser::NO) {
-        setState(1013);
-        match(ClickHouseParser::NO);
-        setState(1014);
-        match(ClickHouseParser::DELAY);
-      }
-      break;
-    }
-
-    }
-
-  }
-  catch (RecognitionException &e) {
-    _errHandler->reportError(this, e);
-    _localctx->exception = std::current_exception();
-    _errHandler->recover(this, _localctx->exception);
-  }
-
-  return _localctx;
-}
-
-//----------------- ExistsStmtContext ------------------------------------------------------------------
-
-ClickHouseParser::ExistsStmtContext::ExistsStmtContext(ParserRuleContext *parent, size_t invokingState)
-  : ParserRuleContext(parent, invokingState) {
-}
-
-
-size_t ClickHouseParser::ExistsStmtContext::getRuleIndex() const {
-  return ClickHouseParser::RuleExistsStmt;
-}
-
-void ClickHouseParser::ExistsStmtContext::copyFrom(ExistsStmtContext *ctx) {
-  ParserRuleContext::copyFrom(ctx);
-}
-
-//----------------- ExistsTableStmtContext ------------------------------------------------------------------
-
-tree::TerminalNode* ClickHouseParser::ExistsTableStmtContext::EXISTS() {
-  return getToken(ClickHouseParser::EXISTS, 0);
-}
-
-ClickHouseParser::TableIdentifierContext* ClickHouseParser::ExistsTableStmtContext::tableIdentifier() {
-  return getRuleContext<ClickHouseParser::TableIdentifierContext>(0);
-}
-
-tree::TerminalNode* ClickHouseParser::ExistsTableStmtContext::DICTIONARY() {
-  return getToken(ClickHouseParser::DICTIONARY, 0);
-}
-
-tree::TerminalNode* ClickHouseParser::ExistsTableStmtContext::TABLE() {
-  return getToken(ClickHouseParser::TABLE, 0);
-}
-
-tree::TerminalNode* ClickHouseParser::ExistsTableStmtContext::VIEW() {
-  return getToken(ClickHouseParser::VIEW, 0);
-}
-
-tree::TerminalNode* ClickHouseParser::ExistsTableStmtContext::TEMPORARY() {
-  return getToken(ClickHouseParser::TEMPORARY, 0);
-}
-
-ClickHouseParser::ExistsTableStmtContext::ExistsTableStmtContext(ExistsStmtContext *ctx) { copyFrom(ctx); }
-
-antlrcpp::Any ClickHouseParser::ExistsTableStmtContext::accept(tree::ParseTreeVisitor *visitor) {
-  if (auto parserVisitor = dynamic_cast<ClickHouseParserVisitor*>(visitor))
-    return parserVisitor->visitExistsTableStmt(this);
-  else
-    return visitor->visitChildren(this);
-}
-//----------------- ExistsDatabaseStmtContext ------------------------------------------------------------------
-
-tree::TerminalNode* ClickHouseParser::ExistsDatabaseStmtContext::EXISTS() {
-  return getToken(ClickHouseParser::EXISTS, 0);
-}
-
-tree::TerminalNode* ClickHouseParser::ExistsDatabaseStmtContext::DATABASE() {
-  return getToken(ClickHouseParser::DATABASE, 0);
-}
-
-ClickHouseParser::DatabaseIdentifierContext* ClickHouseParser::ExistsDatabaseStmtContext::databaseIdentifier() {
-  return getRuleContext<ClickHouseParser::DatabaseIdentifierContext>(0);
-}
-
-ClickHouseParser::ExistsDatabaseStmtContext::ExistsDatabaseStmtContext(ExistsStmtContext *ctx) { copyFrom(ctx); }
-
-antlrcpp::Any ClickHouseParser::ExistsDatabaseStmtContext::accept(tree::ParseTreeVisitor *visitor) {
-  if (auto parserVisitor = dynamic_cast<ClickHouseParserVisitor*>(visitor))
-    return parserVisitor->visitExistsDatabaseStmt(this);
-  else
-    return visitor->visitChildren(this);
-}
-ClickHouseParser::ExistsStmtContext* ClickHouseParser::existsStmt() {
-  ExistsStmtContext *_localctx = _tracker.createInstance<ExistsStmtContext>(_ctx, getState());
-  enterRule(_localctx, 84, ClickHouseParser::RuleExistsStmt);
-  size_t _la = 0;
-
-  auto onExit = finally([=] {
-    exitRule();
-  });
-  try {
-    setState(1032);
-    _errHandler->sync(this);
-    switch (getInterpreter()->adaptivePredict(_input, 118, _ctx)) {
-    case 1: {
-      _localctx = dynamic_cast<ExistsStmtContext *>(_tracker.createInstance<ClickHouseParser::ExistsDatabaseStmtContext>(_localctx));
-      enterOuterAlt(_localctx, 1);
-      setState(1019);
-      match(ClickHouseParser::EXISTS);
-      setState(1020);
-      match(ClickHouseParser::DATABASE);
-      setState(1021);
-      databaseIdentifier();
-      break;
-    }
-
-    case 2: {
-      _localctx = dynamic_cast<ExistsStmtContext *>(_tracker.createInstance<ClickHouseParser::ExistsTableStmtContext>(_localctx));
-      enterOuterAlt(_localctx, 2);
-      setState(1022);
-      match(ClickHouseParser::EXISTS);
-      setState(1029);
-      _errHandler->sync(this);
-
-      switch (getInterpreter()->adaptivePredict(_input, 117, _ctx)) {
-      case 1: {
-        setState(1023);
-        match(ClickHouseParser::DICTIONARY);
-        break;
-      }
-
-      case 2: {
-        setState(1025);
-        _errHandler->sync(this);
-
-        _la = _input->LA(1);
-        if (_la == ClickHouseParser::TEMPORARY) {
-          setState(1024);
-          match(ClickHouseParser::TEMPORARY);
-        }
-        setState(1027);
-        match(ClickHouseParser::TABLE);
-        break;
-      }
-
-      case 3: {
-        setState(1028);
-        match(ClickHouseParser::VIEW);
-        break;
-      }
-
-      }
-      setState(1031);
-      tableIdentifier();
-      break;
-    }
-
-    }
-
-  }
-  catch (RecognitionException &e) {
-    _errHandler->reportError(this, e);
-    _localctx->exception = std::current_exception();
-    _errHandler->recover(this, _localctx->exception);
-  }
-
-  return _localctx;
-}
-
-//----------------- ExplainStmtContext ------------------------------------------------------------------
-
-ClickHouseParser::ExplainStmtContext::ExplainStmtContext(ParserRuleContext *parent, size_t invokingState)
-  : ParserRuleContext(parent, invokingState) {
-}
-
-
-size_t ClickHouseParser::ExplainStmtContext::getRuleIndex() const {
-  return ClickHouseParser::RuleExplainStmt;
-}
-
-void ClickHouseParser::ExplainStmtContext::copyFrom(ExplainStmtContext *ctx) {
-  ParserRuleContext::copyFrom(ctx);
-}
-
-//----------------- ExplainSyntaxStmtContext ------------------------------------------------------------------
-
-tree::TerminalNode* ClickHouseParser::ExplainSyntaxStmtContext::EXPLAIN() {
-  return getToken(ClickHouseParser::EXPLAIN, 0);
-}
-
-tree::TerminalNode* ClickHouseParser::ExplainSyntaxStmtContext::SYNTAX() {
-  return getToken(ClickHouseParser::SYNTAX, 0);
-}
-
-ClickHouseParser::QueryContext* ClickHouseParser::ExplainSyntaxStmtContext::query() {
-  return getRuleContext<ClickHouseParser::QueryContext>(0);
-}
-
-ClickHouseParser::ExplainSyntaxStmtContext::ExplainSyntaxStmtContext(ExplainStmtContext *ctx) { copyFrom(ctx); }
-
-antlrcpp::Any ClickHouseParser::ExplainSyntaxStmtContext::accept(tree::ParseTreeVisitor *visitor) {
-  if (auto parserVisitor = dynamic_cast<ClickHouseParserVisitor*>(visitor))
-    return parserVisitor->visitExplainSyntaxStmt(this);
-  else
-    return visitor->visitChildren(this);
-}
-//----------------- ExplainASTStmtContext ------------------------------------------------------------------
-
-tree::TerminalNode* ClickHouseParser::ExplainASTStmtContext::EXPLAIN() {
-  return getToken(ClickHouseParser::EXPLAIN, 0);
-}
-
-tree::TerminalNode* ClickHouseParser::ExplainASTStmtContext::AST() {
-  return getToken(ClickHouseParser::AST, 0);
-}
-
-ClickHouseParser::QueryContext* ClickHouseParser::ExplainASTStmtContext::query() {
-  return getRuleContext<ClickHouseParser::QueryContext>(0);
-}
-
-ClickHouseParser::ExplainASTStmtContext::ExplainASTStmtContext(ExplainStmtContext *ctx) { copyFrom(ctx); }
-
-antlrcpp::Any ClickHouseParser::ExplainASTStmtContext::accept(tree::ParseTreeVisitor *visitor) {
-  if (auto parserVisitor = dynamic_cast<ClickHouseParserVisitor*>(visitor))
-    return parserVisitor->visitExplainASTStmt(this);
-  else
-    return visitor->visitChildren(this);
-}
-ClickHouseParser::ExplainStmtContext* ClickHouseParser::explainStmt() {
-  ExplainStmtContext *_localctx = _tracker.createInstance<ExplainStmtContext>(_ctx, getState());
-  enterRule(_localctx, 86, ClickHouseParser::RuleExplainStmt);
-
-  auto onExit = finally([=] {
-    exitRule();
-  });
-  try {
-    setState(1040);
-    _errHandler->sync(this);
-    switch (getInterpreter()->adaptivePredict(_input, 119, _ctx)) {
-    case 1: {
-      _localctx = dynamic_cast<ExplainStmtContext *>(_tracker.createInstance<ClickHouseParser::ExplainASTStmtContext>(_localctx));
-      enterOuterAlt(_localctx, 1);
-      setState(1034);
-      match(ClickHouseParser::EXPLAIN);
-      setState(1035);
-      match(ClickHouseParser::AST);
-      setState(1036);
-      query();
-      break;
-    }
-
-    case 2: {
-      _localctx = dynamic_cast<ExplainStmtContext *>(_tracker.createInstance<ClickHouseParser::ExplainSyntaxStmtContext>(_localctx));
-      enterOuterAlt(_localctx, 2);
-      setState(1037);
-      match(ClickHouseParser::EXPLAIN);
-      setState(1038);
-      match(ClickHouseParser::SYNTAX);
-      setState(1039);
-      query();
-      break;
-    }
-
-    }
-
-  }
-  catch (RecognitionException &e) {
-    _errHandler->reportError(this, e);
-    _localctx->exception = std::current_exception();
-    _errHandler->recover(this, _localctx->exception);
-  }
-
-  return _localctx;
-}
-
-//----------------- InsertStmtContext ------------------------------------------------------------------
-
-ClickHouseParser::InsertStmtContext::InsertStmtContext(ParserRuleContext *parent, size_t invokingState)
-  : ParserRuleContext(parent, invokingState) {
-}
-
-tree::TerminalNode* ClickHouseParser::InsertStmtContext::INSERT() {
-  return getToken(ClickHouseParser::INSERT, 0);
-}
-
-tree::TerminalNode* ClickHouseParser::InsertStmtContext::INTO() {
-  return getToken(ClickHouseParser::INTO, 0);
-}
-
-ClickHouseParser::DataClauseContext* ClickHouseParser::InsertStmtContext::dataClause() {
-  return getRuleContext<ClickHouseParser::DataClauseContext>(0);
-}
-
-ClickHouseParser::TableIdentifierContext* ClickHouseParser::InsertStmtContext::tableIdentifier() {
-  return getRuleContext<ClickHouseParser::TableIdentifierContext>(0);
-}
-
-tree::TerminalNode* ClickHouseParser::InsertStmtContext::FUNCTION() {
-  return getToken(ClickHouseParser::FUNCTION, 0);
-}
-
-ClickHouseParser::TableFunctionExprContext* ClickHouseParser::InsertStmtContext::tableFunctionExpr() {
-  return getRuleContext<ClickHouseParser::TableFunctionExprContext>(0);
-}
-
-tree::TerminalNode* ClickHouseParser::InsertStmtContext::TABLE() {
-  return getToken(ClickHouseParser::TABLE, 0);
-}
-
-ClickHouseParser::ColumnsClauseContext* ClickHouseParser::InsertStmtContext::columnsClause() {
-  return getRuleContext<ClickHouseParser::ColumnsClauseContext>(0);
-}
-
-
-size_t ClickHouseParser::InsertStmtContext::getRuleIndex() const {
-  return ClickHouseParser::RuleInsertStmt;
-}
-
-antlrcpp::Any ClickHouseParser::InsertStmtContext::accept(tree::ParseTreeVisitor *visitor) {
-  if (auto parserVisitor = dynamic_cast<ClickHouseParserVisitor*>(visitor))
-    return parserVisitor->visitInsertStmt(this);
-  else
-    return visitor->visitChildren(this);
-}
-
-ClickHouseParser::InsertStmtContext* ClickHouseParser::insertStmt() {
-  InsertStmtContext *_localctx = _tracker.createInstance<InsertStmtContext>(_ctx, getState());
-  enterRule(_localctx, 88, ClickHouseParser::RuleInsertStmt);
-
-  auto onExit = finally([=] {
-    exitRule();
-  });
-  try {
-    enterOuterAlt(_localctx, 1);
-    setState(1042);
-    match(ClickHouseParser::INSERT);
-    setState(1043);
-    match(ClickHouseParser::INTO);
-    setState(1045);
-    _errHandler->sync(this);
-
-    switch (getInterpreter()->adaptivePredict(_input, 120, _ctx)) {
-    case 1: {
-      setState(1044);
-      match(ClickHouseParser::TABLE);
-      break;
-    }
-
-    }
-    setState(1050);
-    _errHandler->sync(this);
-    switch (getInterpreter()->adaptivePredict(_input, 121, _ctx)) {
-    case 1: {
-      setState(1047);
-      tableIdentifier();
-      break;
-    }
-
-    case 2: {
-      setState(1048);
-      match(ClickHouseParser::FUNCTION);
-      setState(1049);
-      tableFunctionExpr();
-      break;
-    }
-
-    }
-    setState(1053);
-    _errHandler->sync(this);
-
-    switch (getInterpreter()->adaptivePredict(_input, 122, _ctx)) {
-    case 1: {
-      setState(1052);
-      columnsClause();
-      break;
-    }
-
-    }
-    setState(1055);
-    dataClause();
-
-  }
-  catch (RecognitionException &e) {
-    _errHandler->reportError(this, e);
-    _localctx->exception = std::current_exception();
-    _errHandler->recover(this, _localctx->exception);
-  }
-
-  return _localctx;
-}
-
-//----------------- ColumnsClauseContext ------------------------------------------------------------------
-
-ClickHouseParser::ColumnsClauseContext::ColumnsClauseContext(ParserRuleContext *parent, size_t invokingState)
-  : ParserRuleContext(parent, invokingState) {
-}
-
-tree::TerminalNode* ClickHouseParser::ColumnsClauseContext::LPAREN() {
ClickHouseParser::ColumnsClauseContext::LPAREN() { - return getToken(ClickHouseParser::LPAREN, 0); -} - -std::vector ClickHouseParser::ColumnsClauseContext::nestedIdentifier() { - return getRuleContexts(); -} - -ClickHouseParser::NestedIdentifierContext* ClickHouseParser::ColumnsClauseContext::nestedIdentifier(size_t i) { - return getRuleContext(i); -} - -tree::TerminalNode* ClickHouseParser::ColumnsClauseContext::RPAREN() { - return getToken(ClickHouseParser::RPAREN, 0); -} - -std::vector ClickHouseParser::ColumnsClauseContext::COMMA() { - return getTokens(ClickHouseParser::COMMA); -} - -tree::TerminalNode* ClickHouseParser::ColumnsClauseContext::COMMA(size_t i) { - return getToken(ClickHouseParser::COMMA, i); -} - - -size_t ClickHouseParser::ColumnsClauseContext::getRuleIndex() const { - return ClickHouseParser::RuleColumnsClause; -} - -antlrcpp::Any ClickHouseParser::ColumnsClauseContext::accept(tree::ParseTreeVisitor *visitor) { - if (auto parserVisitor = dynamic_cast(visitor)) - return parserVisitor->visitColumnsClause(this); - else - return visitor->visitChildren(this); -} - -ClickHouseParser::ColumnsClauseContext* ClickHouseParser::columnsClause() { - ColumnsClauseContext *_localctx = _tracker.createInstance(_ctx, getState()); - enterRule(_localctx, 90, ClickHouseParser::RuleColumnsClause); - size_t _la = 0; - - auto onExit = finally([=] { - exitRule(); - }); - try { - enterOuterAlt(_localctx, 1); - setState(1057); - match(ClickHouseParser::LPAREN); - setState(1058); - nestedIdentifier(); - setState(1063); - _errHandler->sync(this); - _la = _input->LA(1); - while (_la == ClickHouseParser::COMMA) { - setState(1059); - match(ClickHouseParser::COMMA); - setState(1060); - nestedIdentifier(); - setState(1065); - _errHandler->sync(this); - _la = _input->LA(1); - } - setState(1066); - match(ClickHouseParser::RPAREN); - - } - catch (RecognitionException &e) { - _errHandler->reportError(this, e); - _localctx->exception = std::current_exception(); - _errHandler->recover(this, _localctx->exception); - } - - return _localctx; -} - -//----------------- DataClauseContext ------------------------------------------------------------------ - -ClickHouseParser::DataClauseContext::DataClauseContext(ParserRuleContext *parent, size_t invokingState) - : ParserRuleContext(parent, invokingState) { -} - - -size_t ClickHouseParser::DataClauseContext::getRuleIndex() const { - return ClickHouseParser::RuleDataClause; -} - -void ClickHouseParser::DataClauseContext::copyFrom(DataClauseContext *ctx) { - ParserRuleContext::copyFrom(ctx); -} - -//----------------- DataClauseValuesContext ------------------------------------------------------------------ - -tree::TerminalNode* ClickHouseParser::DataClauseValuesContext::VALUES() { - return getToken(ClickHouseParser::VALUES, 0); -} - -ClickHouseParser::DataClauseValuesContext::DataClauseValuesContext(DataClauseContext *ctx) { copyFrom(ctx); } - -antlrcpp::Any ClickHouseParser::DataClauseValuesContext::accept(tree::ParseTreeVisitor *visitor) { - if (auto parserVisitor = dynamic_cast(visitor)) - return parserVisitor->visitDataClauseValues(this); - else - return visitor->visitChildren(this); -} -//----------------- DataClauseFormatContext ------------------------------------------------------------------ - -tree::TerminalNode* ClickHouseParser::DataClauseFormatContext::FORMAT() { - return getToken(ClickHouseParser::FORMAT, 0); -} - -ClickHouseParser::IdentifierContext* ClickHouseParser::DataClauseFormatContext::identifier() { - return getRuleContext(0); -} - 
-ClickHouseParser::DataClauseFormatContext::DataClauseFormatContext(DataClauseContext *ctx) { copyFrom(ctx); } - -antlrcpp::Any ClickHouseParser::DataClauseFormatContext::accept(tree::ParseTreeVisitor *visitor) { - if (auto parserVisitor = dynamic_cast(visitor)) - return parserVisitor->visitDataClauseFormat(this); - else - return visitor->visitChildren(this); -} -//----------------- DataClauseSelectContext ------------------------------------------------------------------ - -ClickHouseParser::SelectUnionStmtContext* ClickHouseParser::DataClauseSelectContext::selectUnionStmt() { - return getRuleContext(0); -} - -tree::TerminalNode* ClickHouseParser::DataClauseSelectContext::EOF() { - return getToken(ClickHouseParser::EOF, 0); -} - -tree::TerminalNode* ClickHouseParser::DataClauseSelectContext::SEMICOLON() { - return getToken(ClickHouseParser::SEMICOLON, 0); -} - -ClickHouseParser::DataClauseSelectContext::DataClauseSelectContext(DataClauseContext *ctx) { copyFrom(ctx); } - -antlrcpp::Any ClickHouseParser::DataClauseSelectContext::accept(tree::ParseTreeVisitor *visitor) { - if (auto parserVisitor = dynamic_cast(visitor)) - return parserVisitor->visitDataClauseSelect(this); - else - return visitor->visitChildren(this); -} -ClickHouseParser::DataClauseContext* ClickHouseParser::dataClause() { - DataClauseContext *_localctx = _tracker.createInstance(_ctx, getState()); - enterRule(_localctx, 92, ClickHouseParser::RuleDataClause); - size_t _la = 0; - - auto onExit = finally([=] { - exitRule(); - }); - try { - setState(1077); - _errHandler->sync(this); - switch (_input->LA(1)) { - case ClickHouseParser::FORMAT: { - _localctx = dynamic_cast(_tracker.createInstance(_localctx)); - enterOuterAlt(_localctx, 1); - setState(1068); - match(ClickHouseParser::FORMAT); - setState(1069); - identifier(); - break; - } - - case ClickHouseParser::VALUES: { - _localctx = dynamic_cast(_tracker.createInstance(_localctx)); - enterOuterAlt(_localctx, 2); - setState(1070); - match(ClickHouseParser::VALUES); - break; - } - - case ClickHouseParser::SELECT: - case ClickHouseParser::WITH: - case ClickHouseParser::LPAREN: { - _localctx = dynamic_cast(_tracker.createInstance(_localctx)); - enterOuterAlt(_localctx, 3); - setState(1071); - selectUnionStmt(); - setState(1073); - _errHandler->sync(this); - - _la = _input->LA(1); - if (_la == ClickHouseParser::SEMICOLON) { - setState(1072); - match(ClickHouseParser::SEMICOLON); - } - setState(1075); - match(ClickHouseParser::EOF); - break; - } - - default: - throw NoViableAltException(this); - } - - } - catch (RecognitionException &e) { - _errHandler->reportError(this, e); - _localctx->exception = std::current_exception(); - _errHandler->recover(this, _localctx->exception); - } - - return _localctx; -} - -//----------------- KillStmtContext ------------------------------------------------------------------ - -ClickHouseParser::KillStmtContext::KillStmtContext(ParserRuleContext *parent, size_t invokingState) - : ParserRuleContext(parent, invokingState) { -} - - -size_t ClickHouseParser::KillStmtContext::getRuleIndex() const { - return ClickHouseParser::RuleKillStmt; -} - -void ClickHouseParser::KillStmtContext::copyFrom(KillStmtContext *ctx) { - ParserRuleContext::copyFrom(ctx); -} - -//----------------- KillMutationStmtContext ------------------------------------------------------------------ - -tree::TerminalNode* ClickHouseParser::KillMutationStmtContext::KILL() { - return getToken(ClickHouseParser::KILL, 0); -} - -tree::TerminalNode* 
ClickHouseParser::KillMutationStmtContext::MUTATION() { - return getToken(ClickHouseParser::MUTATION, 0); -} - -ClickHouseParser::WhereClauseContext* ClickHouseParser::KillMutationStmtContext::whereClause() { - return getRuleContext(0); -} - -ClickHouseParser::ClusterClauseContext* ClickHouseParser::KillMutationStmtContext::clusterClause() { - return getRuleContext(0); -} - -tree::TerminalNode* ClickHouseParser::KillMutationStmtContext::SYNC() { - return getToken(ClickHouseParser::SYNC, 0); -} - -tree::TerminalNode* ClickHouseParser::KillMutationStmtContext::ASYNC() { - return getToken(ClickHouseParser::ASYNC, 0); -} - -tree::TerminalNode* ClickHouseParser::KillMutationStmtContext::TEST() { - return getToken(ClickHouseParser::TEST, 0); -} - -ClickHouseParser::KillMutationStmtContext::KillMutationStmtContext(KillStmtContext *ctx) { copyFrom(ctx); } - -antlrcpp::Any ClickHouseParser::KillMutationStmtContext::accept(tree::ParseTreeVisitor *visitor) { - if (auto parserVisitor = dynamic_cast(visitor)) - return parserVisitor->visitKillMutationStmt(this); - else - return visitor->visitChildren(this); -} -ClickHouseParser::KillStmtContext* ClickHouseParser::killStmt() { - KillStmtContext *_localctx = _tracker.createInstance(_ctx, getState()); - enterRule(_localctx, 94, ClickHouseParser::RuleKillStmt); - size_t _la = 0; - - auto onExit = finally([=] { - exitRule(); - }); - try { - _localctx = dynamic_cast(_tracker.createInstance(_localctx)); - enterOuterAlt(_localctx, 1); - setState(1079); - match(ClickHouseParser::KILL); - setState(1080); - match(ClickHouseParser::MUTATION); - setState(1082); - _errHandler->sync(this); - - _la = _input->LA(1); - if (_la == ClickHouseParser::ON) { - setState(1081); - clusterClause(); - } - setState(1084); - whereClause(); - setState(1086); - _errHandler->sync(this); - - _la = _input->LA(1); - if (_la == ClickHouseParser::ASYNC || _la == ClickHouseParser::SYNC - - || _la == ClickHouseParser::TEST) { - setState(1085); - _la = _input->LA(1); - if (!(_la == ClickHouseParser::ASYNC || _la == ClickHouseParser::SYNC - - || _la == ClickHouseParser::TEST)) { - _errHandler->recoverInline(this); - } - else { - _errHandler->reportMatch(this); - consume(); - } - } - - } - catch (RecognitionException &e) { - _errHandler->reportError(this, e); - _localctx->exception = std::current_exception(); - _errHandler->recover(this, _localctx->exception); - } - - return _localctx; -} - -//----------------- OptimizeStmtContext ------------------------------------------------------------------ - -ClickHouseParser::OptimizeStmtContext::OptimizeStmtContext(ParserRuleContext *parent, size_t invokingState) - : ParserRuleContext(parent, invokingState) { -} - -tree::TerminalNode* ClickHouseParser::OptimizeStmtContext::OPTIMIZE() { - return getToken(ClickHouseParser::OPTIMIZE, 0); -} - -tree::TerminalNode* ClickHouseParser::OptimizeStmtContext::TABLE() { - return getToken(ClickHouseParser::TABLE, 0); -} - -ClickHouseParser::TableIdentifierContext* ClickHouseParser::OptimizeStmtContext::tableIdentifier() { - return getRuleContext(0); -} - -ClickHouseParser::ClusterClauseContext* ClickHouseParser::OptimizeStmtContext::clusterClause() { - return getRuleContext(0); -} - -ClickHouseParser::PartitionClauseContext* ClickHouseParser::OptimizeStmtContext::partitionClause() { - return getRuleContext(0); -} - -tree::TerminalNode* ClickHouseParser::OptimizeStmtContext::FINAL() { - return getToken(ClickHouseParser::FINAL, 0); -} - -tree::TerminalNode* ClickHouseParser::OptimizeStmtContext::DEDUPLICATE() { - 
return getToken(ClickHouseParser::DEDUPLICATE, 0); -} - - -size_t ClickHouseParser::OptimizeStmtContext::getRuleIndex() const { - return ClickHouseParser::RuleOptimizeStmt; -} - -antlrcpp::Any ClickHouseParser::OptimizeStmtContext::accept(tree::ParseTreeVisitor *visitor) { - if (auto parserVisitor = dynamic_cast(visitor)) - return parserVisitor->visitOptimizeStmt(this); - else - return visitor->visitChildren(this); -} - -ClickHouseParser::OptimizeStmtContext* ClickHouseParser::optimizeStmt() { - OptimizeStmtContext *_localctx = _tracker.createInstance(_ctx, getState()); - enterRule(_localctx, 96, ClickHouseParser::RuleOptimizeStmt); - size_t _la = 0; - - auto onExit = finally([=] { - exitRule(); - }); - try { - enterOuterAlt(_localctx, 1); - setState(1088); - match(ClickHouseParser::OPTIMIZE); - setState(1089); - match(ClickHouseParser::TABLE); - setState(1090); - tableIdentifier(); - setState(1092); - _errHandler->sync(this); - - _la = _input->LA(1); - if (_la == ClickHouseParser::ON) { - setState(1091); - clusterClause(); - } - setState(1095); - _errHandler->sync(this); - - _la = _input->LA(1); - if (_la == ClickHouseParser::PARTITION) { - setState(1094); - partitionClause(); - } - setState(1098); - _errHandler->sync(this); - - _la = _input->LA(1); - if (_la == ClickHouseParser::FINAL) { - setState(1097); - match(ClickHouseParser::FINAL); - } - setState(1101); - _errHandler->sync(this); - - _la = _input->LA(1); - if (_la == ClickHouseParser::DEDUPLICATE) { - setState(1100); - match(ClickHouseParser::DEDUPLICATE); - } - - } - catch (RecognitionException &e) { - _errHandler->reportError(this, e); - _localctx->exception = std::current_exception(); - _errHandler->recover(this, _localctx->exception); - } - - return _localctx; -} - -//----------------- RenameStmtContext ------------------------------------------------------------------ - -ClickHouseParser::RenameStmtContext::RenameStmtContext(ParserRuleContext *parent, size_t invokingState) - : ParserRuleContext(parent, invokingState) { -} - -tree::TerminalNode* ClickHouseParser::RenameStmtContext::RENAME() { - return getToken(ClickHouseParser::RENAME, 0); -} - -tree::TerminalNode* ClickHouseParser::RenameStmtContext::TABLE() { - return getToken(ClickHouseParser::TABLE, 0); -} - -std::vector ClickHouseParser::RenameStmtContext::tableIdentifier() { - return getRuleContexts(); -} - -ClickHouseParser::TableIdentifierContext* ClickHouseParser::RenameStmtContext::tableIdentifier(size_t i) { - return getRuleContext(i); -} - -std::vector ClickHouseParser::RenameStmtContext::TO() { - return getTokens(ClickHouseParser::TO); -} - -tree::TerminalNode* ClickHouseParser::RenameStmtContext::TO(size_t i) { - return getToken(ClickHouseParser::TO, i); -} - -std::vector ClickHouseParser::RenameStmtContext::COMMA() { - return getTokens(ClickHouseParser::COMMA); -} - -tree::TerminalNode* ClickHouseParser::RenameStmtContext::COMMA(size_t i) { - return getToken(ClickHouseParser::COMMA, i); -} - -ClickHouseParser::ClusterClauseContext* ClickHouseParser::RenameStmtContext::clusterClause() { - return getRuleContext(0); -} - - -size_t ClickHouseParser::RenameStmtContext::getRuleIndex() const { - return ClickHouseParser::RuleRenameStmt; -} - -antlrcpp::Any ClickHouseParser::RenameStmtContext::accept(tree::ParseTreeVisitor *visitor) { - if (auto parserVisitor = dynamic_cast(visitor)) - return parserVisitor->visitRenameStmt(this); - else - return visitor->visitChildren(this); -} - -ClickHouseParser::RenameStmtContext* ClickHouseParser::renameStmt() { - RenameStmtContext 
*_localctx = _tracker.createInstance(_ctx, getState()); - enterRule(_localctx, 98, ClickHouseParser::RuleRenameStmt); - size_t _la = 0; - - auto onExit = finally([=] { - exitRule(); - }); - try { - enterOuterAlt(_localctx, 1); - setState(1103); - match(ClickHouseParser::RENAME); - setState(1104); - match(ClickHouseParser::TABLE); - setState(1105); - tableIdentifier(); - setState(1106); - match(ClickHouseParser::TO); - setState(1107); - tableIdentifier(); - setState(1115); - _errHandler->sync(this); - _la = _input->LA(1); - while (_la == ClickHouseParser::COMMA) { - setState(1108); - match(ClickHouseParser::COMMA); - setState(1109); - tableIdentifier(); - setState(1110); - match(ClickHouseParser::TO); - setState(1111); - tableIdentifier(); - setState(1117); - _errHandler->sync(this); - _la = _input->LA(1); - } - setState(1119); - _errHandler->sync(this); - - _la = _input->LA(1); - if (_la == ClickHouseParser::ON) { - setState(1118); - clusterClause(); - } - - } - catch (RecognitionException &e) { - _errHandler->reportError(this, e); - _localctx->exception = std::current_exception(); - _errHandler->recover(this, _localctx->exception); - } - - return _localctx; -} - -//----------------- ProjectionSelectStmtContext ------------------------------------------------------------------ - -ClickHouseParser::ProjectionSelectStmtContext::ProjectionSelectStmtContext(ParserRuleContext *parent, size_t invokingState) - : ParserRuleContext(parent, invokingState) { -} - -tree::TerminalNode* ClickHouseParser::ProjectionSelectStmtContext::LPAREN() { - return getToken(ClickHouseParser::LPAREN, 0); -} - -tree::TerminalNode* ClickHouseParser::ProjectionSelectStmtContext::SELECT() { - return getToken(ClickHouseParser::SELECT, 0); -} - -ClickHouseParser::ColumnExprListContext* ClickHouseParser::ProjectionSelectStmtContext::columnExprList() { - return getRuleContext(0); -} - -tree::TerminalNode* ClickHouseParser::ProjectionSelectStmtContext::RPAREN() { - return getToken(ClickHouseParser::RPAREN, 0); -} - -ClickHouseParser::WithClauseContext* ClickHouseParser::ProjectionSelectStmtContext::withClause() { - return getRuleContext(0); -} - -ClickHouseParser::GroupByClauseContext* ClickHouseParser::ProjectionSelectStmtContext::groupByClause() { - return getRuleContext(0); -} - -ClickHouseParser::ProjectionOrderByClauseContext* ClickHouseParser::ProjectionSelectStmtContext::projectionOrderByClause() { - return getRuleContext(0); -} - - -size_t ClickHouseParser::ProjectionSelectStmtContext::getRuleIndex() const { - return ClickHouseParser::RuleProjectionSelectStmt; -} - -antlrcpp::Any ClickHouseParser::ProjectionSelectStmtContext::accept(tree::ParseTreeVisitor *visitor) { - if (auto parserVisitor = dynamic_cast(visitor)) - return parserVisitor->visitProjectionSelectStmt(this); - else - return visitor->visitChildren(this); -} - -ClickHouseParser::ProjectionSelectStmtContext* ClickHouseParser::projectionSelectStmt() { - ProjectionSelectStmtContext *_localctx = _tracker.createInstance(_ctx, getState()); - enterRule(_localctx, 100, ClickHouseParser::RuleProjectionSelectStmt); - size_t _la = 0; - - auto onExit = finally([=] { - exitRule(); - }); - try { - enterOuterAlt(_localctx, 1); - setState(1121); - match(ClickHouseParser::LPAREN); - setState(1123); - _errHandler->sync(this); - - _la = _input->LA(1); - if (_la == ClickHouseParser::WITH) { - setState(1122); - withClause(); - } - setState(1125); - match(ClickHouseParser::SELECT); - setState(1126); - columnExprList(); - setState(1128); - _errHandler->sync(this); - - _la = 
_input->LA(1); - if (_la == ClickHouseParser::GROUP) { - setState(1127); - groupByClause(); - } - setState(1131); - _errHandler->sync(this); - - _la = _input->LA(1); - if (_la == ClickHouseParser::ORDER) { - setState(1130); - projectionOrderByClause(); - } - setState(1133); - match(ClickHouseParser::RPAREN); - - } - catch (RecognitionException &e) { - _errHandler->reportError(this, e); - _localctx->exception = std::current_exception(); - _errHandler->recover(this, _localctx->exception); - } - - return _localctx; -} - -//----------------- SelectUnionStmtContext ------------------------------------------------------------------ - -ClickHouseParser::SelectUnionStmtContext::SelectUnionStmtContext(ParserRuleContext *parent, size_t invokingState) - : ParserRuleContext(parent, invokingState) { -} - -std::vector ClickHouseParser::SelectUnionStmtContext::selectStmtWithParens() { - return getRuleContexts(); -} - -ClickHouseParser::SelectStmtWithParensContext* ClickHouseParser::SelectUnionStmtContext::selectStmtWithParens(size_t i) { - return getRuleContext(i); -} - -std::vector ClickHouseParser::SelectUnionStmtContext::UNION() { - return getTokens(ClickHouseParser::UNION); -} - -tree::TerminalNode* ClickHouseParser::SelectUnionStmtContext::UNION(size_t i) { - return getToken(ClickHouseParser::UNION, i); -} - -std::vector ClickHouseParser::SelectUnionStmtContext::ALL() { - return getTokens(ClickHouseParser::ALL); -} - -tree::TerminalNode* ClickHouseParser::SelectUnionStmtContext::ALL(size_t i) { - return getToken(ClickHouseParser::ALL, i); -} - - -size_t ClickHouseParser::SelectUnionStmtContext::getRuleIndex() const { - return ClickHouseParser::RuleSelectUnionStmt; -} - -antlrcpp::Any ClickHouseParser::SelectUnionStmtContext::accept(tree::ParseTreeVisitor *visitor) { - if (auto parserVisitor = dynamic_cast(visitor)) - return parserVisitor->visitSelectUnionStmt(this); - else - return visitor->visitChildren(this); -} - -ClickHouseParser::SelectUnionStmtContext* ClickHouseParser::selectUnionStmt() { - SelectUnionStmtContext *_localctx = _tracker.createInstance(_ctx, getState()); - enterRule(_localctx, 102, ClickHouseParser::RuleSelectUnionStmt); - size_t _la = 0; - - auto onExit = finally([=] { - exitRule(); - }); - try { - enterOuterAlt(_localctx, 1); - setState(1135); - selectStmtWithParens(); - setState(1141); - _errHandler->sync(this); - _la = _input->LA(1); - while (_la == ClickHouseParser::UNION) { - setState(1136); - match(ClickHouseParser::UNION); - setState(1137); - match(ClickHouseParser::ALL); - setState(1138); - selectStmtWithParens(); - setState(1143); - _errHandler->sync(this); - _la = _input->LA(1); - } - - } - catch (RecognitionException &e) { - _errHandler->reportError(this, e); - _localctx->exception = std::current_exception(); - _errHandler->recover(this, _localctx->exception); - } - - return _localctx; -} - -//----------------- SelectStmtWithParensContext ------------------------------------------------------------------ - -ClickHouseParser::SelectStmtWithParensContext::SelectStmtWithParensContext(ParserRuleContext *parent, size_t invokingState) - : ParserRuleContext(parent, invokingState) { -} - -ClickHouseParser::SelectStmtContext* ClickHouseParser::SelectStmtWithParensContext::selectStmt() { - return getRuleContext(0); -} - -tree::TerminalNode* ClickHouseParser::SelectStmtWithParensContext::LPAREN() { - return getToken(ClickHouseParser::LPAREN, 0); -} - -ClickHouseParser::SelectUnionStmtContext* ClickHouseParser::SelectStmtWithParensContext::selectUnionStmt() { - return 
getRuleContext(0); -} - -tree::TerminalNode* ClickHouseParser::SelectStmtWithParensContext::RPAREN() { - return getToken(ClickHouseParser::RPAREN, 0); -} - - -size_t ClickHouseParser::SelectStmtWithParensContext::getRuleIndex() const { - return ClickHouseParser::RuleSelectStmtWithParens; -} - -antlrcpp::Any ClickHouseParser::SelectStmtWithParensContext::accept(tree::ParseTreeVisitor *visitor) { - if (auto parserVisitor = dynamic_cast(visitor)) - return parserVisitor->visitSelectStmtWithParens(this); - else - return visitor->visitChildren(this); -} - -ClickHouseParser::SelectStmtWithParensContext* ClickHouseParser::selectStmtWithParens() { - SelectStmtWithParensContext *_localctx = _tracker.createInstance(_ctx, getState()); - enterRule(_localctx, 104, ClickHouseParser::RuleSelectStmtWithParens); - - auto onExit = finally([=] { - exitRule(); - }); - try { - setState(1149); - _errHandler->sync(this); - switch (_input->LA(1)) { - case ClickHouseParser::SELECT: - case ClickHouseParser::WITH: { - enterOuterAlt(_localctx, 1); - setState(1144); - selectStmt(); - break; - } - - case ClickHouseParser::LPAREN: { - enterOuterAlt(_localctx, 2); - setState(1145); - match(ClickHouseParser::LPAREN); - setState(1146); - selectUnionStmt(); - setState(1147); - match(ClickHouseParser::RPAREN); - break; - } - - default: - throw NoViableAltException(this); - } - - } - catch (RecognitionException &e) { - _errHandler->reportError(this, e); - _localctx->exception = std::current_exception(); - _errHandler->recover(this, _localctx->exception); - } - - return _localctx; -} - -//----------------- SelectStmtContext ------------------------------------------------------------------ - -ClickHouseParser::SelectStmtContext::SelectStmtContext(ParserRuleContext *parent, size_t invokingState) - : ParserRuleContext(parent, invokingState) { -} - -tree::TerminalNode* ClickHouseParser::SelectStmtContext::SELECT() { - return getToken(ClickHouseParser::SELECT, 0); -} - -ClickHouseParser::ColumnExprListContext* ClickHouseParser::SelectStmtContext::columnExprList() { - return getRuleContext(0); -} - -ClickHouseParser::WithClauseContext* ClickHouseParser::SelectStmtContext::withClause() { - return getRuleContext(0); -} - -tree::TerminalNode* ClickHouseParser::SelectStmtContext::DISTINCT() { - return getToken(ClickHouseParser::DISTINCT, 0); -} - -ClickHouseParser::TopClauseContext* ClickHouseParser::SelectStmtContext::topClause() { - return getRuleContext(0); -} - -ClickHouseParser::FromClauseContext* ClickHouseParser::SelectStmtContext::fromClause() { - return getRuleContext(0); -} - -ClickHouseParser::ArrayJoinClauseContext* ClickHouseParser::SelectStmtContext::arrayJoinClause() { - return getRuleContext(0); -} - -ClickHouseParser::PrewhereClauseContext* ClickHouseParser::SelectStmtContext::prewhereClause() { - return getRuleContext(0); -} - -ClickHouseParser::WhereClauseContext* ClickHouseParser::SelectStmtContext::whereClause() { - return getRuleContext(0); -} - -ClickHouseParser::GroupByClauseContext* ClickHouseParser::SelectStmtContext::groupByClause() { - return getRuleContext(0); -} - -std::vector ClickHouseParser::SelectStmtContext::WITH() { - return getTokens(ClickHouseParser::WITH); -} - -tree::TerminalNode* ClickHouseParser::SelectStmtContext::WITH(size_t i) { - return getToken(ClickHouseParser::WITH, i); -} - -tree::TerminalNode* ClickHouseParser::SelectStmtContext::TOTALS() { - return getToken(ClickHouseParser::TOTALS, 0); -} - -ClickHouseParser::HavingClauseContext* ClickHouseParser::SelectStmtContext::havingClause() { - 
-  return getRuleContext<ClickHouseParser::HavingClauseContext>(0);
-}
-
-ClickHouseParser::OrderByClauseContext* ClickHouseParser::SelectStmtContext::orderByClause() {
-  return getRuleContext<ClickHouseParser::OrderByClauseContext>(0);
-}
-
-ClickHouseParser::LimitByClauseContext* ClickHouseParser::SelectStmtContext::limitByClause() {
-  return getRuleContext<ClickHouseParser::LimitByClauseContext>(0);
-}
-
-ClickHouseParser::LimitClauseContext* ClickHouseParser::SelectStmtContext::limitClause() {
-  return getRuleContext<ClickHouseParser::LimitClauseContext>(0);
-}
-
-ClickHouseParser::SettingsClauseContext* ClickHouseParser::SelectStmtContext::settingsClause() {
-  return getRuleContext<ClickHouseParser::SettingsClauseContext>(0);
-}
-
-tree::TerminalNode* ClickHouseParser::SelectStmtContext::CUBE() {
-  return getToken(ClickHouseParser::CUBE, 0);
-}
-
-tree::TerminalNode* ClickHouseParser::SelectStmtContext::ROLLUP() {
-  return getToken(ClickHouseParser::ROLLUP, 0);
-}
-
-
-size_t ClickHouseParser::SelectStmtContext::getRuleIndex() const {
-  return ClickHouseParser::RuleSelectStmt;
-}
-
-antlrcpp::Any ClickHouseParser::SelectStmtContext::accept(tree::ParseTreeVisitor *visitor) {
-  if (auto parserVisitor = dynamic_cast<ClickHouseParserVisitor*>(visitor))
-    return parserVisitor->visitSelectStmt(this);
-  else
-    return visitor->visitChildren(this);
-}
-
-ClickHouseParser::SelectStmtContext* ClickHouseParser::selectStmt() {
-  SelectStmtContext *_localctx = _tracker.createInstance<SelectStmtContext>(_ctx, getState());
-  enterRule(_localctx, 106, ClickHouseParser::RuleSelectStmt);
-  size_t _la = 0;
-
-  auto onExit = finally([=] {
-    exitRule();
-  });
-  try {
-    enterOuterAlt(_localctx, 1);
-    setState(1152);
-    _errHandler->sync(this);
-
-    _la = _input->LA(1);
-    if (_la == ClickHouseParser::WITH) {
-      setState(1151);
-      withClause();
-    }
-    setState(1154);
-    match(ClickHouseParser::SELECT);
-    setState(1156);
-    _errHandler->sync(this);
-
-    switch (getInterpreter<atn::ParserATNSimulator>()->adaptivePredict(_input, 140, _ctx)) {
-    case 1: {
-      setState(1155);
-      match(ClickHouseParser::DISTINCT);
-      break;
-    }
-
-    }
-    setState(1159);
-    _errHandler->sync(this);
-
-    switch (getInterpreter<atn::ParserATNSimulator>()->adaptivePredict(_input, 141, _ctx)) {
-    case 1: {
-      setState(1158);
-      topClause();
-      break;
-    }
-
-    }
-    setState(1161);
-    columnExprList();
-    setState(1163);
-    _errHandler->sync(this);
-
-    _la = _input->LA(1);
-    if (_la == ClickHouseParser::FROM) {
-      setState(1162);
-      fromClause();
-    }
-    setState(1166);
-    _errHandler->sync(this);
-
-    _la = _input->LA(1);
-    if (_la == ClickHouseParser::ARRAY || _la == ClickHouseParser::INNER
-
-    || _la == ClickHouseParser::LEFT) {
-      setState(1165);
-      arrayJoinClause();
-    }
-    setState(1169);
-    _errHandler->sync(this);
-
-    _la = _input->LA(1);
-    if (_la == ClickHouseParser::PREWHERE) {
-      setState(1168);
-      prewhereClause();
-    }
-    setState(1172);
-    _errHandler->sync(this);
-
-    _la = _input->LA(1);
-    if (_la == ClickHouseParser::WHERE) {
-      setState(1171);
-      whereClause();
-    }
-    setState(1175);
-    _errHandler->sync(this);
-
-    _la = _input->LA(1);
-    if (_la == ClickHouseParser::GROUP) {
-      setState(1174);
-      groupByClause();
-    }
-    setState(1179);
-    _errHandler->sync(this);
-
-    switch (getInterpreter<atn::ParserATNSimulator>()->adaptivePredict(_input, 147, _ctx)) {
-    case 1: {
-      setState(1177);
-      match(ClickHouseParser::WITH);
-      setState(1178);
-      _la = _input->LA(1);
-      if (!(_la == ClickHouseParser::CUBE || _la == ClickHouseParser::ROLLUP)) {
-        _errHandler->recoverInline(this);
-      }
-      else {
-        _errHandler->reportMatch(this);
-        consume();
-      }
-      break;
-    }
-
-    }
-    setState(1183);
-    _errHandler->sync(this);
-
-    _la = _input->LA(1);
-    if (_la == ClickHouseParser::WITH) {
-      setState(1181);
-      match(ClickHouseParser::WITH);
-      setState(1182);
-      match(ClickHouseParser::TOTALS);
-    }
-    setState(1186);
-    _errHandler->sync(this);
-
-    _la = _input->LA(1);
-    if (_la == ClickHouseParser::HAVING) {
-      setState(1185);
-      havingClause();
-    }
-    setState(1189);
-    _errHandler->sync(this);
-
-    _la = _input->LA(1);
-    if (_la == ClickHouseParser::ORDER) {
-      setState(1188);
-      orderByClause();
-    }
-    setState(1192);
-    _errHandler->sync(this);
-
-    switch (getInterpreter<atn::ParserATNSimulator>()->adaptivePredict(_input, 151, _ctx)) {
-    case 1: {
-      setState(1191);
-      limitByClause();
-      break;
-    }
-
-    }
-    setState(1195);
-    _errHandler->sync(this);
-
-    _la = _input->LA(1);
-    if (_la == ClickHouseParser::LIMIT) {
-      setState(1194);
-      limitClause();
-    }
-    setState(1198);
-    _errHandler->sync(this);
-
-    _la = _input->LA(1);
-    if (_la == ClickHouseParser::SETTINGS) {
-      setState(1197);
-      settingsClause();
-    }
-
-  }
-  catch (RecognitionException &e) {
-    _errHandler->reportError(this, e);
-    _localctx->exception = std::current_exception();
-    _errHandler->recover(this, _localctx->exception);
-  }
-
-  return _localctx;
-}
-
-//----------------- WithClauseContext ------------------------------------------------------------------
-
-ClickHouseParser::WithClauseContext::WithClauseContext(ParserRuleContext *parent, size_t invokingState)
-  : ParserRuleContext(parent, invokingState) {
-}
-
-tree::TerminalNode* ClickHouseParser::WithClauseContext::WITH() {
-  return getToken(ClickHouseParser::WITH, 0);
-}
-
-ClickHouseParser::ColumnExprListContext* ClickHouseParser::WithClauseContext::columnExprList() {
-  return getRuleContext<ClickHouseParser::ColumnExprListContext>(0);
-}
-
-
-size_t ClickHouseParser::WithClauseContext::getRuleIndex() const {
-  return ClickHouseParser::RuleWithClause;
-}
-
-antlrcpp::Any ClickHouseParser::WithClauseContext::accept(tree::ParseTreeVisitor *visitor) {
-  if (auto parserVisitor = dynamic_cast<ClickHouseParserVisitor*>(visitor))
-    return parserVisitor->visitWithClause(this);
-  else
-    return visitor->visitChildren(this);
-}
-
-ClickHouseParser::WithClauseContext* ClickHouseParser::withClause() {
-  WithClauseContext *_localctx = _tracker.createInstance<WithClauseContext>(_ctx, getState());
-  enterRule(_localctx, 108, ClickHouseParser::RuleWithClause);
-
-  auto onExit = finally([=] {
-    exitRule();
-  });
-  try {
-    enterOuterAlt(_localctx, 1);
-    setState(1200);
-    match(ClickHouseParser::WITH);
-    setState(1201);
-    columnExprList();
-
-  }
-  catch (RecognitionException &e) {
-    _errHandler->reportError(this, e);
-    _localctx->exception = std::current_exception();
-    _errHandler->recover(this, _localctx->exception);
-  }
-
-  return _localctx;
-}
-
-//----------------- TopClauseContext ------------------------------------------------------------------
-
-ClickHouseParser::TopClauseContext::TopClauseContext(ParserRuleContext *parent, size_t invokingState)
-  : ParserRuleContext(parent, invokingState) {
-}
-
-tree::TerminalNode* ClickHouseParser::TopClauseContext::TOP() {
-  return getToken(ClickHouseParser::TOP, 0);
-}
-
-tree::TerminalNode* ClickHouseParser::TopClauseContext::DECIMAL_LITERAL() {
-  return getToken(ClickHouseParser::DECIMAL_LITERAL, 0);
-}
-
-tree::TerminalNode* ClickHouseParser::TopClauseContext::WITH() {
-  return getToken(ClickHouseParser::WITH, 0);
-}
-
-tree::TerminalNode* ClickHouseParser::TopClauseContext::TIES() {
-  return getToken(ClickHouseParser::TIES, 0);
-}
-
-
-size_t ClickHouseParser::TopClauseContext::getRuleIndex() const {
-  return ClickHouseParser::RuleTopClause;
-}
-
-antlrcpp::Any ClickHouseParser::TopClauseContext::accept(tree::ParseTreeVisitor *visitor) {
-  if (auto parserVisitor = dynamic_cast<ClickHouseParserVisitor*>(visitor))
-    return parserVisitor->visitTopClause(this);
-  else
-    return visitor->visitChildren(this);
-}
-
-ClickHouseParser::TopClauseContext* ClickHouseParser::topClause() {
-  TopClauseContext *_localctx = _tracker.createInstance<TopClauseContext>(_ctx, getState());
-  enterRule(_localctx, 110, ClickHouseParser::RuleTopClause);
-
-  auto onExit = finally([=] {
-    exitRule();
-  });
-  try {
-    enterOuterAlt(_localctx, 1);
-    setState(1203);
-    match(ClickHouseParser::TOP);
-    setState(1204);
-    match(ClickHouseParser::DECIMAL_LITERAL);
-    setState(1207);
-    _errHandler->sync(this);
-
-    switch (getInterpreter<atn::ParserATNSimulator>()->adaptivePredict(_input, 154, _ctx)) {
-    case 1: {
-      setState(1205);
-      match(ClickHouseParser::WITH);
-      setState(1206);
-      match(ClickHouseParser::TIES);
-      break;
-    }
-
-    }
-
-  }
-  catch (RecognitionException &e) {
-    _errHandler->reportError(this, e);
-    _localctx->exception = std::current_exception();
-    _errHandler->recover(this, _localctx->exception);
-  }
-
-  return _localctx;
-}
-
-//----------------- FromClauseContext ------------------------------------------------------------------
-
-ClickHouseParser::FromClauseContext::FromClauseContext(ParserRuleContext *parent, size_t invokingState)
-  : ParserRuleContext(parent, invokingState) {
-}
-
-tree::TerminalNode* ClickHouseParser::FromClauseContext::FROM() {
-  return getToken(ClickHouseParser::FROM, 0);
-}
-
-ClickHouseParser::JoinExprContext* ClickHouseParser::FromClauseContext::joinExpr() {
-  return getRuleContext<ClickHouseParser::JoinExprContext>(0);
-}
-
-
-size_t ClickHouseParser::FromClauseContext::getRuleIndex() const {
-  return ClickHouseParser::RuleFromClause;
-}
-
-antlrcpp::Any ClickHouseParser::FromClauseContext::accept(tree::ParseTreeVisitor *visitor) {
-  if (auto parserVisitor = dynamic_cast<ClickHouseParserVisitor*>(visitor))
-    return parserVisitor->visitFromClause(this);
-  else
-    return visitor->visitChildren(this);
-}
-
-ClickHouseParser::FromClauseContext* ClickHouseParser::fromClause() {
-  FromClauseContext *_localctx = _tracker.createInstance<FromClauseContext>(_ctx, getState());
-  enterRule(_localctx, 112, ClickHouseParser::RuleFromClause);
-
-  auto onExit = finally([=] {
-    exitRule();
-  });
-  try {
-    enterOuterAlt(_localctx, 1);
-    setState(1209);
-    match(ClickHouseParser::FROM);
-    setState(1210);
-    joinExpr(0);
-
-  }
-  catch (RecognitionException &e) {
-    _errHandler->reportError(this, e);
-    _localctx->exception = std::current_exception();
-    _errHandler->recover(this, _localctx->exception);
-  }
-
-  return _localctx;
-}
-
-//----------------- ArrayJoinClauseContext ------------------------------------------------------------------
-
-ClickHouseParser::ArrayJoinClauseContext::ArrayJoinClauseContext(ParserRuleContext *parent, size_t invokingState)
-  : ParserRuleContext(parent, invokingState) {
-}
-
-tree::TerminalNode* ClickHouseParser::ArrayJoinClauseContext::ARRAY() {
-  return getToken(ClickHouseParser::ARRAY, 0);
-}
-
-tree::TerminalNode* ClickHouseParser::ArrayJoinClauseContext::JOIN() {
-  return getToken(ClickHouseParser::JOIN, 0);
-}
-
-ClickHouseParser::ColumnExprListContext* ClickHouseParser::ArrayJoinClauseContext::columnExprList() {
-  return getRuleContext<ClickHouseParser::ColumnExprListContext>(0);
-}
-
-tree::TerminalNode* ClickHouseParser::ArrayJoinClauseContext::LEFT() {
-  return getToken(ClickHouseParser::LEFT, 0);
-}
-
-tree::TerminalNode* ClickHouseParser::ArrayJoinClauseContext::INNER() {
-  return getToken(ClickHouseParser::INNER, 0);
-}
-
-
-size_t ClickHouseParser::ArrayJoinClauseContext::getRuleIndex() const {
-  return ClickHouseParser::RuleArrayJoinClause;
-}
-
-antlrcpp::Any ClickHouseParser::ArrayJoinClauseContext::accept(tree::ParseTreeVisitor *visitor) {
-  if (auto parserVisitor = dynamic_cast<ClickHouseParserVisitor*>(visitor))
-    return parserVisitor->visitArrayJoinClause(this);
-  else
-    return visitor->visitChildren(this);
-}
-
-ClickHouseParser::ArrayJoinClauseContext* ClickHouseParser::arrayJoinClause() {
-  ArrayJoinClauseContext *_localctx = _tracker.createInstance<ArrayJoinClauseContext>(_ctx, getState());
-  enterRule(_localctx, 114, ClickHouseParser::RuleArrayJoinClause);
-  size_t _la = 0;
-
-  auto onExit = finally([=] {
-    exitRule();
-  });
-  try {
-    enterOuterAlt(_localctx, 1);
-    setState(1213);
-    _errHandler->sync(this);
-
-    _la = _input->LA(1);
-    if (_la == ClickHouseParser::INNER
-
-    || _la == ClickHouseParser::LEFT) {
-      setState(1212);
-      _la = _input->LA(1);
-      if (!(_la == ClickHouseParser::INNER
-
-      || _la == ClickHouseParser::LEFT)) {
-        _errHandler->recoverInline(this);
-      }
-      else {
-        _errHandler->reportMatch(this);
-        consume();
-      }
-    }
-    setState(1215);
-    match(ClickHouseParser::ARRAY);
-    setState(1216);
-    match(ClickHouseParser::JOIN);
-    setState(1217);
-    columnExprList();
-
-  }
-  catch (RecognitionException &e) {
-    _errHandler->reportError(this, e);
-    _localctx->exception = std::current_exception();
-    _errHandler->recover(this, _localctx->exception);
-  }
-
-  return _localctx;
-}
-
-//----------------- PrewhereClauseContext ------------------------------------------------------------------
-
-ClickHouseParser::PrewhereClauseContext::PrewhereClauseContext(ParserRuleContext *parent, size_t invokingState)
-  : ParserRuleContext(parent, invokingState) {
-}
-
-tree::TerminalNode* ClickHouseParser::PrewhereClauseContext::PREWHERE() {
-  return getToken(ClickHouseParser::PREWHERE, 0);
-}
-
-ClickHouseParser::ColumnExprContext* ClickHouseParser::PrewhereClauseContext::columnExpr() {
-  return getRuleContext<ClickHouseParser::ColumnExprContext>(0);
-}
-
-
-size_t ClickHouseParser::PrewhereClauseContext::getRuleIndex() const {
-  return ClickHouseParser::RulePrewhereClause;
-}
-
-antlrcpp::Any ClickHouseParser::PrewhereClauseContext::accept(tree::ParseTreeVisitor *visitor) {
-  if (auto parserVisitor = dynamic_cast<ClickHouseParserVisitor*>(visitor))
-    return parserVisitor->visitPrewhereClause(this);
-  else
-    return visitor->visitChildren(this);
-}
-
-ClickHouseParser::PrewhereClauseContext* ClickHouseParser::prewhereClause() {
-  PrewhereClauseContext *_localctx = _tracker.createInstance<PrewhereClauseContext>(_ctx, getState());
-  enterRule(_localctx, 116, ClickHouseParser::RulePrewhereClause);
-
-  auto onExit = finally([=] {
-    exitRule();
-  });
-  try {
-    enterOuterAlt(_localctx, 1);
-    setState(1219);
-    match(ClickHouseParser::PREWHERE);
-    setState(1220);
-    columnExpr(0);
-
-  }
-  catch (RecognitionException &e) {
-    _errHandler->reportError(this, e);
-    _localctx->exception = std::current_exception();
-    _errHandler->recover(this, _localctx->exception);
-  }
-
-  return _localctx;
-}
-
-//----------------- WhereClauseContext ------------------------------------------------------------------
-
-ClickHouseParser::WhereClauseContext::WhereClauseContext(ParserRuleContext *parent, size_t invokingState)
-  : ParserRuleContext(parent, invokingState) {
-}
-
-tree::TerminalNode* ClickHouseParser::WhereClauseContext::WHERE() {
-  return getToken(ClickHouseParser::WHERE, 0);
-}
-
-ClickHouseParser::ColumnExprContext* ClickHouseParser::WhereClauseContext::columnExpr() {
-  return getRuleContext<ClickHouseParser::ColumnExprContext>(0);
-}
-
-
-size_t ClickHouseParser::WhereClauseContext::getRuleIndex() const {
-  return ClickHouseParser::RuleWhereClause;
-}
-
-antlrcpp::Any ClickHouseParser::WhereClauseContext::accept(tree::ParseTreeVisitor *visitor) {
-  if (auto parserVisitor = dynamic_cast<ClickHouseParserVisitor*>(visitor))
-    return parserVisitor->visitWhereClause(this);
-  else
-    return visitor->visitChildren(this);
-}
-
-ClickHouseParser::WhereClauseContext* ClickHouseParser::whereClause() {
-  WhereClauseContext *_localctx = _tracker.createInstance<WhereClauseContext>(_ctx, getState());
-  enterRule(_localctx, 118, ClickHouseParser::RuleWhereClause);
-
-  auto onExit = finally([=] {
-    exitRule();
-  });
-  try {
-    enterOuterAlt(_localctx, 1);
-    setState(1222);
-    match(ClickHouseParser::WHERE);
-    setState(1223);
-    columnExpr(0);
-
-  }
-  catch (RecognitionException &e) {
-    _errHandler->reportError(this, e);
-    _localctx->exception = std::current_exception();
-    _errHandler->recover(this, _localctx->exception);
-  }
-
-  return _localctx;
-}
-
-//----------------- GroupByClauseContext ------------------------------------------------------------------
-
-ClickHouseParser::GroupByClauseContext::GroupByClauseContext(ParserRuleContext *parent, size_t invokingState)
-  : ParserRuleContext(parent, invokingState) {
-}
-
-tree::TerminalNode* ClickHouseParser::GroupByClauseContext::GROUP() {
-  return getToken(ClickHouseParser::GROUP, 0);
-}
-
-tree::TerminalNode* ClickHouseParser::GroupByClauseContext::BY() {
-  return getToken(ClickHouseParser::BY, 0);
-}
-
-tree::TerminalNode* ClickHouseParser::GroupByClauseContext::LPAREN() {
-  return getToken(ClickHouseParser::LPAREN, 0);
-}
-
-ClickHouseParser::ColumnExprListContext* ClickHouseParser::GroupByClauseContext::columnExprList() {
-  return getRuleContext<ClickHouseParser::ColumnExprListContext>(0);
-}
-
-tree::TerminalNode* ClickHouseParser::GroupByClauseContext::RPAREN() {
-  return getToken(ClickHouseParser::RPAREN, 0);
-}
-
-tree::TerminalNode* ClickHouseParser::GroupByClauseContext::CUBE() {
-  return getToken(ClickHouseParser::CUBE, 0);
-}
-
-tree::TerminalNode* ClickHouseParser::GroupByClauseContext::ROLLUP() {
-  return getToken(ClickHouseParser::ROLLUP, 0);
-}
-
-
-size_t ClickHouseParser::GroupByClauseContext::getRuleIndex() const {
-  return ClickHouseParser::RuleGroupByClause;
-}
-
-antlrcpp::Any ClickHouseParser::GroupByClauseContext::accept(tree::ParseTreeVisitor *visitor) {
-  if (auto parserVisitor = dynamic_cast<ClickHouseParserVisitor*>(visitor))
-    return parserVisitor->visitGroupByClause(this);
-  else
-    return visitor->visitChildren(this);
-}
-
-ClickHouseParser::GroupByClauseContext* ClickHouseParser::groupByClause() {
-  GroupByClauseContext *_localctx = _tracker.createInstance<GroupByClauseContext>(_ctx, getState());
-  enterRule(_localctx, 120, ClickHouseParser::RuleGroupByClause);
-  size_t _la = 0;
-
-  auto onExit = finally([=] {
-    exitRule();
-  });
-  try {
-    enterOuterAlt(_localctx, 1);
-    setState(1225);
-    match(ClickHouseParser::GROUP);
-    setState(1226);
-    match(ClickHouseParser::BY);
-    setState(1233);
-    _errHandler->sync(this);
-    switch (getInterpreter<atn::ParserATNSimulator>()->adaptivePredict(_input, 156, _ctx)) {
-    case 1: {
-      setState(1227);
-      _la = _input->LA(1);
-      if (!(_la == ClickHouseParser::CUBE || _la == ClickHouseParser::ROLLUP)) {
-        _errHandler->recoverInline(this);
-      }
-      else {
-        _errHandler->reportMatch(this);
-        consume();
-      }
-      setState(1228);
-      match(ClickHouseParser::LPAREN);
-      setState(1229);
-      columnExprList();
-      setState(1230);
-      match(ClickHouseParser::RPAREN);
-      break;
-    }
-
-    case 2: {
-      setState(1232);
-      columnExprList();
-      break;
-    }
-
-    }
-
-  }
-  catch (RecognitionException &e) {
-    _errHandler->reportError(this, e);
-    _localctx->exception = std::current_exception();
-    _errHandler->recover(this, _localctx->exception);
-  }
-
-  return _localctx;
-}
-
-//----------------- HavingClauseContext ------------------------------------------------------------------
-
-ClickHouseParser::HavingClauseContext::HavingClauseContext(ParserRuleContext *parent, size_t invokingState)
-  : ParserRuleContext(parent, invokingState) {
-}
-
-tree::TerminalNode* ClickHouseParser::HavingClauseContext::HAVING() {
-  return getToken(ClickHouseParser::HAVING, 0);
-}
-
-ClickHouseParser::ColumnExprContext* ClickHouseParser::HavingClauseContext::columnExpr() {
-  return getRuleContext<ClickHouseParser::ColumnExprContext>(0);
-}
-
-
-size_t ClickHouseParser::HavingClauseContext::getRuleIndex() const {
-  return ClickHouseParser::RuleHavingClause;
-}
-
-antlrcpp::Any ClickHouseParser::HavingClauseContext::accept(tree::ParseTreeVisitor *visitor) {
-  if (auto parserVisitor = dynamic_cast<ClickHouseParserVisitor*>(visitor))
-    return parserVisitor->visitHavingClause(this);
-  else
-    return visitor->visitChildren(this);
-}
-
-ClickHouseParser::HavingClauseContext* ClickHouseParser::havingClause() {
-  HavingClauseContext *_localctx = _tracker.createInstance<HavingClauseContext>(_ctx, getState());
-  enterRule(_localctx, 122, ClickHouseParser::RuleHavingClause);
-
-  auto onExit = finally([=] {
-    exitRule();
-  });
-  try {
-    enterOuterAlt(_localctx, 1);
-    setState(1235);
-    match(ClickHouseParser::HAVING);
-    setState(1236);
-    columnExpr(0);
-
-  }
-  catch (RecognitionException &e) {
-    _errHandler->reportError(this, e);
-    _localctx->exception = std::current_exception();
-    _errHandler->recover(this, _localctx->exception);
-  }
-
-  return _localctx;
-}
-
-//----------------- OrderByClauseContext ------------------------------------------------------------------
-
-ClickHouseParser::OrderByClauseContext::OrderByClauseContext(ParserRuleContext *parent, size_t invokingState)
-  : ParserRuleContext(parent, invokingState) {
-}
-
-tree::TerminalNode* ClickHouseParser::OrderByClauseContext::ORDER() {
-  return getToken(ClickHouseParser::ORDER, 0);
-}
-
-tree::TerminalNode* ClickHouseParser::OrderByClauseContext::BY() {
-  return getToken(ClickHouseParser::BY, 0);
-}
-
-ClickHouseParser::OrderExprListContext* ClickHouseParser::OrderByClauseContext::orderExprList() {
-  return getRuleContext<ClickHouseParser::OrderExprListContext>(0);
-}
-
-
-size_t ClickHouseParser::OrderByClauseContext::getRuleIndex() const {
-  return ClickHouseParser::RuleOrderByClause;
-}
-
-antlrcpp::Any ClickHouseParser::OrderByClauseContext::accept(tree::ParseTreeVisitor *visitor) {
-  if (auto parserVisitor = dynamic_cast<ClickHouseParserVisitor*>(visitor))
-    return parserVisitor->visitOrderByClause(this);
-  else
-    return visitor->visitChildren(this);
-}
-
-ClickHouseParser::OrderByClauseContext* ClickHouseParser::orderByClause() {
-  OrderByClauseContext *_localctx = _tracker.createInstance<OrderByClauseContext>(_ctx, getState());
-  enterRule(_localctx, 124, ClickHouseParser::RuleOrderByClause);
-
-  auto onExit = finally([=] {
-    exitRule();
-  });
-  try {
-    enterOuterAlt(_localctx, 1);
-    setState(1238);
-    match(ClickHouseParser::ORDER);
-    setState(1239);
-    match(ClickHouseParser::BY);
-    setState(1240);
-    orderExprList();
-
-  }
-  catch (RecognitionException &e) {
-    _errHandler->reportError(this, e);
-    _localctx->exception = std::current_exception();
-    _errHandler->recover(this, _localctx->exception);
-  }
-
-  return _localctx;
-}
-
-//----------------- ProjectionOrderByClauseContext ------------------------------------------------------------------
-
-ClickHouseParser::ProjectionOrderByClauseContext::ProjectionOrderByClauseContext(ParserRuleContext *parent, size_t invokingState)
-  : ParserRuleContext(parent, invokingState) {
-}
-
-tree::TerminalNode* ClickHouseParser::ProjectionOrderByClauseContext::ORDER() {
-  return getToken(ClickHouseParser::ORDER, 0);
-}
-
-tree::TerminalNode* ClickHouseParser::ProjectionOrderByClauseContext::BY() {
-  return getToken(ClickHouseParser::BY, 0);
-}
-
-ClickHouseParser::ColumnExprListContext* ClickHouseParser::ProjectionOrderByClauseContext::columnExprList() {
-  return getRuleContext<ClickHouseParser::ColumnExprListContext>(0);
-}
-
-
-size_t ClickHouseParser::ProjectionOrderByClauseContext::getRuleIndex() const {
-  return ClickHouseParser::RuleProjectionOrderByClause;
-}
-
-antlrcpp::Any ClickHouseParser::ProjectionOrderByClauseContext::accept(tree::ParseTreeVisitor *visitor) {
-  if (auto parserVisitor = dynamic_cast<ClickHouseParserVisitor*>(visitor))
-    return parserVisitor->visitProjectionOrderByClause(this);
-  else
-    return visitor->visitChildren(this);
-}
-
-ClickHouseParser::ProjectionOrderByClauseContext* ClickHouseParser::projectionOrderByClause() {
-  ProjectionOrderByClauseContext *_localctx = _tracker.createInstance<ProjectionOrderByClauseContext>(_ctx, getState());
-  enterRule(_localctx, 126, ClickHouseParser::RuleProjectionOrderByClause);
-
-  auto onExit = finally([=] {
-    exitRule();
-  });
-  try {
-    enterOuterAlt(_localctx, 1);
-    setState(1242);
-    match(ClickHouseParser::ORDER);
-    setState(1243);
-    match(ClickHouseParser::BY);
-    setState(1244);
-    columnExprList();
-
-  }
-  catch (RecognitionException &e) {
-    _errHandler->reportError(this, e);
-    _localctx->exception = std::current_exception();
-    _errHandler->recover(this, _localctx->exception);
-  }
-
-  return _localctx;
-}
-
-//----------------- LimitByClauseContext ------------------------------------------------------------------
-
-ClickHouseParser::LimitByClauseContext::LimitByClauseContext(ParserRuleContext *parent, size_t invokingState)
-  : ParserRuleContext(parent, invokingState) {
-}
-
-tree::TerminalNode* ClickHouseParser::LimitByClauseContext::LIMIT() {
-  return getToken(ClickHouseParser::LIMIT, 0);
-}
-
-ClickHouseParser::LimitExprContext* ClickHouseParser::LimitByClauseContext::limitExpr() {
-  return getRuleContext<ClickHouseParser::LimitExprContext>(0);
-}
-
-tree::TerminalNode* ClickHouseParser::LimitByClauseContext::BY() {
-  return getToken(ClickHouseParser::BY, 0);
-}
-
-ClickHouseParser::ColumnExprListContext* ClickHouseParser::LimitByClauseContext::columnExprList() {
-  return getRuleContext<ClickHouseParser::ColumnExprListContext>(0);
-}
-
-
-size_t ClickHouseParser::LimitByClauseContext::getRuleIndex() const {
-  return ClickHouseParser::RuleLimitByClause;
-}
-
-antlrcpp::Any ClickHouseParser::LimitByClauseContext::accept(tree::ParseTreeVisitor *visitor) {
-  if (auto parserVisitor = dynamic_cast<ClickHouseParserVisitor*>(visitor))
-    return parserVisitor->visitLimitByClause(this);
-  else
-    return visitor->visitChildren(this);
-}
-
-ClickHouseParser::LimitByClauseContext* ClickHouseParser::limitByClause() {
-  LimitByClauseContext *_localctx = _tracker.createInstance<LimitByClauseContext>(_ctx, getState());
-  enterRule(_localctx, 128, ClickHouseParser::RuleLimitByClause);
-
-  auto onExit = finally([=] {
-    exitRule();
-  });
-  try {
-    enterOuterAlt(_localctx, 1);
-    setState(1246);
-    match(ClickHouseParser::LIMIT);
-    setState(1247);
-    limitExpr();
-    setState(1248);
-    match(ClickHouseParser::BY);
-    setState(1249);
-    columnExprList();
-
-  }
-  catch (RecognitionException &e) {
-    _errHandler->reportError(this, e);
-    _localctx->exception = std::current_exception();
-    _errHandler->recover(this, _localctx->exception);
-  }
-
-  return _localctx;
-}
-
-//----------------- LimitClauseContext ------------------------------------------------------------------
-
-ClickHouseParser::LimitClauseContext::LimitClauseContext(ParserRuleContext *parent, size_t invokingState)
-  : ParserRuleContext(parent, invokingState) {
-}
-
-tree::TerminalNode* ClickHouseParser::LimitClauseContext::LIMIT() {
-  return getToken(ClickHouseParser::LIMIT, 0);
-}
-
-ClickHouseParser::LimitExprContext* ClickHouseParser::LimitClauseContext::limitExpr() {
-  return getRuleContext<ClickHouseParser::LimitExprContext>(0);
-}
-
-tree::TerminalNode* ClickHouseParser::LimitClauseContext::WITH() {
-  return getToken(ClickHouseParser::WITH, 0);
-}
-
-tree::TerminalNode* ClickHouseParser::LimitClauseContext::TIES() {
-  return getToken(ClickHouseParser::TIES, 0);
-}
-
-
-size_t ClickHouseParser::LimitClauseContext::getRuleIndex() const {
-  return ClickHouseParser::RuleLimitClause;
-}
-
-antlrcpp::Any ClickHouseParser::LimitClauseContext::accept(tree::ParseTreeVisitor *visitor) {
-  if (auto parserVisitor = dynamic_cast<ClickHouseParserVisitor*>(visitor))
-    return parserVisitor->visitLimitClause(this);
-  else
-    return visitor->visitChildren(this);
-}
-
-ClickHouseParser::LimitClauseContext* ClickHouseParser::limitClause() {
-  LimitClauseContext *_localctx = _tracker.createInstance<LimitClauseContext>(_ctx, getState());
-  enterRule(_localctx, 130, ClickHouseParser::RuleLimitClause);
-  size_t _la = 0;
-
-  auto onExit = finally([=] {
-    exitRule();
-  });
-  try {
-    enterOuterAlt(_localctx, 1);
-    setState(1251);
-    match(ClickHouseParser::LIMIT);
-    setState(1252);
-    limitExpr();
-    setState(1255);
-    _errHandler->sync(this);
-
-    _la = _input->LA(1);
-    if (_la == ClickHouseParser::WITH) {
-      setState(1253);
-      match(ClickHouseParser::WITH);
-      setState(1254);
-      match(ClickHouseParser::TIES);
-    }
-
-  }
-  catch (RecognitionException &e) {
-    _errHandler->reportError(this, e);
-    _localctx->exception = std::current_exception();
-    _errHandler->recover(this, _localctx->exception);
-  }
-
-  return _localctx;
-}
-
-//----------------- SettingsClauseContext ------------------------------------------------------------------
-
-ClickHouseParser::SettingsClauseContext::SettingsClauseContext(ParserRuleContext *parent, size_t invokingState)
-  : ParserRuleContext(parent, invokingState) {
-}
-
-tree::TerminalNode* ClickHouseParser::SettingsClauseContext::SETTINGS() {
-  return getToken(ClickHouseParser::SETTINGS, 0);
-}
-
-ClickHouseParser::SettingExprListContext* ClickHouseParser::SettingsClauseContext::settingExprList() {
-  return getRuleContext<ClickHouseParser::SettingExprListContext>(0);
-}
-
-
-size_t ClickHouseParser::SettingsClauseContext::getRuleIndex() const {
-  return ClickHouseParser::RuleSettingsClause;
-}
-
-antlrcpp::Any ClickHouseParser::SettingsClauseContext::accept(tree::ParseTreeVisitor *visitor) {
-  if (auto parserVisitor = dynamic_cast<ClickHouseParserVisitor*>(visitor))
-    return parserVisitor->visitSettingsClause(this);
-  else
-    return visitor->visitChildren(this);
-}
-
-ClickHouseParser::SettingsClauseContext* ClickHouseParser::settingsClause() {
-  SettingsClauseContext *_localctx = _tracker.createInstance<SettingsClauseContext>(_ctx, getState());
-  enterRule(_localctx, 132, ClickHouseParser::RuleSettingsClause);
-
-  auto onExit = finally([=] {
-    exitRule();
-  });
-  try {
-    enterOuterAlt(_localctx, 1);
-    setState(1257);
-    match(ClickHouseParser::SETTINGS);
-    setState(1258);
-    settingExprList();
-
-  }
-  catch (RecognitionException &e) {
-    _errHandler->reportError(this, e);
-    _localctx->exception = std::current_exception();
-    _errHandler->recover(this, _localctx->exception);
-  }
-
-  return _localctx;
-}
-
-//----------------- JoinExprContext ------------------------------------------------------------------
-
-ClickHouseParser::JoinExprContext::JoinExprContext(ParserRuleContext *parent, size_t invokingState)
-  : ParserRuleContext(parent, invokingState) {
-}
-
-
-size_t ClickHouseParser::JoinExprContext::getRuleIndex() const {
-  return ClickHouseParser::RuleJoinExpr;
-}
-
-void ClickHouseParser::JoinExprContext::copyFrom(JoinExprContext *ctx) {
-  ParserRuleContext::copyFrom(ctx);
-}
-
-//----------------- JoinExprOpContext ------------------------------------------------------------------
-
-std::vector<ClickHouseParser::JoinExprContext *> ClickHouseParser::JoinExprOpContext::joinExpr() {
-  return getRuleContexts<ClickHouseParser::JoinExprContext>();
-}
-
-ClickHouseParser::JoinExprContext* ClickHouseParser::JoinExprOpContext::joinExpr(size_t i) {
-  return getRuleContext<ClickHouseParser::JoinExprContext>(i);
-}
-
-tree::TerminalNode* ClickHouseParser::JoinExprOpContext::JOIN() {
-  return getToken(ClickHouseParser::JOIN, 0);
-}
-
-ClickHouseParser::JoinConstraintClauseContext* ClickHouseParser::JoinExprOpContext::joinConstraintClause() {
-  return getRuleContext<ClickHouseParser::JoinConstraintClauseContext>(0);
-}
-
-ClickHouseParser::JoinOpContext* ClickHouseParser::JoinExprOpContext::joinOp() {
-  return getRuleContext<ClickHouseParser::JoinOpContext>(0);
-}
-
-tree::TerminalNode* ClickHouseParser::JoinExprOpContext::GLOBAL() {
-  return getToken(ClickHouseParser::GLOBAL, 0);
-}
-
-tree::TerminalNode* ClickHouseParser::JoinExprOpContext::LOCAL() {
-  return getToken(ClickHouseParser::LOCAL, 0);
-}
-
-ClickHouseParser::JoinExprOpContext::JoinExprOpContext(JoinExprContext *ctx) { copyFrom(ctx); }
-
-antlrcpp::Any ClickHouseParser::JoinExprOpContext::accept(tree::ParseTreeVisitor *visitor) {
-  if (auto parserVisitor = dynamic_cast<ClickHouseParserVisitor*>(visitor))
-    return parserVisitor->visitJoinExprOp(this);
-  else
-    return visitor->visitChildren(this);
-}
-//----------------- JoinExprTableContext ------------------------------------------------------------------
-
-ClickHouseParser::TableExprContext* ClickHouseParser::JoinExprTableContext::tableExpr() {
-  return getRuleContext<ClickHouseParser::TableExprContext>(0);
-}
-
-tree::TerminalNode* ClickHouseParser::JoinExprTableContext::FINAL() {
-  return getToken(ClickHouseParser::FINAL, 0);
-}
-
-ClickHouseParser::SampleClauseContext* ClickHouseParser::JoinExprTableContext::sampleClause() {
-  return getRuleContext<ClickHouseParser::SampleClauseContext>(0);
-}
-
-ClickHouseParser::JoinExprTableContext::JoinExprTableContext(JoinExprContext *ctx) { copyFrom(ctx); }
-
-antlrcpp::Any ClickHouseParser::JoinExprTableContext::accept(tree::ParseTreeVisitor *visitor) {
-  if (auto parserVisitor = dynamic_cast<ClickHouseParserVisitor*>(visitor))
-    return parserVisitor->visitJoinExprTable(this);
-  else
-    return visitor->visitChildren(this);
-}
-//----------------- JoinExprParensContext ------------------------------------------------------------------
-
-tree::TerminalNode* ClickHouseParser::JoinExprParensContext::LPAREN() {
-  return getToken(ClickHouseParser::LPAREN, 0);
-}
-
-ClickHouseParser::JoinExprContext* ClickHouseParser::JoinExprParensContext::joinExpr() {
-  return getRuleContext<ClickHouseParser::JoinExprContext>(0);
-}
-
-tree::TerminalNode* ClickHouseParser::JoinExprParensContext::RPAREN() {
-  return getToken(ClickHouseParser::RPAREN, 0);
-}
-
-ClickHouseParser::JoinExprParensContext::JoinExprParensContext(JoinExprContext *ctx) { copyFrom(ctx); }
-
-antlrcpp::Any ClickHouseParser::JoinExprParensContext::accept(tree::ParseTreeVisitor *visitor) {
-  if (auto parserVisitor = dynamic_cast<ClickHouseParserVisitor*>(visitor))
-    return parserVisitor->visitJoinExprParens(this);
-  else
-    return visitor->visitChildren(this);
-}
-//----------------- JoinExprCrossOpContext ------------------------------------------------------------------
-
-std::vector<ClickHouseParser::JoinExprContext *> ClickHouseParser::JoinExprCrossOpContext::joinExpr() {
-  return getRuleContexts<ClickHouseParser::JoinExprContext>();
-}
-
-ClickHouseParser::JoinExprContext* ClickHouseParser::JoinExprCrossOpContext::joinExpr(size_t i) {
-  return getRuleContext<ClickHouseParser::JoinExprContext>(i);
-}
-
-ClickHouseParser::JoinOpCrossContext* ClickHouseParser::JoinExprCrossOpContext::joinOpCross() {
-  return getRuleContext<ClickHouseParser::JoinOpCrossContext>(0);
-}
-
-ClickHouseParser::JoinExprCrossOpContext::JoinExprCrossOpContext(JoinExprContext *ctx) { copyFrom(ctx); }
-
-antlrcpp::Any ClickHouseParser::JoinExprCrossOpContext::accept(tree::ParseTreeVisitor *visitor) {
-  if (auto parserVisitor = dynamic_cast<ClickHouseParserVisitor*>(visitor))
-    return parserVisitor->visitJoinExprCrossOp(this);
-  else
-    return visitor->visitChildren(this);
-}
-
-ClickHouseParser::JoinExprContext* ClickHouseParser::joinExpr() {
-  return joinExpr(0);
-}
-
-ClickHouseParser::JoinExprContext* ClickHouseParser::joinExpr(int precedence) {
-  ParserRuleContext *parentContext = _ctx;
-  size_t parentState = getState();
-  ClickHouseParser::JoinExprContext *_localctx = _tracker.createInstance<JoinExprContext>(_ctx, parentState);
-  ClickHouseParser::JoinExprContext *previousContext = _localctx;
-  (void)previousContext; // Silence compiler, in case the context is not used by generated code.
-  size_t startState = 134;
-  enterRecursionRule(_localctx, 134, ClickHouseParser::RuleJoinExpr, precedence);
-
-  size_t _la = 0;
-
-  auto onExit = finally([=] {
-    unrollRecursionContexts(parentContext);
-  });
-  try {
-    size_t alt;
-    enterOuterAlt(_localctx, 1);
-    setState(1272);
-    _errHandler->sync(this);
-    switch (getInterpreter<atn::ParserATNSimulator>()->adaptivePredict(_input, 160, _ctx)) {
-    case 1: {
-      _localctx = _tracker.createInstance<JoinExprTableContext>(_localctx);
-      _ctx = _localctx;
-      previousContext = _localctx;
-
-      setState(1261);
-      tableExpr(0);
-      setState(1263);
-      _errHandler->sync(this);
-
-      switch (getInterpreter<atn::ParserATNSimulator>()->adaptivePredict(_input, 158, _ctx)) {
-      case 1: {
-        setState(1262);
-        match(ClickHouseParser::FINAL);
-        break;
-      }
-
-      }
-      setState(1266);
-      _errHandler->sync(this);
-
-      switch (getInterpreter<atn::ParserATNSimulator>()->adaptivePredict(_input, 159, _ctx)) {
-      case 1: {
-        setState(1265);
-        sampleClause();
-        break;
-      }
-
-      }
-      break;
-    }
-
-    case 2: {
-      _localctx = _tracker.createInstance<JoinExprParensContext>(_localctx);
-      _ctx = _localctx;
-      previousContext = _localctx;
-      setState(1268);
-      match(ClickHouseParser::LPAREN);
-      setState(1269);
-      joinExpr(0);
-      setState(1270);
-      match(ClickHouseParser::RPAREN);
-      break;
-    }
-
-    }
-    _ctx->stop = _input->LT(-1);
-    setState(1291);
-    _errHandler->sync(this);
-    alt = getInterpreter<atn::ParserATNSimulator>()->adaptivePredict(_input, 164, _ctx);
-    while (alt != 2 && alt != atn::ATN::INVALID_ALT_NUMBER) {
-      if (alt == 1) {
-        if (!_parseListeners.empty())
-          triggerExitRuleEvent();
-        previousContext = _localctx;
-        setState(1289);
-        _errHandler->sync(this);
-        switch (getInterpreter<atn::ParserATNSimulator>()->adaptivePredict(_input, 163, _ctx)) {
-        case 1: {
-          auto newContext = _tracker.createInstance<JoinExprCrossOpContext>(_tracker.createInstance<JoinExprContext>(parentContext, parentState));
-          _localctx = newContext;
-          pushNewRecursionContext(newContext, startState, RuleJoinExpr);
-          setState(1274);
-
-          if (!(precpred(_ctx, 3))) throw FailedPredicateException(this, "precpred(_ctx, 3)");
-          setState(1275);
-          joinOpCross();
-          setState(1276);
-          joinExpr(4);
-          break;
-        }
-
-        case 2: {
-          auto newContext = _tracker.createInstance<JoinExprOpContext>(_tracker.createInstance<JoinExprContext>(parentContext, parentState));
-          _localctx = newContext;
-          pushNewRecursionContext(newContext, startState, RuleJoinExpr);
-          setState(1278);
-
-          if (!(precpred(_ctx, 4))) throw FailedPredicateException(this, "precpred(_ctx, 4)");
-          setState(1280);
-          _errHandler->sync(this);
-
-          _la = _input->LA(1);
-          if (_la == ClickHouseParser::GLOBAL
-
-          || _la == ClickHouseParser::LOCAL) {
-            setState(1279);
-            _la = _input->LA(1);
-            if (!(_la == ClickHouseParser::GLOBAL
-
-            || _la == ClickHouseParser::LOCAL)) {
-              _errHandler->recoverInline(this);
-            }
-            else {
-              _errHandler->reportMatch(this);
-              consume();
-            }
-          }
-          setState(1283);
-          _errHandler->sync(this);
-
-          _la = _input->LA(1);
-          if (((((_la - 4) & ~ 0x3fULL) == 0) &&
-            ((1ULL << (_la - 4)) & ((1ULL << (ClickHouseParser::ALL - 4))
-            | (1ULL << (ClickHouseParser::ANTI - 4))
-            | (1ULL << (ClickHouseParser::ANY - 4))
-            | (1ULL << (ClickHouseParser::ASOF - 4))
-            | (1ULL << (ClickHouseParser::FULL - 4)))) != 0) || ((((_la - 81) & ~ 0x3fULL) == 0) &&
-            ((1ULL << (_la - 81)) & ((1ULL << (ClickHouseParser::INNER - 81))
-            | (1ULL << (ClickHouseParser::LEFT - 81))
-            | (1ULL << (ClickHouseParser::RIGHT - 81))
-            | (1ULL << (ClickHouseParser::SEMI - 81)))) != 0)) {
-            setState(1282);
-            joinOp();
-          }
-          setState(1285);
-          match(ClickHouseParser::JOIN);
-          setState(1286);
-          joinExpr(0);
-          setState(1287);
-          joinConstraintClause();
-          break;
-        }
-
-        }
-      }
-      setState(1293);
-      _errHandler->sync(this);
-      alt = getInterpreter<atn::ParserATNSimulator>()->adaptivePredict(_input, 164, _ctx);
-    }
-  }
-  catch (RecognitionException &e) {
-    _errHandler->reportError(this, e);
-    _localctx->exception = std::current_exception();
-    _errHandler->recover(this, _localctx->exception);
-  }
-  return _localctx;
-}
-
-//----------------- JoinOpContext ------------------------------------------------------------------
-
-ClickHouseParser::JoinOpContext::JoinOpContext(ParserRuleContext *parent, size_t invokingState)
-  : ParserRuleContext(parent, invokingState) {
-}
-
-
-size_t ClickHouseParser::JoinOpContext::getRuleIndex() const {
-  return ClickHouseParser::RuleJoinOp;
-}
-
-void ClickHouseParser::JoinOpContext::copyFrom(JoinOpContext *ctx) {
-  ParserRuleContext::copyFrom(ctx);
-}
-
-//----------------- JoinOpFullContext ------------------------------------------------------------------
-
-tree::TerminalNode* ClickHouseParser::JoinOpFullContext::FULL() {
-  return getToken(ClickHouseParser::FULL, 0);
-}
-
-tree::TerminalNode* ClickHouseParser::JoinOpFullContext::OUTER() {
-  return getToken(ClickHouseParser::OUTER, 0);
-}
-
-tree::TerminalNode* ClickHouseParser::JoinOpFullContext::ALL() {
-  return getToken(ClickHouseParser::ALL, 0);
-}
-
-tree::TerminalNode* ClickHouseParser::JoinOpFullContext::ANY() {
-  return getToken(ClickHouseParser::ANY, 0);
-}
-
-ClickHouseParser::JoinOpFullContext::JoinOpFullContext(JoinOpContext *ctx) { copyFrom(ctx); }
-
-antlrcpp::Any ClickHouseParser::JoinOpFullContext::accept(tree::ParseTreeVisitor *visitor) {
-  if (auto parserVisitor = dynamic_cast<ClickHouseParserVisitor*>(visitor))
-    return parserVisitor->visitJoinOpFull(this);
-  else
-    return visitor->visitChildren(this);
-}
-//----------------- JoinOpInnerContext ------------------------------------------------------------------
-
-tree::TerminalNode* ClickHouseParser::JoinOpInnerContext::INNER() {
-  return getToken(ClickHouseParser::INNER, 0);
-}
-
-tree::TerminalNode* ClickHouseParser::JoinOpInnerContext::ALL() {
-  return getToken(ClickHouseParser::ALL, 0);
-}
-
-tree::TerminalNode* ClickHouseParser::JoinOpInnerContext::ANY() {
-  return getToken(ClickHouseParser::ANY, 0);
-}
-
-tree::TerminalNode* ClickHouseParser::JoinOpInnerContext::ASOF() {
-  return getToken(ClickHouseParser::ASOF, 0);
-}
-
-ClickHouseParser::JoinOpInnerContext::JoinOpInnerContext(JoinOpContext *ctx) { copyFrom(ctx); }
-
-antlrcpp::Any ClickHouseParser::JoinOpInnerContext::accept(tree::ParseTreeVisitor *visitor) {
-  if (auto parserVisitor = dynamic_cast<ClickHouseParserVisitor*>(visitor))
-    return parserVisitor->visitJoinOpInner(this);
-  else
-    return visitor->visitChildren(this);
-}
-//----------------- JoinOpLeftRightContext ------------------------------------------------------------------
-
-tree::TerminalNode* ClickHouseParser::JoinOpLeftRightContext::LEFT() {
-  return getToken(ClickHouseParser::LEFT, 0);
-}
-
-tree::TerminalNode* ClickHouseParser::JoinOpLeftRightContext::RIGHT() {
-  return getToken(ClickHouseParser::RIGHT, 0);
-}
-
-tree::TerminalNode* ClickHouseParser::JoinOpLeftRightContext::OUTER() {
-  return getToken(ClickHouseParser::OUTER, 0);
-}
-
-tree::TerminalNode* ClickHouseParser::JoinOpLeftRightContext::SEMI() {
-  return getToken(ClickHouseParser::SEMI, 0);
-}
-
-tree::TerminalNode* ClickHouseParser::JoinOpLeftRightContext::ALL() {
-  return getToken(ClickHouseParser::ALL, 0);
-}
-
-tree::TerminalNode* ClickHouseParser::JoinOpLeftRightContext::ANTI() {
-  return getToken(ClickHouseParser::ANTI, 0);
-}
-
-tree::TerminalNode* ClickHouseParser::JoinOpLeftRightContext::ANY() {
-  return getToken(ClickHouseParser::ANY, 0);
-}
-
-tree::TerminalNode* ClickHouseParser::JoinOpLeftRightContext::ASOF() {
-  return getToken(ClickHouseParser::ASOF, 0);
-}
-
-ClickHouseParser::JoinOpLeftRightContext::JoinOpLeftRightContext(JoinOpContext *ctx) { copyFrom(ctx); }
-
-antlrcpp::Any ClickHouseParser::JoinOpLeftRightContext::accept(tree::ParseTreeVisitor *visitor) {
-  if (auto parserVisitor = dynamic_cast<ClickHouseParserVisitor*>(visitor))
-    return parserVisitor->visitJoinOpLeftRight(this);
-  else
-    return visitor->visitChildren(this);
-}
-ClickHouseParser::JoinOpContext* ClickHouseParser::joinOp() {
-  JoinOpContext *_localctx = _tracker.createInstance<JoinOpContext>(_ctx, getState());
-  enterRule(_localctx, 136, ClickHouseParser::RuleJoinOp);
-  size_t _la = 0;
-
-  auto onExit = finally([=] {
-    exitRule();
-  });
-  try {
-    setState(1337);
-    _errHandler->sync(this);
-    switch (getInterpreter<atn::ParserATNSimulator>()->adaptivePredict(_input, 178, _ctx)) {
-    case 1: {
-      _localctx = dynamic_cast<JoinOpContext *>(_tracker.createInstance<ClickHouseParser::JoinOpInnerContext>(_localctx));
-      enterOuterAlt(_localctx, 1);
-      setState(1303);
-      _errHandler->sync(this);
-      switch (getInterpreter<atn::ParserATNSimulator>()->adaptivePredict(_input, 167, _ctx)) {
-      case 1: {
-        setState(1295);
-        _errHandler->sync(this);
-
-        _la = _input->LA(1);
-        if ((((_la & ~ 0x3fULL) == 0) &&
-          ((1ULL << _la) & ((1ULL << ClickHouseParser::ALL)
-          | (1ULL << ClickHouseParser::ANY)
-          | (1ULL << ClickHouseParser::ASOF))) != 0)) {
-          setState(1294);
-          _la = _input->LA(1);
-          if (!((((_la & ~ 0x3fULL) == 0) &&
-            ((1ULL << _la) & ((1ULL << ClickHouseParser::ALL)
-            | (1ULL << ClickHouseParser::ANY)
-            | (1ULL << ClickHouseParser::ASOF))) != 0))) {
-            _errHandler->recoverInline(this);
-          }
-          else {
-            _errHandler->reportMatch(this);
-            consume();
-          }
-        }
-        setState(1297);
-        match(ClickHouseParser::INNER);
-        break;
-      }
-
-      case 2: {
-        setState(1298);
-        match(ClickHouseParser::INNER);
-        setState(1300);
-        _errHandler->sync(this);
-
-        _la = _input->LA(1);
-        if ((((_la & ~ 0x3fULL) == 0) &&
-          ((1ULL << _la) & ((1ULL << ClickHouseParser::ALL)
-          | (1ULL << ClickHouseParser::ANY)
-          | (1ULL << ClickHouseParser::ASOF))) != 0)) {
-          setState(1299);
-          _la = _input->LA(1);
-          if (!((((_la & ~ 0x3fULL) == 0) &&
-            ((1ULL << _la) & ((1ULL << ClickHouseParser::ALL)
-            | (1ULL << ClickHouseParser::ANY)
-            | (1ULL << ClickHouseParser::ASOF))) != 0))) {
-            _errHandler->recoverInline(this);
-          }
-          else {
-            _errHandler->reportMatch(this);
-            consume();
-          }
-        }
-        break;
-      }
-
-      case 3: {
-        setState(1302);
-        _la = _input->LA(1);
-        if (!((((_la & ~ 0x3fULL) == 0) &&
-          ((1ULL << _la) & ((1ULL << ClickHouseParser::ALL)
-          | (1ULL << ClickHouseParser::ANY)
-          | (1ULL << ClickHouseParser::ASOF))) != 0))) {
-          _errHandler->recoverInline(this);
-        }
-        else {
-          _errHandler->reportMatch(this);
-          consume();
-        }
-        break;
-      }
-
-      }
-      break;
-    }
-
-    case 2: {
-      _localctx = dynamic_cast<JoinOpContext *>(_tracker.createInstance<ClickHouseParser::JoinOpLeftRightContext>(_localctx));
-      enterOuterAlt(_localctx, 2);
-      setState(1319);
-      _errHandler->sync(this);
-      switch (getInterpreter<atn::ParserATNSimulator>()->adaptivePredict(_input, 172, _ctx)) {
-      case 1: {
-        setState(1306);
-        _errHandler->sync(this);
-
-        _la = _input->LA(1);
-        if ((((_la & ~ 0x3fULL) == 0) &&
-          ((1ULL << _la) & ((1ULL << ClickHouseParser::ALL)
-          | (1ULL << ClickHouseParser::ANTI)
-          | (1ULL << ClickHouseParser::ANY)
-          | (1ULL << ClickHouseParser::ASOF))) != 0) || _la == ClickHouseParser::SEMI) {
-          setState(1305);
-          _la = _input->LA(1);
-          if (!((((_la & ~ 0x3fULL) == 0) &&
-            ((1ULL << _la) & ((1ULL << ClickHouseParser::ALL)
-            | (1ULL << ClickHouseParser::ANTI)
-            | (1ULL << ClickHouseParser::ANY)
-            | (1ULL << ClickHouseParser::ASOF))) != 0) || _la == ClickHouseParser::SEMI)) {
-            _errHandler->recoverInline(this);
-          }
-          else {
-            _errHandler->reportMatch(this);
-            consume();
-          }
-        }
-        setState(1308);
-        _la = _input->LA(1);
-        if (!(_la == ClickHouseParser::LEFT
-
-        || _la == ClickHouseParser::RIGHT)) {
-          _errHandler->recoverInline(this);
-        }
-        else {
-          _errHandler->reportMatch(this);
-          consume();
-        }
-        setState(1310);
-        _errHandler->sync(this);
-
-        _la = _input->LA(1);
-        if (_la == ClickHouseParser::OUTER) {
-          setState(1309);
-          match(ClickHouseParser::OUTER);
-        }
-        break;
-      }
-
-      case 2: {
-        setState(1312);
-        _la = _input->LA(1);
-        if (!(_la == ClickHouseParser::LEFT
-
-        || _la == ClickHouseParser::RIGHT)) {
-          _errHandler->recoverInline(this);
-        }
-        else {
-          _errHandler->reportMatch(this);
-          consume();
-        }
-        setState(1314);
-        _errHandler->sync(this);
-
-        _la = _input->LA(1);
-        if (_la == ClickHouseParser::OUTER) {
-          setState(1313);
-          match(ClickHouseParser::OUTER);
-        }
-        setState(1317);
-        _errHandler->sync(this);
-
-        _la = _input->LA(1);
-        if ((((_la & ~ 0x3fULL) == 0) &&
-          ((1ULL << _la) & ((1ULL << ClickHouseParser::ALL)
-          | (1ULL << ClickHouseParser::ANTI)
-          | (1ULL << ClickHouseParser::ANY)
-          | (1ULL << ClickHouseParser::ASOF))) != 0) || _la == ClickHouseParser::SEMI) {
-          setState(1316);
-          _la = _input->LA(1);
-          if (!((((_la & ~ 0x3fULL) == 0) &&
-            ((1ULL << _la) & ((1ULL << ClickHouseParser::ALL)
-            | (1ULL << ClickHouseParser::ANTI)
-            | (1ULL << ClickHouseParser::ANY)
-            | (1ULL << ClickHouseParser::ASOF))) != 0) || _la == ClickHouseParser::SEMI)) {
-            _errHandler->recoverInline(this);
-          }
-          else {
-            _errHandler->reportMatch(this);
-            consume();
-          }
-        }
-        break;
-      }
-
-      }
-      break;
-    }
-
-    case 3: {
-      _localctx = dynamic_cast<JoinOpContext *>(_tracker.createInstance<ClickHouseParser::JoinOpFullContext>(_localctx));
-      enterOuterAlt(_localctx, 3);
-      setState(1335);
-      _errHandler->sync(this);
-      switch (getInterpreter<atn::ParserATNSimulator>()->adaptivePredict(_input, 177, _ctx)) {
-      case 1: {
-        setState(1322);
-        _errHandler->sync(this);
-
-        _la = _input->LA(1);
-        if (_la == ClickHouseParser::ALL
-
-        || _la == ClickHouseParser::ANY) {
-          setState(1321);
-          _la = _input->LA(1);
-          if (!(_la == ClickHouseParser::ALL
-
-          || _la == ClickHouseParser::ANY)) {
-            _errHandler->recoverInline(this);
-          }
-          else {
-            _errHandler->reportMatch(this);
-            consume();
-          }
-        }
-        setState(1324);
-        match(ClickHouseParser::FULL);
-        setState(1326);
-        _errHandler->sync(this);
-
-        _la = _input->LA(1);
-        if (_la == ClickHouseParser::OUTER) {
-          setState(1325);
-          match(ClickHouseParser::OUTER);
-        }
-        break;
-      }
-
-      case 2: {
-        setState(1328);
-        match(ClickHouseParser::FULL);
-        setState(1330);
-        _errHandler->sync(this);
-
-        _la = _input->LA(1);
-        if (_la == ClickHouseParser::OUTER) {
-          setState(1329);
-          match(ClickHouseParser::OUTER);
-        }
-        setState(1333);
-        _errHandler->sync(this);
-
-        _la = _input->LA(1);
-        if (_la == ClickHouseParser::ALL
-
-        || _la == ClickHouseParser::ANY) {
-          setState(1332);
-          _la = _input->LA(1);
-          if (!(_la == ClickHouseParser::ALL
-
-          || _la == ClickHouseParser::ANY)) {
-            _errHandler->recoverInline(this);
-          }
-          else {
-            _errHandler->reportMatch(this);
-            consume();
-          }
-        }
-        break;
-      }
-
-      }
-      break;
-    }
-
-    }
-
-  }
-  catch (RecognitionException &e) {
-    _errHandler->reportError(this, e);
-    _localctx->exception = std::current_exception();
-    _errHandler->recover(this, _localctx->exception);
-  }
-
-  return _localctx;
-}
-
-//----------------- JoinOpCrossContext ------------------------------------------------------------------
-
-ClickHouseParser::JoinOpCrossContext::JoinOpCrossContext(ParserRuleContext *parent, size_t invokingState)
-  : ParserRuleContext(parent, invokingState) {
-}
-
-tree::TerminalNode* ClickHouseParser::JoinOpCrossContext::CROSS() {
-  return getToken(ClickHouseParser::CROSS, 0);
-}
-
-tree::TerminalNode* ClickHouseParser::JoinOpCrossContext::JOIN() {
-  return getToken(ClickHouseParser::JOIN, 0);
-}
-
-tree::TerminalNode* ClickHouseParser::JoinOpCrossContext::GLOBAL() {
-  return getToken(ClickHouseParser::GLOBAL, 0);
-}
-
-tree::TerminalNode* ClickHouseParser::JoinOpCrossContext::LOCAL() {
-  return getToken(ClickHouseParser::LOCAL, 0);
-}
-
-tree::TerminalNode* ClickHouseParser::JoinOpCrossContext::COMMA() {
-  return getToken(ClickHouseParser::COMMA, 0);
-}
-
-
-size_t ClickHouseParser::JoinOpCrossContext::getRuleIndex() const {
-  return ClickHouseParser::RuleJoinOpCross;
-}
-
-antlrcpp::Any ClickHouseParser::JoinOpCrossContext::accept(tree::ParseTreeVisitor *visitor) {
-  if (auto parserVisitor = dynamic_cast<ClickHouseParserVisitor*>(visitor))
-    return parserVisitor->visitJoinOpCross(this);
-  else
-    return visitor->visitChildren(this);
-}
-
-ClickHouseParser::JoinOpCrossContext* ClickHouseParser::joinOpCross() {
-  JoinOpCrossContext *_localctx = _tracker.createInstance<JoinOpCrossContext>(_ctx, getState());
-  enterRule(_localctx, 138, ClickHouseParser::RuleJoinOpCross);
-  size_t _la = 0;
-
-  auto onExit = finally([=] {
-    exitRule();
-  });
-  try {
-    setState(1345);
-    _errHandler->sync(this);
-    switch (_input->LA(1)) {
-      case ClickHouseParser::CROSS:
-      case ClickHouseParser::GLOBAL:
-      case ClickHouseParser::LOCAL: {
-        enterOuterAlt(_localctx, 1);
-        setState(1340);
-        _errHandler->sync(this);
-
-        _la = _input->LA(1);
-        if (_la == ClickHouseParser::GLOBAL
-
-        || _la == ClickHouseParser::LOCAL) {
-          setState(1339);
-          _la = _input->LA(1);
-          if (!(_la == ClickHouseParser::GLOBAL
-
-          || _la == ClickHouseParser::LOCAL)) {
-            _errHandler->recoverInline(this);
-          }
-          else {
-            _errHandler->reportMatch(this);
-            consume();
-          }
-        }
-        setState(1342);
-        match(ClickHouseParser::CROSS);
-        setState(1343);
-        match(ClickHouseParser::JOIN);
-        break;
-      }
-
-      case ClickHouseParser::COMMA: {
-        enterOuterAlt(_localctx, 2);
-        setState(1344);
-        match(ClickHouseParser::COMMA);
-        break;
-      }
-
-    default:
-      throw NoViableAltException(this);
-    }
-
-  }
-  catch (RecognitionException &e) {
-    _errHandler->reportError(this, e);
-    _localctx->exception = std::current_exception();
-    _errHandler->recover(this, _localctx->exception);
-  }
-
-  return _localctx;
-}
-
-//----------------- JoinConstraintClauseContext ------------------------------------------------------------------
-
-ClickHouseParser::JoinConstraintClauseContext::JoinConstraintClauseContext(ParserRuleContext *parent, size_t invokingState)
-  : ParserRuleContext(parent, invokingState) {
-}
-
-tree::TerminalNode* ClickHouseParser::JoinConstraintClauseContext::ON() {
-  return getToken(ClickHouseParser::ON, 0);
-}
-
-ClickHouseParser::ColumnExprListContext* ClickHouseParser::JoinConstraintClauseContext::columnExprList() {
-  return getRuleContext<ClickHouseParser::ColumnExprListContext>(0);
-}
-
-tree::TerminalNode* ClickHouseParser::JoinConstraintClauseContext::USING() {
-  return getToken(ClickHouseParser::USING, 0);
-}
-
-tree::TerminalNode* ClickHouseParser::JoinConstraintClauseContext::LPAREN() {
-  return getToken(ClickHouseParser::LPAREN, 0);
-}
-
-tree::TerminalNode* ClickHouseParser::JoinConstraintClauseContext::RPAREN() {
-  return getToken(ClickHouseParser::RPAREN, 0);
-}
-
-
-size_t ClickHouseParser::JoinConstraintClauseContext::getRuleIndex() const {
-  return ClickHouseParser::RuleJoinConstraintClause;
-}
-
-antlrcpp::Any ClickHouseParser::JoinConstraintClauseContext::accept(tree::ParseTreeVisitor *visitor) {
-  if (auto parserVisitor = dynamic_cast<ClickHouseParserVisitor*>(visitor))
-    return parserVisitor->visitJoinConstraintClause(this);
-  else
-    return visitor->visitChildren(this);
-}
-
-ClickHouseParser::JoinConstraintClauseContext* ClickHouseParser::joinConstraintClause() {
-  JoinConstraintClauseContext *_localctx = _tracker.createInstance<JoinConstraintClauseContext>(_ctx, getState());
-  enterRule(_localctx, 140, ClickHouseParser::RuleJoinConstraintClause);
-
-  auto onExit = finally([=] {
-    exitRule();
-  });
-  try {
-    setState(1356);
-    _errHandler->sync(this);
-    switch (getInterpreter<atn::ParserATNSimulator>()->adaptivePredict(_input, 181, _ctx)) {
-    case 1: {
-      enterOuterAlt(_localctx, 1);
-      setState(1347);
-      match(ClickHouseParser::ON);
-      setState(1348);
-      columnExprList();
-      break;
-    }
-
-    case 2: {
-      enterOuterAlt(_localctx, 2);
-      setState(1349);
-      match(ClickHouseParser::USING);
-      setState(1350);
-      match(ClickHouseParser::LPAREN);
-      setState(1351);
-      columnExprList();
-      setState(1352);
-      match(ClickHouseParser::RPAREN);
-      break;
-    }
-
-    case 3: {
-      enterOuterAlt(_localctx, 3);
-      setState(1354);
-      match(ClickHouseParser::USING);
-      setState(1355);
-      columnExprList();
-      break;
-    }
-
-    }
-
-  }
-  catch (RecognitionException &e) {
-    _errHandler->reportError(this, e);
-    _localctx->exception = std::current_exception();
-    _errHandler->recover(this, _localctx->exception);
-  }
-
-  return _localctx;
-}
-
-//----------------- SampleClauseContext ------------------------------------------------------------------
-
-ClickHouseParser::SampleClauseContext::SampleClauseContext(ParserRuleContext *parent, size_t invokingState)
-  : ParserRuleContext(parent, invokingState) {
-}
-
-tree::TerminalNode* ClickHouseParser::SampleClauseContext::SAMPLE() {
-  return getToken(ClickHouseParser::SAMPLE, 0);
-}
-
-std::vector<ClickHouseParser::RatioExprContext *> ClickHouseParser::SampleClauseContext::ratioExpr() {
-  return getRuleContexts<ClickHouseParser::RatioExprContext>();
-}
-
-ClickHouseParser::RatioExprContext* ClickHouseParser::SampleClauseContext::ratioExpr(size_t i) {
-  return getRuleContext<ClickHouseParser::RatioExprContext>(i);
-}
-
-tree::TerminalNode* ClickHouseParser::SampleClauseContext::OFFSET() {
-  return getToken(ClickHouseParser::OFFSET, 0);
-}
-
-
-size_t ClickHouseParser::SampleClauseContext::getRuleIndex() const {
-  return ClickHouseParser::RuleSampleClause;
-}
-
-antlrcpp::Any ClickHouseParser::SampleClauseContext::accept(tree::ParseTreeVisitor *visitor) {
-  if (auto parserVisitor = dynamic_cast<ClickHouseParserVisitor*>(visitor))
-    return parserVisitor->visitSampleClause(this);
-  else
-    return visitor->visitChildren(this);
-}
-
-ClickHouseParser::SampleClauseContext* ClickHouseParser::sampleClause() {
-  SampleClauseContext *_localctx = _tracker.createInstance<SampleClauseContext>(_ctx, getState());
-  enterRule(_localctx, 142, ClickHouseParser::RuleSampleClause);
-
-  auto onExit = finally([=] {
-    exitRule();
-  });
-  try {
-    enterOuterAlt(_localctx, 1);
-    setState(1358);
-    match(ClickHouseParser::SAMPLE);
-    setState(1359);
-    ratioExpr();
-    setState(1362);
-    _errHandler->sync(this);
-
-    switch (getInterpreter<atn::ParserATNSimulator>()->adaptivePredict(_input, 182, _ctx)) {
-    case 1: {
-      setState(1360);
-      match(ClickHouseParser::OFFSET);
-      setState(1361);
-      ratioExpr();
-      break;
-    }
-
-    }
-
-  }
-  catch (RecognitionException &e) {
-    _errHandler->reportError(this, e);
-    _localctx->exception = std::current_exception();
-    _errHandler->recover(this, _localctx->exception);
-  }
-
-  return _localctx;
-}
-
-//----------------- LimitExprContext ------------------------------------------------------------------
-
-ClickHouseParser::LimitExprContext::LimitExprContext(ParserRuleContext *parent, size_t invokingState)
-  : ParserRuleContext(parent, invokingState) {
-}
-
-std::vector<ClickHouseParser::ColumnExprContext *> ClickHouseParser::LimitExprContext::columnExpr() {
-  return getRuleContexts<ClickHouseParser::ColumnExprContext>();
-}
-
-ClickHouseParser::ColumnExprContext* ClickHouseParser::LimitExprContext::columnExpr(size_t i) {
-  return getRuleContext<ClickHouseParser::ColumnExprContext>(i);
-}
-
-tree::TerminalNode* ClickHouseParser::LimitExprContext::COMMA() {
-  return getToken(ClickHouseParser::COMMA, 0);
-}
-
-tree::TerminalNode* ClickHouseParser::LimitExprContext::OFFSET() {
-  return getToken(ClickHouseParser::OFFSET, 0);
-}
-
-
-size_t ClickHouseParser::LimitExprContext::getRuleIndex() const {
-  return ClickHouseParser::RuleLimitExpr;
-}
-
-antlrcpp::Any ClickHouseParser::LimitExprContext::accept(tree::ParseTreeVisitor *visitor) {
-  if (auto parserVisitor = dynamic_cast<ClickHouseParserVisitor*>(visitor))
-    return parserVisitor->visitLimitExpr(this);
-  else
-    return visitor->visitChildren(this);
-}
-
-ClickHouseParser::LimitExprContext* ClickHouseParser::limitExpr() {
-  LimitExprContext *_localctx = _tracker.createInstance<LimitExprContext>(_ctx, getState());
-  enterRule(_localctx, 144, ClickHouseParser::RuleLimitExpr);
-  size_t _la = 0;
-
-  auto onExit = finally([=] {
-    exitRule();
-  });
-  try {
-    enterOuterAlt(_localctx, 1);
-    setState(1364);
-    columnExpr(0);
-    setState(1367);
-    _errHandler->sync(this);
-
-    _la = _input->LA(1);
-    if (_la == ClickHouseParser::OFFSET || _la == ClickHouseParser::COMMA) {
-      setState(1365);
-      _la = _input->LA(1);
-      if (!(_la == ClickHouseParser::OFFSET || _la == ClickHouseParser::COMMA)) {
-        _errHandler->recoverInline(this);
-      }
-      else {
-        _errHandler->reportMatch(this);
-        consume();
-      }
-      setState(1366);
-      columnExpr(0);
-    }
-
-  }
-  catch (RecognitionException &e) {
-    _errHandler->reportError(this, e);
-    _localctx->exception = std::current_exception();
-    _errHandler->recover(this, _localctx->exception);
-  }
-
-  return _localctx;
-}
-
-//----------------- OrderExprListContext ------------------------------------------------------------------
-
-ClickHouseParser::OrderExprListContext::OrderExprListContext(ParserRuleContext *parent, size_t invokingState)
-  : ParserRuleContext(parent, invokingState) {
-}
-
-std::vector<ClickHouseParser::OrderExprContext *> ClickHouseParser::OrderExprListContext::orderExpr() {
-  return getRuleContexts<ClickHouseParser::OrderExprContext>();
-}
-
-ClickHouseParser::OrderExprContext* ClickHouseParser::OrderExprListContext::orderExpr(size_t i) {
-  return getRuleContext<ClickHouseParser::OrderExprContext>(i);
-}
-
-std::vector<tree::TerminalNode *> ClickHouseParser::OrderExprListContext::COMMA() {
-  return getTokens(ClickHouseParser::COMMA);
-}
-
-tree::TerminalNode* ClickHouseParser::OrderExprListContext::COMMA(size_t i) {
-  return getToken(ClickHouseParser::COMMA, i);
-}
-
-
-size_t ClickHouseParser::OrderExprListContext::getRuleIndex() const {
-  return ClickHouseParser::RuleOrderExprList;
-}
-
-antlrcpp::Any ClickHouseParser::OrderExprListContext::accept(tree::ParseTreeVisitor *visitor) {
-  if (auto parserVisitor = dynamic_cast<ClickHouseParserVisitor*>(visitor))
-    return parserVisitor->visitOrderExprList(this);
-  else
-    return visitor->visitChildren(this);
-}
-
-ClickHouseParser::OrderExprListContext* ClickHouseParser::orderExprList() {
-  OrderExprListContext *_localctx = _tracker.createInstance<OrderExprListContext>(_ctx, getState());
-  enterRule(_localctx, 146, ClickHouseParser::RuleOrderExprList);
-
-  auto onExit = finally([=] {
-    exitRule();
-  });
-  try {
-    size_t alt;
-    enterOuterAlt(_localctx, 1);
-    setState(1369);
-    orderExpr();
-    setState(1374);
-    _errHandler->sync(this);
-    alt = getInterpreter<atn::ParserATNSimulator>()->adaptivePredict(_input, 184, _ctx);
-    while (alt != 2 && alt != atn::ATN::INVALID_ALT_NUMBER) {
-      if (alt == 1) {
-        setState(1370);
-        match(ClickHouseParser::COMMA);
-        setState(1371);
-        orderExpr();
-      }
-      setState(1376);
-      _errHandler->sync(this);
-      alt = getInterpreter<atn::ParserATNSimulator>()->adaptivePredict(_input, 184, _ctx);
-    }
-
-  }
-  catch (RecognitionException &e) {
-    _errHandler->reportError(this, e);
-    _localctx->exception = std::current_exception();
-    _errHandler->recover(this, _localctx->exception);
-  }
-
-  return _localctx;
-}
-
-//----------------- OrderExprContext ------------------------------------------------------------------
-
-ClickHouseParser::OrderExprContext::OrderExprContext(ParserRuleContext *parent, size_t invokingState)
-  : ParserRuleContext(parent, invokingState) {
-}
-
-ClickHouseParser::ColumnExprContext* ClickHouseParser::OrderExprContext::columnExpr() {
-  return getRuleContext<ClickHouseParser::ColumnExprContext>(0);
-}
-
-tree::TerminalNode* ClickHouseParser::OrderExprContext::NULLS() {
-  return getToken(ClickHouseParser::NULLS, 0);
-}
-
-tree::TerminalNode* ClickHouseParser::OrderExprContext::COLLATE() {
-  return getToken(ClickHouseParser::COLLATE, 0);
-}
-
-tree::TerminalNode* ClickHouseParser::OrderExprContext::STRING_LITERAL() {
-  return getToken(ClickHouseParser::STRING_LITERAL, 0);
-}
-
-tree::TerminalNode* ClickHouseParser::OrderExprContext::ASCENDING() {
-  return getToken(ClickHouseParser::ASCENDING, 0);
-}
-
-tree::TerminalNode* ClickHouseParser::OrderExprContext::DESCENDING() {
-  return getToken(ClickHouseParser::DESCENDING, 0);
-}
-
-tree::TerminalNode* ClickHouseParser::OrderExprContext::DESC() {
-  return getToken(ClickHouseParser::DESC, 0);
-}
-
-tree::TerminalNode* ClickHouseParser::OrderExprContext::FIRST() {
-  return getToken(ClickHouseParser::FIRST, 0);
-}
-
-tree::TerminalNode* ClickHouseParser::OrderExprContext::LAST() {
-  return getToken(ClickHouseParser::LAST, 0);
-}
-
-
-size_t ClickHouseParser::OrderExprContext::getRuleIndex() const {
-  return ClickHouseParser::RuleOrderExpr;
-}
-
-antlrcpp::Any ClickHouseParser::OrderExprContext::accept(tree::ParseTreeVisitor *visitor) {
-  if (auto parserVisitor = dynamic_cast<ClickHouseParserVisitor*>(visitor))
-    return parserVisitor->visitOrderExpr(this);
-  else
-    return visitor->visitChildren(this);
-}
-
-ClickHouseParser::OrderExprContext* ClickHouseParser::orderExpr() {
-  OrderExprContext *_localctx = _tracker.createInstance<OrderExprContext>(_ctx, getState());
-  enterRule(_localctx, 148, ClickHouseParser::RuleOrderExpr);
-  size_t _la = 0;
-
-  auto onExit = finally([=] {
-    exitRule();
-  });
-  try {
-    enterOuterAlt(_localctx, 1);
-    setState(1377);
-    columnExpr(0);
-    setState(1379);
-    _errHandler->sync(this);
-
-    switch (getInterpreter<atn::ParserATNSimulator>()->adaptivePredict(_input, 185, _ctx)) {
-    case 1: {
-      setState(1378);
-      _la = _input->LA(1);
-      if (!((((_la & ~ 0x3fULL) == 0) &&
<< ClickHouseParser::ASCENDING) - | (1ULL << ClickHouseParser::DESC) - | (1ULL << ClickHouseParser::DESCENDING))) != 0))) { - _errHandler->recoverInline(this); - } - else { - _errHandler->reportMatch(this); - consume(); - } - break; - } - - } - setState(1383); - _errHandler->sync(this); - - switch (getInterpreter()->adaptivePredict(_input, 186, _ctx)) { - case 1: { - setState(1381); - match(ClickHouseParser::NULLS); - setState(1382); - _la = _input->LA(1); - if (!(_la == ClickHouseParser::FIRST - - || _la == ClickHouseParser::LAST)) { - _errHandler->recoverInline(this); - } - else { - _errHandler->reportMatch(this); - consume(); - } - break; - } - - } - setState(1387); - _errHandler->sync(this); - - switch (getInterpreter()->adaptivePredict(_input, 187, _ctx)) { - case 1: { - setState(1385); - match(ClickHouseParser::COLLATE); - setState(1386); - match(ClickHouseParser::STRING_LITERAL); - break; - } - - } - - } - catch (RecognitionException &e) { - _errHandler->reportError(this, e); - _localctx->exception = std::current_exception(); - _errHandler->recover(this, _localctx->exception); - } - - return _localctx; -} - -//----------------- RatioExprContext ------------------------------------------------------------------ - -ClickHouseParser::RatioExprContext::RatioExprContext(ParserRuleContext *parent, size_t invokingState) - : ParserRuleContext(parent, invokingState) { -} - -std::vector ClickHouseParser::RatioExprContext::numberLiteral() { - return getRuleContexts(); -} - -ClickHouseParser::NumberLiteralContext* ClickHouseParser::RatioExprContext::numberLiteral(size_t i) { - return getRuleContext(i); -} - -tree::TerminalNode* ClickHouseParser::RatioExprContext::SLASH() { - return getToken(ClickHouseParser::SLASH, 0); -} - - -size_t ClickHouseParser::RatioExprContext::getRuleIndex() const { - return ClickHouseParser::RuleRatioExpr; -} - -antlrcpp::Any ClickHouseParser::RatioExprContext::accept(tree::ParseTreeVisitor *visitor) { - if (auto parserVisitor = dynamic_cast(visitor)) - return parserVisitor->visitRatioExpr(this); - else - return visitor->visitChildren(this); -} - -ClickHouseParser::RatioExprContext* ClickHouseParser::ratioExpr() { - RatioExprContext *_localctx = _tracker.createInstance(_ctx, getState()); - enterRule(_localctx, 150, ClickHouseParser::RuleRatioExpr); - - auto onExit = finally([=] { - exitRule(); - }); - try { - enterOuterAlt(_localctx, 1); - setState(1389); - numberLiteral(); - setState(1392); - _errHandler->sync(this); - - switch (getInterpreter()->adaptivePredict(_input, 188, _ctx)) { - case 1: { - setState(1390); - match(ClickHouseParser::SLASH); - setState(1391); - numberLiteral(); - break; - } - - } - - } - catch (RecognitionException &e) { - _errHandler->reportError(this, e); - _localctx->exception = std::current_exception(); - _errHandler->recover(this, _localctx->exception); - } - - return _localctx; -} - -//----------------- SettingExprListContext ------------------------------------------------------------------ - -ClickHouseParser::SettingExprListContext::SettingExprListContext(ParserRuleContext *parent, size_t invokingState) - : ParserRuleContext(parent, invokingState) { -} - -std::vector ClickHouseParser::SettingExprListContext::settingExpr() { - return getRuleContexts(); -} - -ClickHouseParser::SettingExprContext* ClickHouseParser::SettingExprListContext::settingExpr(size_t i) { - return getRuleContext(i); -} - -std::vector ClickHouseParser::SettingExprListContext::COMMA() { - return getTokens(ClickHouseParser::COMMA); -} - -tree::TerminalNode* 
ClickHouseParser::SettingExprListContext::COMMA(size_t i) { - return getToken(ClickHouseParser::COMMA, i); -} - - -size_t ClickHouseParser::SettingExprListContext::getRuleIndex() const { - return ClickHouseParser::RuleSettingExprList; -} - -antlrcpp::Any ClickHouseParser::SettingExprListContext::accept(tree::ParseTreeVisitor *visitor) { - if (auto parserVisitor = dynamic_cast(visitor)) - return parserVisitor->visitSettingExprList(this); - else - return visitor->visitChildren(this); -} - -ClickHouseParser::SettingExprListContext* ClickHouseParser::settingExprList() { - SettingExprListContext *_localctx = _tracker.createInstance(_ctx, getState()); - enterRule(_localctx, 152, ClickHouseParser::RuleSettingExprList); - - auto onExit = finally([=] { - exitRule(); - }); - try { - size_t alt; - enterOuterAlt(_localctx, 1); - setState(1394); - settingExpr(); - setState(1399); - _errHandler->sync(this); - alt = getInterpreter()->adaptivePredict(_input, 189, _ctx); - while (alt != 2 && alt != atn::ATN::INVALID_ALT_NUMBER) { - if (alt == 1) { - setState(1395); - match(ClickHouseParser::COMMA); - setState(1396); - settingExpr(); - } - setState(1401); - _errHandler->sync(this); - alt = getInterpreter()->adaptivePredict(_input, 189, _ctx); - } - - } - catch (RecognitionException &e) { - _errHandler->reportError(this, e); - _localctx->exception = std::current_exception(); - _errHandler->recover(this, _localctx->exception); - } - - return _localctx; -} - -//----------------- SettingExprContext ------------------------------------------------------------------ - -ClickHouseParser::SettingExprContext::SettingExprContext(ParserRuleContext *parent, size_t invokingState) - : ParserRuleContext(parent, invokingState) { -} - -ClickHouseParser::IdentifierContext* ClickHouseParser::SettingExprContext::identifier() { - return getRuleContext(0); -} - -tree::TerminalNode* ClickHouseParser::SettingExprContext::EQ_SINGLE() { - return getToken(ClickHouseParser::EQ_SINGLE, 0); -} - -ClickHouseParser::LiteralContext* ClickHouseParser::SettingExprContext::literal() { - return getRuleContext(0); -} - - -size_t ClickHouseParser::SettingExprContext::getRuleIndex() const { - return ClickHouseParser::RuleSettingExpr; -} - -antlrcpp::Any ClickHouseParser::SettingExprContext::accept(tree::ParseTreeVisitor *visitor) { - if (auto parserVisitor = dynamic_cast(visitor)) - return parserVisitor->visitSettingExpr(this); - else - return visitor->visitChildren(this); -} - -ClickHouseParser::SettingExprContext* ClickHouseParser::settingExpr() { - SettingExprContext *_localctx = _tracker.createInstance(_ctx, getState()); - enterRule(_localctx, 154, ClickHouseParser::RuleSettingExpr); - - auto onExit = finally([=] { - exitRule(); - }); - try { - enterOuterAlt(_localctx, 1); - setState(1402); - identifier(); - setState(1403); - match(ClickHouseParser::EQ_SINGLE); - setState(1404); - literal(); - - } - catch (RecognitionException &e) { - _errHandler->reportError(this, e); - _localctx->exception = std::current_exception(); - _errHandler->recover(this, _localctx->exception); - } - - return _localctx; -} - -//----------------- SetStmtContext ------------------------------------------------------------------ - -ClickHouseParser::SetStmtContext::SetStmtContext(ParserRuleContext *parent, size_t invokingState) - : ParserRuleContext(parent, invokingState) { -} - -tree::TerminalNode* ClickHouseParser::SetStmtContext::SET() { - return getToken(ClickHouseParser::SET, 0); -} - -ClickHouseParser::SettingExprListContext* 
ClickHouseParser::SetStmtContext::settingExprList() { - return getRuleContext(0); -} - - -size_t ClickHouseParser::SetStmtContext::getRuleIndex() const { - return ClickHouseParser::RuleSetStmt; -} - -antlrcpp::Any ClickHouseParser::SetStmtContext::accept(tree::ParseTreeVisitor *visitor) { - if (auto parserVisitor = dynamic_cast(visitor)) - return parserVisitor->visitSetStmt(this); - else - return visitor->visitChildren(this); -} - -ClickHouseParser::SetStmtContext* ClickHouseParser::setStmt() { - SetStmtContext *_localctx = _tracker.createInstance(_ctx, getState()); - enterRule(_localctx, 156, ClickHouseParser::RuleSetStmt); - - auto onExit = finally([=] { - exitRule(); - }); - try { - enterOuterAlt(_localctx, 1); - setState(1406); - match(ClickHouseParser::SET); - setState(1407); - settingExprList(); - - } - catch (RecognitionException &e) { - _errHandler->reportError(this, e); - _localctx->exception = std::current_exception(); - _errHandler->recover(this, _localctx->exception); - } - - return _localctx; -} - -//----------------- ShowStmtContext ------------------------------------------------------------------ - -ClickHouseParser::ShowStmtContext::ShowStmtContext(ParserRuleContext *parent, size_t invokingState) - : ParserRuleContext(parent, invokingState) { -} - - -size_t ClickHouseParser::ShowStmtContext::getRuleIndex() const { - return ClickHouseParser::RuleShowStmt; -} - -void ClickHouseParser::ShowStmtContext::copyFrom(ShowStmtContext *ctx) { - ParserRuleContext::copyFrom(ctx); -} - -//----------------- ShowCreateDatabaseStmtContext ------------------------------------------------------------------ - -tree::TerminalNode* ClickHouseParser::ShowCreateDatabaseStmtContext::SHOW() { - return getToken(ClickHouseParser::SHOW, 0); -} - -tree::TerminalNode* ClickHouseParser::ShowCreateDatabaseStmtContext::CREATE() { - return getToken(ClickHouseParser::CREATE, 0); -} - -tree::TerminalNode* ClickHouseParser::ShowCreateDatabaseStmtContext::DATABASE() { - return getToken(ClickHouseParser::DATABASE, 0); -} - -ClickHouseParser::DatabaseIdentifierContext* ClickHouseParser::ShowCreateDatabaseStmtContext::databaseIdentifier() { - return getRuleContext(0); -} - -ClickHouseParser::ShowCreateDatabaseStmtContext::ShowCreateDatabaseStmtContext(ShowStmtContext *ctx) { copyFrom(ctx); } - -antlrcpp::Any ClickHouseParser::ShowCreateDatabaseStmtContext::accept(tree::ParseTreeVisitor *visitor) { - if (auto parserVisitor = dynamic_cast(visitor)) - return parserVisitor->visitShowCreateDatabaseStmt(this); - else - return visitor->visitChildren(this); -} -//----------------- ShowDatabasesStmtContext ------------------------------------------------------------------ - -tree::TerminalNode* ClickHouseParser::ShowDatabasesStmtContext::SHOW() { - return getToken(ClickHouseParser::SHOW, 0); -} - -tree::TerminalNode* ClickHouseParser::ShowDatabasesStmtContext::DATABASES() { - return getToken(ClickHouseParser::DATABASES, 0); -} - -ClickHouseParser::ShowDatabasesStmtContext::ShowDatabasesStmtContext(ShowStmtContext *ctx) { copyFrom(ctx); } - -antlrcpp::Any ClickHouseParser::ShowDatabasesStmtContext::accept(tree::ParseTreeVisitor *visitor) { - if (auto parserVisitor = dynamic_cast(visitor)) - return parserVisitor->visitShowDatabasesStmt(this); - else - return visitor->visitChildren(this); -} -//----------------- ShowCreateTableStmtContext ------------------------------------------------------------------ - -tree::TerminalNode* ClickHouseParser::ShowCreateTableStmtContext::SHOW() { - return getToken(ClickHouseParser::SHOW, 0); 
-} - -tree::TerminalNode* ClickHouseParser::ShowCreateTableStmtContext::CREATE() { - return getToken(ClickHouseParser::CREATE, 0); -} - -ClickHouseParser::TableIdentifierContext* ClickHouseParser::ShowCreateTableStmtContext::tableIdentifier() { - return getRuleContext(0); -} - -tree::TerminalNode* ClickHouseParser::ShowCreateTableStmtContext::TEMPORARY() { - return getToken(ClickHouseParser::TEMPORARY, 0); -} - -tree::TerminalNode* ClickHouseParser::ShowCreateTableStmtContext::TABLE() { - return getToken(ClickHouseParser::TABLE, 0); -} - -ClickHouseParser::ShowCreateTableStmtContext::ShowCreateTableStmtContext(ShowStmtContext *ctx) { copyFrom(ctx); } - -antlrcpp::Any ClickHouseParser::ShowCreateTableStmtContext::accept(tree::ParseTreeVisitor *visitor) { - if (auto parserVisitor = dynamic_cast(visitor)) - return parserVisitor->visitShowCreateTableStmt(this); - else - return visitor->visitChildren(this); -} -//----------------- ShowTablesStmtContext ------------------------------------------------------------------ - -tree::TerminalNode* ClickHouseParser::ShowTablesStmtContext::SHOW() { - return getToken(ClickHouseParser::SHOW, 0); -} - -tree::TerminalNode* ClickHouseParser::ShowTablesStmtContext::TABLES() { - return getToken(ClickHouseParser::TABLES, 0); -} - -tree::TerminalNode* ClickHouseParser::ShowTablesStmtContext::TEMPORARY() { - return getToken(ClickHouseParser::TEMPORARY, 0); -} - -ClickHouseParser::DatabaseIdentifierContext* ClickHouseParser::ShowTablesStmtContext::databaseIdentifier() { - return getRuleContext(0); -} - -tree::TerminalNode* ClickHouseParser::ShowTablesStmtContext::LIKE() { - return getToken(ClickHouseParser::LIKE, 0); -} - -tree::TerminalNode* ClickHouseParser::ShowTablesStmtContext::STRING_LITERAL() { - return getToken(ClickHouseParser::STRING_LITERAL, 0); -} - -ClickHouseParser::WhereClauseContext* ClickHouseParser::ShowTablesStmtContext::whereClause() { - return getRuleContext(0); -} - -ClickHouseParser::LimitClauseContext* ClickHouseParser::ShowTablesStmtContext::limitClause() { - return getRuleContext(0); -} - -tree::TerminalNode* ClickHouseParser::ShowTablesStmtContext::FROM() { - return getToken(ClickHouseParser::FROM, 0); -} - -tree::TerminalNode* ClickHouseParser::ShowTablesStmtContext::IN() { - return getToken(ClickHouseParser::IN, 0); -} - -ClickHouseParser::ShowTablesStmtContext::ShowTablesStmtContext(ShowStmtContext *ctx) { copyFrom(ctx); } - -antlrcpp::Any ClickHouseParser::ShowTablesStmtContext::accept(tree::ParseTreeVisitor *visitor) { - if (auto parserVisitor = dynamic_cast(visitor)) - return parserVisitor->visitShowTablesStmt(this); - else - return visitor->visitChildren(this); -} -//----------------- ShowDictionariesStmtContext ------------------------------------------------------------------ - -tree::TerminalNode* ClickHouseParser::ShowDictionariesStmtContext::SHOW() { - return getToken(ClickHouseParser::SHOW, 0); -} - -tree::TerminalNode* ClickHouseParser::ShowDictionariesStmtContext::DICTIONARIES() { - return getToken(ClickHouseParser::DICTIONARIES, 0); -} - -tree::TerminalNode* ClickHouseParser::ShowDictionariesStmtContext::FROM() { - return getToken(ClickHouseParser::FROM, 0); -} - -ClickHouseParser::DatabaseIdentifierContext* ClickHouseParser::ShowDictionariesStmtContext::databaseIdentifier() { - return getRuleContext(0); -} - -ClickHouseParser::ShowDictionariesStmtContext::ShowDictionariesStmtContext(ShowStmtContext *ctx) { copyFrom(ctx); } - -antlrcpp::Any ClickHouseParser::ShowDictionariesStmtContext::accept(tree::ParseTreeVisitor 
*visitor) { - if (auto parserVisitor = dynamic_cast(visitor)) - return parserVisitor->visitShowDictionariesStmt(this); - else - return visitor->visitChildren(this); -} -//----------------- ShowCreateDictionaryStmtContext ------------------------------------------------------------------ - -tree::TerminalNode* ClickHouseParser::ShowCreateDictionaryStmtContext::SHOW() { - return getToken(ClickHouseParser::SHOW, 0); -} - -tree::TerminalNode* ClickHouseParser::ShowCreateDictionaryStmtContext::CREATE() { - return getToken(ClickHouseParser::CREATE, 0); -} - -tree::TerminalNode* ClickHouseParser::ShowCreateDictionaryStmtContext::DICTIONARY() { - return getToken(ClickHouseParser::DICTIONARY, 0); -} - -ClickHouseParser::TableIdentifierContext* ClickHouseParser::ShowCreateDictionaryStmtContext::tableIdentifier() { - return getRuleContext(0); -} - -ClickHouseParser::ShowCreateDictionaryStmtContext::ShowCreateDictionaryStmtContext(ShowStmtContext *ctx) { copyFrom(ctx); } - -antlrcpp::Any ClickHouseParser::ShowCreateDictionaryStmtContext::accept(tree::ParseTreeVisitor *visitor) { - if (auto parserVisitor = dynamic_cast(visitor)) - return parserVisitor->visitShowCreateDictionaryStmt(this); - else - return visitor->visitChildren(this); -} -ClickHouseParser::ShowStmtContext* ClickHouseParser::showStmt() { - ShowStmtContext *_localctx = _tracker.createInstance(_ctx, getState()); - enterRule(_localctx, 158, ClickHouseParser::RuleShowStmt); - size_t _la = 0; - - auto onExit = finally([=] { - exitRule(); - }); - try { - setState(1451); - _errHandler->sync(this); - switch (getInterpreter()->adaptivePredict(_input, 197, _ctx)) { - case 1: { - _localctx = dynamic_cast(_tracker.createInstance(_localctx)); - enterOuterAlt(_localctx, 1); - setState(1409); - match(ClickHouseParser::SHOW); - setState(1410); - match(ClickHouseParser::CREATE); - setState(1411); - match(ClickHouseParser::DATABASE); - setState(1412); - databaseIdentifier(); - break; - } - - case 2: { - _localctx = dynamic_cast(_tracker.createInstance(_localctx)); - enterOuterAlt(_localctx, 2); - setState(1413); - match(ClickHouseParser::SHOW); - setState(1414); - match(ClickHouseParser::CREATE); - setState(1415); - match(ClickHouseParser::DICTIONARY); - setState(1416); - tableIdentifier(); - break; - } - - case 3: { - _localctx = dynamic_cast(_tracker.createInstance(_localctx)); - enterOuterAlt(_localctx, 3); - setState(1417); - match(ClickHouseParser::SHOW); - setState(1418); - match(ClickHouseParser::CREATE); - setState(1420); - _errHandler->sync(this); - - switch (getInterpreter()->adaptivePredict(_input, 190, _ctx)) { - case 1: { - setState(1419); - match(ClickHouseParser::TEMPORARY); - break; - } - - } - setState(1423); - _errHandler->sync(this); - - switch (getInterpreter()->adaptivePredict(_input, 191, _ctx)) { - case 1: { - setState(1422); - match(ClickHouseParser::TABLE); - break; - } - - } - setState(1425); - tableIdentifier(); - break; - } - - case 4: { - _localctx = dynamic_cast(_tracker.createInstance(_localctx)); - enterOuterAlt(_localctx, 4); - setState(1426); - match(ClickHouseParser::SHOW); - setState(1427); - match(ClickHouseParser::DATABASES); - break; - } - - case 5: { - _localctx = dynamic_cast(_tracker.createInstance(_localctx)); - enterOuterAlt(_localctx, 5); - setState(1428); - match(ClickHouseParser::SHOW); - setState(1429); - match(ClickHouseParser::DICTIONARIES); - setState(1432); - _errHandler->sync(this); - - _la = _input->LA(1); - if (_la == ClickHouseParser::FROM) { - setState(1430); - match(ClickHouseParser::FROM); - 
setState(1431); - databaseIdentifier(); - } - break; - } - - case 6: { - _localctx = dynamic_cast(_tracker.createInstance(_localctx)); - enterOuterAlt(_localctx, 6); - setState(1434); - match(ClickHouseParser::SHOW); - setState(1436); - _errHandler->sync(this); - - _la = _input->LA(1); - if (_la == ClickHouseParser::TEMPORARY) { - setState(1435); - match(ClickHouseParser::TEMPORARY); - } - setState(1438); - match(ClickHouseParser::TABLES); - setState(1441); - _errHandler->sync(this); - - _la = _input->LA(1); - if (_la == ClickHouseParser::FROM - - || _la == ClickHouseParser::IN) { - setState(1439); - _la = _input->LA(1); - if (!(_la == ClickHouseParser::FROM - - || _la == ClickHouseParser::IN)) { - _errHandler->recoverInline(this); - } - else { - _errHandler->reportMatch(this); - consume(); - } - setState(1440); - databaseIdentifier(); - } - setState(1446); - _errHandler->sync(this); - switch (_input->LA(1)) { - case ClickHouseParser::LIKE: { - setState(1443); - match(ClickHouseParser::LIKE); - setState(1444); - match(ClickHouseParser::STRING_LITERAL); - break; - } - - case ClickHouseParser::WHERE: { - setState(1445); - whereClause(); - break; - } - - case ClickHouseParser::EOF: - case ClickHouseParser::FORMAT: - case ClickHouseParser::INTO: - case ClickHouseParser::LIMIT: - case ClickHouseParser::SEMICOLON: { - break; - } - - default: - break; - } - setState(1449); - _errHandler->sync(this); - - _la = _input->LA(1); - if (_la == ClickHouseParser::LIMIT) { - setState(1448); - limitClause(); - } - break; - } - - } - - } - catch (RecognitionException &e) { - _errHandler->reportError(this, e); - _localctx->exception = std::current_exception(); - _errHandler->recover(this, _localctx->exception); - } - - return _localctx; -} - -//----------------- SystemStmtContext ------------------------------------------------------------------ - -ClickHouseParser::SystemStmtContext::SystemStmtContext(ParserRuleContext *parent, size_t invokingState) - : ParserRuleContext(parent, invokingState) { -} - -tree::TerminalNode* ClickHouseParser::SystemStmtContext::SYSTEM() { - return getToken(ClickHouseParser::SYSTEM, 0); -} - -tree::TerminalNode* ClickHouseParser::SystemStmtContext::FLUSH() { - return getToken(ClickHouseParser::FLUSH, 0); -} - -tree::TerminalNode* ClickHouseParser::SystemStmtContext::DISTRIBUTED() { - return getToken(ClickHouseParser::DISTRIBUTED, 0); -} - -ClickHouseParser::TableIdentifierContext* ClickHouseParser::SystemStmtContext::tableIdentifier() { - return getRuleContext(0); -} - -tree::TerminalNode* ClickHouseParser::SystemStmtContext::LOGS() { - return getToken(ClickHouseParser::LOGS, 0); -} - -tree::TerminalNode* ClickHouseParser::SystemStmtContext::RELOAD() { - return getToken(ClickHouseParser::RELOAD, 0); -} - -tree::TerminalNode* ClickHouseParser::SystemStmtContext::DICTIONARIES() { - return getToken(ClickHouseParser::DICTIONARIES, 0); -} - -tree::TerminalNode* ClickHouseParser::SystemStmtContext::DICTIONARY() { - return getToken(ClickHouseParser::DICTIONARY, 0); -} - -tree::TerminalNode* ClickHouseParser::SystemStmtContext::START() { - return getToken(ClickHouseParser::START, 0); -} - -tree::TerminalNode* ClickHouseParser::SystemStmtContext::STOP() { - return getToken(ClickHouseParser::STOP, 0); -} - -tree::TerminalNode* ClickHouseParser::SystemStmtContext::SENDS() { - return getToken(ClickHouseParser::SENDS, 0); -} - -tree::TerminalNode* ClickHouseParser::SystemStmtContext::FETCHES() { - return getToken(ClickHouseParser::FETCHES, 0); -} - -tree::TerminalNode* 
ClickHouseParser::SystemStmtContext::MERGES() { - return getToken(ClickHouseParser::MERGES, 0); -} - -tree::TerminalNode* ClickHouseParser::SystemStmtContext::TTL() { - return getToken(ClickHouseParser::TTL, 0); -} - -tree::TerminalNode* ClickHouseParser::SystemStmtContext::REPLICATED() { - return getToken(ClickHouseParser::REPLICATED, 0); -} - -tree::TerminalNode* ClickHouseParser::SystemStmtContext::SYNC() { - return getToken(ClickHouseParser::SYNC, 0); -} - -tree::TerminalNode* ClickHouseParser::SystemStmtContext::REPLICA() { - return getToken(ClickHouseParser::REPLICA, 0); -} - - -size_t ClickHouseParser::SystemStmtContext::getRuleIndex() const { - return ClickHouseParser::RuleSystemStmt; -} - -antlrcpp::Any ClickHouseParser::SystemStmtContext::accept(tree::ParseTreeVisitor *visitor) { - if (auto parserVisitor = dynamic_cast(visitor)) - return parserVisitor->visitSystemStmt(this); - else - return visitor->visitChildren(this); -} - -ClickHouseParser::SystemStmtContext* ClickHouseParser::systemStmt() { - SystemStmtContext *_localctx = _tracker.createInstance(_ctx, getState()); - enterRule(_localctx, 160, ClickHouseParser::RuleSystemStmt); - size_t _la = 0; - - auto onExit = finally([=] { - exitRule(); - }); - try { - setState(1487); - _errHandler->sync(this); - switch (getInterpreter()->adaptivePredict(_input, 200, _ctx)) { - case 1: { - enterOuterAlt(_localctx, 1); - setState(1453); - match(ClickHouseParser::SYSTEM); - setState(1454); - match(ClickHouseParser::FLUSH); - setState(1455); - match(ClickHouseParser::DISTRIBUTED); - setState(1456); - tableIdentifier(); - break; - } - - case 2: { - enterOuterAlt(_localctx, 2); - setState(1457); - match(ClickHouseParser::SYSTEM); - setState(1458); - match(ClickHouseParser::FLUSH); - setState(1459); - match(ClickHouseParser::LOGS); - break; - } - - case 3: { - enterOuterAlt(_localctx, 3); - setState(1460); - match(ClickHouseParser::SYSTEM); - setState(1461); - match(ClickHouseParser::RELOAD); - setState(1462); - match(ClickHouseParser::DICTIONARIES); - break; - } - - case 4: { - enterOuterAlt(_localctx, 4); - setState(1463); - match(ClickHouseParser::SYSTEM); - setState(1464); - match(ClickHouseParser::RELOAD); - setState(1465); - match(ClickHouseParser::DICTIONARY); - setState(1466); - tableIdentifier(); - break; - } - - case 5: { - enterOuterAlt(_localctx, 5); - setState(1467); - match(ClickHouseParser::SYSTEM); - setState(1468); - _la = _input->LA(1); - if (!(_la == ClickHouseParser::START - - || _la == ClickHouseParser::STOP)) { - _errHandler->recoverInline(this); - } - else { - _errHandler->reportMatch(this); - consume(); - } - setState(1476); - _errHandler->sync(this); - switch (_input->LA(1)) { - case ClickHouseParser::DISTRIBUTED: { - setState(1469); - match(ClickHouseParser::DISTRIBUTED); - setState(1470); - match(ClickHouseParser::SENDS); - break; - } - - case ClickHouseParser::FETCHES: { - setState(1471); - match(ClickHouseParser::FETCHES); - break; - } - - case ClickHouseParser::MERGES: - case ClickHouseParser::TTL: { - setState(1473); - _errHandler->sync(this); - - _la = _input->LA(1); - if (_la == ClickHouseParser::TTL) { - setState(1472); - match(ClickHouseParser::TTL); - } - setState(1475); - match(ClickHouseParser::MERGES); - break; - } - - default: - throw NoViableAltException(this); - } - setState(1478); - tableIdentifier(); - break; - } - - case 6: { - enterOuterAlt(_localctx, 6); - setState(1479); - match(ClickHouseParser::SYSTEM); - setState(1480); - _la = _input->LA(1); - if (!(_la == ClickHouseParser::START - - || _la == 
ClickHouseParser::STOP)) { - _errHandler->recoverInline(this); - } - else { - _errHandler->reportMatch(this); - consume(); - } - setState(1481); - match(ClickHouseParser::REPLICATED); - setState(1482); - match(ClickHouseParser::SENDS); - break; - } - - case 7: { - enterOuterAlt(_localctx, 7); - setState(1483); - match(ClickHouseParser::SYSTEM); - setState(1484); - match(ClickHouseParser::SYNC); - setState(1485); - match(ClickHouseParser::REPLICA); - setState(1486); - tableIdentifier(); - break; - } - - } - - } - catch (RecognitionException &e) { - _errHandler->reportError(this, e); - _localctx->exception = std::current_exception(); - _errHandler->recover(this, _localctx->exception); - } - - return _localctx; -} - -//----------------- TruncateStmtContext ------------------------------------------------------------------ - -ClickHouseParser::TruncateStmtContext::TruncateStmtContext(ParserRuleContext *parent, size_t invokingState) - : ParserRuleContext(parent, invokingState) { -} - -tree::TerminalNode* ClickHouseParser::TruncateStmtContext::TRUNCATE() { - return getToken(ClickHouseParser::TRUNCATE, 0); -} - -ClickHouseParser::TableIdentifierContext* ClickHouseParser::TruncateStmtContext::tableIdentifier() { - return getRuleContext(0); -} - -tree::TerminalNode* ClickHouseParser::TruncateStmtContext::TEMPORARY() { - return getToken(ClickHouseParser::TEMPORARY, 0); -} - -tree::TerminalNode* ClickHouseParser::TruncateStmtContext::TABLE() { - return getToken(ClickHouseParser::TABLE, 0); -} - -tree::TerminalNode* ClickHouseParser::TruncateStmtContext::IF() { - return getToken(ClickHouseParser::IF, 0); -} - -tree::TerminalNode* ClickHouseParser::TruncateStmtContext::EXISTS() { - return getToken(ClickHouseParser::EXISTS, 0); -} - -ClickHouseParser::ClusterClauseContext* ClickHouseParser::TruncateStmtContext::clusterClause() { - return getRuleContext(0); -} - - -size_t ClickHouseParser::TruncateStmtContext::getRuleIndex() const { - return ClickHouseParser::RuleTruncateStmt; -} - -antlrcpp::Any ClickHouseParser::TruncateStmtContext::accept(tree::ParseTreeVisitor *visitor) { - if (auto parserVisitor = dynamic_cast(visitor)) - return parserVisitor->visitTruncateStmt(this); - else - return visitor->visitChildren(this); -} - -ClickHouseParser::TruncateStmtContext* ClickHouseParser::truncateStmt() { - TruncateStmtContext *_localctx = _tracker.createInstance(_ctx, getState()); - enterRule(_localctx, 162, ClickHouseParser::RuleTruncateStmt); - size_t _la = 0; - - auto onExit = finally([=] { - exitRule(); - }); - try { - enterOuterAlt(_localctx, 1); - setState(1489); - match(ClickHouseParser::TRUNCATE); - setState(1491); - _errHandler->sync(this); - - switch (getInterpreter()->adaptivePredict(_input, 201, _ctx)) { - case 1: { - setState(1490); - match(ClickHouseParser::TEMPORARY); - break; - } - - } - setState(1494); - _errHandler->sync(this); - - switch (getInterpreter()->adaptivePredict(_input, 202, _ctx)) { - case 1: { - setState(1493); - match(ClickHouseParser::TABLE); - break; - } - - } - setState(1498); - _errHandler->sync(this); - - switch (getInterpreter()->adaptivePredict(_input, 203, _ctx)) { - case 1: { - setState(1496); - match(ClickHouseParser::IF); - setState(1497); - match(ClickHouseParser::EXISTS); - break; - } - - } - setState(1500); - tableIdentifier(); - setState(1502); - _errHandler->sync(this); - - _la = _input->LA(1); - if (_la == ClickHouseParser::ON) { - setState(1501); - clusterClause(); - } - - } - catch (RecognitionException &e) { - _errHandler->reportError(this, e); - 
_localctx->exception = std::current_exception(); - _errHandler->recover(this, _localctx->exception); - } - - return _localctx; -} - -//----------------- UseStmtContext ------------------------------------------------------------------ - -ClickHouseParser::UseStmtContext::UseStmtContext(ParserRuleContext *parent, size_t invokingState) - : ParserRuleContext(parent, invokingState) { -} - -tree::TerminalNode* ClickHouseParser::UseStmtContext::USE() { - return getToken(ClickHouseParser::USE, 0); -} - -ClickHouseParser::DatabaseIdentifierContext* ClickHouseParser::UseStmtContext::databaseIdentifier() { - return getRuleContext(0); -} - - -size_t ClickHouseParser::UseStmtContext::getRuleIndex() const { - return ClickHouseParser::RuleUseStmt; -} - -antlrcpp::Any ClickHouseParser::UseStmtContext::accept(tree::ParseTreeVisitor *visitor) { - if (auto parserVisitor = dynamic_cast(visitor)) - return parserVisitor->visitUseStmt(this); - else - return visitor->visitChildren(this); -} - -ClickHouseParser::UseStmtContext* ClickHouseParser::useStmt() { - UseStmtContext *_localctx = _tracker.createInstance(_ctx, getState()); - enterRule(_localctx, 164, ClickHouseParser::RuleUseStmt); - - auto onExit = finally([=] { - exitRule(); - }); - try { - enterOuterAlt(_localctx, 1); - setState(1504); - match(ClickHouseParser::USE); - setState(1505); - databaseIdentifier(); - - } - catch (RecognitionException &e) { - _errHandler->reportError(this, e); - _localctx->exception = std::current_exception(); - _errHandler->recover(this, _localctx->exception); - } - - return _localctx; -} - -//----------------- WatchStmtContext ------------------------------------------------------------------ - -ClickHouseParser::WatchStmtContext::WatchStmtContext(ParserRuleContext *parent, size_t invokingState) - : ParserRuleContext(parent, invokingState) { -} - -tree::TerminalNode* ClickHouseParser::WatchStmtContext::WATCH() { - return getToken(ClickHouseParser::WATCH, 0); -} - -ClickHouseParser::TableIdentifierContext* ClickHouseParser::WatchStmtContext::tableIdentifier() { - return getRuleContext(0); -} - -tree::TerminalNode* ClickHouseParser::WatchStmtContext::EVENTS() { - return getToken(ClickHouseParser::EVENTS, 0); -} - -tree::TerminalNode* ClickHouseParser::WatchStmtContext::LIMIT() { - return getToken(ClickHouseParser::LIMIT, 0); -} - -tree::TerminalNode* ClickHouseParser::WatchStmtContext::DECIMAL_LITERAL() { - return getToken(ClickHouseParser::DECIMAL_LITERAL, 0); -} - - -size_t ClickHouseParser::WatchStmtContext::getRuleIndex() const { - return ClickHouseParser::RuleWatchStmt; -} - -antlrcpp::Any ClickHouseParser::WatchStmtContext::accept(tree::ParseTreeVisitor *visitor) { - if (auto parserVisitor = dynamic_cast(visitor)) - return parserVisitor->visitWatchStmt(this); - else - return visitor->visitChildren(this); -} - -ClickHouseParser::WatchStmtContext* ClickHouseParser::watchStmt() { - WatchStmtContext *_localctx = _tracker.createInstance(_ctx, getState()); - enterRule(_localctx, 166, ClickHouseParser::RuleWatchStmt); - size_t _la = 0; - - auto onExit = finally([=] { - exitRule(); - }); - try { - enterOuterAlt(_localctx, 1); - setState(1507); - match(ClickHouseParser::WATCH); - setState(1508); - tableIdentifier(); - setState(1510); - _errHandler->sync(this); - - _la = _input->LA(1); - if (_la == ClickHouseParser::EVENTS) { - setState(1509); - match(ClickHouseParser::EVENTS); - } - setState(1514); - _errHandler->sync(this); - - _la = _input->LA(1); - if (_la == ClickHouseParser::LIMIT) { - setState(1512); - 
match(ClickHouseParser::LIMIT); - setState(1513); - match(ClickHouseParser::DECIMAL_LITERAL); - } - - } - catch (RecognitionException &e) { - _errHandler->reportError(this, e); - _localctx->exception = std::current_exception(); - _errHandler->recover(this, _localctx->exception); - } - - return _localctx; -} - -//----------------- ColumnTypeExprContext ------------------------------------------------------------------ - -ClickHouseParser::ColumnTypeExprContext::ColumnTypeExprContext(ParserRuleContext *parent, size_t invokingState) - : ParserRuleContext(parent, invokingState) { -} - - -size_t ClickHouseParser::ColumnTypeExprContext::getRuleIndex() const { - return ClickHouseParser::RuleColumnTypeExpr; -} - -void ClickHouseParser::ColumnTypeExprContext::copyFrom(ColumnTypeExprContext *ctx) { - ParserRuleContext::copyFrom(ctx); -} - -//----------------- ColumnTypeExprNestedContext ------------------------------------------------------------------ - -std::vector ClickHouseParser::ColumnTypeExprNestedContext::identifier() { - return getRuleContexts(); -} - -ClickHouseParser::IdentifierContext* ClickHouseParser::ColumnTypeExprNestedContext::identifier(size_t i) { - return getRuleContext(i); -} - -tree::TerminalNode* ClickHouseParser::ColumnTypeExprNestedContext::LPAREN() { - return getToken(ClickHouseParser::LPAREN, 0); -} - -std::vector ClickHouseParser::ColumnTypeExprNestedContext::columnTypeExpr() { - return getRuleContexts(); -} - -ClickHouseParser::ColumnTypeExprContext* ClickHouseParser::ColumnTypeExprNestedContext::columnTypeExpr(size_t i) { - return getRuleContext(i); -} - -tree::TerminalNode* ClickHouseParser::ColumnTypeExprNestedContext::RPAREN() { - return getToken(ClickHouseParser::RPAREN, 0); -} - -std::vector ClickHouseParser::ColumnTypeExprNestedContext::COMMA() { - return getTokens(ClickHouseParser::COMMA); -} - -tree::TerminalNode* ClickHouseParser::ColumnTypeExprNestedContext::COMMA(size_t i) { - return getToken(ClickHouseParser::COMMA, i); -} - -ClickHouseParser::ColumnTypeExprNestedContext::ColumnTypeExprNestedContext(ColumnTypeExprContext *ctx) { copyFrom(ctx); } - -antlrcpp::Any ClickHouseParser::ColumnTypeExprNestedContext::accept(tree::ParseTreeVisitor *visitor) { - if (auto parserVisitor = dynamic_cast(visitor)) - return parserVisitor->visitColumnTypeExprNested(this); - else - return visitor->visitChildren(this); -} -//----------------- ColumnTypeExprParamContext ------------------------------------------------------------------ - -ClickHouseParser::IdentifierContext* ClickHouseParser::ColumnTypeExprParamContext::identifier() { - return getRuleContext(0); -} - -tree::TerminalNode* ClickHouseParser::ColumnTypeExprParamContext::LPAREN() { - return getToken(ClickHouseParser::LPAREN, 0); -} - -tree::TerminalNode* ClickHouseParser::ColumnTypeExprParamContext::RPAREN() { - return getToken(ClickHouseParser::RPAREN, 0); -} - -ClickHouseParser::ColumnExprListContext* ClickHouseParser::ColumnTypeExprParamContext::columnExprList() { - return getRuleContext(0); -} - -ClickHouseParser::ColumnTypeExprParamContext::ColumnTypeExprParamContext(ColumnTypeExprContext *ctx) { copyFrom(ctx); } - -antlrcpp::Any ClickHouseParser::ColumnTypeExprParamContext::accept(tree::ParseTreeVisitor *visitor) { - if (auto parserVisitor = dynamic_cast(visitor)) - return parserVisitor->visitColumnTypeExprParam(this); - else - return visitor->visitChildren(this); -} -//----------------- ColumnTypeExprSimpleContext ------------------------------------------------------------------ - 
-ClickHouseParser::IdentifierContext* ClickHouseParser::ColumnTypeExprSimpleContext::identifier() { - return getRuleContext(0); -} - -ClickHouseParser::ColumnTypeExprSimpleContext::ColumnTypeExprSimpleContext(ColumnTypeExprContext *ctx) { copyFrom(ctx); } - -antlrcpp::Any ClickHouseParser::ColumnTypeExprSimpleContext::accept(tree::ParseTreeVisitor *visitor) { - if (auto parserVisitor = dynamic_cast(visitor)) - return parserVisitor->visitColumnTypeExprSimple(this); - else - return visitor->visitChildren(this); -} -//----------------- ColumnTypeExprComplexContext ------------------------------------------------------------------ - -ClickHouseParser::IdentifierContext* ClickHouseParser::ColumnTypeExprComplexContext::identifier() { - return getRuleContext(0); -} - -tree::TerminalNode* ClickHouseParser::ColumnTypeExprComplexContext::LPAREN() { - return getToken(ClickHouseParser::LPAREN, 0); -} - -std::vector ClickHouseParser::ColumnTypeExprComplexContext::columnTypeExpr() { - return getRuleContexts(); -} - -ClickHouseParser::ColumnTypeExprContext* ClickHouseParser::ColumnTypeExprComplexContext::columnTypeExpr(size_t i) { - return getRuleContext(i); -} - -tree::TerminalNode* ClickHouseParser::ColumnTypeExprComplexContext::RPAREN() { - return getToken(ClickHouseParser::RPAREN, 0); -} - -std::vector ClickHouseParser::ColumnTypeExprComplexContext::COMMA() { - return getTokens(ClickHouseParser::COMMA); -} - -tree::TerminalNode* ClickHouseParser::ColumnTypeExprComplexContext::COMMA(size_t i) { - return getToken(ClickHouseParser::COMMA, i); -} - -ClickHouseParser::ColumnTypeExprComplexContext::ColumnTypeExprComplexContext(ColumnTypeExprContext *ctx) { copyFrom(ctx); } - -antlrcpp::Any ClickHouseParser::ColumnTypeExprComplexContext::accept(tree::ParseTreeVisitor *visitor) { - if (auto parserVisitor = dynamic_cast(visitor)) - return parserVisitor->visitColumnTypeExprComplex(this); - else - return visitor->visitChildren(this); -} -//----------------- ColumnTypeExprEnumContext ------------------------------------------------------------------ - -ClickHouseParser::IdentifierContext* ClickHouseParser::ColumnTypeExprEnumContext::identifier() { - return getRuleContext(0); -} - -tree::TerminalNode* ClickHouseParser::ColumnTypeExprEnumContext::LPAREN() { - return getToken(ClickHouseParser::LPAREN, 0); -} - -std::vector ClickHouseParser::ColumnTypeExprEnumContext::enumValue() { - return getRuleContexts(); -} - -ClickHouseParser::EnumValueContext* ClickHouseParser::ColumnTypeExprEnumContext::enumValue(size_t i) { - return getRuleContext(i); -} - -tree::TerminalNode* ClickHouseParser::ColumnTypeExprEnumContext::RPAREN() { - return getToken(ClickHouseParser::RPAREN, 0); -} - -std::vector ClickHouseParser::ColumnTypeExprEnumContext::COMMA() { - return getTokens(ClickHouseParser::COMMA); -} - -tree::TerminalNode* ClickHouseParser::ColumnTypeExprEnumContext::COMMA(size_t i) { - return getToken(ClickHouseParser::COMMA, i); -} - -ClickHouseParser::ColumnTypeExprEnumContext::ColumnTypeExprEnumContext(ColumnTypeExprContext *ctx) { copyFrom(ctx); } - -antlrcpp::Any ClickHouseParser::ColumnTypeExprEnumContext::accept(tree::ParseTreeVisitor *visitor) { - if (auto parserVisitor = dynamic_cast(visitor)) - return parserVisitor->visitColumnTypeExprEnum(this); - else - return visitor->visitChildren(this); -} -ClickHouseParser::ColumnTypeExprContext* ClickHouseParser::columnTypeExpr() { - ColumnTypeExprContext *_localctx = _tracker.createInstance(_ctx, getState()); - enterRule(_localctx, 168, ClickHouseParser::RuleColumnTypeExpr); 
- size_t _la = 0; - - auto onExit = finally([=] { - exitRule(); - }); - try { - setState(1563); - _errHandler->sync(this); - switch (getInterpreter()->adaptivePredict(_input, 211, _ctx)) { - case 1: { - _localctx = dynamic_cast(_tracker.createInstance(_localctx)); - enterOuterAlt(_localctx, 1); - setState(1516); - identifier(); - break; - } - - case 2: { - _localctx = dynamic_cast(_tracker.createInstance(_localctx)); - enterOuterAlt(_localctx, 2); - setState(1517); - identifier(); - setState(1518); - match(ClickHouseParser::LPAREN); - setState(1519); - identifier(); - setState(1520); - columnTypeExpr(); - setState(1527); - _errHandler->sync(this); - _la = _input->LA(1); - while (_la == ClickHouseParser::COMMA) { - setState(1521); - match(ClickHouseParser::COMMA); - setState(1522); - identifier(); - setState(1523); - columnTypeExpr(); - setState(1529); - _errHandler->sync(this); - _la = _input->LA(1); - } - setState(1530); - match(ClickHouseParser::RPAREN); - break; - } - - case 3: { - _localctx = dynamic_cast(_tracker.createInstance(_localctx)); - enterOuterAlt(_localctx, 3); - setState(1532); - identifier(); - setState(1533); - match(ClickHouseParser::LPAREN); - setState(1534); - enumValue(); - setState(1539); - _errHandler->sync(this); - _la = _input->LA(1); - while (_la == ClickHouseParser::COMMA) { - setState(1535); - match(ClickHouseParser::COMMA); - setState(1536); - enumValue(); - setState(1541); - _errHandler->sync(this); - _la = _input->LA(1); - } - setState(1542); - match(ClickHouseParser::RPAREN); - break; - } - - case 4: { - _localctx = dynamic_cast(_tracker.createInstance(_localctx)); - enterOuterAlt(_localctx, 4); - setState(1544); - identifier(); - setState(1545); - match(ClickHouseParser::LPAREN); - setState(1546); - columnTypeExpr(); - setState(1551); - _errHandler->sync(this); - _la = _input->LA(1); - while (_la == ClickHouseParser::COMMA) { - setState(1547); - match(ClickHouseParser::COMMA); - setState(1548); - columnTypeExpr(); - setState(1553); - _errHandler->sync(this); - _la = _input->LA(1); - } - setState(1554); - match(ClickHouseParser::RPAREN); - break; - } - - case 5: { - _localctx = dynamic_cast(_tracker.createInstance(_localctx)); - enterOuterAlt(_localctx, 5); - setState(1556); - identifier(); - setState(1557); - match(ClickHouseParser::LPAREN); - setState(1559); - _errHandler->sync(this); - - _la = _input->LA(1); - if ((((_la & ~ 0x3fULL) == 0) && - ((1ULL << _la) & ((1ULL << ClickHouseParser::AFTER) - | (1ULL << ClickHouseParser::ALIAS) - | (1ULL << ClickHouseParser::ALL) - | (1ULL << ClickHouseParser::ALTER) - | (1ULL << ClickHouseParser::AND) - | (1ULL << ClickHouseParser::ANTI) - | (1ULL << ClickHouseParser::ANY) - | (1ULL << ClickHouseParser::ARRAY) - | (1ULL << ClickHouseParser::AS) - | (1ULL << ClickHouseParser::ASCENDING) - | (1ULL << ClickHouseParser::ASOF) - | (1ULL << ClickHouseParser::AST) - | (1ULL << ClickHouseParser::ASYNC) - | (1ULL << ClickHouseParser::ATTACH) - | (1ULL << ClickHouseParser::BETWEEN) - | (1ULL << ClickHouseParser::BOTH) - | (1ULL << ClickHouseParser::BY) - | (1ULL << ClickHouseParser::CASE) - | (1ULL << ClickHouseParser::CAST) - | (1ULL << ClickHouseParser::CHECK) - | (1ULL << ClickHouseParser::CLEAR) - | (1ULL << ClickHouseParser::CLUSTER) - | (1ULL << ClickHouseParser::CODEC) - | (1ULL << ClickHouseParser::COLLATE) - | (1ULL << ClickHouseParser::COLUMN) - | (1ULL << ClickHouseParser::COMMENT) - | (1ULL << ClickHouseParser::CONSTRAINT) - | (1ULL << ClickHouseParser::CREATE) - | (1ULL << ClickHouseParser::CROSS) - | (1ULL << 
ClickHouseParser::CUBE) - | (1ULL << ClickHouseParser::DATABASE) - | (1ULL << ClickHouseParser::DATABASES) - | (1ULL << ClickHouseParser::DATE) - | (1ULL << ClickHouseParser::DAY) - | (1ULL << ClickHouseParser::DEDUPLICATE) - | (1ULL << ClickHouseParser::DEFAULT) - | (1ULL << ClickHouseParser::DELAY) - | (1ULL << ClickHouseParser::DELETE) - | (1ULL << ClickHouseParser::DESC) - | (1ULL << ClickHouseParser::DESCENDING) - | (1ULL << ClickHouseParser::DESCRIBE) - | (1ULL << ClickHouseParser::DETACH) - | (1ULL << ClickHouseParser::DICTIONARIES) - | (1ULL << ClickHouseParser::DICTIONARY) - | (1ULL << ClickHouseParser::DISK) - | (1ULL << ClickHouseParser::DISTINCT) - | (1ULL << ClickHouseParser::DISTRIBUTED) - | (1ULL << ClickHouseParser::DROP) - | (1ULL << ClickHouseParser::ELSE) - | (1ULL << ClickHouseParser::END) - | (1ULL << ClickHouseParser::ENGINE) - | (1ULL << ClickHouseParser::EVENTS) - | (1ULL << ClickHouseParser::EXISTS) - | (1ULL << ClickHouseParser::EXPLAIN) - | (1ULL << ClickHouseParser::EXPRESSION) - | (1ULL << ClickHouseParser::EXTRACT) - | (1ULL << ClickHouseParser::FETCHES) - | (1ULL << ClickHouseParser::FINAL) - | (1ULL << ClickHouseParser::FIRST) - | (1ULL << ClickHouseParser::FLUSH) - | (1ULL << ClickHouseParser::FOR) - | (1ULL << ClickHouseParser::FORMAT))) != 0) || ((((_la - 64) & ~ 0x3fULL) == 0) && - ((1ULL << (_la - 64)) & ((1ULL << (ClickHouseParser::FREEZE - 64)) - | (1ULL << (ClickHouseParser::FROM - 64)) - | (1ULL << (ClickHouseParser::FULL - 64)) - | (1ULL << (ClickHouseParser::FUNCTION - 64)) - | (1ULL << (ClickHouseParser::GLOBAL - 64)) - | (1ULL << (ClickHouseParser::GRANULARITY - 64)) - | (1ULL << (ClickHouseParser::GROUP - 64)) - | (1ULL << (ClickHouseParser::HAVING - 64)) - | (1ULL << (ClickHouseParser::HIERARCHICAL - 64)) - | (1ULL << (ClickHouseParser::HOUR - 64)) - | (1ULL << (ClickHouseParser::ID - 64)) - | (1ULL << (ClickHouseParser::IF - 64)) - | (1ULL << (ClickHouseParser::ILIKE - 64)) - | (1ULL << (ClickHouseParser::IN - 64)) - | (1ULL << (ClickHouseParser::INDEX - 64)) - | (1ULL << (ClickHouseParser::INF - 64)) - | (1ULL << (ClickHouseParser::INJECTIVE - 64)) - | (1ULL << (ClickHouseParser::INNER - 64)) - | (1ULL << (ClickHouseParser::INSERT - 64)) - | (1ULL << (ClickHouseParser::INTERVAL - 64)) - | (1ULL << (ClickHouseParser::INTO - 64)) - | (1ULL << (ClickHouseParser::IS - 64)) - | (1ULL << (ClickHouseParser::IS_OBJECT_ID - 64)) - | (1ULL << (ClickHouseParser::JOIN - 64)) - | (1ULL << (ClickHouseParser::KEY - 64)) - | (1ULL << (ClickHouseParser::KILL - 64)) - | (1ULL << (ClickHouseParser::LAST - 64)) - | (1ULL << (ClickHouseParser::LAYOUT - 64)) - | (1ULL << (ClickHouseParser::LEADING - 64)) - | (1ULL << (ClickHouseParser::LEFT - 64)) - | (1ULL << (ClickHouseParser::LIFETIME - 64)) - | (1ULL << (ClickHouseParser::LIKE - 64)) - | (1ULL << (ClickHouseParser::LIMIT - 64)) - | (1ULL << (ClickHouseParser::LIVE - 64)) - | (1ULL << (ClickHouseParser::LOCAL - 64)) - | (1ULL << (ClickHouseParser::LOGS - 64)) - | (1ULL << (ClickHouseParser::MATERIALIZE - 64)) - | (1ULL << (ClickHouseParser::MATERIALIZED - 64)) - | (1ULL << (ClickHouseParser::MAX - 64)) - | (1ULL << (ClickHouseParser::MERGES - 64)) - | (1ULL << (ClickHouseParser::MIN - 64)) - | (1ULL << (ClickHouseParser::MINUTE - 64)) - | (1ULL << (ClickHouseParser::MODIFY - 64)) - | (1ULL << (ClickHouseParser::MONTH - 64)) - | (1ULL << (ClickHouseParser::MOVE - 64)) - | (1ULL << (ClickHouseParser::MUTATION - 64)) - | (1ULL << (ClickHouseParser::NAN_SQL - 64)) - | (1ULL << (ClickHouseParser::NO - 64)) - | 
(1ULL << (ClickHouseParser::NOT - 64)) - | (1ULL << (ClickHouseParser::NULL_SQL - 64)) - | (1ULL << (ClickHouseParser::NULLS - 64)) - | (1ULL << (ClickHouseParser::OFFSET - 64)) - | (1ULL << (ClickHouseParser::ON - 64)) - | (1ULL << (ClickHouseParser::OPTIMIZE - 64)) - | (1ULL << (ClickHouseParser::OR - 64)) - | (1ULL << (ClickHouseParser::ORDER - 64)) - | (1ULL << (ClickHouseParser::OUTER - 64)) - | (1ULL << (ClickHouseParser::OUTFILE - 64)) - | (1ULL << (ClickHouseParser::PARTITION - 64)) - | (1ULL << (ClickHouseParser::POPULATE - 64)) - | (1ULL << (ClickHouseParser::PREWHERE - 64)) - | (1ULL << (ClickHouseParser::PRIMARY - 64)) - | (1ULL << (ClickHouseParser::QUARTER - 64)))) != 0) || ((((_la - 128) & ~ 0x3fULL) == 0) && - ((1ULL << (_la - 128)) & ((1ULL << (ClickHouseParser::RANGE - 128)) - | (1ULL << (ClickHouseParser::RELOAD - 128)) - | (1ULL << (ClickHouseParser::REMOVE - 128)) - | (1ULL << (ClickHouseParser::RENAME - 128)) - | (1ULL << (ClickHouseParser::REPLACE - 128)) - | (1ULL << (ClickHouseParser::REPLICA - 128)) - | (1ULL << (ClickHouseParser::REPLICATED - 128)) - | (1ULL << (ClickHouseParser::RIGHT - 128)) - | (1ULL << (ClickHouseParser::ROLLUP - 128)) - | (1ULL << (ClickHouseParser::SAMPLE - 128)) - | (1ULL << (ClickHouseParser::SECOND - 128)) - | (1ULL << (ClickHouseParser::SELECT - 128)) - | (1ULL << (ClickHouseParser::SEMI - 128)) - | (1ULL << (ClickHouseParser::SENDS - 128)) - | (1ULL << (ClickHouseParser::SET - 128)) - | (1ULL << (ClickHouseParser::SETTINGS - 128)) - | (1ULL << (ClickHouseParser::SHOW - 128)) - | (1ULL << (ClickHouseParser::SOURCE - 128)) - | (1ULL << (ClickHouseParser::START - 128)) - | (1ULL << (ClickHouseParser::STOP - 128)) - | (1ULL << (ClickHouseParser::SUBSTRING - 128)) - | (1ULL << (ClickHouseParser::SYNC - 128)) - | (1ULL << (ClickHouseParser::SYNTAX - 128)) - | (1ULL << (ClickHouseParser::SYSTEM - 128)) - | (1ULL << (ClickHouseParser::TABLE - 128)) - | (1ULL << (ClickHouseParser::TABLES - 128)) - | (1ULL << (ClickHouseParser::TEMPORARY - 128)) - | (1ULL << (ClickHouseParser::TEST - 128)) - | (1ULL << (ClickHouseParser::THEN - 128)) - | (1ULL << (ClickHouseParser::TIES - 128)) - | (1ULL << (ClickHouseParser::TIMEOUT - 128)) - | (1ULL << (ClickHouseParser::TIMESTAMP - 128)) - | (1ULL << (ClickHouseParser::TO - 128)) - | (1ULL << (ClickHouseParser::TOP - 128)) - | (1ULL << (ClickHouseParser::TOTALS - 128)) - | (1ULL << (ClickHouseParser::TRAILING - 128)) - | (1ULL << (ClickHouseParser::TRIM - 128)) - | (1ULL << (ClickHouseParser::TRUNCATE - 128)) - | (1ULL << (ClickHouseParser::TTL - 128)) - | (1ULL << (ClickHouseParser::TYPE - 128)) - | (1ULL << (ClickHouseParser::UNION - 128)) - | (1ULL << (ClickHouseParser::UPDATE - 128)) - | (1ULL << (ClickHouseParser::USE - 128)) - | (1ULL << (ClickHouseParser::USING - 128)) - | (1ULL << (ClickHouseParser::UUID - 128)) - | (1ULL << (ClickHouseParser::VALUES - 128)) - | (1ULL << (ClickHouseParser::VIEW - 128)) - | (1ULL << (ClickHouseParser::VOLUME - 128)) - | (1ULL << (ClickHouseParser::WATCH - 128)) - | (1ULL << (ClickHouseParser::WEEK - 128)) - | (1ULL << (ClickHouseParser::WHEN - 128)) - | (1ULL << (ClickHouseParser::WHERE - 128)) - | (1ULL << (ClickHouseParser::WITH - 128)) - | (1ULL << (ClickHouseParser::YEAR - 128)) - | (1ULL << (ClickHouseParser::JSON_FALSE - 128)) - | (1ULL << (ClickHouseParser::JSON_TRUE - 128)) - | (1ULL << (ClickHouseParser::IDENTIFIER - 128)) - | (1ULL << (ClickHouseParser::FLOATING_LITERAL - 128)) - | (1ULL << (ClickHouseParser::OCTAL_LITERAL - 128)) - | (1ULL << 
(ClickHouseParser::DECIMAL_LITERAL - 128)) - | (1ULL << (ClickHouseParser::HEXADECIMAL_LITERAL - 128)) - | (1ULL << (ClickHouseParser::STRING_LITERAL - 128)) - | (1ULL << (ClickHouseParser::ASTERISK - 128)))) != 0) || ((((_la - 197) & ~ 0x3fULL) == 0) && - ((1ULL << (_la - 197)) & ((1ULL << (ClickHouseParser::DASH - 197)) - | (1ULL << (ClickHouseParser::DOT - 197)) - | (1ULL << (ClickHouseParser::LBRACKET - 197)) - | (1ULL << (ClickHouseParser::LPAREN - 197)) - | (1ULL << (ClickHouseParser::PLUS - 197)))) != 0)) { - setState(1558); - columnExprList(); - } - setState(1561); - match(ClickHouseParser::RPAREN); - break; - } - - } - - } - catch (RecognitionException &e) { - _errHandler->reportError(this, e); - _localctx->exception = std::current_exception(); - _errHandler->recover(this, _localctx->exception); - } - - return _localctx; -} - -//----------------- ColumnExprListContext ------------------------------------------------------------------ - -ClickHouseParser::ColumnExprListContext::ColumnExprListContext(ParserRuleContext *parent, size_t invokingState) - : ParserRuleContext(parent, invokingState) { -} - -std::vector ClickHouseParser::ColumnExprListContext::columnsExpr() { - return getRuleContexts(); -} - -ClickHouseParser::ColumnsExprContext* ClickHouseParser::ColumnExprListContext::columnsExpr(size_t i) { - return getRuleContext(i); -} - -std::vector ClickHouseParser::ColumnExprListContext::COMMA() { - return getTokens(ClickHouseParser::COMMA); -} - -tree::TerminalNode* ClickHouseParser::ColumnExprListContext::COMMA(size_t i) { - return getToken(ClickHouseParser::COMMA, i); -} - - -size_t ClickHouseParser::ColumnExprListContext::getRuleIndex() const { - return ClickHouseParser::RuleColumnExprList; -} - -antlrcpp::Any ClickHouseParser::ColumnExprListContext::accept(tree::ParseTreeVisitor *visitor) { - if (auto parserVisitor = dynamic_cast(visitor)) - return parserVisitor->visitColumnExprList(this); - else - return visitor->visitChildren(this); -} - -ClickHouseParser::ColumnExprListContext* ClickHouseParser::columnExprList() { - ColumnExprListContext *_localctx = _tracker.createInstance(_ctx, getState()); - enterRule(_localctx, 170, ClickHouseParser::RuleColumnExprList); - - auto onExit = finally([=] { - exitRule(); - }); - try { - size_t alt; - enterOuterAlt(_localctx, 1); - setState(1565); - columnsExpr(); - setState(1570); - _errHandler->sync(this); - alt = getInterpreter()->adaptivePredict(_input, 212, _ctx); - while (alt != 2 && alt != atn::ATN::INVALID_ALT_NUMBER) { - if (alt == 1) { - setState(1566); - match(ClickHouseParser::COMMA); - setState(1567); - columnsExpr(); - } - setState(1572); - _errHandler->sync(this); - alt = getInterpreter()->adaptivePredict(_input, 212, _ctx); - } - - } - catch (RecognitionException &e) { - _errHandler->reportError(this, e); - _localctx->exception = std::current_exception(); - _errHandler->recover(this, _localctx->exception); - } - - return _localctx; -} - -//----------------- ColumnsExprContext ------------------------------------------------------------------ - -ClickHouseParser::ColumnsExprContext::ColumnsExprContext(ParserRuleContext *parent, size_t invokingState) - : ParserRuleContext(parent, invokingState) { -} - - -size_t ClickHouseParser::ColumnsExprContext::getRuleIndex() const { - return ClickHouseParser::RuleColumnsExpr; -} - -void ClickHouseParser::ColumnsExprContext::copyFrom(ColumnsExprContext *ctx) { - ParserRuleContext::copyFrom(ctx); -} - -//----------------- ColumnsExprColumnContext 
------------------------------------------------------------------
-
-ClickHouseParser::ColumnExprContext* ClickHouseParser::ColumnsExprColumnContext::columnExpr() { return getRuleContext<ClickHouseParser::ColumnExprContext>(0); }
-
-ClickHouseParser::ColumnsExprColumnContext::ColumnsExprColumnContext(ColumnsExprContext *ctx) { copyFrom(ctx); }
-
-antlrcpp::Any ClickHouseParser::ColumnsExprColumnContext::accept(tree::ParseTreeVisitor *visitor) {
-  if (auto parserVisitor = dynamic_cast<ClickHouseParserVisitor *>(visitor)) return parserVisitor->visitColumnsExprColumn(this);
-  else return visitor->visitChildren(this);
-}
-//----------------- ColumnsExprAsteriskContext ------------------------------------------------------------------
-
-tree::TerminalNode* ClickHouseParser::ColumnsExprAsteriskContext::ASTERISK() { return getToken(ClickHouseParser::ASTERISK, 0); }
-ClickHouseParser::TableIdentifierContext* ClickHouseParser::ColumnsExprAsteriskContext::tableIdentifier() { return getRuleContext<ClickHouseParser::TableIdentifierContext>(0); }
-tree::TerminalNode* ClickHouseParser::ColumnsExprAsteriskContext::DOT() { return getToken(ClickHouseParser::DOT, 0); }
-
-ClickHouseParser::ColumnsExprAsteriskContext::ColumnsExprAsteriskContext(ColumnsExprContext *ctx) { copyFrom(ctx); }
-
-antlrcpp::Any ClickHouseParser::ColumnsExprAsteriskContext::accept(tree::ParseTreeVisitor *visitor) {
-  if (auto parserVisitor = dynamic_cast<ClickHouseParserVisitor *>(visitor)) return parserVisitor->visitColumnsExprAsterisk(this);
-  else return visitor->visitChildren(this);
-}
-//----------------- ColumnsExprSubqueryContext ------------------------------------------------------------------
-
-tree::TerminalNode* ClickHouseParser::ColumnsExprSubqueryContext::LPAREN() { return getToken(ClickHouseParser::LPAREN, 0); }
-ClickHouseParser::SelectUnionStmtContext* ClickHouseParser::ColumnsExprSubqueryContext::selectUnionStmt() { return getRuleContext<ClickHouseParser::SelectUnionStmtContext>(0); }
-tree::TerminalNode* ClickHouseParser::ColumnsExprSubqueryContext::RPAREN() { return getToken(ClickHouseParser::RPAREN, 0); }
-
-ClickHouseParser::ColumnsExprSubqueryContext::ColumnsExprSubqueryContext(ColumnsExprContext *ctx) { copyFrom(ctx); }
-
-antlrcpp::Any ClickHouseParser::ColumnsExprSubqueryContext::accept(tree::ParseTreeVisitor *visitor) {
-  if (auto parserVisitor = dynamic_cast<ClickHouseParserVisitor *>(visitor)) return parserVisitor->visitColumnsExprSubquery(this);
-  else return visitor->visitChildren(this);
-}
-ClickHouseParser::ColumnsExprContext* ClickHouseParser::columnsExpr() {
-  ColumnsExprContext *_localctx = _tracker.createInstance<ColumnsExprContext>(_ctx, getState());
-  enterRule(_localctx, 172, ClickHouseParser::RuleColumnsExpr);
-  size_t _la = 0;
-
-  auto onExit = finally([=] { exitRule(); });
-  try {
-    setState(1584);
-    _errHandler->sync(this);
-    switch (getInterpreter()->adaptivePredict(_input, 214, _ctx)) {
-      case 1: {  // [table.]* -- ColumnsExprAsterisk
-        _localctx = dynamic_cast<ColumnsExprContext *>(_tracker.createInstance<ClickHouseParser::ColumnsExprAsteriskContext>(_localctx));
-        enterOuterAlt(_localctx, 1);
-        setState(1576);
-        _errHandler->sync(this);
-        _la = _input->LA(1);
-        if ((((_la & ~ 0x3fULL) == 0) &&
-          ((1ULL << _la) & ((1ULL << ClickHouseParser::AFTER) | (1ULL << ClickHouseParser::ALIAS) | (1ULL << ClickHouseParser::ALL) | (1ULL << ClickHouseParser::ALTER) | (1ULL << ClickHouseParser::AND) | (1ULL << ClickHouseParser::ANTI) | (1ULL << ClickHouseParser::ANY) | (1ULL << ClickHouseParser::ARRAY)
-          | (1ULL << ClickHouseParser::AS) | (1ULL << ClickHouseParser::ASCENDING) | (1ULL << ClickHouseParser::ASOF) | (1ULL << ClickHouseParser::AST) | (1ULL << ClickHouseParser::ASYNC) | (1ULL << ClickHouseParser::ATTACH) | (1ULL << ClickHouseParser::BETWEEN) | (1ULL << ClickHouseParser::BOTH)
-          | (1ULL << ClickHouseParser::BY) | (1ULL << ClickHouseParser::CASE) | (1ULL << ClickHouseParser::CAST) | (1ULL << ClickHouseParser::CHECK) | (1ULL << ClickHouseParser::CLEAR) | (1ULL << ClickHouseParser::CLUSTER) | (1ULL << ClickHouseParser::CODEC) | (1ULL << ClickHouseParser::COLLATE)
-          | (1ULL << ClickHouseParser::COLUMN) | (1ULL << ClickHouseParser::COMMENT) | (1ULL << ClickHouseParser::CONSTRAINT) | (1ULL << ClickHouseParser::CREATE) | (1ULL << ClickHouseParser::CROSS) | (1ULL << ClickHouseParser::CUBE) | (1ULL << ClickHouseParser::DATABASE) | (1ULL << ClickHouseParser::DATABASES)
-          | (1ULL << ClickHouseParser::DATE) | (1ULL << ClickHouseParser::DAY) | (1ULL << ClickHouseParser::DEDUPLICATE) | (1ULL << ClickHouseParser::DEFAULT) | (1ULL << ClickHouseParser::DELAY) | (1ULL << ClickHouseParser::DELETE) | (1ULL << ClickHouseParser::DESC) | (1ULL << ClickHouseParser::DESCENDING)
-          | (1ULL << ClickHouseParser::DESCRIBE) | (1ULL << ClickHouseParser::DETACH) | (1ULL << ClickHouseParser::DICTIONARIES) | (1ULL << ClickHouseParser::DICTIONARY) | (1ULL << ClickHouseParser::DISK) | (1ULL << ClickHouseParser::DISTINCT) | (1ULL << ClickHouseParser::DISTRIBUTED) | (1ULL << ClickHouseParser::DROP)
-          | (1ULL << ClickHouseParser::ELSE) | (1ULL << ClickHouseParser::END) | (1ULL << ClickHouseParser::ENGINE) | (1ULL << ClickHouseParser::EVENTS) | (1ULL << ClickHouseParser::EXISTS) | (1ULL << ClickHouseParser::EXPLAIN) | (1ULL << ClickHouseParser::EXPRESSION) | (1ULL << ClickHouseParser::EXTRACT)
-          | (1ULL << ClickHouseParser::FETCHES) | (1ULL << ClickHouseParser::FINAL) | (1ULL << ClickHouseParser::FIRST) | (1ULL << ClickHouseParser::FLUSH) | (1ULL << ClickHouseParser::FOR) | (1ULL << ClickHouseParser::FORMAT))) != 0) || ((((_la - 64) & ~ 0x3fULL) == 0) &&
-          ((1ULL << (_la - 64)) & ((1ULL << (ClickHouseParser::FREEZE - 64)) | (1ULL << (ClickHouseParser::FROM - 64)) | (1ULL << (ClickHouseParser::FULL - 64)) | (1ULL << (ClickHouseParser::FUNCTION - 64)) | (1ULL << (ClickHouseParser::GLOBAL - 64)) | (1ULL << (ClickHouseParser::GRANULARITY - 64)) | (1ULL << (ClickHouseParser::GROUP - 64)) | (1ULL << (ClickHouseParser::HAVING - 64))
-          | (1ULL << (ClickHouseParser::HIERARCHICAL - 64)) | (1ULL << (ClickHouseParser::HOUR - 64)) | (1ULL << (ClickHouseParser::ID - 64)) | (1ULL << (ClickHouseParser::IF - 64)) | (1ULL << (ClickHouseParser::ILIKE - 64)) | (1ULL << (ClickHouseParser::IN - 64)) | (1ULL << (ClickHouseParser::INDEX - 64)) | (1ULL << (ClickHouseParser::INJECTIVE - 64))
-          | (1ULL << (ClickHouseParser::INNER - 64)) | (1ULL << (ClickHouseParser::INSERT - 64)) | (1ULL << (ClickHouseParser::INTERVAL - 64)) | (1ULL << (ClickHouseParser::INTO - 64)) | (1ULL << (ClickHouseParser::IS - 64)) | (1ULL << (ClickHouseParser::IS_OBJECT_ID - 64)) | (1ULL << (ClickHouseParser::JOIN - 64)) | (1ULL << (ClickHouseParser::KEY - 64))
-          | (1ULL << (ClickHouseParser::KILL - 64)) | (1ULL << (ClickHouseParser::LAST - 64)) | (1ULL << (ClickHouseParser::LAYOUT - 64)) | (1ULL << (ClickHouseParser::LEADING - 64)) | (1ULL << (ClickHouseParser::LEFT - 64)) | (1ULL << (ClickHouseParser::LIFETIME - 64)) | (1ULL << (ClickHouseParser::LIKE - 64)) | (1ULL << (ClickHouseParser::LIMIT - 64))
-          | (1ULL << (ClickHouseParser::LIVE - 64)) | (1ULL << (ClickHouseParser::LOCAL - 64)) | (1ULL << (ClickHouseParser::LOGS - 64)) | (1ULL << (ClickHouseParser::MATERIALIZE - 64)) | (1ULL << (ClickHouseParser::MATERIALIZED - 64)) | (1ULL << (ClickHouseParser::MAX - 64)) | (1ULL << (ClickHouseParser::MERGES - 64)) | (1ULL << (ClickHouseParser::MIN - 64))
-          | (1ULL << (ClickHouseParser::MINUTE - 64)) | (1ULL << (ClickHouseParser::MODIFY - 64)) | (1ULL << (ClickHouseParser::MONTH - 64)) | (1ULL << (ClickHouseParser::MOVE - 64)) | (1ULL << (ClickHouseParser::MUTATION - 64)) | (1ULL << (ClickHouseParser::NO - 64)) | (1ULL << (ClickHouseParser::NOT - 64)) | (1ULL << (ClickHouseParser::NULLS - 64))
-          | (1ULL << (ClickHouseParser::OFFSET - 64)) | (1ULL << (ClickHouseParser::ON - 64)) | (1ULL << (ClickHouseParser::OPTIMIZE - 64)) | (1ULL << (ClickHouseParser::OR - 64)) | (1ULL << (ClickHouseParser::ORDER - 64)) | (1ULL << (ClickHouseParser::OUTER - 64)) | (1ULL << (ClickHouseParser::OUTFILE - 64)) | (1ULL << (ClickHouseParser::PARTITION - 64))
-          | (1ULL << (ClickHouseParser::POPULATE - 64)) | (1ULL << (ClickHouseParser::PREWHERE - 64)) | (1ULL << (ClickHouseParser::PRIMARY - 64)) | (1ULL << (ClickHouseParser::QUARTER - 64)))) != 0) || ((((_la - 128) & ~ 0x3fULL) == 0) &&
-          ((1ULL << (_la - 128)) & ((1ULL << (ClickHouseParser::RANGE - 128)) | (1ULL << (ClickHouseParser::RELOAD - 128)) | (1ULL << (ClickHouseParser::REMOVE - 128)) | (1ULL << (ClickHouseParser::RENAME - 128)) | (1ULL << (ClickHouseParser::REPLACE - 128)) | (1ULL << (ClickHouseParser::REPLICA - 128)) | (1ULL << (ClickHouseParser::REPLICATED - 128)) | (1ULL << (ClickHouseParser::RIGHT - 128))
-          | (1ULL << (ClickHouseParser::ROLLUP - 128)) | (1ULL << (ClickHouseParser::SAMPLE - 128)) | (1ULL << (ClickHouseParser::SECOND - 128)) | (1ULL << (ClickHouseParser::SELECT - 128)) | (1ULL << (ClickHouseParser::SEMI - 128)) | (1ULL << (ClickHouseParser::SENDS - 128)) | (1ULL << (ClickHouseParser::SET - 128)) | (1ULL << (ClickHouseParser::SETTINGS - 128))
-          | (1ULL << (ClickHouseParser::SHOW - 128)) | (1ULL << (ClickHouseParser::SOURCE - 128)) | (1ULL << (ClickHouseParser::START - 128)) | (1ULL << (ClickHouseParser::STOP - 128)) | (1ULL << (ClickHouseParser::SUBSTRING - 128)) | (1ULL << (ClickHouseParser::SYNC - 128)) | (1ULL << (ClickHouseParser::SYNTAX - 128)) | (1ULL << (ClickHouseParser::SYSTEM - 128))
-          | (1ULL << (ClickHouseParser::TABLE - 128)) | (1ULL << (ClickHouseParser::TABLES - 128)) | (1ULL << (ClickHouseParser::TEMPORARY - 128)) | (1ULL << (ClickHouseParser::TEST - 128)) | (1ULL << (ClickHouseParser::THEN - 128)) | (1ULL << (ClickHouseParser::TIES - 128)) | (1ULL << (ClickHouseParser::TIMEOUT - 128)) | (1ULL << (ClickHouseParser::TIMESTAMP - 128))
-          | (1ULL << (ClickHouseParser::TO - 128)) | (1ULL << (ClickHouseParser::TOP - 128)) | (1ULL << (ClickHouseParser::TOTALS - 128)) | (1ULL << (ClickHouseParser::TRAILING - 128)) | (1ULL << (ClickHouseParser::TRIM - 128)) | (1ULL << (ClickHouseParser::TRUNCATE - 128)) | (1ULL << (ClickHouseParser::TTL - 128)) | (1ULL << (ClickHouseParser::TYPE - 128))
-          | (1ULL << (ClickHouseParser::UNION - 128)) | (1ULL << (ClickHouseParser::UPDATE - 128)) | (1ULL << (ClickHouseParser::USE - 128)) | (1ULL << (ClickHouseParser::USING - 128)) | (1ULL << (ClickHouseParser::UUID - 128)) | (1ULL << (ClickHouseParser::VALUES - 128)) | (1ULL << (ClickHouseParser::VIEW - 128)) | (1ULL << (ClickHouseParser::VOLUME - 128))
-          | (1ULL << (ClickHouseParser::WATCH - 128)) | (1ULL << (ClickHouseParser::WEEK - 128)) | (1ULL << (ClickHouseParser::WHEN - 128)) | (1ULL << (ClickHouseParser::WHERE - 128)) | (1ULL << (ClickHouseParser::WITH - 128)) | (1ULL << (ClickHouseParser::YEAR - 128)) | (1ULL << (ClickHouseParser::JSON_FALSE - 128)) | (1ULL << (ClickHouseParser::JSON_TRUE - 128))
-          | (1ULL << (ClickHouseParser::IDENTIFIER - 128)))) != 0)) {
-          setState(1573);
-          tableIdentifier();
-          setState(1574);
-          match(ClickHouseParser::DOT);
-        }
-        setState(1578);
-        match(ClickHouseParser::ASTERISK);
-        break;
-      }
-      case 2: {  // ( SELECT ... ) -- ColumnsExprSubquery
-        _localctx = dynamic_cast<ColumnsExprContext *>(_tracker.createInstance<ClickHouseParser::ColumnsExprSubqueryContext>(_localctx));
-        enterOuterAlt(_localctx, 2);
-        setState(1579);
-        match(ClickHouseParser::LPAREN);
-        setState(1580);
-        selectUnionStmt();
-        setState(1581);
-        match(ClickHouseParser::RPAREN);
-        break;
-      }
-      case 3: {  // plain column expression -- ColumnsExprColumn
-        _localctx = dynamic_cast<ColumnsExprContext *>(_tracker.createInstance<ClickHouseParser::ColumnsExprColumnContext>(_localctx));
-        enterOuterAlt(_localctx, 3);
-        setState(1583);
-        columnExpr(0);
-        break;
-      }
-    }
-  }
-  catch (RecognitionException &e) {
-    _errHandler->reportError(this, e);
-    _localctx->exception = std::current_exception();
-    _errHandler->recover(this, _localctx->exception);
-  }
-
-  return _localctx;
-}
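The wall of shifted constants above is how the generated parser encodes a follow-set: token types are grouped into 64-wide blocks, and membership of the lookahead `_la` is tested with one shift and one AND per block. A minimal standalone sketch of the same test, with made-up token values (the real IDs live in the generated header):

    #include <cassert>
    #include <cstdint>

    // Made-up token IDs for illustration only; the generated parser takes its
    // numbering from ClickHouseParser.h.
    enum Token : uint64_t { AFTER = 1, ALIAS = 2, FROM = 65, QUARTER = 120 };

    // Block-0 test, as in the generated code: `la` is in the set iff it lies in
    // [0, 63] and its bit is set in the 64-bit mask.
    static bool inBlock0(uint64_t la, uint64_t mask)
    {
        return ((la & ~0x3fULL) == 0) && (((1ULL << la) & mask) != 0);
    }

    // Test for a later block: tokens in [base, base + 63] are shifted down first.
    static bool inBlock(uint64_t base, uint64_t la, uint64_t mask)
    {
        return (((la - base) & ~0x3fULL) == 0) && (((1ULL << (la - base)) & mask) != 0);
    }

    int main()
    {
        const uint64_t block0 = (1ULL << AFTER) | (1ULL << ALIAS);
        const uint64_t block1 = (1ULL << (FROM - 64)) | (1ULL << (QUARTER - 64));

        assert(inBlock0(ALIAS, block0));
        assert(inBlock(64, FROM, block1));
        assert(!inBlock0(FROM, block0));  // 65 is outside block 0, so the range check fails
        return 0;
    }

The range check `(la & ~0x3f) == 0` is what lets the generated code chain several blocks with `||`: at most one block's shift is ever evaluated on an in-range value.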
-//----------------- ColumnExprContext ------------------------------------------------------------------
-
-ClickHouseParser::ColumnExprContext::ColumnExprContext(ParserRuleContext *parent, size_t invokingState)
-  : ParserRuleContext(parent, invokingState) {
-}
-
-size_t ClickHouseParser::ColumnExprContext::getRuleIndex() const { return ClickHouseParser::RuleColumnExpr; }
-
-void ClickHouseParser::ColumnExprContext::copyFrom(ColumnExprContext *ctx) { ParserRuleContext::copyFrom(ctx); }
-
-//----------------- ColumnExprTernaryOpContext ------------------------------------------------------------------
-
-std::vector<ClickHouseParser::ColumnExprContext *> ClickHouseParser::ColumnExprTernaryOpContext::columnExpr() { return getRuleContexts<ClickHouseParser::ColumnExprContext>(); }
-ClickHouseParser::ColumnExprContext* ClickHouseParser::ColumnExprTernaryOpContext::columnExpr(size_t i) { return getRuleContext<ClickHouseParser::ColumnExprContext>(i); }
-tree::TerminalNode* ClickHouseParser::ColumnExprTernaryOpContext::QUERY() { return getToken(ClickHouseParser::QUERY, 0); }
-tree::TerminalNode* ClickHouseParser::ColumnExprTernaryOpContext::COLON() { return getToken(ClickHouseParser::COLON, 0); }
-
-ClickHouseParser::ColumnExprTernaryOpContext::ColumnExprTernaryOpContext(ColumnExprContext *ctx) { copyFrom(ctx); }
-
-antlrcpp::Any ClickHouseParser::ColumnExprTernaryOpContext::accept(tree::ParseTreeVisitor *visitor) {
-  if (auto parserVisitor = dynamic_cast<ClickHouseParserVisitor *>(visitor)) return parserVisitor->visitColumnExprTernaryOp(this);
-  else return visitor->visitChildren(this);
-}
-//----------------- ColumnExprAliasContext ------------------------------------------------------------------
-
-ClickHouseParser::ColumnExprContext* ClickHouseParser::ColumnExprAliasContext::columnExpr() { return getRuleContext<ClickHouseParser::ColumnExprContext>(0); }
-ClickHouseParser::AliasContext* ClickHouseParser::ColumnExprAliasContext::alias() { return getRuleContext<ClickHouseParser::AliasContext>(0); }
-tree::TerminalNode* ClickHouseParser::ColumnExprAliasContext::AS() { return getToken(ClickHouseParser::AS, 0); }
-ClickHouseParser::IdentifierContext* ClickHouseParser::ColumnExprAliasContext::identifier() { return getRuleContext<ClickHouseParser::IdentifierContext>(0); }
-
-ClickHouseParser::ColumnExprAliasContext::ColumnExprAliasContext(ColumnExprContext *ctx) { copyFrom(ctx); }
-
-antlrcpp::Any ClickHouseParser::ColumnExprAliasContext::accept(tree::ParseTreeVisitor *visitor) {
-  if (auto parserVisitor = dynamic_cast<ClickHouseParserVisitor *>(visitor)) return parserVisitor->visitColumnExprAlias(this);
-  else return visitor->visitChildren(this);
-}
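Every accept() in these removed classes is the same double-dispatch hook: the context tries to downcast the generic tree::ParseTreeVisitor to the grammar-specific ClickHouseParserVisitor and call its per-alternative visit method, otherwise it falls back to visitChildren. A self-contained analogue, with hypothetical class names standing in for the ANTLR runtime types:

    #include <iostream>

    struct NodeVisitor;   // plays the role of tree::ParseTreeVisitor
    struct ExprVisitor;   // plays the role of ClickHouseParserVisitor

    struct Node {
        virtual ~Node() = default;
        virtual int accept(NodeVisitor *visitor) = 0;
    };

    struct TernaryNode;

    struct NodeVisitor {
        virtual ~NodeVisitor() = default;
        virtual int visitChildren(Node *) { return 0; }  // generic fallback
    };

    struct ExprVisitor : NodeVisitor {
        virtual int visitTernary(TernaryNode *) = 0;     // per-alternative hook
    };

    struct TernaryNode : Node {
        int accept(NodeVisitor *visitor) override {
            // Same shape as the generated accept(): prefer the specific visitor if present.
            if (auto exprVisitor = dynamic_cast<ExprVisitor *>(visitor))
                return exprVisitor->visitTernary(this);
            return visitor->visitChildren(this);
        }
    };

    struct Printer : ExprVisitor {
        int visitTernary(TernaryNode *) override { std::cout << "ternary\n"; return 1; }
    };

    int main() {
        TernaryNode node;
        Printer printer;
        return node.accept(&printer) == 1 ? 0 : 1;
    }

The dynamic_cast makes the tree usable both by visitors that know this grammar and by generic tree walkers that only understand visitChildren.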
-//----------------- ColumnExprExtractContext ------------------------------------------------------------------
-
-tree::TerminalNode* ClickHouseParser::ColumnExprExtractContext::EXTRACT() { return getToken(ClickHouseParser::EXTRACT, 0); }
-tree::TerminalNode* ClickHouseParser::ColumnExprExtractContext::LPAREN() { return getToken(ClickHouseParser::LPAREN, 0); }
-ClickHouseParser::IntervalContext* ClickHouseParser::ColumnExprExtractContext::interval() { return getRuleContext<ClickHouseParser::IntervalContext>(0); }
-tree::TerminalNode* ClickHouseParser::ColumnExprExtractContext::FROM() { return getToken(ClickHouseParser::FROM, 0); }
-ClickHouseParser::ColumnExprContext* ClickHouseParser::ColumnExprExtractContext::columnExpr() { return getRuleContext<ClickHouseParser::ColumnExprContext>(0); }
-tree::TerminalNode* ClickHouseParser::ColumnExprExtractContext::RPAREN() { return getToken(ClickHouseParser::RPAREN, 0); }
-
-ClickHouseParser::ColumnExprExtractContext::ColumnExprExtractContext(ColumnExprContext *ctx) { copyFrom(ctx); }
-
-antlrcpp::Any ClickHouseParser::ColumnExprExtractContext::accept(tree::ParseTreeVisitor *visitor) {
-  if (auto parserVisitor = dynamic_cast<ClickHouseParserVisitor *>(visitor)) return parserVisitor->visitColumnExprExtract(this);
-  else return visitor->visitChildren(this);
-}
-//----------------- ColumnExprNegateContext ------------------------------------------------------------------
-
-tree::TerminalNode* ClickHouseParser::ColumnExprNegateContext::DASH() { return getToken(ClickHouseParser::DASH, 0); }
-ClickHouseParser::ColumnExprContext* ClickHouseParser::ColumnExprNegateContext::columnExpr() { return getRuleContext<ClickHouseParser::ColumnExprContext>(0); }
-
-ClickHouseParser::ColumnExprNegateContext::ColumnExprNegateContext(ColumnExprContext *ctx) { copyFrom(ctx); }
-
-antlrcpp::Any ClickHouseParser::ColumnExprNegateContext::accept(tree::ParseTreeVisitor *visitor) {
-  if (auto parserVisitor = dynamic_cast<ClickHouseParserVisitor *>(visitor)) return parserVisitor->visitColumnExprNegate(this);
-  else return visitor->visitChildren(this);
-}
-//----------------- ColumnExprSubqueryContext ------------------------------------------------------------------
-
-tree::TerminalNode* ClickHouseParser::ColumnExprSubqueryContext::LPAREN() { return getToken(ClickHouseParser::LPAREN, 0); }
-ClickHouseParser::SelectUnionStmtContext* ClickHouseParser::ColumnExprSubqueryContext::selectUnionStmt() { return getRuleContext<ClickHouseParser::SelectUnionStmtContext>(0); }
-tree::TerminalNode* ClickHouseParser::ColumnExprSubqueryContext::RPAREN() { return getToken(ClickHouseParser::RPAREN, 0); }
-
-ClickHouseParser::ColumnExprSubqueryContext::ColumnExprSubqueryContext(ColumnExprContext *ctx) { copyFrom(ctx); }
-
-antlrcpp::Any ClickHouseParser::ColumnExprSubqueryContext::accept(tree::ParseTreeVisitor *visitor) {
-  if (auto parserVisitor = dynamic_cast<ClickHouseParserVisitor *>(visitor)) return parserVisitor->visitColumnExprSubquery(this);
-  else return visitor->visitChildren(this);
-}
-//----------------- ColumnExprLiteralContext ------------------------------------------------------------------
-
-ClickHouseParser::LiteralContext* ClickHouseParser::ColumnExprLiteralContext::literal() { return getRuleContext<ClickHouseParser::LiteralContext>(0); }
-
-ClickHouseParser::ColumnExprLiteralContext::ColumnExprLiteralContext(ColumnExprContext *ctx) { copyFrom(ctx); }
-
-antlrcpp::Any ClickHouseParser::ColumnExprLiteralContext::accept(tree::ParseTreeVisitor *visitor) {
-  if (auto parserVisitor = dynamic_cast<ClickHouseParserVisitor *>(visitor)) return parserVisitor->visitColumnExprLiteral(this);
-  else return visitor->visitChildren(this);
-}
-//----------------- ColumnExprArrayContext ------------------------------------------------------------------
-
-tree::TerminalNode* ClickHouseParser::ColumnExprArrayContext::LBRACKET() { return getToken(ClickHouseParser::LBRACKET, 0); }
-tree::TerminalNode* ClickHouseParser::ColumnExprArrayContext::RBRACKET() { return getToken(ClickHouseParser::RBRACKET, 0); }
-ClickHouseParser::ColumnExprListContext* ClickHouseParser::ColumnExprArrayContext::columnExprList() { return getRuleContext<ClickHouseParser::ColumnExprListContext>(0); }
-
-ClickHouseParser::ColumnExprArrayContext::ColumnExprArrayContext(ColumnExprContext *ctx) { copyFrom(ctx); }
-
-antlrcpp::Any ClickHouseParser::ColumnExprArrayContext::accept(tree::ParseTreeVisitor *visitor) {
-  if (auto parserVisitor = dynamic_cast<ClickHouseParserVisitor *>(visitor)) return parserVisitor->visitColumnExprArray(this);
-  else return visitor->visitChildren(this);
-}
-//----------------- ColumnExprSubstringContext ------------------------------------------------------------------
-
-tree::TerminalNode* ClickHouseParser::ColumnExprSubstringContext::SUBSTRING() { return getToken(ClickHouseParser::SUBSTRING, 0); }
-tree::TerminalNode* ClickHouseParser::ColumnExprSubstringContext::LPAREN() { return getToken(ClickHouseParser::LPAREN, 0); }
-std::vector<ClickHouseParser::ColumnExprContext *> ClickHouseParser::ColumnExprSubstringContext::columnExpr() { return getRuleContexts<ClickHouseParser::ColumnExprContext>(); }
-ClickHouseParser::ColumnExprContext* ClickHouseParser::ColumnExprSubstringContext::columnExpr(size_t i) { return getRuleContext<ClickHouseParser::ColumnExprContext>(i); }
-tree::TerminalNode* ClickHouseParser::ColumnExprSubstringContext::FROM() { return getToken(ClickHouseParser::FROM, 0); }
-tree::TerminalNode* ClickHouseParser::ColumnExprSubstringContext::RPAREN() { return getToken(ClickHouseParser::RPAREN, 0); }
-tree::TerminalNode* ClickHouseParser::ColumnExprSubstringContext::FOR() { return getToken(ClickHouseParser::FOR, 0); }
-
-ClickHouseParser::ColumnExprSubstringContext::ColumnExprSubstringContext(ColumnExprContext *ctx) { copyFrom(ctx); }
-
-antlrcpp::Any ClickHouseParser::ColumnExprSubstringContext::accept(tree::ParseTreeVisitor *visitor) {
-  if (auto parserVisitor = dynamic_cast<ClickHouseParserVisitor *>(visitor)) return parserVisitor->visitColumnExprSubstring(this);
-  else return visitor->visitChildren(this);
-}
-//----------------- ColumnExprCastContext ------------------------------------------------------------------
-
-tree::TerminalNode* ClickHouseParser::ColumnExprCastContext::CAST() { return getToken(ClickHouseParser::CAST, 0); }
-tree::TerminalNode* ClickHouseParser::ColumnExprCastContext::LPAREN() { return getToken(ClickHouseParser::LPAREN, 0); }
-ClickHouseParser::ColumnExprContext* ClickHouseParser::ColumnExprCastContext::columnExpr() { return getRuleContext<ClickHouseParser::ColumnExprContext>(0); }
-tree::TerminalNode* ClickHouseParser::ColumnExprCastContext::AS() { return getToken(ClickHouseParser::AS, 0); }
-ClickHouseParser::ColumnTypeExprContext* ClickHouseParser::ColumnExprCastContext::columnTypeExpr() { return getRuleContext<ClickHouseParser::ColumnTypeExprContext>(0); }
-tree::TerminalNode* ClickHouseParser::ColumnExprCastContext::RPAREN() { return getToken(ClickHouseParser::RPAREN, 0); }
-
-ClickHouseParser::ColumnExprCastContext::ColumnExprCastContext(ColumnExprContext *ctx) { copyFrom(ctx); }
-
-antlrcpp::Any ClickHouseParser::ColumnExprCastContext::accept(tree::ParseTreeVisitor *visitor) {
-  if (auto parserVisitor = dynamic_cast<ClickHouseParserVisitor *>(visitor)) return parserVisitor->visitColumnExprCast(this);
-  else return visitor->visitChildren(this);
-}
-//----------------- ColumnExprOrContext ------------------------------------------------------------------
-
-std::vector<ClickHouseParser::ColumnExprContext *> ClickHouseParser::ColumnExprOrContext::columnExpr() { return getRuleContexts<ClickHouseParser::ColumnExprContext>(); }
-ClickHouseParser::ColumnExprContext* ClickHouseParser::ColumnExprOrContext::columnExpr(size_t i) { return getRuleContext<ClickHouseParser::ColumnExprContext>(i); }
-tree::TerminalNode* ClickHouseParser::ColumnExprOrContext::OR() { return getToken(ClickHouseParser::OR, 0); }
-
-ClickHouseParser::ColumnExprOrContext::ColumnExprOrContext(ColumnExprContext *ctx) { copyFrom(ctx); }
-
-antlrcpp::Any ClickHouseParser::ColumnExprOrContext::accept(tree::ParseTreeVisitor *visitor) {
-  if (auto parserVisitor = dynamic_cast<ClickHouseParserVisitor *>(visitor)) return parserVisitor->visitColumnExprOr(this);
-  else return visitor->visitChildren(this);
-}
-//----------------- ColumnExprPrecedence1Context ------------------------------------------------------------------
-
-std::vector<ClickHouseParser::ColumnExprContext *> ClickHouseParser::ColumnExprPrecedence1Context::columnExpr() { return getRuleContexts<ClickHouseParser::ColumnExprContext>(); }
-ClickHouseParser::ColumnExprContext* ClickHouseParser::ColumnExprPrecedence1Context::columnExpr(size_t i) { return getRuleContext<ClickHouseParser::ColumnExprContext>(i); }
-tree::TerminalNode* ClickHouseParser::ColumnExprPrecedence1Context::ASTERISK() { return getToken(ClickHouseParser::ASTERISK, 0); }
-tree::TerminalNode* ClickHouseParser::ColumnExprPrecedence1Context::SLASH() { return getToken(ClickHouseParser::SLASH, 0); }
-tree::TerminalNode* ClickHouseParser::ColumnExprPrecedence1Context::PERCENT() { return getToken(ClickHouseParser::PERCENT, 0); }
-
-ClickHouseParser::ColumnExprPrecedence1Context::ColumnExprPrecedence1Context(ColumnExprContext *ctx) { copyFrom(ctx); }
-
-antlrcpp::Any ClickHouseParser::ColumnExprPrecedence1Context::accept(tree::ParseTreeVisitor *visitor) {
-  if (auto parserVisitor = dynamic_cast<ClickHouseParserVisitor *>(visitor)) return parserVisitor->visitColumnExprPrecedence1(this);
-  else return visitor->visitChildren(this);
-}
-//----------------- ColumnExprPrecedence2Context ------------------------------------------------------------------
-
-std::vector<ClickHouseParser::ColumnExprContext *> ClickHouseParser::ColumnExprPrecedence2Context::columnExpr() { return getRuleContexts<ClickHouseParser::ColumnExprContext>(); }
-ClickHouseParser::ColumnExprContext* ClickHouseParser::ColumnExprPrecedence2Context::columnExpr(size_t i) { return getRuleContext<ClickHouseParser::ColumnExprContext>(i); }
-tree::TerminalNode* ClickHouseParser::ColumnExprPrecedence2Context::PLUS() { return getToken(ClickHouseParser::PLUS, 0); }
-tree::TerminalNode* ClickHouseParser::ColumnExprPrecedence2Context::DASH() { return getToken(ClickHouseParser::DASH, 0); }
-tree::TerminalNode* ClickHouseParser::ColumnExprPrecedence2Context::CONCAT() { return getToken(ClickHouseParser::CONCAT, 0); }
-
-ClickHouseParser::ColumnExprPrecedence2Context::ColumnExprPrecedence2Context(ColumnExprContext *ctx) { copyFrom(ctx); }
-
-antlrcpp::Any ClickHouseParser::ColumnExprPrecedence2Context::accept(tree::ParseTreeVisitor *visitor) {
-  if (auto parserVisitor = dynamic_cast<ClickHouseParserVisitor *>(visitor)) return parserVisitor->visitColumnExprPrecedence2(this);
-  else return visitor->visitChildren(this);
-}
-//----------------- ColumnExprPrecedence3Context ------------------------------------------------------------------
-
-std::vector<ClickHouseParser::ColumnExprContext *> ClickHouseParser::ColumnExprPrecedence3Context::columnExpr() { return getRuleContexts<ClickHouseParser::ColumnExprContext>(); }
-ClickHouseParser::ColumnExprContext* ClickHouseParser::ColumnExprPrecedence3Context::columnExpr(size_t i) { return getRuleContext<ClickHouseParser::ColumnExprContext>(i); }
-tree::TerminalNode* ClickHouseParser::ColumnExprPrecedence3Context::EQ_DOUBLE() { return getToken(ClickHouseParser::EQ_DOUBLE, 0); }
-tree::TerminalNode* ClickHouseParser::ColumnExprPrecedence3Context::EQ_SINGLE() { return getToken(ClickHouseParser::EQ_SINGLE, 0); }
-tree::TerminalNode* ClickHouseParser::ColumnExprPrecedence3Context::NOT_EQ() { return getToken(ClickHouseParser::NOT_EQ, 0); }
-tree::TerminalNode* ClickHouseParser::ColumnExprPrecedence3Context::LE() { return getToken(ClickHouseParser::LE, 0); }
-tree::TerminalNode* ClickHouseParser::ColumnExprPrecedence3Context::GE() { return getToken(ClickHouseParser::GE, 0); }
-tree::TerminalNode* ClickHouseParser::ColumnExprPrecedence3Context::LT() { return getToken(ClickHouseParser::LT, 0); }
-tree::TerminalNode* ClickHouseParser::ColumnExprPrecedence3Context::GT() { return getToken(ClickHouseParser::GT, 0); }
-tree::TerminalNode* ClickHouseParser::ColumnExprPrecedence3Context::IN() { return getToken(ClickHouseParser::IN, 0); }
-tree::TerminalNode* ClickHouseParser::ColumnExprPrecedence3Context::LIKE() { return getToken(ClickHouseParser::LIKE, 0); }
-tree::TerminalNode* ClickHouseParser::ColumnExprPrecedence3Context::ILIKE() { return getToken(ClickHouseParser::ILIKE, 0); }
-tree::TerminalNode* ClickHouseParser::ColumnExprPrecedence3Context::GLOBAL() { return getToken(ClickHouseParser::GLOBAL, 0); }
-tree::TerminalNode* ClickHouseParser::ColumnExprPrecedence3Context::NOT() { return getToken(ClickHouseParser::NOT, 0); }
-
-ClickHouseParser::ColumnExprPrecedence3Context::ColumnExprPrecedence3Context(ColumnExprContext *ctx) { copyFrom(ctx); }
-
-antlrcpp::Any ClickHouseParser::ColumnExprPrecedence3Context::accept(tree::ParseTreeVisitor *visitor) {
-  if (auto parserVisitor = dynamic_cast<ClickHouseParserVisitor *>(visitor)) return parserVisitor->visitColumnExprPrecedence3(this);
-  else return visitor->visitChildren(this);
-}
-//----------------- ColumnExprIntervalContext ------------------------------------------------------------------
-
-tree::TerminalNode* ClickHouseParser::ColumnExprIntervalContext::INTERVAL() { return getToken(ClickHouseParser::INTERVAL, 0); }
-ClickHouseParser::ColumnExprContext* ClickHouseParser::ColumnExprIntervalContext::columnExpr() { return getRuleContext<ClickHouseParser::ColumnExprContext>(0); }
-ClickHouseParser::IntervalContext* ClickHouseParser::ColumnExprIntervalContext::interval() { return getRuleContext<ClickHouseParser::IntervalContext>(0); }
-
-ClickHouseParser::ColumnExprIntervalContext::ColumnExprIntervalContext(ColumnExprContext *ctx) { copyFrom(ctx); }
-
-antlrcpp::Any ClickHouseParser::ColumnExprIntervalContext::accept(tree::ParseTreeVisitor *visitor) {
-  if (auto parserVisitor = dynamic_cast<ClickHouseParserVisitor *>(visitor)) return parserVisitor->visitColumnExprInterval(this);
-  else return visitor->visitChildren(this);
-}
-//----------------- ColumnExprIsNullContext ------------------------------------------------------------------
-
-ClickHouseParser::ColumnExprContext* ClickHouseParser::ColumnExprIsNullContext::columnExpr() { return getRuleContext<ClickHouseParser::ColumnExprContext>(0); }
-tree::TerminalNode* ClickHouseParser::ColumnExprIsNullContext::IS() { return getToken(ClickHouseParser::IS, 0); }
-tree::TerminalNode* ClickHouseParser::ColumnExprIsNullContext::NULL_SQL() { return getToken(ClickHouseParser::NULL_SQL, 0); }
-tree::TerminalNode* ClickHouseParser::ColumnExprIsNullContext::NOT() { return getToken(ClickHouseParser::NOT, 0); }
-
-ClickHouseParser::ColumnExprIsNullContext::ColumnExprIsNullContext(ColumnExprContext *ctx) { copyFrom(ctx); }
-
-antlrcpp::Any ClickHouseParser::ColumnExprIsNullContext::accept(tree::ParseTreeVisitor *visitor) {
-  if (auto parserVisitor = dynamic_cast<ClickHouseParserVisitor *>(visitor)) return parserVisitor->visitColumnExprIsNull(this);
-  else return visitor->visitChildren(this);
-}
-//----------------- ColumnExprTrimContext ------------------------------------------------------------------
-
-tree::TerminalNode* ClickHouseParser::ColumnExprTrimContext::TRIM() { return getToken(ClickHouseParser::TRIM, 0); }
-tree::TerminalNode* ClickHouseParser::ColumnExprTrimContext::LPAREN() { return getToken(ClickHouseParser::LPAREN, 0); }
-tree::TerminalNode* ClickHouseParser::ColumnExprTrimContext::STRING_LITERAL() { return getToken(ClickHouseParser::STRING_LITERAL, 0); }
-tree::TerminalNode* ClickHouseParser::ColumnExprTrimContext::FROM() { return getToken(ClickHouseParser::FROM, 0); }
-ClickHouseParser::ColumnExprContext* ClickHouseParser::ColumnExprTrimContext::columnExpr() { return getRuleContext<ClickHouseParser::ColumnExprContext>(0); }
-tree::TerminalNode* ClickHouseParser::ColumnExprTrimContext::RPAREN() { return getToken(ClickHouseParser::RPAREN, 0); }
-tree::TerminalNode* ClickHouseParser::ColumnExprTrimContext::BOTH() { return getToken(ClickHouseParser::BOTH, 0); }
-tree::TerminalNode* ClickHouseParser::ColumnExprTrimContext::LEADING() { return getToken(ClickHouseParser::LEADING, 0); }
-tree::TerminalNode* ClickHouseParser::ColumnExprTrimContext::TRAILING() { return getToken(ClickHouseParser::TRAILING, 0); }
-
-ClickHouseParser::ColumnExprTrimContext::ColumnExprTrimContext(ColumnExprContext *ctx) { copyFrom(ctx); }
-
-antlrcpp::Any ClickHouseParser::ColumnExprTrimContext::accept(tree::ParseTreeVisitor *visitor) {
-  if (auto parserVisitor = dynamic_cast<ClickHouseParserVisitor *>(visitor)) return parserVisitor->visitColumnExprTrim(this);
-  else return visitor->visitChildren(this);
-}
-//----------------- ColumnExprTupleContext ------------------------------------------------------------------
-
-tree::TerminalNode* ClickHouseParser::ColumnExprTupleContext::LPAREN() { return getToken(ClickHouseParser::LPAREN, 0); }
-ClickHouseParser::ColumnExprListContext* ClickHouseParser::ColumnExprTupleContext::columnExprList() { return getRuleContext<ClickHouseParser::ColumnExprListContext>(0); }
-tree::TerminalNode* ClickHouseParser::ColumnExprTupleContext::RPAREN() { return getToken(ClickHouseParser::RPAREN, 0); }
-
-ClickHouseParser::ColumnExprTupleContext::ColumnExprTupleContext(ColumnExprContext *ctx) { copyFrom(ctx); }
-
-antlrcpp::Any ClickHouseParser::ColumnExprTupleContext::accept(tree::ParseTreeVisitor *visitor) {
-  if (auto parserVisitor = dynamic_cast<ClickHouseParserVisitor *>(visitor)) return parserVisitor->visitColumnExprTuple(this);
-  else return visitor->visitChildren(this);
-}
-//----------------- ColumnExprArrayAccessContext ------------------------------------------------------------------
-
-std::vector<ClickHouseParser::ColumnExprContext *> ClickHouseParser::ColumnExprArrayAccessContext::columnExpr() { return getRuleContexts<ClickHouseParser::ColumnExprContext>(); }
-ClickHouseParser::ColumnExprContext* ClickHouseParser::ColumnExprArrayAccessContext::columnExpr(size_t i) { return getRuleContext<ClickHouseParser::ColumnExprContext>(i); }
-tree::TerminalNode* ClickHouseParser::ColumnExprArrayAccessContext::LBRACKET() { return getToken(ClickHouseParser::LBRACKET, 0); }
-tree::TerminalNode* ClickHouseParser::ColumnExprArrayAccessContext::RBRACKET() { return getToken(ClickHouseParser::RBRACKET, 0); }
-
-ClickHouseParser::ColumnExprArrayAccessContext::ColumnExprArrayAccessContext(ColumnExprContext *ctx) { copyFrom(ctx); }
-
-antlrcpp::Any ClickHouseParser::ColumnExprArrayAccessContext::accept(tree::ParseTreeVisitor *visitor) {
-  if (auto parserVisitor = dynamic_cast<ClickHouseParserVisitor *>(visitor)) return parserVisitor->visitColumnExprArrayAccess(this);
-  else return visitor->visitChildren(this);
-}
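Both rule functions in this hunk (columnsExpr above, columnExpr below) pair enterRule/enterRecursionRule with `auto onExit = finally([=] { ... });` so the matching exit call runs on every path, including the RecognitionException one. The real helper ships with the ANTLR C++ runtime; the sketch below is a simplified stand-in, shown only to illustrate the RAII idiom:

    #include <cstdio>
    #include <stdexcept>
    #include <utility>

    // Simplified stand-in for the runtime's finally(): run a callback when the
    // guard leaves scope, whether by normal return or by exception.
    template <typename F>
    struct FinalAction {
        F act;
        ~FinalAction() { act(); }
    };

    template <typename F>
    FinalAction<F> finally(F f) { return FinalAction<F>{std::move(f)}; }  // relies on C++17 copy elision

    void enterRule() { std::puts("enterRule"); }
    void exitRule()  { std::puts("exitRule"); }

    void parseRule(bool fail)
    {
        enterRule();
        auto onExit = finally([] { exitRule(); });  // exitRule() runs on both paths below
        if (fail)
            throw std::runtime_error("recognition error");
        std::puts("matched");
    }

    int main()
    {
        parseRule(false);
        try { parseRule(true); } catch (const std::exception &) {}
        return 0;
    }

This is why the generated try/catch can recover and return `_localctx` without ever calling exitRule explicitly.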
-//----------------- ColumnExprBetweenContext ------------------------------------------------------------------
-
-std::vector<ClickHouseParser::ColumnExprContext *> ClickHouseParser::ColumnExprBetweenContext::columnExpr() { return getRuleContexts<ClickHouseParser::ColumnExprContext>(); }
-ClickHouseParser::ColumnExprContext* ClickHouseParser::ColumnExprBetweenContext::columnExpr(size_t i) { return getRuleContext<ClickHouseParser::ColumnExprContext>(i); }
-tree::TerminalNode* ClickHouseParser::ColumnExprBetweenContext::BETWEEN() { return getToken(ClickHouseParser::BETWEEN, 0); }
-tree::TerminalNode* ClickHouseParser::ColumnExprBetweenContext::AND() { return getToken(ClickHouseParser::AND, 0); }
-tree::TerminalNode* ClickHouseParser::ColumnExprBetweenContext::NOT() { return getToken(ClickHouseParser::NOT, 0); }
-
-ClickHouseParser::ColumnExprBetweenContext::ColumnExprBetweenContext(ColumnExprContext *ctx) { copyFrom(ctx); }
-
-antlrcpp::Any ClickHouseParser::ColumnExprBetweenContext::accept(tree::ParseTreeVisitor *visitor) {
-  if (auto parserVisitor = dynamic_cast<ClickHouseParserVisitor *>(visitor)) return parserVisitor->visitColumnExprBetween(this);
-  else return visitor->visitChildren(this);
-}
-//----------------- ColumnExprParensContext ------------------------------------------------------------------
-
-tree::TerminalNode* ClickHouseParser::ColumnExprParensContext::LPAREN() { return getToken(ClickHouseParser::LPAREN, 0); }
-ClickHouseParser::ColumnExprContext* ClickHouseParser::ColumnExprParensContext::columnExpr() { return getRuleContext<ClickHouseParser::ColumnExprContext>(0); }
-tree::TerminalNode* ClickHouseParser::ColumnExprParensContext::RPAREN() { return getToken(ClickHouseParser::RPAREN, 0); }
-
-ClickHouseParser::ColumnExprParensContext::ColumnExprParensContext(ColumnExprContext *ctx) { copyFrom(ctx); }
-
-antlrcpp::Any ClickHouseParser::ColumnExprParensContext::accept(tree::ParseTreeVisitor *visitor) {
-  if (auto parserVisitor = dynamic_cast<ClickHouseParserVisitor *>(visitor)) return parserVisitor->visitColumnExprParens(this);
-  else return visitor->visitChildren(this);
-}
-//----------------- ColumnExprTimestampContext ------------------------------------------------------------------
-
-tree::TerminalNode* ClickHouseParser::ColumnExprTimestampContext::TIMESTAMP() { return getToken(ClickHouseParser::TIMESTAMP, 0); }
-tree::TerminalNode* ClickHouseParser::ColumnExprTimestampContext::STRING_LITERAL() { return getToken(ClickHouseParser::STRING_LITERAL, 0); }
-
-ClickHouseParser::ColumnExprTimestampContext::ColumnExprTimestampContext(ColumnExprContext *ctx) { copyFrom(ctx); }
-
-antlrcpp::Any ClickHouseParser::ColumnExprTimestampContext::accept(tree::ParseTreeVisitor *visitor) {
-  if (auto parserVisitor = dynamic_cast<ClickHouseParserVisitor *>(visitor)) return parserVisitor->visitColumnExprTimestamp(this);
-  else return visitor->visitChildren(this);
-}
-//----------------- ColumnExprAndContext ------------------------------------------------------------------
-
-std::vector<ClickHouseParser::ColumnExprContext *> ClickHouseParser::ColumnExprAndContext::columnExpr() { return getRuleContexts<ClickHouseParser::ColumnExprContext>(); }
-ClickHouseParser::ColumnExprContext* ClickHouseParser::ColumnExprAndContext::columnExpr(size_t i) { return getRuleContext<ClickHouseParser::ColumnExprContext>(i); }
-tree::TerminalNode* ClickHouseParser::ColumnExprAndContext::AND() { return getToken(ClickHouseParser::AND, 0); }
-
-ClickHouseParser::ColumnExprAndContext::ColumnExprAndContext(ColumnExprContext *ctx) { copyFrom(ctx); }
-
-antlrcpp::Any ClickHouseParser::ColumnExprAndContext::accept(tree::ParseTreeVisitor *visitor) {
-  if (auto parserVisitor = dynamic_cast<ClickHouseParserVisitor *>(visitor)) return parserVisitor->visitColumnExprAnd(this);
-  else return visitor->visitChildren(this);
-}
-//----------------- ColumnExprTupleAccessContext ------------------------------------------------------------------
-
-ClickHouseParser::ColumnExprContext* ClickHouseParser::ColumnExprTupleAccessContext::columnExpr() { return getRuleContext<ClickHouseParser::ColumnExprContext>(0); }
-tree::TerminalNode* ClickHouseParser::ColumnExprTupleAccessContext::DOT() { return getToken(ClickHouseParser::DOT, 0); }
-tree::TerminalNode* ClickHouseParser::ColumnExprTupleAccessContext::DECIMAL_LITERAL() { return getToken(ClickHouseParser::DECIMAL_LITERAL, 0); }
-
-ClickHouseParser::ColumnExprTupleAccessContext::ColumnExprTupleAccessContext(ColumnExprContext *ctx) { copyFrom(ctx); }
-
-antlrcpp::Any ClickHouseParser::ColumnExprTupleAccessContext::accept(tree::ParseTreeVisitor *visitor) {
-  if (auto parserVisitor = dynamic_cast<ClickHouseParserVisitor *>(visitor)) return parserVisitor->visitColumnExprTupleAccess(this);
-  else return visitor->visitChildren(this);
-}
-//----------------- ColumnExprCaseContext ------------------------------------------------------------------
-
-tree::TerminalNode* ClickHouseParser::ColumnExprCaseContext::CASE() { return getToken(ClickHouseParser::CASE, 0); }
-tree::TerminalNode* ClickHouseParser::ColumnExprCaseContext::END() { return getToken(ClickHouseParser::END, 0); }
-std::vector<ClickHouseParser::ColumnExprContext *> ClickHouseParser::ColumnExprCaseContext::columnExpr() { return getRuleContexts<ClickHouseParser::ColumnExprContext>(); }
-ClickHouseParser::ColumnExprContext* ClickHouseParser::ColumnExprCaseContext::columnExpr(size_t i) { return getRuleContext<ClickHouseParser::ColumnExprContext>(i); }
-std::vector<tree::TerminalNode *> ClickHouseParser::ColumnExprCaseContext::WHEN() { return getTokens(ClickHouseParser::WHEN); }
-tree::TerminalNode* ClickHouseParser::ColumnExprCaseContext::WHEN(size_t i) { return getToken(ClickHouseParser::WHEN, i); }
-std::vector<tree::TerminalNode *> ClickHouseParser::ColumnExprCaseContext::THEN() { return getTokens(ClickHouseParser::THEN); }
-tree::TerminalNode* ClickHouseParser::ColumnExprCaseContext::THEN(size_t i) { return getToken(ClickHouseParser::THEN, i); }
-tree::TerminalNode* ClickHouseParser::ColumnExprCaseContext::ELSE() { return getToken(ClickHouseParser::ELSE, 0); }
-
-ClickHouseParser::ColumnExprCaseContext::ColumnExprCaseContext(ColumnExprContext *ctx) { copyFrom(ctx); }
-
-antlrcpp::Any ClickHouseParser::ColumnExprCaseContext::accept(tree::ParseTreeVisitor *visitor) {
-  if (auto parserVisitor = dynamic_cast<ClickHouseParserVisitor *>(visitor)) return parserVisitor->visitColumnExprCase(this);
-  else return visitor->visitChildren(this);
-}
-//----------------- ColumnExprDateContext ------------------------------------------------------------------
-
-tree::TerminalNode* ClickHouseParser::ColumnExprDateContext::DATE() { return getToken(ClickHouseParser::DATE, 0); }
-tree::TerminalNode* ClickHouseParser::ColumnExprDateContext::STRING_LITERAL() { return getToken(ClickHouseParser::STRING_LITERAL, 0); }
-
-ClickHouseParser::ColumnExprDateContext::ColumnExprDateContext(ColumnExprContext *ctx) { copyFrom(ctx); }
-
-antlrcpp::Any ClickHouseParser::ColumnExprDateContext::accept(tree::ParseTreeVisitor *visitor) {
-  if (auto parserVisitor = dynamic_cast<ClickHouseParserVisitor *>(visitor)) return parserVisitor->visitColumnExprDate(this);
-  else return visitor->visitChildren(this);
-}
-//----------------- ColumnExprNotContext ------------------------------------------------------------------
-
-tree::TerminalNode* ClickHouseParser::ColumnExprNotContext::NOT() { return getToken(ClickHouseParser::NOT, 0); }
-ClickHouseParser::ColumnExprContext* ClickHouseParser::ColumnExprNotContext::columnExpr() { return getRuleContext<ClickHouseParser::ColumnExprContext>(0); }
-
-ClickHouseParser::ColumnExprNotContext::ColumnExprNotContext(ColumnExprContext *ctx) { copyFrom(ctx); }
-
-antlrcpp::Any ClickHouseParser::ColumnExprNotContext::accept(tree::ParseTreeVisitor *visitor) {
-  if (auto parserVisitor = dynamic_cast<ClickHouseParserVisitor *>(visitor)) return parserVisitor->visitColumnExprNot(this);
-  else return visitor->visitChildren(this);
-}
-//----------------- ColumnExprIdentifierContext ------------------------------------------------------------------
-
-ClickHouseParser::ColumnIdentifierContext* ClickHouseParser::ColumnExprIdentifierContext::columnIdentifier() { return getRuleContext<ClickHouseParser::ColumnIdentifierContext>(0); }
-
-ClickHouseParser::ColumnExprIdentifierContext::ColumnExprIdentifierContext(ColumnExprContext *ctx) { copyFrom(ctx); }
-
-antlrcpp::Any ClickHouseParser::ColumnExprIdentifierContext::accept(tree::ParseTreeVisitor *visitor) {
-  if (auto parserVisitor = dynamic_cast<ClickHouseParserVisitor *>(visitor)) return parserVisitor->visitColumnExprIdentifier(this);
-  else return visitor->visitChildren(this);
-}
-//----------------- ColumnExprFunctionContext ------------------------------------------------------------------
-
-ClickHouseParser::IdentifierContext* ClickHouseParser::ColumnExprFunctionContext::identifier() { return getRuleContext<ClickHouseParser::IdentifierContext>(0); }
-std::vector<tree::TerminalNode *> ClickHouseParser::ColumnExprFunctionContext::LPAREN() { return getTokens(ClickHouseParser::LPAREN); }
-tree::TerminalNode* ClickHouseParser::ColumnExprFunctionContext::LPAREN(size_t i) { return getToken(ClickHouseParser::LPAREN, i); }
-std::vector<tree::TerminalNode *> ClickHouseParser::ColumnExprFunctionContext::RPAREN() { return getTokens(ClickHouseParser::RPAREN); }
-tree::TerminalNode* ClickHouseParser::ColumnExprFunctionContext::RPAREN(size_t i) { return getToken(ClickHouseParser::RPAREN, i); }
-tree::TerminalNode* ClickHouseParser::ColumnExprFunctionContext::DISTINCT() { return getToken(ClickHouseParser::DISTINCT, 0); }
-ClickHouseParser::ColumnArgListContext* ClickHouseParser::ColumnExprFunctionContext::columnArgList() { return getRuleContext<ClickHouseParser::ColumnArgListContext>(0); }
-ClickHouseParser::ColumnExprListContext* ClickHouseParser::ColumnExprFunctionContext::columnExprList() { return getRuleContext<ClickHouseParser::ColumnExprListContext>(0); }
-
-ClickHouseParser::ColumnExprFunctionContext::ColumnExprFunctionContext(ColumnExprContext *ctx) { copyFrom(ctx); }
-
-antlrcpp::Any ClickHouseParser::ColumnExprFunctionContext::accept(tree::ParseTreeVisitor *visitor) {
-  if (auto parserVisitor = dynamic_cast<ClickHouseParserVisitor *>(visitor)) return parserVisitor->visitColumnExprFunction(this);
-  else return visitor->visitChildren(this);
-}
-//----------------- ColumnExprAsteriskContext ------------------------------------------------------------------
-
-tree::TerminalNode* ClickHouseParser::ColumnExprAsteriskContext::ASTERISK() { return getToken(ClickHouseParser::ASTERISK, 0); }
-ClickHouseParser::TableIdentifierContext* ClickHouseParser::ColumnExprAsteriskContext::tableIdentifier() { return getRuleContext<ClickHouseParser::TableIdentifierContext>(0); }
-tree::TerminalNode* ClickHouseParser::ColumnExprAsteriskContext::DOT() { return getToken(ClickHouseParser::DOT, 0); }
-
-ClickHouseParser::ColumnExprAsteriskContext::ColumnExprAsteriskContext(ColumnExprContext *ctx) { copyFrom(ctx); }
-
-antlrcpp::Any ClickHouseParser::ColumnExprAsteriskContext::accept(tree::ParseTreeVisitor *visitor) {
-  if (auto parserVisitor = dynamic_cast<ClickHouseParserVisitor *>(visitor)) return parserVisitor->visitColumnExprAsterisk(this);
-  else return visitor->visitChildren(this);
-}
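The columnExpr overload that follows is ANTLR's left-recursion elimination: the rule takes a minimum precedence, parses one primary alternative (note the unary cases re-entering with columnExpr(17) and columnExpr(12)), and the runtime then folds binary operators whose precedence is still admissible. A compact hand-written precedence climber over a toy grammar, to show the scheme itself rather than the generated algorithm:

    #include <cstdio>
    #include <string>

    // Toy precedence climber over single-digit operands: '+' binds at 1, '*' at 2.
    // Same shape as the generated columnExpr(int precedence): parse a primary,
    // then keep folding operators whose precedence is >= the floor passed in.
    struct Parser {
        std::string s;
        size_t pos = 0;

        int primary() { return s[pos++] - '0'; }

        static int prec(char op) { return op == '*' ? 2 : op == '+' ? 1 : 0; }

        int expr(int minPrec)
        {
            int lhs = primary();
            while (pos < s.size() && prec(s[pos]) > 0 && prec(s[pos]) >= minPrec) {
                char op = s[pos++];
                int rhs = expr(prec(op) + 1);  // '+1' makes the operators left-associative
                lhs = (op == '*') ? lhs * rhs : lhs + rhs;
            }
            return lhs;
        }
    };

    int main()
    {
        Parser p{"1+2*3+4"};
        std::printf("%d\n", p.expr(0));  // prints 11
        return 0;
    }

In the generated code the "loop on admissible operators" half lives in the recursion-context machinery (enterRecursionRule/unrollRecursionContexts) rather than in a visible while loop over characters.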
-
-ClickHouseParser::ColumnExprContext* ClickHouseParser::columnExpr() {
-  return columnExpr(0);
-}
-
-ClickHouseParser::ColumnExprContext* ClickHouseParser::columnExpr(int precedence) {
-  ParserRuleContext *parentContext = _ctx;
-  size_t parentState = getState();
-  ClickHouseParser::ColumnExprContext *_localctx = _tracker.createInstance<ColumnExprContext>(_ctx, parentState);
-  ClickHouseParser::ColumnExprContext *previousContext = _localctx;
-  (void)previousContext; // Silence compiler, in case the context is not used by generated code.
-  size_t startState = 174;
-  enterRecursionRule(_localctx, 174, ClickHouseParser::RuleColumnExpr, precedence);
-
-  size_t _la = 0;
-
-  auto onExit = finally([=] { unrollRecursionContexts(parentContext); });
-  try {
-    size_t alt;
-    enterOuterAlt(_localctx, 1);
-    setState(1693);
-    _errHandler->sync(this);
-    switch (getInterpreter()->adaptivePredict(_input, 225, _ctx)) {
-      case 1: {  // CASE [expr] WHEN ... THEN ... [ELSE expr] END
-        _localctx = _tracker.createInstance<ColumnExprCaseContext>(_localctx);
-        _ctx = _localctx;
-        previousContext = _localctx;
-
-        setState(1587);
-        match(ClickHouseParser::CASE);
-        setState(1589);
-        _errHandler->sync(this);
-        switch (getInterpreter()->adaptivePredict(_input, 215, _ctx)) {
-          case 1: {
-            setState(1588);
-            columnExpr(0);
-            break;
-          }
-        }
-        setState(1596);
-        _errHandler->sync(this);
-        _la = _input->LA(1);
-        do {
-          setState(1591);
-          match(ClickHouseParser::WHEN);
-          setState(1592);
-          columnExpr(0);
-          setState(1593);
-          match(ClickHouseParser::THEN);
-          setState(1594);
-          columnExpr(0);
-          setState(1598);
-          _errHandler->sync(this);
-          _la = _input->LA(1);
-        } while (_la == ClickHouseParser::WHEN);
-        setState(1602);
-        _errHandler->sync(this);
-        _la = _input->LA(1);
-        if (_la == ClickHouseParser::ELSE) {
-          setState(1600);
-          match(ClickHouseParser::ELSE);
-          setState(1601);
-          columnExpr(0);
-        }
-        setState(1604);
-        match(ClickHouseParser::END);
-        break;
-      }
-      case 2: {  // CAST(expr AS type)
-        _localctx = _tracker.createInstance<ColumnExprCastContext>(_localctx);
-        _ctx = _localctx;
-        previousContext = _localctx;
-        setState(1606);
-        match(ClickHouseParser::CAST);
-        setState(1607);
-        match(ClickHouseParser::LPAREN);
-        setState(1608);
-        columnExpr(0);
-        setState(1609);
-        match(ClickHouseParser::AS);
-        setState(1610);
-        columnTypeExpr();
-        setState(1611);
-        match(ClickHouseParser::RPAREN);
-        break;
-      }
-      case 3: {  // DATE 'literal'
-        _localctx = _tracker.createInstance<ColumnExprDateContext>(_localctx);
-        _ctx = _localctx;
-        previousContext = _localctx;
-        setState(1613);
-        match(ClickHouseParser::DATE);
-        setState(1614);
-        match(ClickHouseParser::STRING_LITERAL);
-        break;
-      }
-      case 4: {  // EXTRACT(interval FROM expr)
-        _localctx = _tracker.createInstance<ColumnExprExtractContext>(_localctx);
-        _ctx = _localctx;
-        previousContext = _localctx;
-        setState(1615);
-        match(ClickHouseParser::EXTRACT);
-        setState(1616);
-        match(ClickHouseParser::LPAREN);
-        setState(1617);
-        interval();
-        setState(1618);
-        match(ClickHouseParser::FROM);
-        setState(1619);
-        columnExpr(0);
-        setState(1620);
-        match(ClickHouseParser::RPAREN);
-        break;
-      }
-      case 5: {  // INTERVAL expr unit
-        _localctx = _tracker.createInstance<ColumnExprIntervalContext>(_localctx);
-        _ctx = _localctx;
-        previousContext = _localctx;
-        setState(1622);
-        match(ClickHouseParser::INTERVAL);
-        setState(1623);
-        columnExpr(0);
-        setState(1624);
-        interval();
-        break;
-      }
-      case 6: {  // SUBSTRING(expr FROM expr [FOR expr])
-        _localctx = _tracker.createInstance<ColumnExprSubstringContext>(_localctx);
-        _ctx = _localctx;
-        previousContext = _localctx;
-        setState(1626);
-        match(ClickHouseParser::SUBSTRING);
-        setState(1627);
-        match(ClickHouseParser::LPAREN);
-        setState(1628);
-        columnExpr(0);
-        setState(1629);
-        match(ClickHouseParser::FROM);
-        setState(1630);
-        columnExpr(0);
-        setState(1633);
-        _errHandler->sync(this);
-        _la = _input->LA(1);
-        if (_la == ClickHouseParser::FOR) {
-          setState(1631);
-          match(ClickHouseParser::FOR);
-          setState(1632);
-          columnExpr(0);
-        }
-        setState(1635);
-        match(ClickHouseParser::RPAREN);
-        break;
-      }
-      case 7: {  // TIMESTAMP 'literal'
-        _localctx = _tracker.createInstance<ColumnExprTimestampContext>(_localctx);
-        _ctx = _localctx;
-        previousContext = _localctx;
-        setState(1637);
-        match(ClickHouseParser::TIMESTAMP);
-        setState(1638);
-        match(ClickHouseParser::STRING_LITERAL);
-        break;
-      }
-      case 8: {  // TRIM(BOTH|LEADING|TRAILING 'literal' FROM expr)
-        _localctx = _tracker.createInstance<ColumnExprTrimContext>(_localctx);
-        _ctx = _localctx;
-        previousContext = _localctx;
-        setState(1639);
-        match(ClickHouseParser::TRIM);
-        setState(1640);
-        match(ClickHouseParser::LPAREN);
-        setState(1641);
-        _la = _input->LA(1);
-        if (!(_la == ClickHouseParser::BOTH || _la == ClickHouseParser::LEADING || _la == ClickHouseParser::TRAILING)) {
-          _errHandler->recoverInline(this);
-        }
-        else {
-          _errHandler->reportMatch(this);
-          consume();
-        }
-        setState(1642);
-        match(ClickHouseParser::STRING_LITERAL);
-        setState(1643);
-        match(ClickHouseParser::FROM);
-        setState(1644);
-        columnExpr(0);
-        setState(1645);
-        match(ClickHouseParser::RPAREN);
-        break;
-      }
-      case 9: {  // identifier ['(' columnExprList ')'] '(' [DISTINCT] [columnArgList] ')'
-        _localctx = _tracker.createInstance<ColumnExprFunctionContext>(_localctx);
-        _ctx = _localctx;
-        previousContext = _localctx;
-        setState(1647);
-        identifier();
-        setState(1653);
-        _errHandler->sync(this);
-        switch (getInterpreter()->adaptivePredict(_input, 220, _ctx)) {
-          case 1: {
-            setState(1648);
-            match(ClickHouseParser::LPAREN);
-            setState(1650);
-            _errHandler->sync(this);
-            _la = _input->LA(1);
-            if ((((_la & ~ 0x3fULL) == 0) &&
-              ((1ULL << _la) & ((1ULL << ClickHouseParser::AFTER) | (1ULL << ClickHouseParser::ALIAS) | (1ULL << ClickHouseParser::ALL) | (1ULL << ClickHouseParser::ALTER) | (1ULL << ClickHouseParser::AND) | (1ULL << ClickHouseParser::ANTI) | (1ULL << ClickHouseParser::ANY) | (1ULL << ClickHouseParser::ARRAY)
-              | (1ULL << ClickHouseParser::AS) | (1ULL << ClickHouseParser::ASCENDING) | (1ULL << ClickHouseParser::ASOF) | (1ULL << ClickHouseParser::AST) | (1ULL << ClickHouseParser::ASYNC) | (1ULL << ClickHouseParser::ATTACH) | (1ULL << ClickHouseParser::BETWEEN) | (1ULL << ClickHouseParser::BOTH)
-              | (1ULL << ClickHouseParser::BY) | (1ULL << ClickHouseParser::CASE) | (1ULL << ClickHouseParser::CAST) | (1ULL << ClickHouseParser::CHECK) | (1ULL << ClickHouseParser::CLEAR) | (1ULL << ClickHouseParser::CLUSTER) | (1ULL << ClickHouseParser::CODEC) | (1ULL << ClickHouseParser::COLLATE)
-              | (1ULL << ClickHouseParser::COLUMN) | (1ULL << ClickHouseParser::COMMENT) | (1ULL << ClickHouseParser::CONSTRAINT) | (1ULL << ClickHouseParser::CREATE) | (1ULL << ClickHouseParser::CROSS) | (1ULL << ClickHouseParser::CUBE) | (1ULL << ClickHouseParser::DATABASE) | (1ULL << ClickHouseParser::DATABASES)
-              | (1ULL << ClickHouseParser::DATE) | (1ULL << ClickHouseParser::DAY) | (1ULL << ClickHouseParser::DEDUPLICATE) | (1ULL << ClickHouseParser::DEFAULT) | (1ULL << ClickHouseParser::DELAY) | (1ULL << ClickHouseParser::DELETE) | (1ULL << ClickHouseParser::DESC) | (1ULL << ClickHouseParser::DESCENDING)
-              | (1ULL << ClickHouseParser::DESCRIBE) | (1ULL << ClickHouseParser::DETACH) | (1ULL << ClickHouseParser::DICTIONARIES) | (1ULL << ClickHouseParser::DICTIONARY) | (1ULL << ClickHouseParser::DISK) | (1ULL << ClickHouseParser::DISTINCT) | (1ULL << ClickHouseParser::DISTRIBUTED) | (1ULL << ClickHouseParser::DROP)
-              | (1ULL << ClickHouseParser::ELSE) | (1ULL << ClickHouseParser::END) | (1ULL << ClickHouseParser::ENGINE) | (1ULL << ClickHouseParser::EVENTS) | (1ULL << ClickHouseParser::EXISTS) | (1ULL << ClickHouseParser::EXPLAIN) | (1ULL << ClickHouseParser::EXPRESSION) | (1ULL << ClickHouseParser::EXTRACT)
-              | (1ULL << ClickHouseParser::FETCHES) | (1ULL << ClickHouseParser::FINAL) | (1ULL << ClickHouseParser::FIRST) | (1ULL << ClickHouseParser::FLUSH) | (1ULL << ClickHouseParser::FOR) | (1ULL << ClickHouseParser::FORMAT))) != 0) || ((((_la - 64) & ~ 0x3fULL) == 0) &&
-              ((1ULL << (_la - 64)) & ((1ULL << (ClickHouseParser::FREEZE - 64)) | (1ULL << (ClickHouseParser::FROM - 64)) | (1ULL << (ClickHouseParser::FULL - 64)) | (1ULL << (ClickHouseParser::FUNCTION - 64)) | (1ULL << (ClickHouseParser::GLOBAL - 64)) | (1ULL << (ClickHouseParser::GRANULARITY - 64)) | (1ULL << (ClickHouseParser::GROUP - 64)) | (1ULL << (ClickHouseParser::HAVING - 64))
-              | (1ULL << (ClickHouseParser::HIERARCHICAL - 64)) | (1ULL << (ClickHouseParser::HOUR - 64)) | (1ULL << (ClickHouseParser::ID - 64)) | (1ULL << (ClickHouseParser::IF - 64)) | (1ULL << (ClickHouseParser::ILIKE - 64)) | (1ULL << (ClickHouseParser::IN - 64)) | (1ULL << (ClickHouseParser::INDEX - 64)) | (1ULL << (ClickHouseParser::INF - 64))
-              | (1ULL << (ClickHouseParser::INJECTIVE - 64)) | (1ULL << (ClickHouseParser::INNER - 64)) | (1ULL << (ClickHouseParser::INSERT - 64)) | (1ULL << (ClickHouseParser::INTERVAL - 64)) | (1ULL << (ClickHouseParser::INTO - 64)) | (1ULL << (ClickHouseParser::IS - 64)) | (1ULL << (ClickHouseParser::IS_OBJECT_ID - 64)) | (1ULL << (ClickHouseParser::JOIN - 64))
-              | (1ULL << (ClickHouseParser::KEY - 64)) | (1ULL << (ClickHouseParser::KILL - 64)) | (1ULL << (ClickHouseParser::LAST - 64)) | (1ULL << (ClickHouseParser::LAYOUT - 64)) | (1ULL << (ClickHouseParser::LEADING - 64)) | (1ULL << (ClickHouseParser::LEFT - 64)) | (1ULL << (ClickHouseParser::LIFETIME - 64)) | (1ULL << (ClickHouseParser::LIKE - 64))
-              | (1ULL << (ClickHouseParser::LIMIT - 64)) | (1ULL << (ClickHouseParser::LIVE - 64)) | (1ULL << (ClickHouseParser::LOCAL - 64)) | (1ULL << (ClickHouseParser::LOGS - 64)) | (1ULL << (ClickHouseParser::MATERIALIZE - 64)) | (1ULL << (ClickHouseParser::MATERIALIZED - 64)) | (1ULL << (ClickHouseParser::MAX - 64)) | (1ULL << (ClickHouseParser::MERGES - 64))
-              | (1ULL << (ClickHouseParser::MIN - 64)) | (1ULL << (ClickHouseParser::MINUTE - 64)) | (1ULL << (ClickHouseParser::MODIFY - 64)) | (1ULL << (ClickHouseParser::MONTH - 64)) | (1ULL << (ClickHouseParser::MOVE - 64)) | (1ULL << (ClickHouseParser::MUTATION - 64)) | (1ULL << (ClickHouseParser::NAN_SQL - 64)) | (1ULL << (ClickHouseParser::NO - 64))
-              | (1ULL << (ClickHouseParser::NOT - 64)) | (1ULL << (ClickHouseParser::NULL_SQL - 64)) | (1ULL << (ClickHouseParser::NULLS - 64)) | (1ULL << (ClickHouseParser::OFFSET - 64)) | (1ULL << (ClickHouseParser::ON - 64)) | (1ULL << (ClickHouseParser::OPTIMIZE - 64)) | (1ULL << (ClickHouseParser::OR - 64)) | (1ULL << (ClickHouseParser::ORDER - 64))
-              | (1ULL << (ClickHouseParser::OUTER - 64)) | (1ULL << (ClickHouseParser::OUTFILE - 64)) | (1ULL << (ClickHouseParser::PARTITION - 64)) | (1ULL << (ClickHouseParser::POPULATE - 64)) | (1ULL << (ClickHouseParser::PREWHERE - 64)) | (1ULL << (ClickHouseParser::PRIMARY - 64)) | (1ULL << (ClickHouseParser::QUARTER - 64)))) != 0) || ((((_la - 128) & ~ 0x3fULL) == 0) &&
-              ((1ULL << (_la - 128)) & ((1ULL << (ClickHouseParser::RANGE - 128)) | (1ULL << (ClickHouseParser::RELOAD - 128)) | (1ULL << (ClickHouseParser::REMOVE - 128)) | (1ULL << (ClickHouseParser::RENAME - 128)) | (1ULL << (ClickHouseParser::REPLACE - 128)) | (1ULL << (ClickHouseParser::REPLICA - 128)) | (1ULL << (ClickHouseParser::REPLICATED - 128)) | (1ULL << (ClickHouseParser::RIGHT - 128))
-              | (1ULL << (ClickHouseParser::ROLLUP - 128)) | (1ULL << (ClickHouseParser::SAMPLE - 128)) | (1ULL << (ClickHouseParser::SECOND - 128)) | (1ULL << (ClickHouseParser::SELECT - 128)) | (1ULL << (ClickHouseParser::SEMI - 128)) | (1ULL << (ClickHouseParser::SENDS - 128)) | (1ULL << (ClickHouseParser::SET - 128)) | (1ULL << (ClickHouseParser::SETTINGS - 128))
-              | (1ULL << (ClickHouseParser::SHOW - 128)) | (1ULL << (ClickHouseParser::SOURCE - 128)) | (1ULL << (ClickHouseParser::START - 128)) | (1ULL << (ClickHouseParser::STOP - 128)) | (1ULL << (ClickHouseParser::SUBSTRING - 128)) | (1ULL << (ClickHouseParser::SYNC - 128)) | (1ULL << (ClickHouseParser::SYNTAX - 128)) | (1ULL << (ClickHouseParser::SYSTEM - 128))
-              | (1ULL << (ClickHouseParser::TABLE - 128)) | (1ULL << (ClickHouseParser::TABLES - 128)) | (1ULL << (ClickHouseParser::TEMPORARY - 128)) | (1ULL << (ClickHouseParser::TEST - 128)) | (1ULL << (ClickHouseParser::THEN - 128)) | (1ULL << (ClickHouseParser::TIES - 128)) | (1ULL << (ClickHouseParser::TIMEOUT - 128)) | (1ULL << (ClickHouseParser::TIMESTAMP - 128))
-              | (1ULL << (ClickHouseParser::TO - 128)) | (1ULL << (ClickHouseParser::TOP - 128)) | (1ULL << (ClickHouseParser::TOTALS - 128)) | (1ULL << (ClickHouseParser::TRAILING - 128)) | (1ULL << (ClickHouseParser::TRIM - 128)) | (1ULL << (ClickHouseParser::TRUNCATE - 128)) | (1ULL << (ClickHouseParser::TTL - 128)) | (1ULL << (ClickHouseParser::TYPE - 128))
-              | (1ULL << (ClickHouseParser::UNION - 128)) | (1ULL << (ClickHouseParser::UPDATE - 128)) | (1ULL << (ClickHouseParser::USE - 128)) | (1ULL << (ClickHouseParser::USING - 128)) | (1ULL << (ClickHouseParser::UUID - 128)) | (1ULL << (ClickHouseParser::VALUES - 128)) | (1ULL << (ClickHouseParser::VIEW - 128)) | (1ULL << (ClickHouseParser::VOLUME - 128))
-              | (1ULL << (ClickHouseParser::WATCH - 128)) | (1ULL << (ClickHouseParser::WEEK - 128)) | (1ULL << (ClickHouseParser::WHEN - 128)) | (1ULL << (ClickHouseParser::WHERE - 128)) | (1ULL << (ClickHouseParser::WITH - 128)) | (1ULL << (ClickHouseParser::YEAR - 128)) | (1ULL << (ClickHouseParser::JSON_FALSE - 128)) | (1ULL << (ClickHouseParser::JSON_TRUE - 128))
-              | (1ULL << (ClickHouseParser::IDENTIFIER - 128)) | (1ULL << (ClickHouseParser::FLOATING_LITERAL - 128)) | (1ULL << (ClickHouseParser::OCTAL_LITERAL - 128)) | (1ULL << (ClickHouseParser::DECIMAL_LITERAL - 128)) | (1ULL << (ClickHouseParser::HEXADECIMAL_LITERAL - 128)) | (1ULL << (ClickHouseParser::STRING_LITERAL - 128)) | (1ULL << (ClickHouseParser::ASTERISK - 128)))) != 0) || ((((_la - 197) & ~ 0x3fULL) == 0) &&
-              ((1ULL << (_la - 197)) & ((1ULL << (ClickHouseParser::DASH - 197)) | (1ULL << (ClickHouseParser::DOT - 197)) | (1ULL << (ClickHouseParser::LBRACKET - 197)) | (1ULL << (ClickHouseParser::LPAREN - 197)) | (1ULL << (ClickHouseParser::PLUS - 197)))) != 0)) {
-              setState(1649);
-              columnExprList();
-            }
-            setState(1652);
-            match(ClickHouseParser::RPAREN);
-            break;
-          }
-        }
-        setState(1655);
-        match(ClickHouseParser::LPAREN);
-        setState(1657);
-        _errHandler->sync(this);
-        switch (getInterpreter()->adaptivePredict(_input, 221, _ctx)) {
-          case 1: {
-            setState(1656);
-            match(ClickHouseParser::DISTINCT);
-            break;
-          }
-        }
-        setState(1660);
-        _errHandler->sync(this);
-        _la = _input->LA(1);
-        if ((((_la & ~ 0x3fULL) == 0) &&
-          ((1ULL << _la) & ((1ULL << ClickHouseParser::AFTER) | (1ULL << ClickHouseParser::ALIAS) | (1ULL << ClickHouseParser::ALL) | (1ULL << ClickHouseParser::ALTER) | (1ULL << ClickHouseParser::AND) | (1ULL << ClickHouseParser::ANTI) | (1ULL << ClickHouseParser::ANY) | (1ULL << ClickHouseParser::ARRAY)
-          | (1ULL << ClickHouseParser::AS) | (1ULL << ClickHouseParser::ASCENDING) | (1ULL << ClickHouseParser::ASOF) | (1ULL << ClickHouseParser::AST) | (1ULL << ClickHouseParser::ASYNC) | (1ULL << ClickHouseParser::ATTACH) | (1ULL << ClickHouseParser::BETWEEN) | (1ULL << ClickHouseParser::BOTH)
-          | (1ULL << ClickHouseParser::BY) | (1ULL << ClickHouseParser::CASE) | (1ULL << ClickHouseParser::CAST) | (1ULL << ClickHouseParser::CHECK) | (1ULL << ClickHouseParser::CLEAR) | (1ULL << ClickHouseParser::CLUSTER) | (1ULL << ClickHouseParser::CODEC) | (1ULL << ClickHouseParser::COLLATE)
-          | (1ULL << ClickHouseParser::COLUMN) | (1ULL << ClickHouseParser::COMMENT) | (1ULL << ClickHouseParser::CONSTRAINT) | (1ULL << ClickHouseParser::CREATE) | (1ULL << ClickHouseParser::CROSS) | (1ULL << ClickHouseParser::CUBE) | (1ULL << ClickHouseParser::DATABASE) | (1ULL << ClickHouseParser::DATABASES)
-          | (1ULL << ClickHouseParser::DATE) | (1ULL << ClickHouseParser::DAY) | (1ULL << ClickHouseParser::DEDUPLICATE) | (1ULL << ClickHouseParser::DEFAULT) | (1ULL << ClickHouseParser::DELAY) | (1ULL << ClickHouseParser::DELETE) | (1ULL << ClickHouseParser::DESC) | (1ULL << ClickHouseParser::DESCENDING)
-          | (1ULL << ClickHouseParser::DESCRIBE) | (1ULL << ClickHouseParser::DETACH) | (1ULL << ClickHouseParser::DICTIONARIES) | (1ULL << ClickHouseParser::DICTIONARY) | (1ULL << ClickHouseParser::DISK) | (1ULL << ClickHouseParser::DISTINCT) | (1ULL << ClickHouseParser::DISTRIBUTED) | (1ULL << ClickHouseParser::DROP)
-          | (1ULL << ClickHouseParser::ELSE) | (1ULL << ClickHouseParser::END) | (1ULL << ClickHouseParser::ENGINE) | (1ULL << ClickHouseParser::EVENTS) | (1ULL << ClickHouseParser::EXISTS) | (1ULL << ClickHouseParser::EXPLAIN) | (1ULL << ClickHouseParser::EXPRESSION) | (1ULL << ClickHouseParser::EXTRACT)
-          | (1ULL << ClickHouseParser::FETCHES) | (1ULL << ClickHouseParser::FINAL) | (1ULL << ClickHouseParser::FIRST) | (1ULL << ClickHouseParser::FLUSH) | (1ULL << ClickHouseParser::FOR) | (1ULL << ClickHouseParser::FORMAT))) != 0) || ((((_la - 64) & ~ 0x3fULL) == 0) &&
-          ((1ULL << (_la - 64)) & ((1ULL << (ClickHouseParser::FREEZE - 64)) | (1ULL << (ClickHouseParser::FROM - 64)) | (1ULL << (ClickHouseParser::FULL - 64)) | (1ULL << (ClickHouseParser::FUNCTION - 64)) | (1ULL << (ClickHouseParser::GLOBAL - 64)) | (1ULL << (ClickHouseParser::GRANULARITY - 64)) | (1ULL << (ClickHouseParser::GROUP - 64)) | (1ULL << (ClickHouseParser::HAVING - 64))
-          | (1ULL << (ClickHouseParser::HIERARCHICAL - 64)) | (1ULL << (ClickHouseParser::HOUR - 64)) | (1ULL << (ClickHouseParser::ID - 64)) | (1ULL << (ClickHouseParser::IF - 64)) | (1ULL << (ClickHouseParser::ILIKE - 64)) | (1ULL << (ClickHouseParser::IN - 64)) | (1ULL << (ClickHouseParser::INDEX - 64)) | (1ULL << (ClickHouseParser::INF - 64))
-          | (1ULL << (ClickHouseParser::INJECTIVE - 64)) | (1ULL << (ClickHouseParser::INNER - 64)) | (1ULL << (ClickHouseParser::INSERT - 64)) | (1ULL << (ClickHouseParser::INTERVAL - 64)) | (1ULL << (ClickHouseParser::INTO - 64)) | (1ULL << (ClickHouseParser::IS - 64)) | (1ULL << (ClickHouseParser::IS_OBJECT_ID - 64)) | (1ULL << (ClickHouseParser::JOIN - 64))
-          | (1ULL << (ClickHouseParser::KEY - 64)) | (1ULL << (ClickHouseParser::KILL - 64)) | (1ULL << (ClickHouseParser::LAST - 64)) | (1ULL << (ClickHouseParser::LAYOUT - 64)) | (1ULL << (ClickHouseParser::LEADING - 64)) | (1ULL << (ClickHouseParser::LEFT - 64)) | (1ULL << (ClickHouseParser::LIFETIME - 64)) | (1ULL << (ClickHouseParser::LIKE - 64))
-          | (1ULL << (ClickHouseParser::LIMIT - 64)) | (1ULL << (ClickHouseParser::LIVE - 64)) | (1ULL << (ClickHouseParser::LOCAL - 64)) | (1ULL << (ClickHouseParser::LOGS - 64)) | (1ULL << (ClickHouseParser::MATERIALIZE - 64)) | (1ULL << (ClickHouseParser::MATERIALIZED - 64)) | (1ULL << (ClickHouseParser::MAX - 64)) | (1ULL << (ClickHouseParser::MERGES - 64))
-          | (1ULL << (ClickHouseParser::MIN - 64)) | (1ULL << (ClickHouseParser::MINUTE - 64)) | (1ULL << (ClickHouseParser::MODIFY - 64)) | (1ULL << (ClickHouseParser::MONTH - 64)) | (1ULL << (ClickHouseParser::MOVE - 64)) | (1ULL << (ClickHouseParser::MUTATION - 64)) | (1ULL << (ClickHouseParser::NAN_SQL - 64)) | (1ULL << (ClickHouseParser::NO - 64))
-          | (1ULL << (ClickHouseParser::NOT - 64)) | (1ULL << (ClickHouseParser::NULL_SQL - 64)) | (1ULL << (ClickHouseParser::NULLS - 64)) | (1ULL << (ClickHouseParser::OFFSET - 64)) | (1ULL << (ClickHouseParser::ON - 64)) | (1ULL << (ClickHouseParser::OPTIMIZE - 64)) | (1ULL << (ClickHouseParser::OR - 64)) | (1ULL << (ClickHouseParser::ORDER - 64))
-          | (1ULL << (ClickHouseParser::OUTER - 64)) | (1ULL << (ClickHouseParser::OUTFILE - 64)) | (1ULL << (ClickHouseParser::PARTITION - 64)) | (1ULL << (ClickHouseParser::POPULATE - 64)) | (1ULL << (ClickHouseParser::PREWHERE - 64)) | (1ULL << (ClickHouseParser::PRIMARY - 64)) | (1ULL << (ClickHouseParser::QUARTER - 64)))) != 0) || ((((_la - 128) & ~ 0x3fULL) == 0) &&
-          ((1ULL << (_la - 128)) & ((1ULL << (ClickHouseParser::RANGE - 128)) | (1ULL << (ClickHouseParser::RELOAD - 128)) | (1ULL << (ClickHouseParser::REMOVE - 128)) | (1ULL << (ClickHouseParser::RENAME - 128)) | (1ULL << (ClickHouseParser::REPLACE - 128)) | (1ULL << (ClickHouseParser::REPLICA - 128)) | (1ULL << (ClickHouseParser::REPLICATED - 128)) | (1ULL << (ClickHouseParser::RIGHT - 128))
-          | (1ULL << (ClickHouseParser::ROLLUP - 128)) | (1ULL << (ClickHouseParser::SAMPLE - 128)) | (1ULL << (ClickHouseParser::SECOND - 128)) | (1ULL << (ClickHouseParser::SELECT - 128)) | (1ULL << (ClickHouseParser::SEMI - 128)) | (1ULL << (ClickHouseParser::SENDS - 128)) | (1ULL << (ClickHouseParser::SET - 128)) | (1ULL << (ClickHouseParser::SETTINGS - 128))
-          | (1ULL << (ClickHouseParser::SHOW - 128)) | (1ULL << (ClickHouseParser::SOURCE - 128)) | (1ULL << (ClickHouseParser::START - 128)) | (1ULL << (ClickHouseParser::STOP - 128)) | (1ULL << (ClickHouseParser::SUBSTRING - 128)) | (1ULL << (ClickHouseParser::SYNC - 128)) | (1ULL << (ClickHouseParser::SYNTAX - 128)) | (1ULL << (ClickHouseParser::SYSTEM - 128))
-          | (1ULL << (ClickHouseParser::TABLE - 128)) | (1ULL << (ClickHouseParser::TABLES - 128)) | (1ULL << (ClickHouseParser::TEMPORARY - 128)) | (1ULL << (ClickHouseParser::TEST - 128)) | (1ULL << (ClickHouseParser::THEN - 128)) | (1ULL << (ClickHouseParser::TIES - 128)) | (1ULL << (ClickHouseParser::TIMEOUT - 128)) | (1ULL << (ClickHouseParser::TIMESTAMP - 128))
-          | (1ULL << (ClickHouseParser::TO - 128)) | (1ULL << (ClickHouseParser::TOP - 128)) | (1ULL << (ClickHouseParser::TOTALS - 128)) | (1ULL << (ClickHouseParser::TRAILING - 128)) | (1ULL << (ClickHouseParser::TRIM - 128)) | (1ULL << (ClickHouseParser::TRUNCATE - 128)) | (1ULL << (ClickHouseParser::TTL - 128)) | (1ULL << (ClickHouseParser::TYPE - 128))
-          | (1ULL << (ClickHouseParser::UNION - 128)) | (1ULL << (ClickHouseParser::UPDATE - 128)) | (1ULL << (ClickHouseParser::USE - 128)) | (1ULL << (ClickHouseParser::USING - 128)) | (1ULL << (ClickHouseParser::UUID - 128)) | (1ULL << (ClickHouseParser::VALUES - 128)) | (1ULL << (ClickHouseParser::VIEW - 128)) | (1ULL << (ClickHouseParser::VOLUME - 128))
-          | (1ULL << (ClickHouseParser::WATCH - 128)) | (1ULL << (ClickHouseParser::WEEK - 128)) | (1ULL << (ClickHouseParser::WHEN - 128)) | (1ULL << (ClickHouseParser::WHERE - 128)) | (1ULL << (ClickHouseParser::WITH - 128)) | (1ULL << (ClickHouseParser::YEAR - 128)) | (1ULL << (ClickHouseParser::JSON_FALSE - 128)) | (1ULL << (ClickHouseParser::JSON_TRUE - 128))
-          | (1ULL << (ClickHouseParser::IDENTIFIER - 128)) | (1ULL << (ClickHouseParser::FLOATING_LITERAL - 128)) | (1ULL << (ClickHouseParser::OCTAL_LITERAL - 128)) | (1ULL << (ClickHouseParser::DECIMAL_LITERAL - 128)) | (1ULL << (ClickHouseParser::HEXADECIMAL_LITERAL - 128)) | (1ULL << (ClickHouseParser::STRING_LITERAL - 128)) | (1ULL << (ClickHouseParser::ASTERISK - 128)))) != 0) || ((((_la - 197) & ~ 0x3fULL) == 0) &&
-          ((1ULL << (_la - 197)) & ((1ULL << (ClickHouseParser::DASH - 197)) | (1ULL << (ClickHouseParser::DOT - 197)) | (1ULL << (ClickHouseParser::LBRACKET - 197)) | (1ULL << (ClickHouseParser::LPAREN - 197)) | (1ULL << (ClickHouseParser::PLUS - 197)))) != 0)) {
-          setState(1659);
-          columnArgList();
-        }
-        setState(1662);
-        match(ClickHouseParser::RPAREN);
-        break;
-      }
-      case 10: {  // literal
-        _localctx = _tracker.createInstance<ColumnExprLiteralContext>(_localctx);
-        _ctx = _localctx;
-        previousContext = _localctx;
-        setState(1664);
-        literal();
-        break;
-      }
-      case 11: {  // unary minus
-        _localctx = _tracker.createInstance<ColumnExprNegateContext>(_localctx);
-        _ctx = _localctx;
-        previousContext = _localctx;
-        setState(1665);
-        match(ClickHouseParser::DASH);
-        setState(1666);
-        columnExpr(17);
-        break;
-      }
-      case 12: {  // NOT expr
-        _localctx = _tracker.createInstance<ColumnExprNotContext>(_localctx);
-        _ctx = _localctx;
-        previousContext = _localctx;
-        setState(1667);
-        match(ClickHouseParser::NOT);
-        setState(1668);
-        columnExpr(12);
-        break;
-      }
-      case 13: {  // [table.]* -- ColumnExprAsterisk
-        _localctx = _tracker.createInstance<ColumnExprAsteriskContext>(_localctx);
-        _ctx = _localctx;
-        previousContext = _localctx;
-        setState(1672);
-        _errHandler->sync(this);
-        _la = _input->LA(1);
-        if ((((_la & ~ 0x3fULL) == 0) &&
-          ((1ULL << _la) & ((1ULL << ClickHouseParser::AFTER) | (1ULL << ClickHouseParser::ALIAS) | (1ULL << ClickHouseParser::ALL) | (1ULL << ClickHouseParser::ALTER) | (1ULL << ClickHouseParser::AND) | (1ULL << ClickHouseParser::ANTI) | (1ULL << ClickHouseParser::ANY) | (1ULL << ClickHouseParser::ARRAY)
-          | (1ULL << ClickHouseParser::AS) | (1ULL << ClickHouseParser::ASCENDING) | (1ULL << ClickHouseParser::ASOF) | (1ULL << ClickHouseParser::AST) | (1ULL << ClickHouseParser::ASYNC) | (1ULL << ClickHouseParser::ATTACH) | (1ULL << ClickHouseParser::BETWEEN) | (1ULL << ClickHouseParser::BOTH)
-          | (1ULL << ClickHouseParser::BY) | (1ULL << ClickHouseParser::CASE) | (1ULL << ClickHouseParser::CAST) | (1ULL << ClickHouseParser::CHECK) | (1ULL << ClickHouseParser::CLEAR) | (1ULL << ClickHouseParser::CLUSTER) | (1ULL << ClickHouseParser::CODEC) | (1ULL << ClickHouseParser::COLLATE)
-          | (1ULL << ClickHouseParser::COLUMN) | (1ULL << ClickHouseParser::COMMENT) | (1ULL << ClickHouseParser::CONSTRAINT) | (1ULL << ClickHouseParser::CREATE) | (1ULL << ClickHouseParser::CROSS) | (1ULL << ClickHouseParser::CUBE) | (1ULL << ClickHouseParser::DATABASE) | (1ULL << ClickHouseParser::DATABASES)
-          | (1ULL << ClickHouseParser::DATE) | (1ULL << ClickHouseParser::DAY) | (1ULL << ClickHouseParser::DEDUPLICATE) | (1ULL << ClickHouseParser::DEFAULT) | (1ULL << ClickHouseParser::DELAY) | (1ULL << ClickHouseParser::DELETE) | (1ULL << ClickHouseParser::DESC) | (1ULL << ClickHouseParser::DESCENDING)
-          | (1ULL << ClickHouseParser::DESCRIBE) | (1ULL << ClickHouseParser::DETACH) | (1ULL << ClickHouseParser::DICTIONARIES) | (1ULL << ClickHouseParser::DICTIONARY) | (1ULL << ClickHouseParser::DISK) | (1ULL << ClickHouseParser::DISTINCT) | (1ULL << ClickHouseParser::DISTRIBUTED) | (1ULL << ClickHouseParser::DROP)
-          | (1ULL << ClickHouseParser::ELSE) | (1ULL << ClickHouseParser::END) | (1ULL << ClickHouseParser::ENGINE) | (1ULL << ClickHouseParser::EVENTS) | (1ULL << ClickHouseParser::EXISTS) | (1ULL << ClickHouseParser::EXPLAIN) | (1ULL << ClickHouseParser::EXPRESSION) | (1ULL << ClickHouseParser::EXTRACT)
-          | (1ULL << ClickHouseParser::FETCHES) | (1ULL << ClickHouseParser::FINAL) | (1ULL << ClickHouseParser::FIRST) | (1ULL << ClickHouseParser::FLUSH) | (1ULL << ClickHouseParser::FOR) | (1ULL << ClickHouseParser::FORMAT))) != 0) || ((((_la - 64) & ~ 0x3fULL) == 0) &&
-          ((1ULL << (_la - 64)) & ((1ULL << (ClickHouseParser::FREEZE - 64)) | (1ULL << (ClickHouseParser::FROM - 64)) | (1ULL << (ClickHouseParser::FULL - 64)) | (1ULL << (ClickHouseParser::FUNCTION - 64)) | (1ULL << (ClickHouseParser::GLOBAL - 64)) | (1ULL << (ClickHouseParser::GRANULARITY - 64)) | (1ULL << (ClickHouseParser::GROUP - 64)) | (1ULL << (ClickHouseParser::HAVING - 64))
-          | (1ULL << (ClickHouseParser::HIERARCHICAL - 64)) | (1ULL << (ClickHouseParser::HOUR - 64)) | (1ULL << (ClickHouseParser::ID - 64)) | (1ULL << (ClickHouseParser::IF - 64)) | (1ULL << (ClickHouseParser::ILIKE - 64)) | (1ULL << (ClickHouseParser::IN - 64)) | (1ULL << (ClickHouseParser::INDEX - 64)) | (1ULL << (ClickHouseParser::INJECTIVE - 64))
-          | (1ULL << (ClickHouseParser::INNER - 64)) | (1ULL << (ClickHouseParser::INSERT - 64)) | (1ULL << (ClickHouseParser::INTERVAL - 64)) | (1ULL << (ClickHouseParser::INTO - 64)) | (1ULL << (ClickHouseParser::IS - 64)) | (1ULL << (ClickHouseParser::IS_OBJECT_ID - 64)) | (1ULL << (ClickHouseParser::JOIN - 64)) | (1ULL << (ClickHouseParser::KEY - 64))
-          | (1ULL << (ClickHouseParser::KILL - 64)) | (1ULL << (ClickHouseParser::LAST - 64)) | (1ULL << (ClickHouseParser::LAYOUT - 64)) | (1ULL << (ClickHouseParser::LEADING - 64)) | (1ULL << (ClickHouseParser::LEFT - 64)) | (1ULL << (ClickHouseParser::LIFETIME - 64)) | (1ULL << (ClickHouseParser::LIKE - 64)) | (1ULL << (ClickHouseParser::LIMIT - 64))
-          | (1ULL << (ClickHouseParser::LIVE - 64)) | (1ULL << (ClickHouseParser::LOCAL - 64)) | (1ULL << (ClickHouseParser::LOGS - 64)) | (1ULL << (ClickHouseParser::MATERIALIZE - 64)) | (1ULL << (ClickHouseParser::MATERIALIZED - 64)) | (1ULL << (ClickHouseParser::MAX - 64)) | (1ULL << (ClickHouseParser::MERGES - 64)) | (1ULL << (ClickHouseParser::MIN - 64))
-          | (1ULL << (ClickHouseParser::MINUTE - 64)) | (1ULL << (ClickHouseParser::MODIFY - 64)) | (1ULL << (ClickHouseParser::MONTH - 64)) | (1ULL << (ClickHouseParser::MOVE - 64)) | (1ULL << (ClickHouseParser::MUTATION - 64)) | (1ULL << (ClickHouseParser::NO - 64)) | (1ULL << (ClickHouseParser::NOT - 64)) | (1ULL << (ClickHouseParser::NULLS - 64))
-          | (1ULL << (ClickHouseParser::OFFSET - 64)) | (1ULL << (ClickHouseParser::ON - 64)) | (1ULL << (ClickHouseParser::OPTIMIZE - 64)) | (1ULL << (ClickHouseParser::OR - 64)) | (1ULL << (ClickHouseParser::ORDER - 64)) | (1ULL << (ClickHouseParser::OUTER - 64)) | (1ULL << (ClickHouseParser::OUTFILE - 64)) | (1ULL << (ClickHouseParser::PARTITION - 64))
-          | (1ULL << (ClickHouseParser::POPULATE - 64)) | (1ULL << (ClickHouseParser::PREWHERE - 64)) | (1ULL << (ClickHouseParser::PRIMARY - 64)) | (1ULL << (ClickHouseParser::QUARTER - 64)))) != 0) || ((((_la - 128) & ~ 0x3fULL) == 0) &&
-          ((1ULL << (_la - 128)) & ((1ULL << (ClickHouseParser::RANGE - 128)) | (1ULL << (ClickHouseParser::RELOAD - 128)) | (1ULL << (ClickHouseParser::REMOVE - 128)) | (1ULL << (ClickHouseParser::RENAME - 128)) | (1ULL << (ClickHouseParser::REPLACE - 128)) | (1ULL << (ClickHouseParser::REPLICA - 128)) | (1ULL << (ClickHouseParser::REPLICATED - 128)) | (1ULL << (ClickHouseParser::RIGHT - 128))
-          | (1ULL << (ClickHouseParser::ROLLUP - 128)) | (1ULL << (ClickHouseParser::SAMPLE - 128)) | (1ULL << (ClickHouseParser::SECOND - 128)) | (1ULL << (ClickHouseParser::SELECT - 128)) | (1ULL << (ClickHouseParser::SEMI - 128)) | (1ULL << (ClickHouseParser::SENDS - 128)) | (1ULL << (ClickHouseParser::SET - 128)) | (1ULL << (ClickHouseParser::SETTINGS - 128))
-          | (1ULL << (ClickHouseParser::SHOW - 128)) | (1ULL << (ClickHouseParser::SOURCE - 128)) | (1ULL << (ClickHouseParser::START - 128)) | (1ULL << (ClickHouseParser::STOP - 128)) | (1ULL << (ClickHouseParser::SUBSTRING - 128)) | (1ULL << (ClickHouseParser::SYNC - 128)) | (1ULL << (ClickHouseParser::SYNTAX - 128)) | (1ULL << (ClickHouseParser::SYSTEM - 128))
-          | (1ULL << (ClickHouseParser::TABLE - 128)) | (1ULL << (ClickHouseParser::TABLES - 128)) | (1ULL << (ClickHouseParser::TEMPORARY - 128)) | (1ULL << (ClickHouseParser::TEST - 128)) | (1ULL << (ClickHouseParser::THEN - 128)) | (1ULL << (ClickHouseParser::TIES - 128)) | (1ULL << (ClickHouseParser::TIMEOUT - 128)) | (1ULL << (ClickHouseParser::TIMESTAMP - 128))
-          | (1ULL << (ClickHouseParser::TO - 128)) | (1ULL << (ClickHouseParser::TOP - 128)) | (1ULL << (ClickHouseParser::TOTALS - 128)) | (1ULL << (ClickHouseParser::TRAILING - 128)) | (1ULL << (ClickHouseParser::TRIM - 128)) | (1ULL << (ClickHouseParser::TRUNCATE - 128)) | (1ULL << (ClickHouseParser::TTL - 128)) | (1ULL << (ClickHouseParser::TYPE - 128))
-          | (1ULL << (ClickHouseParser::UNION - 128)) | (1ULL << (ClickHouseParser::UPDATE - 128)) | (1ULL << (ClickHouseParser::USE - 128)) | (1ULL << (ClickHouseParser::USING - 128)) | (1ULL << (ClickHouseParser::UUID - 128)) | (1ULL << (ClickHouseParser::VALUES - 128)) | (1ULL << (ClickHouseParser::VIEW - 128)) | (1ULL << (ClickHouseParser::VOLUME - 128))
-          | (1ULL << (ClickHouseParser::WATCH - 128)) | (1ULL << (ClickHouseParser::WEEK - 128)) | (1ULL << (ClickHouseParser::WHEN - 128)) | (1ULL << (ClickHouseParser::WHERE - 128)) | (1ULL << (ClickHouseParser::WITH - 128)) | (1ULL << (ClickHouseParser::YEAR - 128)) | (1ULL <<
(ClickHouseParser::JSON_FALSE - 128)) - | (1ULL << (ClickHouseParser::JSON_TRUE - 128)) - | (1ULL << (ClickHouseParser::IDENTIFIER - 128)))) != 0)) { - setState(1669); - tableIdentifier(); - setState(1670); - match(ClickHouseParser::DOT); - } - setState(1674); - match(ClickHouseParser::ASTERISK); - break; - } - - case 14: { - _localctx = _tracker.createInstance(_localctx); - _ctx = _localctx; - previousContext = _localctx; - setState(1675); - match(ClickHouseParser::LPAREN); - setState(1676); - selectUnionStmt(); - setState(1677); - match(ClickHouseParser::RPAREN); - break; - } - - case 15: { - _localctx = _tracker.createInstance(_localctx); - _ctx = _localctx; - previousContext = _localctx; - setState(1679); - match(ClickHouseParser::LPAREN); - setState(1680); - columnExpr(0); - setState(1681); - match(ClickHouseParser::RPAREN); - break; - } - - case 16: { - _localctx = _tracker.createInstance(_localctx); - _ctx = _localctx; - previousContext = _localctx; - setState(1683); - match(ClickHouseParser::LPAREN); - setState(1684); - columnExprList(); - setState(1685); - match(ClickHouseParser::RPAREN); - break; - } - - case 17: { - _localctx = _tracker.createInstance(_localctx); - _ctx = _localctx; - previousContext = _localctx; - setState(1687); - match(ClickHouseParser::LBRACKET); - setState(1689); - _errHandler->sync(this); - - _la = _input->LA(1); - if ((((_la & ~ 0x3fULL) == 0) && - ((1ULL << _la) & ((1ULL << ClickHouseParser::AFTER) - | (1ULL << ClickHouseParser::ALIAS) - | (1ULL << ClickHouseParser::ALL) - | (1ULL << ClickHouseParser::ALTER) - | (1ULL << ClickHouseParser::AND) - | (1ULL << ClickHouseParser::ANTI) - | (1ULL << ClickHouseParser::ANY) - | (1ULL << ClickHouseParser::ARRAY) - | (1ULL << ClickHouseParser::AS) - | (1ULL << ClickHouseParser::ASCENDING) - | (1ULL << ClickHouseParser::ASOF) - | (1ULL << ClickHouseParser::AST) - | (1ULL << ClickHouseParser::ASYNC) - | (1ULL << ClickHouseParser::ATTACH) - | (1ULL << ClickHouseParser::BETWEEN) - | (1ULL << ClickHouseParser::BOTH) - | (1ULL << ClickHouseParser::BY) - | (1ULL << ClickHouseParser::CASE) - | (1ULL << ClickHouseParser::CAST) - | (1ULL << ClickHouseParser::CHECK) - | (1ULL << ClickHouseParser::CLEAR) - | (1ULL << ClickHouseParser::CLUSTER) - | (1ULL << ClickHouseParser::CODEC) - | (1ULL << ClickHouseParser::COLLATE) - | (1ULL << ClickHouseParser::COLUMN) - | (1ULL << ClickHouseParser::COMMENT) - | (1ULL << ClickHouseParser::CONSTRAINT) - | (1ULL << ClickHouseParser::CREATE) - | (1ULL << ClickHouseParser::CROSS) - | (1ULL << ClickHouseParser::CUBE) - | (1ULL << ClickHouseParser::DATABASE) - | (1ULL << ClickHouseParser::DATABASES) - | (1ULL << ClickHouseParser::DATE) - | (1ULL << ClickHouseParser::DAY) - | (1ULL << ClickHouseParser::DEDUPLICATE) - | (1ULL << ClickHouseParser::DEFAULT) - | (1ULL << ClickHouseParser::DELAY) - | (1ULL << ClickHouseParser::DELETE) - | (1ULL << ClickHouseParser::DESC) - | (1ULL << ClickHouseParser::DESCENDING) - | (1ULL << ClickHouseParser::DESCRIBE) - | (1ULL << ClickHouseParser::DETACH) - | (1ULL << ClickHouseParser::DICTIONARIES) - | (1ULL << ClickHouseParser::DICTIONARY) - | (1ULL << ClickHouseParser::DISK) - | (1ULL << ClickHouseParser::DISTINCT) - | (1ULL << ClickHouseParser::DISTRIBUTED) - | (1ULL << ClickHouseParser::DROP) - | (1ULL << ClickHouseParser::ELSE) - | (1ULL << ClickHouseParser::END) - | (1ULL << ClickHouseParser::ENGINE) - | (1ULL << ClickHouseParser::EVENTS) - | (1ULL << ClickHouseParser::EXISTS) - | (1ULL << ClickHouseParser::EXPLAIN) - | (1ULL << 
ClickHouseParser::EXPRESSION) - | (1ULL << ClickHouseParser::EXTRACT) - | (1ULL << ClickHouseParser::FETCHES) - | (1ULL << ClickHouseParser::FINAL) - | (1ULL << ClickHouseParser::FIRST) - | (1ULL << ClickHouseParser::FLUSH) - | (1ULL << ClickHouseParser::FOR) - | (1ULL << ClickHouseParser::FORMAT))) != 0) || ((((_la - 64) & ~ 0x3fULL) == 0) && - ((1ULL << (_la - 64)) & ((1ULL << (ClickHouseParser::FREEZE - 64)) - | (1ULL << (ClickHouseParser::FROM - 64)) - | (1ULL << (ClickHouseParser::FULL - 64)) - | (1ULL << (ClickHouseParser::FUNCTION - 64)) - | (1ULL << (ClickHouseParser::GLOBAL - 64)) - | (1ULL << (ClickHouseParser::GRANULARITY - 64)) - | (1ULL << (ClickHouseParser::GROUP - 64)) - | (1ULL << (ClickHouseParser::HAVING - 64)) - | (1ULL << (ClickHouseParser::HIERARCHICAL - 64)) - | (1ULL << (ClickHouseParser::HOUR - 64)) - | (1ULL << (ClickHouseParser::ID - 64)) - | (1ULL << (ClickHouseParser::IF - 64)) - | (1ULL << (ClickHouseParser::ILIKE - 64)) - | (1ULL << (ClickHouseParser::IN - 64)) - | (1ULL << (ClickHouseParser::INDEX - 64)) - | (1ULL << (ClickHouseParser::INF - 64)) - | (1ULL << (ClickHouseParser::INJECTIVE - 64)) - | (1ULL << (ClickHouseParser::INNER - 64)) - | (1ULL << (ClickHouseParser::INSERT - 64)) - | (1ULL << (ClickHouseParser::INTERVAL - 64)) - | (1ULL << (ClickHouseParser::INTO - 64)) - | (1ULL << (ClickHouseParser::IS - 64)) - | (1ULL << (ClickHouseParser::IS_OBJECT_ID - 64)) - | (1ULL << (ClickHouseParser::JOIN - 64)) - | (1ULL << (ClickHouseParser::KEY - 64)) - | (1ULL << (ClickHouseParser::KILL - 64)) - | (1ULL << (ClickHouseParser::LAST - 64)) - | (1ULL << (ClickHouseParser::LAYOUT - 64)) - | (1ULL << (ClickHouseParser::LEADING - 64)) - | (1ULL << (ClickHouseParser::LEFT - 64)) - | (1ULL << (ClickHouseParser::LIFETIME - 64)) - | (1ULL << (ClickHouseParser::LIKE - 64)) - | (1ULL << (ClickHouseParser::LIMIT - 64)) - | (1ULL << (ClickHouseParser::LIVE - 64)) - | (1ULL << (ClickHouseParser::LOCAL - 64)) - | (1ULL << (ClickHouseParser::LOGS - 64)) - | (1ULL << (ClickHouseParser::MATERIALIZE - 64)) - | (1ULL << (ClickHouseParser::MATERIALIZED - 64)) - | (1ULL << (ClickHouseParser::MAX - 64)) - | (1ULL << (ClickHouseParser::MERGES - 64)) - | (1ULL << (ClickHouseParser::MIN - 64)) - | (1ULL << (ClickHouseParser::MINUTE - 64)) - | (1ULL << (ClickHouseParser::MODIFY - 64)) - | (1ULL << (ClickHouseParser::MONTH - 64)) - | (1ULL << (ClickHouseParser::MOVE - 64)) - | (1ULL << (ClickHouseParser::MUTATION - 64)) - | (1ULL << (ClickHouseParser::NAN_SQL - 64)) - | (1ULL << (ClickHouseParser::NO - 64)) - | (1ULL << (ClickHouseParser::NOT - 64)) - | (1ULL << (ClickHouseParser::NULL_SQL - 64)) - | (1ULL << (ClickHouseParser::NULLS - 64)) - | (1ULL << (ClickHouseParser::OFFSET - 64)) - | (1ULL << (ClickHouseParser::ON - 64)) - | (1ULL << (ClickHouseParser::OPTIMIZE - 64)) - | (1ULL << (ClickHouseParser::OR - 64)) - | (1ULL << (ClickHouseParser::ORDER - 64)) - | (1ULL << (ClickHouseParser::OUTER - 64)) - | (1ULL << (ClickHouseParser::OUTFILE - 64)) - | (1ULL << (ClickHouseParser::PARTITION - 64)) - | (1ULL << (ClickHouseParser::POPULATE - 64)) - | (1ULL << (ClickHouseParser::PREWHERE - 64)) - | (1ULL << (ClickHouseParser::PRIMARY - 64)) - | (1ULL << (ClickHouseParser::QUARTER - 64)))) != 0) || ((((_la - 128) & ~ 0x3fULL) == 0) && - ((1ULL << (_la - 128)) & ((1ULL << (ClickHouseParser::RANGE - 128)) - | (1ULL << (ClickHouseParser::RELOAD - 128)) - | (1ULL << (ClickHouseParser::REMOVE - 128)) - | (1ULL << (ClickHouseParser::RENAME - 128)) - | (1ULL << (ClickHouseParser::REPLACE - 128)) - 
| (1ULL << (ClickHouseParser::REPLICA - 128)) - | (1ULL << (ClickHouseParser::REPLICATED - 128)) - | (1ULL << (ClickHouseParser::RIGHT - 128)) - | (1ULL << (ClickHouseParser::ROLLUP - 128)) - | (1ULL << (ClickHouseParser::SAMPLE - 128)) - | (1ULL << (ClickHouseParser::SECOND - 128)) - | (1ULL << (ClickHouseParser::SELECT - 128)) - | (1ULL << (ClickHouseParser::SEMI - 128)) - | (1ULL << (ClickHouseParser::SENDS - 128)) - | (1ULL << (ClickHouseParser::SET - 128)) - | (1ULL << (ClickHouseParser::SETTINGS - 128)) - | (1ULL << (ClickHouseParser::SHOW - 128)) - | (1ULL << (ClickHouseParser::SOURCE - 128)) - | (1ULL << (ClickHouseParser::START - 128)) - | (1ULL << (ClickHouseParser::STOP - 128)) - | (1ULL << (ClickHouseParser::SUBSTRING - 128)) - | (1ULL << (ClickHouseParser::SYNC - 128)) - | (1ULL << (ClickHouseParser::SYNTAX - 128)) - | (1ULL << (ClickHouseParser::SYSTEM - 128)) - | (1ULL << (ClickHouseParser::TABLE - 128)) - | (1ULL << (ClickHouseParser::TABLES - 128)) - | (1ULL << (ClickHouseParser::TEMPORARY - 128)) - | (1ULL << (ClickHouseParser::TEST - 128)) - | (1ULL << (ClickHouseParser::THEN - 128)) - | (1ULL << (ClickHouseParser::TIES - 128)) - | (1ULL << (ClickHouseParser::TIMEOUT - 128)) - | (1ULL << (ClickHouseParser::TIMESTAMP - 128)) - | (1ULL << (ClickHouseParser::TO - 128)) - | (1ULL << (ClickHouseParser::TOP - 128)) - | (1ULL << (ClickHouseParser::TOTALS - 128)) - | (1ULL << (ClickHouseParser::TRAILING - 128)) - | (1ULL << (ClickHouseParser::TRIM - 128)) - | (1ULL << (ClickHouseParser::TRUNCATE - 128)) - | (1ULL << (ClickHouseParser::TTL - 128)) - | (1ULL << (ClickHouseParser::TYPE - 128)) - | (1ULL << (ClickHouseParser::UNION - 128)) - | (1ULL << (ClickHouseParser::UPDATE - 128)) - | (1ULL << (ClickHouseParser::USE - 128)) - | (1ULL << (ClickHouseParser::USING - 128)) - | (1ULL << (ClickHouseParser::UUID - 128)) - | (1ULL << (ClickHouseParser::VALUES - 128)) - | (1ULL << (ClickHouseParser::VIEW - 128)) - | (1ULL << (ClickHouseParser::VOLUME - 128)) - | (1ULL << (ClickHouseParser::WATCH - 128)) - | (1ULL << (ClickHouseParser::WEEK - 128)) - | (1ULL << (ClickHouseParser::WHEN - 128)) - | (1ULL << (ClickHouseParser::WHERE - 128)) - | (1ULL << (ClickHouseParser::WITH - 128)) - | (1ULL << (ClickHouseParser::YEAR - 128)) - | (1ULL << (ClickHouseParser::JSON_FALSE - 128)) - | (1ULL << (ClickHouseParser::JSON_TRUE - 128)) - | (1ULL << (ClickHouseParser::IDENTIFIER - 128)) - | (1ULL << (ClickHouseParser::FLOATING_LITERAL - 128)) - | (1ULL << (ClickHouseParser::OCTAL_LITERAL - 128)) - | (1ULL << (ClickHouseParser::DECIMAL_LITERAL - 128)) - | (1ULL << (ClickHouseParser::HEXADECIMAL_LITERAL - 128)) - | (1ULL << (ClickHouseParser::STRING_LITERAL - 128)) - | (1ULL << (ClickHouseParser::ASTERISK - 128)))) != 0) || ((((_la - 197) & ~ 0x3fULL) == 0) && - ((1ULL << (_la - 197)) & ((1ULL << (ClickHouseParser::DASH - 197)) - | (1ULL << (ClickHouseParser::DOT - 197)) - | (1ULL << (ClickHouseParser::LBRACKET - 197)) - | (1ULL << (ClickHouseParser::LPAREN - 197)) - | (1ULL << (ClickHouseParser::PLUS - 197)))) != 0)) { - setState(1688); - columnExprList(); - } - setState(1691); - match(ClickHouseParser::RBRACKET); - break; - } - - case 18: { - _localctx = _tracker.createInstance(_localctx); - _ctx = _localctx; - previousContext = _localctx; - setState(1692); - columnIdentifier(); - break; - } - - } - _ctx->stop = _input->LT(-1); - setState(1766); - _errHandler->sync(this); - alt = getInterpreter()->adaptivePredict(_input, 234, _ctx); - while (alt != 2 && alt != atn::ATN::INVALID_ALT_NUMBER) { - if 
(alt == 1) { - if (!_parseListeners.empty()) - triggerExitRuleEvent(); - previousContext = _localctx; - setState(1764); - _errHandler->sync(this); - switch (getInterpreter()->adaptivePredict(_input, 233, _ctx)) { - case 1: { - auto newContext = _tracker.createInstance(_tracker.createInstance(parentContext, parentState)); - _localctx = newContext; - pushNewRecursionContext(newContext, startState, RuleColumnExpr); - setState(1695); - - if (!(precpred(_ctx, 16))) throw FailedPredicateException(this, "precpred(_ctx, 16)"); - setState(1696); - _la = _input->LA(1); - if (!(((((_la - 191) & ~ 0x3fULL) == 0) && - ((1ULL << (_la - 191)) & ((1ULL << (ClickHouseParser::ASTERISK - 191)) - | (1ULL << (ClickHouseParser::PERCENT - 191)) - | (1ULL << (ClickHouseParser::SLASH - 191)))) != 0))) { - _errHandler->recoverInline(this); - } - else { - _errHandler->reportMatch(this); - consume(); - } - setState(1697); - columnExpr(17); - break; - } - - case 2: { - auto newContext = _tracker.createInstance(_tracker.createInstance(parentContext, parentState)); - _localctx = newContext; - pushNewRecursionContext(newContext, startState, RuleColumnExpr); - setState(1698); - - if (!(precpred(_ctx, 15))) throw FailedPredicateException(this, "precpred(_ctx, 15)"); - setState(1699); - _la = _input->LA(1); - if (!(((((_la - 196) & ~ 0x3fULL) == 0) && - ((1ULL << (_la - 196)) & ((1ULL << (ClickHouseParser::CONCAT - 196)) - | (1ULL << (ClickHouseParser::DASH - 196)) - | (1ULL << (ClickHouseParser::PLUS - 196)))) != 0))) { - _errHandler->recoverInline(this); - } - else { - _errHandler->reportMatch(this); - consume(); - } - setState(1700); - columnExpr(16); - break; - } - - case 3: { - auto newContext = _tracker.createInstance(_tracker.createInstance(parentContext, parentState)); - _localctx = newContext; - pushNewRecursionContext(newContext, startState, RuleColumnExpr); - setState(1701); - - if (!(precpred(_ctx, 14))) throw FailedPredicateException(this, "precpred(_ctx, 14)"); - setState(1720); - _errHandler->sync(this); - switch (getInterpreter()->adaptivePredict(_input, 229, _ctx)) { - case 1: { - setState(1702); - match(ClickHouseParser::EQ_DOUBLE); - break; - } - - case 2: { - setState(1703); - match(ClickHouseParser::EQ_SINGLE); - break; - } - - case 3: { - setState(1704); - match(ClickHouseParser::NOT_EQ); - break; - } - - case 4: { - setState(1705); - match(ClickHouseParser::LE); - break; - } - - case 5: { - setState(1706); - match(ClickHouseParser::GE); - break; - } - - case 6: { - setState(1707); - match(ClickHouseParser::LT); - break; - } - - case 7: { - setState(1708); - match(ClickHouseParser::GT); - break; - } - - case 8: { - setState(1710); - _errHandler->sync(this); - - _la = _input->LA(1); - if (_la == ClickHouseParser::GLOBAL) { - setState(1709); - match(ClickHouseParser::GLOBAL); - } - setState(1713); - _errHandler->sync(this); - - _la = _input->LA(1); - if (_la == ClickHouseParser::NOT) { - setState(1712); - match(ClickHouseParser::NOT); - } - setState(1715); - match(ClickHouseParser::IN); - break; - } - - case 9: { - setState(1717); - _errHandler->sync(this); - - _la = _input->LA(1); - if (_la == ClickHouseParser::NOT) { - setState(1716); - match(ClickHouseParser::NOT); - } - setState(1719); - _la = _input->LA(1); - if (!(_la == ClickHouseParser::ILIKE - - || _la == ClickHouseParser::LIKE)) { - _errHandler->recoverInline(this); - } - else { - _errHandler->reportMatch(this); - consume(); - } - break; - } - - } - setState(1722); - columnExpr(15); - break; - } - - case 4: { - auto newContext = 
_tracker.createInstance(_tracker.createInstance(parentContext, parentState)); - _localctx = newContext; - pushNewRecursionContext(newContext, startState, RuleColumnExpr); - setState(1723); - - if (!(precpred(_ctx, 11))) throw FailedPredicateException(this, "precpred(_ctx, 11)"); - setState(1724); - match(ClickHouseParser::AND); - setState(1725); - columnExpr(12); - break; - } - - case 5: { - auto newContext = _tracker.createInstance(_tracker.createInstance(parentContext, parentState)); - _localctx = newContext; - pushNewRecursionContext(newContext, startState, RuleColumnExpr); - setState(1726); - - if (!(precpred(_ctx, 10))) throw FailedPredicateException(this, "precpred(_ctx, 10)"); - setState(1727); - match(ClickHouseParser::OR); - setState(1728); - columnExpr(11); - break; - } - - case 6: { - auto newContext = _tracker.createInstance(_tracker.createInstance(parentContext, parentState)); - _localctx = newContext; - pushNewRecursionContext(newContext, startState, RuleColumnExpr); - setState(1729); - - if (!(precpred(_ctx, 9))) throw FailedPredicateException(this, "precpred(_ctx, 9)"); - setState(1731); - _errHandler->sync(this); - - _la = _input->LA(1); - if (_la == ClickHouseParser::NOT) { - setState(1730); - match(ClickHouseParser::NOT); - } - setState(1733); - match(ClickHouseParser::BETWEEN); - setState(1734); - columnExpr(0); - setState(1735); - match(ClickHouseParser::AND); - setState(1736); - columnExpr(10); - break; - } - - case 7: { - auto newContext = _tracker.createInstance(_tracker.createInstance(parentContext, parentState)); - _localctx = newContext; - pushNewRecursionContext(newContext, startState, RuleColumnExpr); - setState(1738); - - if (!(precpred(_ctx, 8))) throw FailedPredicateException(this, "precpred(_ctx, 8)"); - setState(1739); - match(ClickHouseParser::QUERY); - setState(1740); - columnExpr(0); - setState(1741); - match(ClickHouseParser::COLON); - setState(1742); - columnExpr(8); - break; - } - - case 8: { - auto newContext = _tracker.createInstance(_tracker.createInstance(parentContext, parentState)); - _localctx = newContext; - pushNewRecursionContext(newContext, startState, RuleColumnExpr); - setState(1744); - - if (!(precpred(_ctx, 19))) throw FailedPredicateException(this, "precpred(_ctx, 19)"); - setState(1745); - match(ClickHouseParser::LBRACKET); - setState(1746); - columnExpr(0); - setState(1747); - match(ClickHouseParser::RBRACKET); - break; - } - - case 9: { - auto newContext = _tracker.createInstance(_tracker.createInstance(parentContext, parentState)); - _localctx = newContext; - pushNewRecursionContext(newContext, startState, RuleColumnExpr); - setState(1749); - - if (!(precpred(_ctx, 18))) throw FailedPredicateException(this, "precpred(_ctx, 18)"); - setState(1750); - match(ClickHouseParser::DOT); - setState(1751); - match(ClickHouseParser::DECIMAL_LITERAL); - break; - } - - case 10: { - auto newContext = _tracker.createInstance(_tracker.createInstance(parentContext, parentState)); - _localctx = newContext; - pushNewRecursionContext(newContext, startState, RuleColumnExpr); - setState(1752); - - if (!(precpred(_ctx, 13))) throw FailedPredicateException(this, "precpred(_ctx, 13)"); - setState(1753); - match(ClickHouseParser::IS); - setState(1755); - _errHandler->sync(this); - - _la = _input->LA(1); - if (_la == ClickHouseParser::NOT) { - setState(1754); - match(ClickHouseParser::NOT); - } - setState(1757); - match(ClickHouseParser::NULL_SQL); - break; - } - - case 11: { - auto newContext = _tracker.createInstance(_tracker.createInstance(parentContext, 
parentState)); - _localctx = newContext; - pushNewRecursionContext(newContext, startState, RuleColumnExpr); - setState(1758); - - if (!(precpred(_ctx, 7))) throw FailedPredicateException(this, "precpred(_ctx, 7)"); - setState(1762); - _errHandler->sync(this); - switch (_input->LA(1)) { - case ClickHouseParser::DATE: - case ClickHouseParser::FIRST: - case ClickHouseParser::ID: - case ClickHouseParser::KEY: - case ClickHouseParser::IDENTIFIER: { - setState(1759); - alias(); - break; - } - - case ClickHouseParser::AS: { - setState(1760); - match(ClickHouseParser::AS); - setState(1761); - identifier(); - break; - } - - default: - throw NoViableAltException(this); - } - break; - } - - } - } - setState(1768); - _errHandler->sync(this); - alt = getInterpreter()->adaptivePredict(_input, 234, _ctx); - } - } - catch (RecognitionException &e) { - _errHandler->reportError(this, e); - _localctx->exception = std::current_exception(); - _errHandler->recover(this, _localctx->exception); - } - return _localctx; -} - -//----------------- ColumnArgListContext ------------------------------------------------------------------ - -ClickHouseParser::ColumnArgListContext::ColumnArgListContext(ParserRuleContext *parent, size_t invokingState) - : ParserRuleContext(parent, invokingState) { -} - -std::vector ClickHouseParser::ColumnArgListContext::columnArgExpr() { - return getRuleContexts(); -} - -ClickHouseParser::ColumnArgExprContext* ClickHouseParser::ColumnArgListContext::columnArgExpr(size_t i) { - return getRuleContext(i); -} - -std::vector ClickHouseParser::ColumnArgListContext::COMMA() { - return getTokens(ClickHouseParser::COMMA); -} - -tree::TerminalNode* ClickHouseParser::ColumnArgListContext::COMMA(size_t i) { - return getToken(ClickHouseParser::COMMA, i); -} - - -size_t ClickHouseParser::ColumnArgListContext::getRuleIndex() const { - return ClickHouseParser::RuleColumnArgList; -} - -antlrcpp::Any ClickHouseParser::ColumnArgListContext::accept(tree::ParseTreeVisitor *visitor) { - if (auto parserVisitor = dynamic_cast(visitor)) - return parserVisitor->visitColumnArgList(this); - else - return visitor->visitChildren(this); -} - -ClickHouseParser::ColumnArgListContext* ClickHouseParser::columnArgList() { - ColumnArgListContext *_localctx = _tracker.createInstance(_ctx, getState()); - enterRule(_localctx, 176, ClickHouseParser::RuleColumnArgList); - size_t _la = 0; - - auto onExit = finally([=] { - exitRule(); - }); - try { - enterOuterAlt(_localctx, 1); - setState(1769); - columnArgExpr(); - setState(1774); - _errHandler->sync(this); - _la = _input->LA(1); - while (_la == ClickHouseParser::COMMA) { - setState(1770); - match(ClickHouseParser::COMMA); - setState(1771); - columnArgExpr(); - setState(1776); - _errHandler->sync(this); - _la = _input->LA(1); - } - - } - catch (RecognitionException &e) { - _errHandler->reportError(this, e); - _localctx->exception = std::current_exception(); - _errHandler->recover(this, _localctx->exception); - } - - return _localctx; -} - -//----------------- ColumnArgExprContext ------------------------------------------------------------------ - -ClickHouseParser::ColumnArgExprContext::ColumnArgExprContext(ParserRuleContext *parent, size_t invokingState) - : ParserRuleContext(parent, invokingState) { -} - -ClickHouseParser::ColumnLambdaExprContext* ClickHouseParser::ColumnArgExprContext::columnLambdaExpr() { - return getRuleContext(0); -} - -ClickHouseParser::ColumnExprContext* ClickHouseParser::ColumnArgExprContext::columnExpr() { - return getRuleContext(0); -} - - -size_t 
ClickHouseParser::ColumnArgExprContext::getRuleIndex() const { - return ClickHouseParser::RuleColumnArgExpr; -} - -antlrcpp::Any ClickHouseParser::ColumnArgExprContext::accept(tree::ParseTreeVisitor *visitor) { - if (auto parserVisitor = dynamic_cast(visitor)) - return parserVisitor->visitColumnArgExpr(this); - else - return visitor->visitChildren(this); -} - -ClickHouseParser::ColumnArgExprContext* ClickHouseParser::columnArgExpr() { - ColumnArgExprContext *_localctx = _tracker.createInstance(_ctx, getState()); - enterRule(_localctx, 178, ClickHouseParser::RuleColumnArgExpr); - - auto onExit = finally([=] { - exitRule(); - }); - try { - setState(1779); - _errHandler->sync(this); - switch (getInterpreter()->adaptivePredict(_input, 236, _ctx)) { - case 1: { - enterOuterAlt(_localctx, 1); - setState(1777); - columnLambdaExpr(); - break; - } - - case 2: { - enterOuterAlt(_localctx, 2); - setState(1778); - columnExpr(0); - break; - } - - } - - } - catch (RecognitionException &e) { - _errHandler->reportError(this, e); - _localctx->exception = std::current_exception(); - _errHandler->recover(this, _localctx->exception); - } - - return _localctx; -} - -//----------------- ColumnLambdaExprContext ------------------------------------------------------------------ - -ClickHouseParser::ColumnLambdaExprContext::ColumnLambdaExprContext(ParserRuleContext *parent, size_t invokingState) - : ParserRuleContext(parent, invokingState) { -} - -tree::TerminalNode* ClickHouseParser::ColumnLambdaExprContext::ARROW() { - return getToken(ClickHouseParser::ARROW, 0); -} - -ClickHouseParser::ColumnExprContext* ClickHouseParser::ColumnLambdaExprContext::columnExpr() { - return getRuleContext(0); -} - -tree::TerminalNode* ClickHouseParser::ColumnLambdaExprContext::LPAREN() { - return getToken(ClickHouseParser::LPAREN, 0); -} - -std::vector ClickHouseParser::ColumnLambdaExprContext::identifier() { - return getRuleContexts(); -} - -ClickHouseParser::IdentifierContext* ClickHouseParser::ColumnLambdaExprContext::identifier(size_t i) { - return getRuleContext(i); -} - -tree::TerminalNode* ClickHouseParser::ColumnLambdaExprContext::RPAREN() { - return getToken(ClickHouseParser::RPAREN, 0); -} - -std::vector ClickHouseParser::ColumnLambdaExprContext::COMMA() { - return getTokens(ClickHouseParser::COMMA); -} - -tree::TerminalNode* ClickHouseParser::ColumnLambdaExprContext::COMMA(size_t i) { - return getToken(ClickHouseParser::COMMA, i); -} - - -size_t ClickHouseParser::ColumnLambdaExprContext::getRuleIndex() const { - return ClickHouseParser::RuleColumnLambdaExpr; -} - -antlrcpp::Any ClickHouseParser::ColumnLambdaExprContext::accept(tree::ParseTreeVisitor *visitor) { - if (auto parserVisitor = dynamic_cast(visitor)) - return parserVisitor->visitColumnLambdaExpr(this); - else - return visitor->visitChildren(this); -} - -ClickHouseParser::ColumnLambdaExprContext* ClickHouseParser::columnLambdaExpr() { - ColumnLambdaExprContext *_localctx = _tracker.createInstance(_ctx, getState()); - enterRule(_localctx, 180, ClickHouseParser::RuleColumnLambdaExpr); - size_t _la = 0; - - auto onExit = finally([=] { - exitRule(); - }); - try { - enterOuterAlt(_localctx, 1); - setState(1800); - _errHandler->sync(this); - switch (_input->LA(1)) { - case ClickHouseParser::LPAREN: { - setState(1781); - match(ClickHouseParser::LPAREN); - setState(1782); - identifier(); - setState(1787); - _errHandler->sync(this); - _la = _input->LA(1); - while (_la == ClickHouseParser::COMMA) { - setState(1783); - match(ClickHouseParser::COMMA); - setState(1784); - 
identifier(); - setState(1789); - _errHandler->sync(this); - _la = _input->LA(1); - } - setState(1790); - match(ClickHouseParser::RPAREN); - break; - } - - case ClickHouseParser::AFTER: - case ClickHouseParser::ALIAS: - case ClickHouseParser::ALL: - case ClickHouseParser::ALTER: - case ClickHouseParser::AND: - case ClickHouseParser::ANTI: - case ClickHouseParser::ANY: - case ClickHouseParser::ARRAY: - case ClickHouseParser::AS: - case ClickHouseParser::ASCENDING: - case ClickHouseParser::ASOF: - case ClickHouseParser::AST: - case ClickHouseParser::ASYNC: - case ClickHouseParser::ATTACH: - case ClickHouseParser::BETWEEN: - case ClickHouseParser::BOTH: - case ClickHouseParser::BY: - case ClickHouseParser::CASE: - case ClickHouseParser::CAST: - case ClickHouseParser::CHECK: - case ClickHouseParser::CLEAR: - case ClickHouseParser::CLUSTER: - case ClickHouseParser::CODEC: - case ClickHouseParser::COLLATE: - case ClickHouseParser::COLUMN: - case ClickHouseParser::COMMENT: - case ClickHouseParser::CONSTRAINT: - case ClickHouseParser::CREATE: - case ClickHouseParser::CROSS: - case ClickHouseParser::CUBE: - case ClickHouseParser::DATABASE: - case ClickHouseParser::DATABASES: - case ClickHouseParser::DATE: - case ClickHouseParser::DAY: - case ClickHouseParser::DEDUPLICATE: - case ClickHouseParser::DEFAULT: - case ClickHouseParser::DELAY: - case ClickHouseParser::DELETE: - case ClickHouseParser::DESC: - case ClickHouseParser::DESCENDING: - case ClickHouseParser::DESCRIBE: - case ClickHouseParser::DETACH: - case ClickHouseParser::DICTIONARIES: - case ClickHouseParser::DICTIONARY: - case ClickHouseParser::DISK: - case ClickHouseParser::DISTINCT: - case ClickHouseParser::DISTRIBUTED: - case ClickHouseParser::DROP: - case ClickHouseParser::ELSE: - case ClickHouseParser::END: - case ClickHouseParser::ENGINE: - case ClickHouseParser::EVENTS: - case ClickHouseParser::EXISTS: - case ClickHouseParser::EXPLAIN: - case ClickHouseParser::EXPRESSION: - case ClickHouseParser::EXTRACT: - case ClickHouseParser::FETCHES: - case ClickHouseParser::FINAL: - case ClickHouseParser::FIRST: - case ClickHouseParser::FLUSH: - case ClickHouseParser::FOR: - case ClickHouseParser::FORMAT: - case ClickHouseParser::FREEZE: - case ClickHouseParser::FROM: - case ClickHouseParser::FULL: - case ClickHouseParser::FUNCTION: - case ClickHouseParser::GLOBAL: - case ClickHouseParser::GRANULARITY: - case ClickHouseParser::GROUP: - case ClickHouseParser::HAVING: - case ClickHouseParser::HIERARCHICAL: - case ClickHouseParser::HOUR: - case ClickHouseParser::ID: - case ClickHouseParser::IF: - case ClickHouseParser::ILIKE: - case ClickHouseParser::IN: - case ClickHouseParser::INDEX: - case ClickHouseParser::INJECTIVE: - case ClickHouseParser::INNER: - case ClickHouseParser::INSERT: - case ClickHouseParser::INTERVAL: - case ClickHouseParser::INTO: - case ClickHouseParser::IS: - case ClickHouseParser::IS_OBJECT_ID: - case ClickHouseParser::JOIN: - case ClickHouseParser::KEY: - case ClickHouseParser::KILL: - case ClickHouseParser::LAST: - case ClickHouseParser::LAYOUT: - case ClickHouseParser::LEADING: - case ClickHouseParser::LEFT: - case ClickHouseParser::LIFETIME: - case ClickHouseParser::LIKE: - case ClickHouseParser::LIMIT: - case ClickHouseParser::LIVE: - case ClickHouseParser::LOCAL: - case ClickHouseParser::LOGS: - case ClickHouseParser::MATERIALIZE: - case ClickHouseParser::MATERIALIZED: - case ClickHouseParser::MAX: - case ClickHouseParser::MERGES: - case ClickHouseParser::MIN: - case ClickHouseParser::MINUTE: - case 
ClickHouseParser::MODIFY: - case ClickHouseParser::MONTH: - case ClickHouseParser::MOVE: - case ClickHouseParser::MUTATION: - case ClickHouseParser::NO: - case ClickHouseParser::NOT: - case ClickHouseParser::NULLS: - case ClickHouseParser::OFFSET: - case ClickHouseParser::ON: - case ClickHouseParser::OPTIMIZE: - case ClickHouseParser::OR: - case ClickHouseParser::ORDER: - case ClickHouseParser::OUTER: - case ClickHouseParser::OUTFILE: - case ClickHouseParser::PARTITION: - case ClickHouseParser::POPULATE: - case ClickHouseParser::PREWHERE: - case ClickHouseParser::PRIMARY: - case ClickHouseParser::QUARTER: - case ClickHouseParser::RANGE: - case ClickHouseParser::RELOAD: - case ClickHouseParser::REMOVE: - case ClickHouseParser::RENAME: - case ClickHouseParser::REPLACE: - case ClickHouseParser::REPLICA: - case ClickHouseParser::REPLICATED: - case ClickHouseParser::RIGHT: - case ClickHouseParser::ROLLUP: - case ClickHouseParser::SAMPLE: - case ClickHouseParser::SECOND: - case ClickHouseParser::SELECT: - case ClickHouseParser::SEMI: - case ClickHouseParser::SENDS: - case ClickHouseParser::SET: - case ClickHouseParser::SETTINGS: - case ClickHouseParser::SHOW: - case ClickHouseParser::SOURCE: - case ClickHouseParser::START: - case ClickHouseParser::STOP: - case ClickHouseParser::SUBSTRING: - case ClickHouseParser::SYNC: - case ClickHouseParser::SYNTAX: - case ClickHouseParser::SYSTEM: - case ClickHouseParser::TABLE: - case ClickHouseParser::TABLES: - case ClickHouseParser::TEMPORARY: - case ClickHouseParser::TEST: - case ClickHouseParser::THEN: - case ClickHouseParser::TIES: - case ClickHouseParser::TIMEOUT: - case ClickHouseParser::TIMESTAMP: - case ClickHouseParser::TO: - case ClickHouseParser::TOP: - case ClickHouseParser::TOTALS: - case ClickHouseParser::TRAILING: - case ClickHouseParser::TRIM: - case ClickHouseParser::TRUNCATE: - case ClickHouseParser::TTL: - case ClickHouseParser::TYPE: - case ClickHouseParser::UNION: - case ClickHouseParser::UPDATE: - case ClickHouseParser::USE: - case ClickHouseParser::USING: - case ClickHouseParser::UUID: - case ClickHouseParser::VALUES: - case ClickHouseParser::VIEW: - case ClickHouseParser::VOLUME: - case ClickHouseParser::WATCH: - case ClickHouseParser::WEEK: - case ClickHouseParser::WHEN: - case ClickHouseParser::WHERE: - case ClickHouseParser::WITH: - case ClickHouseParser::YEAR: - case ClickHouseParser::JSON_FALSE: - case ClickHouseParser::JSON_TRUE: - case ClickHouseParser::IDENTIFIER: { - setState(1792); - identifier(); - setState(1797); - _errHandler->sync(this); - _la = _input->LA(1); - while (_la == ClickHouseParser::COMMA) { - setState(1793); - match(ClickHouseParser::COMMA); - setState(1794); - identifier(); - setState(1799); - _errHandler->sync(this); - _la = _input->LA(1); - } - break; - } - - default: - throw NoViableAltException(this); - } - setState(1802); - match(ClickHouseParser::ARROW); - setState(1803); - columnExpr(0); - - } - catch (RecognitionException &e) { - _errHandler->reportError(this, e); - _localctx->exception = std::current_exception(); - _errHandler->recover(this, _localctx->exception); - } - - return _localctx; -} - -//----------------- ColumnIdentifierContext ------------------------------------------------------------------ - -ClickHouseParser::ColumnIdentifierContext::ColumnIdentifierContext(ParserRuleContext *parent, size_t invokingState) - : ParserRuleContext(parent, invokingState) { -} - -ClickHouseParser::NestedIdentifierContext* ClickHouseParser::ColumnIdentifierContext::nestedIdentifier() { - return 
getRuleContext(0); -} - -ClickHouseParser::TableIdentifierContext* ClickHouseParser::ColumnIdentifierContext::tableIdentifier() { - return getRuleContext(0); -} - -tree::TerminalNode* ClickHouseParser::ColumnIdentifierContext::DOT() { - return getToken(ClickHouseParser::DOT, 0); -} - - -size_t ClickHouseParser::ColumnIdentifierContext::getRuleIndex() const { - return ClickHouseParser::RuleColumnIdentifier; -} - -antlrcpp::Any ClickHouseParser::ColumnIdentifierContext::accept(tree::ParseTreeVisitor *visitor) { - if (auto parserVisitor = dynamic_cast(visitor)) - return parserVisitor->visitColumnIdentifier(this); - else - return visitor->visitChildren(this); -} - -ClickHouseParser::ColumnIdentifierContext* ClickHouseParser::columnIdentifier() { - ColumnIdentifierContext *_localctx = _tracker.createInstance(_ctx, getState()); - enterRule(_localctx, 182, ClickHouseParser::RuleColumnIdentifier); - - auto onExit = finally([=] { - exitRule(); - }); - try { - enterOuterAlt(_localctx, 1); - setState(1808); - _errHandler->sync(this); - - switch (getInterpreter()->adaptivePredict(_input, 240, _ctx)) { - case 1: { - setState(1805); - tableIdentifier(); - setState(1806); - match(ClickHouseParser::DOT); - break; - } - - } - setState(1810); - nestedIdentifier(); - - } - catch (RecognitionException &e) { - _errHandler->reportError(this, e); - _localctx->exception = std::current_exception(); - _errHandler->recover(this, _localctx->exception); - } - - return _localctx; -} - -//----------------- NestedIdentifierContext ------------------------------------------------------------------ - -ClickHouseParser::NestedIdentifierContext::NestedIdentifierContext(ParserRuleContext *parent, size_t invokingState) - : ParserRuleContext(parent, invokingState) { -} - -std::vector ClickHouseParser::NestedIdentifierContext::identifier() { - return getRuleContexts(); -} - -ClickHouseParser::IdentifierContext* ClickHouseParser::NestedIdentifierContext::identifier(size_t i) { - return getRuleContext(i); -} - -tree::TerminalNode* ClickHouseParser::NestedIdentifierContext::DOT() { - return getToken(ClickHouseParser::DOT, 0); -} - - -size_t ClickHouseParser::NestedIdentifierContext::getRuleIndex() const { - return ClickHouseParser::RuleNestedIdentifier; -} - -antlrcpp::Any ClickHouseParser::NestedIdentifierContext::accept(tree::ParseTreeVisitor *visitor) { - if (auto parserVisitor = dynamic_cast(visitor)) - return parserVisitor->visitNestedIdentifier(this); - else - return visitor->visitChildren(this); -} - -ClickHouseParser::NestedIdentifierContext* ClickHouseParser::nestedIdentifier() { - NestedIdentifierContext *_localctx = _tracker.createInstance(_ctx, getState()); - enterRule(_localctx, 184, ClickHouseParser::RuleNestedIdentifier); - - auto onExit = finally([=] { - exitRule(); - }); - try { - enterOuterAlt(_localctx, 1); - setState(1812); - identifier(); - setState(1815); - _errHandler->sync(this); - - switch (getInterpreter()->adaptivePredict(_input, 241, _ctx)) { - case 1: { - setState(1813); - match(ClickHouseParser::DOT); - setState(1814); - identifier(); - break; - } - - } - - } - catch (RecognitionException &e) { - _errHandler->reportError(this, e); - _localctx->exception = std::current_exception(); - _errHandler->recover(this, _localctx->exception); - } - - return _localctx; -} - -//----------------- TableExprContext ------------------------------------------------------------------ - -ClickHouseParser::TableExprContext::TableExprContext(ParserRuleContext *parent, size_t invokingState) - : ParserRuleContext(parent, 
invokingState) { -} - - -size_t ClickHouseParser::TableExprContext::getRuleIndex() const { - return ClickHouseParser::RuleTableExpr; -} - -void ClickHouseParser::TableExprContext::copyFrom(TableExprContext *ctx) { - ParserRuleContext::copyFrom(ctx); -} - -//----------------- TableExprIdentifierContext ------------------------------------------------------------------ - -ClickHouseParser::TableIdentifierContext* ClickHouseParser::TableExprIdentifierContext::tableIdentifier() { - return getRuleContext(0); -} - -ClickHouseParser::TableExprIdentifierContext::TableExprIdentifierContext(TableExprContext *ctx) { copyFrom(ctx); } - -antlrcpp::Any ClickHouseParser::TableExprIdentifierContext::accept(tree::ParseTreeVisitor *visitor) { - if (auto parserVisitor = dynamic_cast(visitor)) - return parserVisitor->visitTableExprIdentifier(this); - else - return visitor->visitChildren(this); -} -//----------------- TableExprSubqueryContext ------------------------------------------------------------------ - -tree::TerminalNode* ClickHouseParser::TableExprSubqueryContext::LPAREN() { - return getToken(ClickHouseParser::LPAREN, 0); -} - -ClickHouseParser::SelectUnionStmtContext* ClickHouseParser::TableExprSubqueryContext::selectUnionStmt() { - return getRuleContext(0); -} - -tree::TerminalNode* ClickHouseParser::TableExprSubqueryContext::RPAREN() { - return getToken(ClickHouseParser::RPAREN, 0); -} - -ClickHouseParser::TableExprSubqueryContext::TableExprSubqueryContext(TableExprContext *ctx) { copyFrom(ctx); } - -antlrcpp::Any ClickHouseParser::TableExprSubqueryContext::accept(tree::ParseTreeVisitor *visitor) { - if (auto parserVisitor = dynamic_cast(visitor)) - return parserVisitor->visitTableExprSubquery(this); - else - return visitor->visitChildren(this); -} -//----------------- TableExprAliasContext ------------------------------------------------------------------ - -ClickHouseParser::TableExprContext* ClickHouseParser::TableExprAliasContext::tableExpr() { - return getRuleContext(0); -} - -ClickHouseParser::AliasContext* ClickHouseParser::TableExprAliasContext::alias() { - return getRuleContext(0); -} - -tree::TerminalNode* ClickHouseParser::TableExprAliasContext::AS() { - return getToken(ClickHouseParser::AS, 0); -} - -ClickHouseParser::IdentifierContext* ClickHouseParser::TableExprAliasContext::identifier() { - return getRuleContext(0); -} - -ClickHouseParser::TableExprAliasContext::TableExprAliasContext(TableExprContext *ctx) { copyFrom(ctx); } - -antlrcpp::Any ClickHouseParser::TableExprAliasContext::accept(tree::ParseTreeVisitor *visitor) { - if (auto parserVisitor = dynamic_cast(visitor)) - return parserVisitor->visitTableExprAlias(this); - else - return visitor->visitChildren(this); -} -//----------------- TableExprFunctionContext ------------------------------------------------------------------ - -ClickHouseParser::TableFunctionExprContext* ClickHouseParser::TableExprFunctionContext::tableFunctionExpr() { - return getRuleContext(0); -} - -ClickHouseParser::TableExprFunctionContext::TableExprFunctionContext(TableExprContext *ctx) { copyFrom(ctx); } - -antlrcpp::Any ClickHouseParser::TableExprFunctionContext::accept(tree::ParseTreeVisitor *visitor) { - if (auto parserVisitor = dynamic_cast(visitor)) - return parserVisitor->visitTableExprFunction(this); - else - return visitor->visitChildren(this); -} - -ClickHouseParser::TableExprContext* ClickHouseParser::tableExpr() { - return tableExpr(0); -} - -ClickHouseParser::TableExprContext* ClickHouseParser::tableExpr(int precedence) { - ParserRuleContext 
*parentContext = _ctx; - size_t parentState = getState(); - ClickHouseParser::TableExprContext *_localctx = _tracker.createInstance(_ctx, parentState); - ClickHouseParser::TableExprContext *previousContext = _localctx; - (void)previousContext; // Silence compiler, in case the context is not used by generated code. - size_t startState = 186; - enterRecursionRule(_localctx, 186, ClickHouseParser::RuleTableExpr, precedence); - - - - auto onExit = finally([=] { - unrollRecursionContexts(parentContext); - }); - try { - size_t alt; - enterOuterAlt(_localctx, 1); - setState(1824); - _errHandler->sync(this); - switch (getInterpreter()->adaptivePredict(_input, 242, _ctx)) { - case 1: { - _localctx = _tracker.createInstance(_localctx); - _ctx = _localctx; - previousContext = _localctx; - - setState(1818); - tableIdentifier(); - break; - } - - case 2: { - _localctx = _tracker.createInstance(_localctx); - _ctx = _localctx; - previousContext = _localctx; - setState(1819); - tableFunctionExpr(); - break; - } - - case 3: { - _localctx = _tracker.createInstance(_localctx); - _ctx = _localctx; - previousContext = _localctx; - setState(1820); - match(ClickHouseParser::LPAREN); - setState(1821); - selectUnionStmt(); - setState(1822); - match(ClickHouseParser::RPAREN); - break; - } - - } - _ctx->stop = _input->LT(-1); - setState(1834); - _errHandler->sync(this); - alt = getInterpreter()->adaptivePredict(_input, 244, _ctx); - while (alt != 2 && alt != atn::ATN::INVALID_ALT_NUMBER) { - if (alt == 1) { - if (!_parseListeners.empty()) - triggerExitRuleEvent(); - previousContext = _localctx; - auto newContext = _tracker.createInstance(_tracker.createInstance(parentContext, parentState)); - _localctx = newContext; - pushNewRecursionContext(newContext, startState, RuleTableExpr); - setState(1826); - - if (!(precpred(_ctx, 1))) throw FailedPredicateException(this, "precpred(_ctx, 1)"); - setState(1830); - _errHandler->sync(this); - switch (_input->LA(1)) { - case ClickHouseParser::DATE: - case ClickHouseParser::FIRST: - case ClickHouseParser::ID: - case ClickHouseParser::KEY: - case ClickHouseParser::IDENTIFIER: { - setState(1827); - alias(); - break; - } - - case ClickHouseParser::AS: { - setState(1828); - match(ClickHouseParser::AS); - setState(1829); - identifier(); - break; - } - - default: - throw NoViableAltException(this); - } - } - setState(1836); - _errHandler->sync(this); - alt = getInterpreter()->adaptivePredict(_input, 244, _ctx); - } - } - catch (RecognitionException &e) { - _errHandler->reportError(this, e); - _localctx->exception = std::current_exception(); - _errHandler->recover(this, _localctx->exception); - } - return _localctx; -} - -//----------------- TableFunctionExprContext ------------------------------------------------------------------ - -ClickHouseParser::TableFunctionExprContext::TableFunctionExprContext(ParserRuleContext *parent, size_t invokingState) - : ParserRuleContext(parent, invokingState) { -} - -ClickHouseParser::IdentifierContext* ClickHouseParser::TableFunctionExprContext::identifier() { - return getRuleContext(0); -} - -tree::TerminalNode* ClickHouseParser::TableFunctionExprContext::LPAREN() { - return getToken(ClickHouseParser::LPAREN, 0); -} - -tree::TerminalNode* ClickHouseParser::TableFunctionExprContext::RPAREN() { - return getToken(ClickHouseParser::RPAREN, 0); -} - -ClickHouseParser::TableArgListContext* ClickHouseParser::TableFunctionExprContext::tableArgList() { - return getRuleContext(0); -} - - -size_t ClickHouseParser::TableFunctionExprContext::getRuleIndex() const 
{ - return ClickHouseParser::RuleTableFunctionExpr; -} - -antlrcpp::Any ClickHouseParser::TableFunctionExprContext::accept(tree::ParseTreeVisitor *visitor) { - if (auto parserVisitor = dynamic_cast(visitor)) - return parserVisitor->visitTableFunctionExpr(this); - else - return visitor->visitChildren(this); -} - -ClickHouseParser::TableFunctionExprContext* ClickHouseParser::tableFunctionExpr() { - TableFunctionExprContext *_localctx = _tracker.createInstance(_ctx, getState()); - enterRule(_localctx, 188, ClickHouseParser::RuleTableFunctionExpr); - size_t _la = 0; - - auto onExit = finally([=] { - exitRule(); - }); - try { - enterOuterAlt(_localctx, 1); - setState(1837); - identifier(); - setState(1838); - match(ClickHouseParser::LPAREN); - setState(1840); - _errHandler->sync(this); - - _la = _input->LA(1); - if ((((_la & ~ 0x3fULL) == 0) && - ((1ULL << _la) & ((1ULL << ClickHouseParser::AFTER) - | (1ULL << ClickHouseParser::ALIAS) - | (1ULL << ClickHouseParser::ALL) - | (1ULL << ClickHouseParser::ALTER) - | (1ULL << ClickHouseParser::AND) - | (1ULL << ClickHouseParser::ANTI) - | (1ULL << ClickHouseParser::ANY) - | (1ULL << ClickHouseParser::ARRAY) - | (1ULL << ClickHouseParser::AS) - | (1ULL << ClickHouseParser::ASCENDING) - | (1ULL << ClickHouseParser::ASOF) - | (1ULL << ClickHouseParser::AST) - | (1ULL << ClickHouseParser::ASYNC) - | (1ULL << ClickHouseParser::ATTACH) - | (1ULL << ClickHouseParser::BETWEEN) - | (1ULL << ClickHouseParser::BOTH) - | (1ULL << ClickHouseParser::BY) - | (1ULL << ClickHouseParser::CASE) - | (1ULL << ClickHouseParser::CAST) - | (1ULL << ClickHouseParser::CHECK) - | (1ULL << ClickHouseParser::CLEAR) - | (1ULL << ClickHouseParser::CLUSTER) - | (1ULL << ClickHouseParser::CODEC) - | (1ULL << ClickHouseParser::COLLATE) - | (1ULL << ClickHouseParser::COLUMN) - | (1ULL << ClickHouseParser::COMMENT) - | (1ULL << ClickHouseParser::CONSTRAINT) - | (1ULL << ClickHouseParser::CREATE) - | (1ULL << ClickHouseParser::CROSS) - | (1ULL << ClickHouseParser::CUBE) - | (1ULL << ClickHouseParser::DATABASE) - | (1ULL << ClickHouseParser::DATABASES) - | (1ULL << ClickHouseParser::DATE) - | (1ULL << ClickHouseParser::DAY) - | (1ULL << ClickHouseParser::DEDUPLICATE) - | (1ULL << ClickHouseParser::DEFAULT) - | (1ULL << ClickHouseParser::DELAY) - | (1ULL << ClickHouseParser::DELETE) - | (1ULL << ClickHouseParser::DESC) - | (1ULL << ClickHouseParser::DESCENDING) - | (1ULL << ClickHouseParser::DESCRIBE) - | (1ULL << ClickHouseParser::DETACH) - | (1ULL << ClickHouseParser::DICTIONARIES) - | (1ULL << ClickHouseParser::DICTIONARY) - | (1ULL << ClickHouseParser::DISK) - | (1ULL << ClickHouseParser::DISTINCT) - | (1ULL << ClickHouseParser::DISTRIBUTED) - | (1ULL << ClickHouseParser::DROP) - | (1ULL << ClickHouseParser::ELSE) - | (1ULL << ClickHouseParser::END) - | (1ULL << ClickHouseParser::ENGINE) - | (1ULL << ClickHouseParser::EVENTS) - | (1ULL << ClickHouseParser::EXISTS) - | (1ULL << ClickHouseParser::EXPLAIN) - | (1ULL << ClickHouseParser::EXPRESSION) - | (1ULL << ClickHouseParser::EXTRACT) - | (1ULL << ClickHouseParser::FETCHES) - | (1ULL << ClickHouseParser::FINAL) - | (1ULL << ClickHouseParser::FIRST) - | (1ULL << ClickHouseParser::FLUSH) - | (1ULL << ClickHouseParser::FOR) - | (1ULL << ClickHouseParser::FORMAT))) != 0) || ((((_la - 64) & ~ 0x3fULL) == 0) && - ((1ULL << (_la - 64)) & ((1ULL << (ClickHouseParser::FREEZE - 64)) - | (1ULL << (ClickHouseParser::FROM - 64)) - | (1ULL << (ClickHouseParser::FULL - 64)) - | (1ULL << (ClickHouseParser::FUNCTION - 64)) - | (1ULL << 
(ClickHouseParser::GLOBAL - 64)) - | (1ULL << (ClickHouseParser::GRANULARITY - 64)) - | (1ULL << (ClickHouseParser::GROUP - 64)) - | (1ULL << (ClickHouseParser::HAVING - 64)) - | (1ULL << (ClickHouseParser::HIERARCHICAL - 64)) - | (1ULL << (ClickHouseParser::HOUR - 64)) - | (1ULL << (ClickHouseParser::ID - 64)) - | (1ULL << (ClickHouseParser::IF - 64)) - | (1ULL << (ClickHouseParser::ILIKE - 64)) - | (1ULL << (ClickHouseParser::IN - 64)) - | (1ULL << (ClickHouseParser::INDEX - 64)) - | (1ULL << (ClickHouseParser::INF - 64)) - | (1ULL << (ClickHouseParser::INJECTIVE - 64)) - | (1ULL << (ClickHouseParser::INNER - 64)) - | (1ULL << (ClickHouseParser::INSERT - 64)) - | (1ULL << (ClickHouseParser::INTERVAL - 64)) - | (1ULL << (ClickHouseParser::INTO - 64)) - | (1ULL << (ClickHouseParser::IS - 64)) - | (1ULL << (ClickHouseParser::IS_OBJECT_ID - 64)) - | (1ULL << (ClickHouseParser::JOIN - 64)) - | (1ULL << (ClickHouseParser::KEY - 64)) - | (1ULL << (ClickHouseParser::KILL - 64)) - | (1ULL << (ClickHouseParser::LAST - 64)) - | (1ULL << (ClickHouseParser::LAYOUT - 64)) - | (1ULL << (ClickHouseParser::LEADING - 64)) - | (1ULL << (ClickHouseParser::LEFT - 64)) - | (1ULL << (ClickHouseParser::LIFETIME - 64)) - | (1ULL << (ClickHouseParser::LIKE - 64)) - | (1ULL << (ClickHouseParser::LIMIT - 64)) - | (1ULL << (ClickHouseParser::LIVE - 64)) - | (1ULL << (ClickHouseParser::LOCAL - 64)) - | (1ULL << (ClickHouseParser::LOGS - 64)) - | (1ULL << (ClickHouseParser::MATERIALIZE - 64)) - | (1ULL << (ClickHouseParser::MATERIALIZED - 64)) - | (1ULL << (ClickHouseParser::MAX - 64)) - | (1ULL << (ClickHouseParser::MERGES - 64)) - | (1ULL << (ClickHouseParser::MIN - 64)) - | (1ULL << (ClickHouseParser::MINUTE - 64)) - | (1ULL << (ClickHouseParser::MODIFY - 64)) - | (1ULL << (ClickHouseParser::MONTH - 64)) - | (1ULL << (ClickHouseParser::MOVE - 64)) - | (1ULL << (ClickHouseParser::MUTATION - 64)) - | (1ULL << (ClickHouseParser::NAN_SQL - 64)) - | (1ULL << (ClickHouseParser::NO - 64)) - | (1ULL << (ClickHouseParser::NOT - 64)) - | (1ULL << (ClickHouseParser::NULL_SQL - 64)) - | (1ULL << (ClickHouseParser::NULLS - 64)) - | (1ULL << (ClickHouseParser::OFFSET - 64)) - | (1ULL << (ClickHouseParser::ON - 64)) - | (1ULL << (ClickHouseParser::OPTIMIZE - 64)) - | (1ULL << (ClickHouseParser::OR - 64)) - | (1ULL << (ClickHouseParser::ORDER - 64)) - | (1ULL << (ClickHouseParser::OUTER - 64)) - | (1ULL << (ClickHouseParser::OUTFILE - 64)) - | (1ULL << (ClickHouseParser::PARTITION - 64)) - | (1ULL << (ClickHouseParser::POPULATE - 64)) - | (1ULL << (ClickHouseParser::PREWHERE - 64)) - | (1ULL << (ClickHouseParser::PRIMARY - 64)) - | (1ULL << (ClickHouseParser::QUARTER - 64)))) != 0) || ((((_la - 128) & ~ 0x3fULL) == 0) && - ((1ULL << (_la - 128)) & ((1ULL << (ClickHouseParser::RANGE - 128)) - | (1ULL << (ClickHouseParser::RELOAD - 128)) - | (1ULL << (ClickHouseParser::REMOVE - 128)) - | (1ULL << (ClickHouseParser::RENAME - 128)) - | (1ULL << (ClickHouseParser::REPLACE - 128)) - | (1ULL << (ClickHouseParser::REPLICA - 128)) - | (1ULL << (ClickHouseParser::REPLICATED - 128)) - | (1ULL << (ClickHouseParser::RIGHT - 128)) - | (1ULL << (ClickHouseParser::ROLLUP - 128)) - | (1ULL << (ClickHouseParser::SAMPLE - 128)) - | (1ULL << (ClickHouseParser::SECOND - 128)) - | (1ULL << (ClickHouseParser::SELECT - 128)) - | (1ULL << (ClickHouseParser::SEMI - 128)) - | (1ULL << (ClickHouseParser::SENDS - 128)) - | (1ULL << (ClickHouseParser::SET - 128)) - | (1ULL << (ClickHouseParser::SETTINGS - 128)) - | (1ULL << (ClickHouseParser::SHOW - 128)) - | 
(1ULL << (ClickHouseParser::SOURCE - 128)) - | (1ULL << (ClickHouseParser::START - 128)) - | (1ULL << (ClickHouseParser::STOP - 128)) - | (1ULL << (ClickHouseParser::SUBSTRING - 128)) - | (1ULL << (ClickHouseParser::SYNC - 128)) - | (1ULL << (ClickHouseParser::SYNTAX - 128)) - | (1ULL << (ClickHouseParser::SYSTEM - 128)) - | (1ULL << (ClickHouseParser::TABLE - 128)) - | (1ULL << (ClickHouseParser::TABLES - 128)) - | (1ULL << (ClickHouseParser::TEMPORARY - 128)) - | (1ULL << (ClickHouseParser::TEST - 128)) - | (1ULL << (ClickHouseParser::THEN - 128)) - | (1ULL << (ClickHouseParser::TIES - 128)) - | (1ULL << (ClickHouseParser::TIMEOUT - 128)) - | (1ULL << (ClickHouseParser::TIMESTAMP - 128)) - | (1ULL << (ClickHouseParser::TO - 128)) - | (1ULL << (ClickHouseParser::TOP - 128)) - | (1ULL << (ClickHouseParser::TOTALS - 128)) - | (1ULL << (ClickHouseParser::TRAILING - 128)) - | (1ULL << (ClickHouseParser::TRIM - 128)) - | (1ULL << (ClickHouseParser::TRUNCATE - 128)) - | (1ULL << (ClickHouseParser::TTL - 128)) - | (1ULL << (ClickHouseParser::TYPE - 128)) - | (1ULL << (ClickHouseParser::UNION - 128)) - | (1ULL << (ClickHouseParser::UPDATE - 128)) - | (1ULL << (ClickHouseParser::USE - 128)) - | (1ULL << (ClickHouseParser::USING - 128)) - | (1ULL << (ClickHouseParser::UUID - 128)) - | (1ULL << (ClickHouseParser::VALUES - 128)) - | (1ULL << (ClickHouseParser::VIEW - 128)) - | (1ULL << (ClickHouseParser::VOLUME - 128)) - | (1ULL << (ClickHouseParser::WATCH - 128)) - | (1ULL << (ClickHouseParser::WEEK - 128)) - | (1ULL << (ClickHouseParser::WHEN - 128)) - | (1ULL << (ClickHouseParser::WHERE - 128)) - | (1ULL << (ClickHouseParser::WITH - 128)) - | (1ULL << (ClickHouseParser::YEAR - 128)) - | (1ULL << (ClickHouseParser::JSON_FALSE - 128)) - | (1ULL << (ClickHouseParser::JSON_TRUE - 128)) - | (1ULL << (ClickHouseParser::IDENTIFIER - 128)) - | (1ULL << (ClickHouseParser::FLOATING_LITERAL - 128)) - | (1ULL << (ClickHouseParser::OCTAL_LITERAL - 128)) - | (1ULL << (ClickHouseParser::DECIMAL_LITERAL - 128)) - | (1ULL << (ClickHouseParser::HEXADECIMAL_LITERAL - 128)) - | (1ULL << (ClickHouseParser::STRING_LITERAL - 128)))) != 0) || ((((_la - 197) & ~ 0x3fULL) == 0) && - ((1ULL << (_la - 197)) & ((1ULL << (ClickHouseParser::DASH - 197)) - | (1ULL << (ClickHouseParser::DOT - 197)) - | (1ULL << (ClickHouseParser::PLUS - 197)))) != 0)) { - setState(1839); - tableArgList(); - } - setState(1842); - match(ClickHouseParser::RPAREN); - - } - catch (RecognitionException &e) { - _errHandler->reportError(this, e); - _localctx->exception = std::current_exception(); - _errHandler->recover(this, _localctx->exception); - } - - return _localctx; -} - -//----------------- TableIdentifierContext ------------------------------------------------------------------ - -ClickHouseParser::TableIdentifierContext::TableIdentifierContext(ParserRuleContext *parent, size_t invokingState) - : ParserRuleContext(parent, invokingState) { -} - -ClickHouseParser::IdentifierContext* ClickHouseParser::TableIdentifierContext::identifier() { - return getRuleContext(0); -} - -ClickHouseParser::DatabaseIdentifierContext* ClickHouseParser::TableIdentifierContext::databaseIdentifier() { - return getRuleContext(0); -} - -tree::TerminalNode* ClickHouseParser::TableIdentifierContext::DOT() { - return getToken(ClickHouseParser::DOT, 0); -} - - -size_t ClickHouseParser::TableIdentifierContext::getRuleIndex() const { - return ClickHouseParser::RuleTableIdentifier; -} - -antlrcpp::Any ClickHouseParser::TableIdentifierContext::accept(tree::ParseTreeVisitor 
*visitor) { - if (auto parserVisitor = dynamic_cast(visitor)) - return parserVisitor->visitTableIdentifier(this); - else - return visitor->visitChildren(this); -} - -ClickHouseParser::TableIdentifierContext* ClickHouseParser::tableIdentifier() { - TableIdentifierContext *_localctx = _tracker.createInstance(_ctx, getState()); - enterRule(_localctx, 190, ClickHouseParser::RuleTableIdentifier); - - auto onExit = finally([=] { - exitRule(); - }); - try { - enterOuterAlt(_localctx, 1); - setState(1847); - _errHandler->sync(this); - - switch (getInterpreter()->adaptivePredict(_input, 246, _ctx)) { - case 1: { - setState(1844); - databaseIdentifier(); - setState(1845); - match(ClickHouseParser::DOT); - break; - } - - } - setState(1849); - identifier(); - - } - catch (RecognitionException &e) { - _errHandler->reportError(this, e); - _localctx->exception = std::current_exception(); - _errHandler->recover(this, _localctx->exception); - } - - return _localctx; -} - -//----------------- TableArgListContext ------------------------------------------------------------------ - -ClickHouseParser::TableArgListContext::TableArgListContext(ParserRuleContext *parent, size_t invokingState) - : ParserRuleContext(parent, invokingState) { -} - -std::vector ClickHouseParser::TableArgListContext::tableArgExpr() { - return getRuleContexts(); -} - -ClickHouseParser::TableArgExprContext* ClickHouseParser::TableArgListContext::tableArgExpr(size_t i) { - return getRuleContext(i); -} - -std::vector ClickHouseParser::TableArgListContext::COMMA() { - return getTokens(ClickHouseParser::COMMA); -} - -tree::TerminalNode* ClickHouseParser::TableArgListContext::COMMA(size_t i) { - return getToken(ClickHouseParser::COMMA, i); -} - - -size_t ClickHouseParser::TableArgListContext::getRuleIndex() const { - return ClickHouseParser::RuleTableArgList; -} - -antlrcpp::Any ClickHouseParser::TableArgListContext::accept(tree::ParseTreeVisitor *visitor) { - if (auto parserVisitor = dynamic_cast(visitor)) - return parserVisitor->visitTableArgList(this); - else - return visitor->visitChildren(this); -} - -ClickHouseParser::TableArgListContext* ClickHouseParser::tableArgList() { - TableArgListContext *_localctx = _tracker.createInstance(_ctx, getState()); - enterRule(_localctx, 192, ClickHouseParser::RuleTableArgList); - size_t _la = 0; - - auto onExit = finally([=] { - exitRule(); - }); - try { - enterOuterAlt(_localctx, 1); - setState(1851); - tableArgExpr(); - setState(1856); - _errHandler->sync(this); - _la = _input->LA(1); - while (_la == ClickHouseParser::COMMA) { - setState(1852); - match(ClickHouseParser::COMMA); - setState(1853); - tableArgExpr(); - setState(1858); - _errHandler->sync(this); - _la = _input->LA(1); - } - - } - catch (RecognitionException &e) { - _errHandler->reportError(this, e); - _localctx->exception = std::current_exception(); - _errHandler->recover(this, _localctx->exception); - } - - return _localctx; -} - -//----------------- TableArgExprContext ------------------------------------------------------------------ - -ClickHouseParser::TableArgExprContext::TableArgExprContext(ParserRuleContext *parent, size_t invokingState) - : ParserRuleContext(parent, invokingState) { -} - -ClickHouseParser::NestedIdentifierContext* ClickHouseParser::TableArgExprContext::nestedIdentifier() { - return getRuleContext(0); -} - -ClickHouseParser::TableFunctionExprContext* ClickHouseParser::TableArgExprContext::tableFunctionExpr() { - return getRuleContext(0); -} - -ClickHouseParser::LiteralContext* 
ClickHouseParser::TableArgExprContext::literal() { - return getRuleContext(0); -} - - -size_t ClickHouseParser::TableArgExprContext::getRuleIndex() const { - return ClickHouseParser::RuleTableArgExpr; -} - -antlrcpp::Any ClickHouseParser::TableArgExprContext::accept(tree::ParseTreeVisitor *visitor) { - if (auto parserVisitor = dynamic_cast(visitor)) - return parserVisitor->visitTableArgExpr(this); - else - return visitor->visitChildren(this); -} - -ClickHouseParser::TableArgExprContext* ClickHouseParser::tableArgExpr() { - TableArgExprContext *_localctx = _tracker.createInstance(_ctx, getState()); - enterRule(_localctx, 194, ClickHouseParser::RuleTableArgExpr); - - auto onExit = finally([=] { - exitRule(); - }); - try { - setState(1862); - _errHandler->sync(this); - switch (getInterpreter()->adaptivePredict(_input, 248, _ctx)) { - case 1: { - enterOuterAlt(_localctx, 1); - setState(1859); - nestedIdentifier(); - break; - } - - case 2: { - enterOuterAlt(_localctx, 2); - setState(1860); - tableFunctionExpr(); - break; - } - - case 3: { - enterOuterAlt(_localctx, 3); - setState(1861); - literal(); - break; - } - - } - - } - catch (RecognitionException &e) { - _errHandler->reportError(this, e); - _localctx->exception = std::current_exception(); - _errHandler->recover(this, _localctx->exception); - } - - return _localctx; -} - -//----------------- DatabaseIdentifierContext ------------------------------------------------------------------ - -ClickHouseParser::DatabaseIdentifierContext::DatabaseIdentifierContext(ParserRuleContext *parent, size_t invokingState) - : ParserRuleContext(parent, invokingState) { -} - -ClickHouseParser::IdentifierContext* ClickHouseParser::DatabaseIdentifierContext::identifier() { - return getRuleContext(0); -} - - -size_t ClickHouseParser::DatabaseIdentifierContext::getRuleIndex() const { - return ClickHouseParser::RuleDatabaseIdentifier; -} - -antlrcpp::Any ClickHouseParser::DatabaseIdentifierContext::accept(tree::ParseTreeVisitor *visitor) { - if (auto parserVisitor = dynamic_cast(visitor)) - return parserVisitor->visitDatabaseIdentifier(this); - else - return visitor->visitChildren(this); -} - -ClickHouseParser::DatabaseIdentifierContext* ClickHouseParser::databaseIdentifier() { - DatabaseIdentifierContext *_localctx = _tracker.createInstance(_ctx, getState()); - enterRule(_localctx, 196, ClickHouseParser::RuleDatabaseIdentifier); - - auto onExit = finally([=] { - exitRule(); - }); - try { - enterOuterAlt(_localctx, 1); - setState(1864); - identifier(); - - } - catch (RecognitionException &e) { - _errHandler->reportError(this, e); - _localctx->exception = std::current_exception(); - _errHandler->recover(this, _localctx->exception); - } - - return _localctx; -} - -//----------------- FloatingLiteralContext ------------------------------------------------------------------ - -ClickHouseParser::FloatingLiteralContext::FloatingLiteralContext(ParserRuleContext *parent, size_t invokingState) - : ParserRuleContext(parent, invokingState) { -} - -tree::TerminalNode* ClickHouseParser::FloatingLiteralContext::FLOATING_LITERAL() { - return getToken(ClickHouseParser::FLOATING_LITERAL, 0); -} - -tree::TerminalNode* ClickHouseParser::FloatingLiteralContext::DOT() { - return getToken(ClickHouseParser::DOT, 0); -} - -std::vector ClickHouseParser::FloatingLiteralContext::DECIMAL_LITERAL() { - return getTokens(ClickHouseParser::DECIMAL_LITERAL); -} - -tree::TerminalNode* ClickHouseParser::FloatingLiteralContext::DECIMAL_LITERAL(size_t i) { - return 
getToken(ClickHouseParser::DECIMAL_LITERAL, i); -} - -tree::TerminalNode* ClickHouseParser::FloatingLiteralContext::OCTAL_LITERAL() { - return getToken(ClickHouseParser::OCTAL_LITERAL, 0); -} - - -size_t ClickHouseParser::FloatingLiteralContext::getRuleIndex() const { - return ClickHouseParser::RuleFloatingLiteral; -} - -antlrcpp::Any ClickHouseParser::FloatingLiteralContext::accept(tree::ParseTreeVisitor *visitor) { - if (auto parserVisitor = dynamic_cast(visitor)) - return parserVisitor->visitFloatingLiteral(this); - else - return visitor->visitChildren(this); -} - -ClickHouseParser::FloatingLiteralContext* ClickHouseParser::floatingLiteral() { - FloatingLiteralContext *_localctx = _tracker.createInstance(_ctx, getState()); - enterRule(_localctx, 198, ClickHouseParser::RuleFloatingLiteral); - size_t _la = 0; - - auto onExit = finally([=] { - exitRule(); - }); - try { - setState(1874); - _errHandler->sync(this); - switch (_input->LA(1)) { - case ClickHouseParser::FLOATING_LITERAL: { - enterOuterAlt(_localctx, 1); - setState(1866); - match(ClickHouseParser::FLOATING_LITERAL); - break; - } - - case ClickHouseParser::DOT: { - enterOuterAlt(_localctx, 2); - setState(1867); - match(ClickHouseParser::DOT); - setState(1868); - _la = _input->LA(1); - if (!(_la == ClickHouseParser::OCTAL_LITERAL - - || _la == ClickHouseParser::DECIMAL_LITERAL)) { - _errHandler->recoverInline(this); - } - else { - _errHandler->reportMatch(this); - consume(); - } - break; - } - - case ClickHouseParser::DECIMAL_LITERAL: { - enterOuterAlt(_localctx, 3); - setState(1869); - match(ClickHouseParser::DECIMAL_LITERAL); - setState(1870); - match(ClickHouseParser::DOT); - setState(1872); - _errHandler->sync(this); - - switch (getInterpreter()->adaptivePredict(_input, 249, _ctx)) { - case 1: { - setState(1871); - _la = _input->LA(1); - if (!(_la == ClickHouseParser::OCTAL_LITERAL - - || _la == ClickHouseParser::DECIMAL_LITERAL)) { - _errHandler->recoverInline(this); - } - else { - _errHandler->reportMatch(this); - consume(); - } - break; - } - - } - break; - } - - default: - throw NoViableAltException(this); - } - - } - catch (RecognitionException &e) { - _errHandler->reportError(this, e); - _localctx->exception = std::current_exception(); - _errHandler->recover(this, _localctx->exception); - } - - return _localctx; -} - -//----------------- NumberLiteralContext ------------------------------------------------------------------ - -ClickHouseParser::NumberLiteralContext::NumberLiteralContext(ParserRuleContext *parent, size_t invokingState) - : ParserRuleContext(parent, invokingState) { -} - -ClickHouseParser::FloatingLiteralContext* ClickHouseParser::NumberLiteralContext::floatingLiteral() { - return getRuleContext(0); -} - -tree::TerminalNode* ClickHouseParser::NumberLiteralContext::OCTAL_LITERAL() { - return getToken(ClickHouseParser::OCTAL_LITERAL, 0); -} - -tree::TerminalNode* ClickHouseParser::NumberLiteralContext::DECIMAL_LITERAL() { - return getToken(ClickHouseParser::DECIMAL_LITERAL, 0); -} - -tree::TerminalNode* ClickHouseParser::NumberLiteralContext::HEXADECIMAL_LITERAL() { - return getToken(ClickHouseParser::HEXADECIMAL_LITERAL, 0); -} - -tree::TerminalNode* ClickHouseParser::NumberLiteralContext::INF() { - return getToken(ClickHouseParser::INF, 0); -} - -tree::TerminalNode* ClickHouseParser::NumberLiteralContext::NAN_SQL() { - return getToken(ClickHouseParser::NAN_SQL, 0); -} - -tree::TerminalNode* ClickHouseParser::NumberLiteralContext::PLUS() { - return getToken(ClickHouseParser::PLUS, 0); -} - 
-tree::TerminalNode* ClickHouseParser::NumberLiteralContext::DASH() { - return getToken(ClickHouseParser::DASH, 0); -} - - -size_t ClickHouseParser::NumberLiteralContext::getRuleIndex() const { - return ClickHouseParser::RuleNumberLiteral; -} - -antlrcpp::Any ClickHouseParser::NumberLiteralContext::accept(tree::ParseTreeVisitor *visitor) { - if (auto parserVisitor = dynamic_cast(visitor)) - return parserVisitor->visitNumberLiteral(this); - else - return visitor->visitChildren(this); -} - -ClickHouseParser::NumberLiteralContext* ClickHouseParser::numberLiteral() { - NumberLiteralContext *_localctx = _tracker.createInstance(_ctx, getState()); - enterRule(_localctx, 200, ClickHouseParser::RuleNumberLiteral); - size_t _la = 0; - - auto onExit = finally([=] { - exitRule(); - }); - try { - enterOuterAlt(_localctx, 1); - setState(1877); - _errHandler->sync(this); - - _la = _input->LA(1); - if (_la == ClickHouseParser::DASH - - || _la == ClickHouseParser::PLUS) { - setState(1876); - _la = _input->LA(1); - if (!(_la == ClickHouseParser::DASH - - || _la == ClickHouseParser::PLUS)) { - _errHandler->recoverInline(this); - } - else { - _errHandler->reportMatch(this); - consume(); - } - } - setState(1885); - _errHandler->sync(this); - switch (getInterpreter()->adaptivePredict(_input, 252, _ctx)) { - case 1: { - setState(1879); - floatingLiteral(); - break; - } - - case 2: { - setState(1880); - match(ClickHouseParser::OCTAL_LITERAL); - break; - } - - case 3: { - setState(1881); - match(ClickHouseParser::DECIMAL_LITERAL); - break; - } - - case 4: { - setState(1882); - match(ClickHouseParser::HEXADECIMAL_LITERAL); - break; - } - - case 5: { - setState(1883); - match(ClickHouseParser::INF); - break; - } - - case 6: { - setState(1884); - match(ClickHouseParser::NAN_SQL); - break; - } - - } - - } - catch (RecognitionException &e) { - _errHandler->reportError(this, e); - _localctx->exception = std::current_exception(); - _errHandler->recover(this, _localctx->exception); - } - - return _localctx; -} - -//----------------- LiteralContext ------------------------------------------------------------------ - -ClickHouseParser::LiteralContext::LiteralContext(ParserRuleContext *parent, size_t invokingState) - : ParserRuleContext(parent, invokingState) { -} - -ClickHouseParser::NumberLiteralContext* ClickHouseParser::LiteralContext::numberLiteral() { - return getRuleContext(0); -} - -tree::TerminalNode* ClickHouseParser::LiteralContext::STRING_LITERAL() { - return getToken(ClickHouseParser::STRING_LITERAL, 0); -} - -tree::TerminalNode* ClickHouseParser::LiteralContext::NULL_SQL() { - return getToken(ClickHouseParser::NULL_SQL, 0); -} - - -size_t ClickHouseParser::LiteralContext::getRuleIndex() const { - return ClickHouseParser::RuleLiteral; -} - -antlrcpp::Any ClickHouseParser::LiteralContext::accept(tree::ParseTreeVisitor *visitor) { - if (auto parserVisitor = dynamic_cast(visitor)) - return parserVisitor->visitLiteral(this); - else - return visitor->visitChildren(this); -} - -ClickHouseParser::LiteralContext* ClickHouseParser::literal() { - LiteralContext *_localctx = _tracker.createInstance(_ctx, getState()); - enterRule(_localctx, 202, ClickHouseParser::RuleLiteral); - - auto onExit = finally([=] { - exitRule(); - }); - try { - setState(1890); - _errHandler->sync(this); - switch (_input->LA(1)) { - case ClickHouseParser::INF: - case ClickHouseParser::NAN_SQL: - case ClickHouseParser::FLOATING_LITERAL: - case ClickHouseParser::OCTAL_LITERAL: - case ClickHouseParser::DECIMAL_LITERAL: - case 
ClickHouseParser::HEXADECIMAL_LITERAL: - case ClickHouseParser::DASH: - case ClickHouseParser::DOT: - case ClickHouseParser::PLUS: { - enterOuterAlt(_localctx, 1); - setState(1887); - numberLiteral(); - break; - } - - case ClickHouseParser::STRING_LITERAL: { - enterOuterAlt(_localctx, 2); - setState(1888); - match(ClickHouseParser::STRING_LITERAL); - break; - } - - case ClickHouseParser::NULL_SQL: { - enterOuterAlt(_localctx, 3); - setState(1889); - match(ClickHouseParser::NULL_SQL); - break; - } - - default: - throw NoViableAltException(this); - } - - } - catch (RecognitionException &e) { - _errHandler->reportError(this, e); - _localctx->exception = std::current_exception(); - _errHandler->recover(this, _localctx->exception); - } - - return _localctx; -} - -//----------------- IntervalContext ------------------------------------------------------------------ - -ClickHouseParser::IntervalContext::IntervalContext(ParserRuleContext *parent, size_t invokingState) - : ParserRuleContext(parent, invokingState) { -} - -tree::TerminalNode* ClickHouseParser::IntervalContext::SECOND() { - return getToken(ClickHouseParser::SECOND, 0); -} - -tree::TerminalNode* ClickHouseParser::IntervalContext::MINUTE() { - return getToken(ClickHouseParser::MINUTE, 0); -} - -tree::TerminalNode* ClickHouseParser::IntervalContext::HOUR() { - return getToken(ClickHouseParser::HOUR, 0); -} - -tree::TerminalNode* ClickHouseParser::IntervalContext::DAY() { - return getToken(ClickHouseParser::DAY, 0); -} - -tree::TerminalNode* ClickHouseParser::IntervalContext::WEEK() { - return getToken(ClickHouseParser::WEEK, 0); -} - -tree::TerminalNode* ClickHouseParser::IntervalContext::MONTH() { - return getToken(ClickHouseParser::MONTH, 0); -} - -tree::TerminalNode* ClickHouseParser::IntervalContext::QUARTER() { - return getToken(ClickHouseParser::QUARTER, 0); -} - -tree::TerminalNode* ClickHouseParser::IntervalContext::YEAR() { - return getToken(ClickHouseParser::YEAR, 0); -} - - -size_t ClickHouseParser::IntervalContext::getRuleIndex() const { - return ClickHouseParser::RuleInterval; -} - -antlrcpp::Any ClickHouseParser::IntervalContext::accept(tree::ParseTreeVisitor *visitor) { - if (auto parserVisitor = dynamic_cast(visitor)) - return parserVisitor->visitInterval(this); - else - return visitor->visitChildren(this); -} - -ClickHouseParser::IntervalContext* ClickHouseParser::interval() { - IntervalContext *_localctx = _tracker.createInstance(_ctx, getState()); - enterRule(_localctx, 204, ClickHouseParser::RuleInterval); - size_t _la = 0; - - auto onExit = finally([=] { - exitRule(); - }); - try { - enterOuterAlt(_localctx, 1); - setState(1892); - _la = _input->LA(1); - if (!(_la == ClickHouseParser::DAY || ((((_la - 73) & ~ 0x3fULL) == 0) && - ((1ULL << (_la - 73)) & ((1ULL << (ClickHouseParser::HOUR - 73)) - | (1ULL << (ClickHouseParser::MINUTE - 73)) - | (1ULL << (ClickHouseParser::MONTH - 73)) - | (1ULL << (ClickHouseParser::QUARTER - 73)))) != 0) || ((((_la - 138) & ~ 0x3fULL) == 0) && - ((1ULL << (_la - 138)) & ((1ULL << (ClickHouseParser::SECOND - 138)) - | (1ULL << (ClickHouseParser::WEEK - 138)) - | (1ULL << (ClickHouseParser::YEAR - 138)))) != 0))) { - _errHandler->recoverInline(this); - } - else { - _errHandler->reportMatch(this); - consume(); - } - - } - catch (RecognitionException &e) { - _errHandler->reportError(this, e); - _localctx->exception = std::current_exception(); - _errHandler->recover(this, _localctx->exception); - } - - return _localctx; -} - -//----------------- KeywordContext 
------------------------------------------------------------------ - -ClickHouseParser::KeywordContext::KeywordContext(ParserRuleContext *parent, size_t invokingState) - : ParserRuleContext(parent, invokingState) { -} - -tree::TerminalNode* ClickHouseParser::KeywordContext::AFTER() { - return getToken(ClickHouseParser::AFTER, 0); -} - -tree::TerminalNode* ClickHouseParser::KeywordContext::ALIAS() { - return getToken(ClickHouseParser::ALIAS, 0); -} - -tree::TerminalNode* ClickHouseParser::KeywordContext::ALL() { - return getToken(ClickHouseParser::ALL, 0); -} - -tree::TerminalNode* ClickHouseParser::KeywordContext::ALTER() { - return getToken(ClickHouseParser::ALTER, 0); -} - -tree::TerminalNode* ClickHouseParser::KeywordContext::AND() { - return getToken(ClickHouseParser::AND, 0); -} - -tree::TerminalNode* ClickHouseParser::KeywordContext::ANTI() { - return getToken(ClickHouseParser::ANTI, 0); -} - -tree::TerminalNode* ClickHouseParser::KeywordContext::ANY() { - return getToken(ClickHouseParser::ANY, 0); -} - -tree::TerminalNode* ClickHouseParser::KeywordContext::ARRAY() { - return getToken(ClickHouseParser::ARRAY, 0); -} - -tree::TerminalNode* ClickHouseParser::KeywordContext::AS() { - return getToken(ClickHouseParser::AS, 0); -} - -tree::TerminalNode* ClickHouseParser::KeywordContext::ASCENDING() { - return getToken(ClickHouseParser::ASCENDING, 0); -} - -tree::TerminalNode* ClickHouseParser::KeywordContext::ASOF() { - return getToken(ClickHouseParser::ASOF, 0); -} - -tree::TerminalNode* ClickHouseParser::KeywordContext::AST() { - return getToken(ClickHouseParser::AST, 0); -} - -tree::TerminalNode* ClickHouseParser::KeywordContext::ASYNC() { - return getToken(ClickHouseParser::ASYNC, 0); -} - -tree::TerminalNode* ClickHouseParser::KeywordContext::ATTACH() { - return getToken(ClickHouseParser::ATTACH, 0); -} - -tree::TerminalNode* ClickHouseParser::KeywordContext::BETWEEN() { - return getToken(ClickHouseParser::BETWEEN, 0); -} - -tree::TerminalNode* ClickHouseParser::KeywordContext::BOTH() { - return getToken(ClickHouseParser::BOTH, 0); -} - -tree::TerminalNode* ClickHouseParser::KeywordContext::BY() { - return getToken(ClickHouseParser::BY, 0); -} - -tree::TerminalNode* ClickHouseParser::KeywordContext::CASE() { - return getToken(ClickHouseParser::CASE, 0); -} - -tree::TerminalNode* ClickHouseParser::KeywordContext::CAST() { - return getToken(ClickHouseParser::CAST, 0); -} - -tree::TerminalNode* ClickHouseParser::KeywordContext::CHECK() { - return getToken(ClickHouseParser::CHECK, 0); -} - -tree::TerminalNode* ClickHouseParser::KeywordContext::CLEAR() { - return getToken(ClickHouseParser::CLEAR, 0); -} - -tree::TerminalNode* ClickHouseParser::KeywordContext::CLUSTER() { - return getToken(ClickHouseParser::CLUSTER, 0); -} - -tree::TerminalNode* ClickHouseParser::KeywordContext::CODEC() { - return getToken(ClickHouseParser::CODEC, 0); -} - -tree::TerminalNode* ClickHouseParser::KeywordContext::COLLATE() { - return getToken(ClickHouseParser::COLLATE, 0); -} - -tree::TerminalNode* ClickHouseParser::KeywordContext::COLUMN() { - return getToken(ClickHouseParser::COLUMN, 0); -} - -tree::TerminalNode* ClickHouseParser::KeywordContext::COMMENT() { - return getToken(ClickHouseParser::COMMENT, 0); -} - -tree::TerminalNode* ClickHouseParser::KeywordContext::CONSTRAINT() { - return getToken(ClickHouseParser::CONSTRAINT, 0); -} - -tree::TerminalNode* ClickHouseParser::KeywordContext::CREATE() { - return getToken(ClickHouseParser::CREATE, 0); -} - -tree::TerminalNode* 
ClickHouseParser::KeywordContext::CROSS() { - return getToken(ClickHouseParser::CROSS, 0); -} - -tree::TerminalNode* ClickHouseParser::KeywordContext::CUBE() { - return getToken(ClickHouseParser::CUBE, 0); -} - -tree::TerminalNode* ClickHouseParser::KeywordContext::DATABASE() { - return getToken(ClickHouseParser::DATABASE, 0); -} - -tree::TerminalNode* ClickHouseParser::KeywordContext::DATABASES() { - return getToken(ClickHouseParser::DATABASES, 0); -} - -tree::TerminalNode* ClickHouseParser::KeywordContext::DATE() { - return getToken(ClickHouseParser::DATE, 0); -} - -tree::TerminalNode* ClickHouseParser::KeywordContext::DEDUPLICATE() { - return getToken(ClickHouseParser::DEDUPLICATE, 0); -} - -tree::TerminalNode* ClickHouseParser::KeywordContext::DEFAULT() { - return getToken(ClickHouseParser::DEFAULT, 0); -} - -tree::TerminalNode* ClickHouseParser::KeywordContext::DELAY() { - return getToken(ClickHouseParser::DELAY, 0); -} - -tree::TerminalNode* ClickHouseParser::KeywordContext::DELETE() { - return getToken(ClickHouseParser::DELETE, 0); -} - -tree::TerminalNode* ClickHouseParser::KeywordContext::DESCRIBE() { - return getToken(ClickHouseParser::DESCRIBE, 0); -} - -tree::TerminalNode* ClickHouseParser::KeywordContext::DESC() { - return getToken(ClickHouseParser::DESC, 0); -} - -tree::TerminalNode* ClickHouseParser::KeywordContext::DESCENDING() { - return getToken(ClickHouseParser::DESCENDING, 0); -} - -tree::TerminalNode* ClickHouseParser::KeywordContext::DETACH() { - return getToken(ClickHouseParser::DETACH, 0); -} - -tree::TerminalNode* ClickHouseParser::KeywordContext::DICTIONARIES() { - return getToken(ClickHouseParser::DICTIONARIES, 0); -} - -tree::TerminalNode* ClickHouseParser::KeywordContext::DICTIONARY() { - return getToken(ClickHouseParser::DICTIONARY, 0); -} - -tree::TerminalNode* ClickHouseParser::KeywordContext::DISK() { - return getToken(ClickHouseParser::DISK, 0); -} - -tree::TerminalNode* ClickHouseParser::KeywordContext::DISTINCT() { - return getToken(ClickHouseParser::DISTINCT, 0); -} - -tree::TerminalNode* ClickHouseParser::KeywordContext::DISTRIBUTED() { - return getToken(ClickHouseParser::DISTRIBUTED, 0); -} - -tree::TerminalNode* ClickHouseParser::KeywordContext::DROP() { - return getToken(ClickHouseParser::DROP, 0); -} - -tree::TerminalNode* ClickHouseParser::KeywordContext::ELSE() { - return getToken(ClickHouseParser::ELSE, 0); -} - -tree::TerminalNode* ClickHouseParser::KeywordContext::END() { - return getToken(ClickHouseParser::END, 0); -} - -tree::TerminalNode* ClickHouseParser::KeywordContext::ENGINE() { - return getToken(ClickHouseParser::ENGINE, 0); -} - -tree::TerminalNode* ClickHouseParser::KeywordContext::EVENTS() { - return getToken(ClickHouseParser::EVENTS, 0); -} - -tree::TerminalNode* ClickHouseParser::KeywordContext::EXISTS() { - return getToken(ClickHouseParser::EXISTS, 0); -} - -tree::TerminalNode* ClickHouseParser::KeywordContext::EXPLAIN() { - return getToken(ClickHouseParser::EXPLAIN, 0); -} - -tree::TerminalNode* ClickHouseParser::KeywordContext::EXPRESSION() { - return getToken(ClickHouseParser::EXPRESSION, 0); -} - -tree::TerminalNode* ClickHouseParser::KeywordContext::EXTRACT() { - return getToken(ClickHouseParser::EXTRACT, 0); -} - -tree::TerminalNode* ClickHouseParser::KeywordContext::FETCHES() { - return getToken(ClickHouseParser::FETCHES, 0); -} - -tree::TerminalNode* ClickHouseParser::KeywordContext::FINAL() { - return getToken(ClickHouseParser::FINAL, 0); -} - -tree::TerminalNode* ClickHouseParser::KeywordContext::FIRST() { - return 
getToken(ClickHouseParser::FIRST, 0); -} - -tree::TerminalNode* ClickHouseParser::KeywordContext::FLUSH() { - return getToken(ClickHouseParser::FLUSH, 0); -} - -tree::TerminalNode* ClickHouseParser::KeywordContext::FOR() { - return getToken(ClickHouseParser::FOR, 0); -} - -tree::TerminalNode* ClickHouseParser::KeywordContext::FORMAT() { - return getToken(ClickHouseParser::FORMAT, 0); -} - -tree::TerminalNode* ClickHouseParser::KeywordContext::FREEZE() { - return getToken(ClickHouseParser::FREEZE, 0); -} - -tree::TerminalNode* ClickHouseParser::KeywordContext::FROM() { - return getToken(ClickHouseParser::FROM, 0); -} - -tree::TerminalNode* ClickHouseParser::KeywordContext::FULL() { - return getToken(ClickHouseParser::FULL, 0); -} - -tree::TerminalNode* ClickHouseParser::KeywordContext::FUNCTION() { - return getToken(ClickHouseParser::FUNCTION, 0); -} - -tree::TerminalNode* ClickHouseParser::KeywordContext::GLOBAL() { - return getToken(ClickHouseParser::GLOBAL, 0); -} - -tree::TerminalNode* ClickHouseParser::KeywordContext::GRANULARITY() { - return getToken(ClickHouseParser::GRANULARITY, 0); -} - -tree::TerminalNode* ClickHouseParser::KeywordContext::GROUP() { - return getToken(ClickHouseParser::GROUP, 0); -} - -tree::TerminalNode* ClickHouseParser::KeywordContext::HAVING() { - return getToken(ClickHouseParser::HAVING, 0); -} - -tree::TerminalNode* ClickHouseParser::KeywordContext::HIERARCHICAL() { - return getToken(ClickHouseParser::HIERARCHICAL, 0); -} - -tree::TerminalNode* ClickHouseParser::KeywordContext::ID() { - return getToken(ClickHouseParser::ID, 0); -} - -tree::TerminalNode* ClickHouseParser::KeywordContext::IF() { - return getToken(ClickHouseParser::IF, 0); -} - -tree::TerminalNode* ClickHouseParser::KeywordContext::ILIKE() { - return getToken(ClickHouseParser::ILIKE, 0); -} - -tree::TerminalNode* ClickHouseParser::KeywordContext::IN() { - return getToken(ClickHouseParser::IN, 0); -} - -tree::TerminalNode* ClickHouseParser::KeywordContext::INDEX() { - return getToken(ClickHouseParser::INDEX, 0); -} - -tree::TerminalNode* ClickHouseParser::KeywordContext::INJECTIVE() { - return getToken(ClickHouseParser::INJECTIVE, 0); -} - -tree::TerminalNode* ClickHouseParser::KeywordContext::INNER() { - return getToken(ClickHouseParser::INNER, 0); -} - -tree::TerminalNode* ClickHouseParser::KeywordContext::INSERT() { - return getToken(ClickHouseParser::INSERT, 0); -} - -tree::TerminalNode* ClickHouseParser::KeywordContext::INTERVAL() { - return getToken(ClickHouseParser::INTERVAL, 0); -} - -tree::TerminalNode* ClickHouseParser::KeywordContext::INTO() { - return getToken(ClickHouseParser::INTO, 0); -} - -tree::TerminalNode* ClickHouseParser::KeywordContext::IS() { - return getToken(ClickHouseParser::IS, 0); -} - -tree::TerminalNode* ClickHouseParser::KeywordContext::IS_OBJECT_ID() { - return getToken(ClickHouseParser::IS_OBJECT_ID, 0); -} - -tree::TerminalNode* ClickHouseParser::KeywordContext::JOIN() { - return getToken(ClickHouseParser::JOIN, 0); -} - -tree::TerminalNode* ClickHouseParser::KeywordContext::JSON_FALSE() { - return getToken(ClickHouseParser::JSON_FALSE, 0); -} - -tree::TerminalNode* ClickHouseParser::KeywordContext::JSON_TRUE() { - return getToken(ClickHouseParser::JSON_TRUE, 0); -} - -tree::TerminalNode* ClickHouseParser::KeywordContext::KEY() { - return getToken(ClickHouseParser::KEY, 0); -} - -tree::TerminalNode* ClickHouseParser::KeywordContext::KILL() { - return getToken(ClickHouseParser::KILL, 0); -} - -tree::TerminalNode* ClickHouseParser::KeywordContext::LAST() { - return 
getToken(ClickHouseParser::LAST, 0); -} - -tree::TerminalNode* ClickHouseParser::KeywordContext::LAYOUT() { - return getToken(ClickHouseParser::LAYOUT, 0); -} - -tree::TerminalNode* ClickHouseParser::KeywordContext::LEADING() { - return getToken(ClickHouseParser::LEADING, 0); -} - -tree::TerminalNode* ClickHouseParser::KeywordContext::LEFT() { - return getToken(ClickHouseParser::LEFT, 0); -} - -tree::TerminalNode* ClickHouseParser::KeywordContext::LIFETIME() { - return getToken(ClickHouseParser::LIFETIME, 0); -} - -tree::TerminalNode* ClickHouseParser::KeywordContext::LIKE() { - return getToken(ClickHouseParser::LIKE, 0); -} - -tree::TerminalNode* ClickHouseParser::KeywordContext::LIMIT() { - return getToken(ClickHouseParser::LIMIT, 0); -} - -tree::TerminalNode* ClickHouseParser::KeywordContext::LIVE() { - return getToken(ClickHouseParser::LIVE, 0); -} - -tree::TerminalNode* ClickHouseParser::KeywordContext::LOCAL() { - return getToken(ClickHouseParser::LOCAL, 0); -} - -tree::TerminalNode* ClickHouseParser::KeywordContext::LOGS() { - return getToken(ClickHouseParser::LOGS, 0); -} - -tree::TerminalNode* ClickHouseParser::KeywordContext::MATERIALIZE() { - return getToken(ClickHouseParser::MATERIALIZE, 0); -} - -tree::TerminalNode* ClickHouseParser::KeywordContext::MATERIALIZED() { - return getToken(ClickHouseParser::MATERIALIZED, 0); -} - -tree::TerminalNode* ClickHouseParser::KeywordContext::MAX() { - return getToken(ClickHouseParser::MAX, 0); -} - -tree::TerminalNode* ClickHouseParser::KeywordContext::MERGES() { - return getToken(ClickHouseParser::MERGES, 0); -} - -tree::TerminalNode* ClickHouseParser::KeywordContext::MIN() { - return getToken(ClickHouseParser::MIN, 0); -} - -tree::TerminalNode* ClickHouseParser::KeywordContext::MODIFY() { - return getToken(ClickHouseParser::MODIFY, 0); -} - -tree::TerminalNode* ClickHouseParser::KeywordContext::MOVE() { - return getToken(ClickHouseParser::MOVE, 0); -} - -tree::TerminalNode* ClickHouseParser::KeywordContext::MUTATION() { - return getToken(ClickHouseParser::MUTATION, 0); -} - -tree::TerminalNode* ClickHouseParser::KeywordContext::NO() { - return getToken(ClickHouseParser::NO, 0); -} - -tree::TerminalNode* ClickHouseParser::KeywordContext::NOT() { - return getToken(ClickHouseParser::NOT, 0); -} - -tree::TerminalNode* ClickHouseParser::KeywordContext::NULLS() { - return getToken(ClickHouseParser::NULLS, 0); -} - -tree::TerminalNode* ClickHouseParser::KeywordContext::OFFSET() { - return getToken(ClickHouseParser::OFFSET, 0); -} - -tree::TerminalNode* ClickHouseParser::KeywordContext::ON() { - return getToken(ClickHouseParser::ON, 0); -} - -tree::TerminalNode* ClickHouseParser::KeywordContext::OPTIMIZE() { - return getToken(ClickHouseParser::OPTIMIZE, 0); -} - -tree::TerminalNode* ClickHouseParser::KeywordContext::OR() { - return getToken(ClickHouseParser::OR, 0); -} - -tree::TerminalNode* ClickHouseParser::KeywordContext::ORDER() { - return getToken(ClickHouseParser::ORDER, 0); -} - -tree::TerminalNode* ClickHouseParser::KeywordContext::OUTER() { - return getToken(ClickHouseParser::OUTER, 0); -} - -tree::TerminalNode* ClickHouseParser::KeywordContext::OUTFILE() { - return getToken(ClickHouseParser::OUTFILE, 0); -} - -tree::TerminalNode* ClickHouseParser::KeywordContext::PARTITION() { - return getToken(ClickHouseParser::PARTITION, 0); -} - -tree::TerminalNode* ClickHouseParser::KeywordContext::POPULATE() { - return getToken(ClickHouseParser::POPULATE, 0); -} - -tree::TerminalNode* ClickHouseParser::KeywordContext::PREWHERE() { - return 
getToken(ClickHouseParser::PREWHERE, 0); -} - -tree::TerminalNode* ClickHouseParser::KeywordContext::PRIMARY() { - return getToken(ClickHouseParser::PRIMARY, 0); -} - -tree::TerminalNode* ClickHouseParser::KeywordContext::RANGE() { - return getToken(ClickHouseParser::RANGE, 0); -} - -tree::TerminalNode* ClickHouseParser::KeywordContext::RELOAD() { - return getToken(ClickHouseParser::RELOAD, 0); -} - -tree::TerminalNode* ClickHouseParser::KeywordContext::REMOVE() { - return getToken(ClickHouseParser::REMOVE, 0); -} - -tree::TerminalNode* ClickHouseParser::KeywordContext::RENAME() { - return getToken(ClickHouseParser::RENAME, 0); -} - -tree::TerminalNode* ClickHouseParser::KeywordContext::REPLACE() { - return getToken(ClickHouseParser::REPLACE, 0); -} - -tree::TerminalNode* ClickHouseParser::KeywordContext::REPLICA() { - return getToken(ClickHouseParser::REPLICA, 0); -} - -tree::TerminalNode* ClickHouseParser::KeywordContext::REPLICATED() { - return getToken(ClickHouseParser::REPLICATED, 0); -} - -tree::TerminalNode* ClickHouseParser::KeywordContext::RIGHT() { - return getToken(ClickHouseParser::RIGHT, 0); -} - -tree::TerminalNode* ClickHouseParser::KeywordContext::ROLLUP() { - return getToken(ClickHouseParser::ROLLUP, 0); -} - -tree::TerminalNode* ClickHouseParser::KeywordContext::SAMPLE() { - return getToken(ClickHouseParser::SAMPLE, 0); -} - -tree::TerminalNode* ClickHouseParser::KeywordContext::SELECT() { - return getToken(ClickHouseParser::SELECT, 0); -} - -tree::TerminalNode* ClickHouseParser::KeywordContext::SEMI() { - return getToken(ClickHouseParser::SEMI, 0); -} - -tree::TerminalNode* ClickHouseParser::KeywordContext::SENDS() { - return getToken(ClickHouseParser::SENDS, 0); -} - -tree::TerminalNode* ClickHouseParser::KeywordContext::SET() { - return getToken(ClickHouseParser::SET, 0); -} - -tree::TerminalNode* ClickHouseParser::KeywordContext::SETTINGS() { - return getToken(ClickHouseParser::SETTINGS, 0); -} - -tree::TerminalNode* ClickHouseParser::KeywordContext::SHOW() { - return getToken(ClickHouseParser::SHOW, 0); -} - -tree::TerminalNode* ClickHouseParser::KeywordContext::SOURCE() { - return getToken(ClickHouseParser::SOURCE, 0); -} - -tree::TerminalNode* ClickHouseParser::KeywordContext::START() { - return getToken(ClickHouseParser::START, 0); -} - -tree::TerminalNode* ClickHouseParser::KeywordContext::STOP() { - return getToken(ClickHouseParser::STOP, 0); -} - -tree::TerminalNode* ClickHouseParser::KeywordContext::SUBSTRING() { - return getToken(ClickHouseParser::SUBSTRING, 0); -} - -tree::TerminalNode* ClickHouseParser::KeywordContext::SYNC() { - return getToken(ClickHouseParser::SYNC, 0); -} - -tree::TerminalNode* ClickHouseParser::KeywordContext::SYNTAX() { - return getToken(ClickHouseParser::SYNTAX, 0); -} - -tree::TerminalNode* ClickHouseParser::KeywordContext::SYSTEM() { - return getToken(ClickHouseParser::SYSTEM, 0); -} - -tree::TerminalNode* ClickHouseParser::KeywordContext::TABLE() { - return getToken(ClickHouseParser::TABLE, 0); -} - -tree::TerminalNode* ClickHouseParser::KeywordContext::TABLES() { - return getToken(ClickHouseParser::TABLES, 0); -} - -tree::TerminalNode* ClickHouseParser::KeywordContext::TEMPORARY() { - return getToken(ClickHouseParser::TEMPORARY, 0); -} - -tree::TerminalNode* ClickHouseParser::KeywordContext::TEST() { - return getToken(ClickHouseParser::TEST, 0); -} - -tree::TerminalNode* ClickHouseParser::KeywordContext::THEN() { - return getToken(ClickHouseParser::THEN, 0); -} - -tree::TerminalNode* ClickHouseParser::KeywordContext::TIES() { - 
return getToken(ClickHouseParser::TIES, 0); -} - -tree::TerminalNode* ClickHouseParser::KeywordContext::TIMEOUT() { - return getToken(ClickHouseParser::TIMEOUT, 0); -} - -tree::TerminalNode* ClickHouseParser::KeywordContext::TIMESTAMP() { - return getToken(ClickHouseParser::TIMESTAMP, 0); -} - -tree::TerminalNode* ClickHouseParser::KeywordContext::TOTALS() { - return getToken(ClickHouseParser::TOTALS, 0); -} - -tree::TerminalNode* ClickHouseParser::KeywordContext::TRAILING() { - return getToken(ClickHouseParser::TRAILING, 0); -} - -tree::TerminalNode* ClickHouseParser::KeywordContext::TRIM() { - return getToken(ClickHouseParser::TRIM, 0); -} - -tree::TerminalNode* ClickHouseParser::KeywordContext::TRUNCATE() { - return getToken(ClickHouseParser::TRUNCATE, 0); -} - -tree::TerminalNode* ClickHouseParser::KeywordContext::TO() { - return getToken(ClickHouseParser::TO, 0); -} - -tree::TerminalNode* ClickHouseParser::KeywordContext::TOP() { - return getToken(ClickHouseParser::TOP, 0); -} - -tree::TerminalNode* ClickHouseParser::KeywordContext::TTL() { - return getToken(ClickHouseParser::TTL, 0); -} - -tree::TerminalNode* ClickHouseParser::KeywordContext::TYPE() { - return getToken(ClickHouseParser::TYPE, 0); -} - -tree::TerminalNode* ClickHouseParser::KeywordContext::UNION() { - return getToken(ClickHouseParser::UNION, 0); -} - -tree::TerminalNode* ClickHouseParser::KeywordContext::UPDATE() { - return getToken(ClickHouseParser::UPDATE, 0); -} - -tree::TerminalNode* ClickHouseParser::KeywordContext::USE() { - return getToken(ClickHouseParser::USE, 0); -} - -tree::TerminalNode* ClickHouseParser::KeywordContext::USING() { - return getToken(ClickHouseParser::USING, 0); -} - -tree::TerminalNode* ClickHouseParser::KeywordContext::UUID() { - return getToken(ClickHouseParser::UUID, 0); -} - -tree::TerminalNode* ClickHouseParser::KeywordContext::VALUES() { - return getToken(ClickHouseParser::VALUES, 0); -} - -tree::TerminalNode* ClickHouseParser::KeywordContext::VIEW() { - return getToken(ClickHouseParser::VIEW, 0); -} - -tree::TerminalNode* ClickHouseParser::KeywordContext::VOLUME() { - return getToken(ClickHouseParser::VOLUME, 0); -} - -tree::TerminalNode* ClickHouseParser::KeywordContext::WATCH() { - return getToken(ClickHouseParser::WATCH, 0); -} - -tree::TerminalNode* ClickHouseParser::KeywordContext::WHEN() { - return getToken(ClickHouseParser::WHEN, 0); -} - -tree::TerminalNode* ClickHouseParser::KeywordContext::WHERE() { - return getToken(ClickHouseParser::WHERE, 0); -} - -tree::TerminalNode* ClickHouseParser::KeywordContext::WITH() { - return getToken(ClickHouseParser::WITH, 0); -} - - -size_t ClickHouseParser::KeywordContext::getRuleIndex() const { - return ClickHouseParser::RuleKeyword; -} - -antlrcpp::Any ClickHouseParser::KeywordContext::accept(tree::ParseTreeVisitor *visitor) { - if (auto parserVisitor = dynamic_cast(visitor)) - return parserVisitor->visitKeyword(this); - else - return visitor->visitChildren(this); -} - -ClickHouseParser::KeywordContext* ClickHouseParser::keyword() { - KeywordContext *_localctx = _tracker.createInstance(_ctx, getState()); - enterRule(_localctx, 206, ClickHouseParser::RuleKeyword); - size_t _la = 0; - - auto onExit = finally([=] { - exitRule(); - }); - try { - enterOuterAlt(_localctx, 1); - setState(1894); - _la = _input->LA(1); - if (!((((_la & ~ 0x3fULL) == 0) && - ((1ULL << _la) & ((1ULL << ClickHouseParser::AFTER) - | (1ULL << ClickHouseParser::ALIAS) - | (1ULL << ClickHouseParser::ALL) - | (1ULL << ClickHouseParser::ALTER) - | (1ULL << 
ClickHouseParser::AND) - | (1ULL << ClickHouseParser::ANTI) - | (1ULL << ClickHouseParser::ANY) - | (1ULL << ClickHouseParser::ARRAY) - | (1ULL << ClickHouseParser::AS) - | (1ULL << ClickHouseParser::ASCENDING) - | (1ULL << ClickHouseParser::ASOF) - | (1ULL << ClickHouseParser::AST) - | (1ULL << ClickHouseParser::ASYNC) - | (1ULL << ClickHouseParser::ATTACH) - | (1ULL << ClickHouseParser::BETWEEN) - | (1ULL << ClickHouseParser::BOTH) - | (1ULL << ClickHouseParser::BY) - | (1ULL << ClickHouseParser::CASE) - | (1ULL << ClickHouseParser::CAST) - | (1ULL << ClickHouseParser::CHECK) - | (1ULL << ClickHouseParser::CLEAR) - | (1ULL << ClickHouseParser::CLUSTER) - | (1ULL << ClickHouseParser::CODEC) - | (1ULL << ClickHouseParser::COLLATE) - | (1ULL << ClickHouseParser::COLUMN) - | (1ULL << ClickHouseParser::COMMENT) - | (1ULL << ClickHouseParser::CONSTRAINT) - | (1ULL << ClickHouseParser::CREATE) - | (1ULL << ClickHouseParser::CROSS) - | (1ULL << ClickHouseParser::CUBE) - | (1ULL << ClickHouseParser::DATABASE) - | (1ULL << ClickHouseParser::DATABASES) - | (1ULL << ClickHouseParser::DATE) - | (1ULL << ClickHouseParser::DEDUPLICATE) - | (1ULL << ClickHouseParser::DEFAULT) - | (1ULL << ClickHouseParser::DELAY) - | (1ULL << ClickHouseParser::DELETE) - | (1ULL << ClickHouseParser::DESC) - | (1ULL << ClickHouseParser::DESCENDING) - | (1ULL << ClickHouseParser::DESCRIBE) - | (1ULL << ClickHouseParser::DETACH) - | (1ULL << ClickHouseParser::DICTIONARIES) - | (1ULL << ClickHouseParser::DICTIONARY) - | (1ULL << ClickHouseParser::DISK) - | (1ULL << ClickHouseParser::DISTINCT) - | (1ULL << ClickHouseParser::DISTRIBUTED) - | (1ULL << ClickHouseParser::DROP) - | (1ULL << ClickHouseParser::ELSE) - | (1ULL << ClickHouseParser::END) - | (1ULL << ClickHouseParser::ENGINE) - | (1ULL << ClickHouseParser::EVENTS) - | (1ULL << ClickHouseParser::EXISTS) - | (1ULL << ClickHouseParser::EXPLAIN) - | (1ULL << ClickHouseParser::EXPRESSION) - | (1ULL << ClickHouseParser::EXTRACT) - | (1ULL << ClickHouseParser::FETCHES) - | (1ULL << ClickHouseParser::FINAL) - | (1ULL << ClickHouseParser::FIRST) - | (1ULL << ClickHouseParser::FLUSH) - | (1ULL << ClickHouseParser::FOR) - | (1ULL << ClickHouseParser::FORMAT))) != 0) || ((((_la - 64) & ~ 0x3fULL) == 0) && - ((1ULL << (_la - 64)) & ((1ULL << (ClickHouseParser::FREEZE - 64)) - | (1ULL << (ClickHouseParser::FROM - 64)) - | (1ULL << (ClickHouseParser::FULL - 64)) - | (1ULL << (ClickHouseParser::FUNCTION - 64)) - | (1ULL << (ClickHouseParser::GLOBAL - 64)) - | (1ULL << (ClickHouseParser::GRANULARITY - 64)) - | (1ULL << (ClickHouseParser::GROUP - 64)) - | (1ULL << (ClickHouseParser::HAVING - 64)) - | (1ULL << (ClickHouseParser::HIERARCHICAL - 64)) - | (1ULL << (ClickHouseParser::ID - 64)) - | (1ULL << (ClickHouseParser::IF - 64)) - | (1ULL << (ClickHouseParser::ILIKE - 64)) - | (1ULL << (ClickHouseParser::IN - 64)) - | (1ULL << (ClickHouseParser::INDEX - 64)) - | (1ULL << (ClickHouseParser::INJECTIVE - 64)) - | (1ULL << (ClickHouseParser::INNER - 64)) - | (1ULL << (ClickHouseParser::INSERT - 64)) - | (1ULL << (ClickHouseParser::INTERVAL - 64)) - | (1ULL << (ClickHouseParser::INTO - 64)) - | (1ULL << (ClickHouseParser::IS - 64)) - | (1ULL << (ClickHouseParser::IS_OBJECT_ID - 64)) - | (1ULL << (ClickHouseParser::JOIN - 64)) - | (1ULL << (ClickHouseParser::KEY - 64)) - | (1ULL << (ClickHouseParser::KILL - 64)) - | (1ULL << (ClickHouseParser::LAST - 64)) - | (1ULL << (ClickHouseParser::LAYOUT - 64)) - | (1ULL << (ClickHouseParser::LEADING - 64)) - | (1ULL << (ClickHouseParser::LEFT - 64)) - 
| (1ULL << (ClickHouseParser::LIFETIME - 64)) - | (1ULL << (ClickHouseParser::LIKE - 64)) - | (1ULL << (ClickHouseParser::LIMIT - 64)) - | (1ULL << (ClickHouseParser::LIVE - 64)) - | (1ULL << (ClickHouseParser::LOCAL - 64)) - | (1ULL << (ClickHouseParser::LOGS - 64)) - | (1ULL << (ClickHouseParser::MATERIALIZE - 64)) - | (1ULL << (ClickHouseParser::MATERIALIZED - 64)) - | (1ULL << (ClickHouseParser::MAX - 64)) - | (1ULL << (ClickHouseParser::MERGES - 64)) - | (1ULL << (ClickHouseParser::MIN - 64)) - | (1ULL << (ClickHouseParser::MODIFY - 64)) - | (1ULL << (ClickHouseParser::MOVE - 64)) - | (1ULL << (ClickHouseParser::MUTATION - 64)) - | (1ULL << (ClickHouseParser::NO - 64)) - | (1ULL << (ClickHouseParser::NOT - 64)) - | (1ULL << (ClickHouseParser::NULLS - 64)) - | (1ULL << (ClickHouseParser::OFFSET - 64)) - | (1ULL << (ClickHouseParser::ON - 64)) - | (1ULL << (ClickHouseParser::OPTIMIZE - 64)) - | (1ULL << (ClickHouseParser::OR - 64)) - | (1ULL << (ClickHouseParser::ORDER - 64)) - | (1ULL << (ClickHouseParser::OUTER - 64)) - | (1ULL << (ClickHouseParser::OUTFILE - 64)) - | (1ULL << (ClickHouseParser::PARTITION - 64)) - | (1ULL << (ClickHouseParser::POPULATE - 64)) - | (1ULL << (ClickHouseParser::PREWHERE - 64)) - | (1ULL << (ClickHouseParser::PRIMARY - 64)))) != 0) || ((((_la - 128) & ~ 0x3fULL) == 0) && - ((1ULL << (_la - 128)) & ((1ULL << (ClickHouseParser::RANGE - 128)) - | (1ULL << (ClickHouseParser::RELOAD - 128)) - | (1ULL << (ClickHouseParser::REMOVE - 128)) - | (1ULL << (ClickHouseParser::RENAME - 128)) - | (1ULL << (ClickHouseParser::REPLACE - 128)) - | (1ULL << (ClickHouseParser::REPLICA - 128)) - | (1ULL << (ClickHouseParser::REPLICATED - 128)) - | (1ULL << (ClickHouseParser::RIGHT - 128)) - | (1ULL << (ClickHouseParser::ROLLUP - 128)) - | (1ULL << (ClickHouseParser::SAMPLE - 128)) - | (1ULL << (ClickHouseParser::SELECT - 128)) - | (1ULL << (ClickHouseParser::SEMI - 128)) - | (1ULL << (ClickHouseParser::SENDS - 128)) - | (1ULL << (ClickHouseParser::SET - 128)) - | (1ULL << (ClickHouseParser::SETTINGS - 128)) - | (1ULL << (ClickHouseParser::SHOW - 128)) - | (1ULL << (ClickHouseParser::SOURCE - 128)) - | (1ULL << (ClickHouseParser::START - 128)) - | (1ULL << (ClickHouseParser::STOP - 128)) - | (1ULL << (ClickHouseParser::SUBSTRING - 128)) - | (1ULL << (ClickHouseParser::SYNC - 128)) - | (1ULL << (ClickHouseParser::SYNTAX - 128)) - | (1ULL << (ClickHouseParser::SYSTEM - 128)) - | (1ULL << (ClickHouseParser::TABLE - 128)) - | (1ULL << (ClickHouseParser::TABLES - 128)) - | (1ULL << (ClickHouseParser::TEMPORARY - 128)) - | (1ULL << (ClickHouseParser::TEST - 128)) - | (1ULL << (ClickHouseParser::THEN - 128)) - | (1ULL << (ClickHouseParser::TIES - 128)) - | (1ULL << (ClickHouseParser::TIMEOUT - 128)) - | (1ULL << (ClickHouseParser::TIMESTAMP - 128)) - | (1ULL << (ClickHouseParser::TO - 128)) - | (1ULL << (ClickHouseParser::TOP - 128)) - | (1ULL << (ClickHouseParser::TOTALS - 128)) - | (1ULL << (ClickHouseParser::TRAILING - 128)) - | (1ULL << (ClickHouseParser::TRIM - 128)) - | (1ULL << (ClickHouseParser::TRUNCATE - 128)) - | (1ULL << (ClickHouseParser::TTL - 128)) - | (1ULL << (ClickHouseParser::TYPE - 128)) - | (1ULL << (ClickHouseParser::UNION - 128)) - | (1ULL << (ClickHouseParser::UPDATE - 128)) - | (1ULL << (ClickHouseParser::USE - 128)) - | (1ULL << (ClickHouseParser::USING - 128)) - | (1ULL << (ClickHouseParser::UUID - 128)) - | (1ULL << (ClickHouseParser::VALUES - 128)) - | (1ULL << (ClickHouseParser::VIEW - 128)) - | (1ULL << (ClickHouseParser::VOLUME - 128)) - | (1ULL << 
(ClickHouseParser::WATCH - 128)) - | (1ULL << (ClickHouseParser::WHEN - 128)) - | (1ULL << (ClickHouseParser::WHERE - 128)) - | (1ULL << (ClickHouseParser::WITH - 128)) - | (1ULL << (ClickHouseParser::JSON_FALSE - 128)) - | (1ULL << (ClickHouseParser::JSON_TRUE - 128)))) != 0))) { - _errHandler->recoverInline(this); - } - else { - _errHandler->reportMatch(this); - consume(); - } - - } - catch (RecognitionException &e) { - _errHandler->reportError(this, e); - _localctx->exception = std::current_exception(); - _errHandler->recover(this, _localctx->exception); - } - - return _localctx; -} - -//----------------- KeywordForAliasContext ------------------------------------------------------------------ - -ClickHouseParser::KeywordForAliasContext::KeywordForAliasContext(ParserRuleContext *parent, size_t invokingState) - : ParserRuleContext(parent, invokingState) { -} - -tree::TerminalNode* ClickHouseParser::KeywordForAliasContext::DATE() { - return getToken(ClickHouseParser::DATE, 0); -} - -tree::TerminalNode* ClickHouseParser::KeywordForAliasContext::FIRST() { - return getToken(ClickHouseParser::FIRST, 0); -} - -tree::TerminalNode* ClickHouseParser::KeywordForAliasContext::ID() { - return getToken(ClickHouseParser::ID, 0); -} - -tree::TerminalNode* ClickHouseParser::KeywordForAliasContext::KEY() { - return getToken(ClickHouseParser::KEY, 0); -} - - -size_t ClickHouseParser::KeywordForAliasContext::getRuleIndex() const { - return ClickHouseParser::RuleKeywordForAlias; -} - -antlrcpp::Any ClickHouseParser::KeywordForAliasContext::accept(tree::ParseTreeVisitor *visitor) { - if (auto parserVisitor = dynamic_cast(visitor)) - return parserVisitor->visitKeywordForAlias(this); - else - return visitor->visitChildren(this); -} - -ClickHouseParser::KeywordForAliasContext* ClickHouseParser::keywordForAlias() { - KeywordForAliasContext *_localctx = _tracker.createInstance(_ctx, getState()); - enterRule(_localctx, 208, ClickHouseParser::RuleKeywordForAlias); - size_t _la = 0; - - auto onExit = finally([=] { - exitRule(); - }); - try { - enterOuterAlt(_localctx, 1); - setState(1896); - _la = _input->LA(1); - if (!(((((_la - 34) & ~ 0x3fULL) == 0) && - ((1ULL << (_la - 34)) & ((1ULL << (ClickHouseParser::DATE - 34)) - | (1ULL << (ClickHouseParser::FIRST - 34)) - | (1ULL << (ClickHouseParser::ID - 34)) - | (1ULL << (ClickHouseParser::KEY - 34)))) != 0))) { - _errHandler->recoverInline(this); - } - else { - _errHandler->reportMatch(this); - consume(); - } - - } - catch (RecognitionException &e) { - _errHandler->reportError(this, e); - _localctx->exception = std::current_exception(); - _errHandler->recover(this, _localctx->exception); - } - - return _localctx; -} - -//----------------- AliasContext ------------------------------------------------------------------ - -ClickHouseParser::AliasContext::AliasContext(ParserRuleContext *parent, size_t invokingState) - : ParserRuleContext(parent, invokingState) { -} - -tree::TerminalNode* ClickHouseParser::AliasContext::IDENTIFIER() { - return getToken(ClickHouseParser::IDENTIFIER, 0); -} - -ClickHouseParser::KeywordForAliasContext* ClickHouseParser::AliasContext::keywordForAlias() { - return getRuleContext(0); -} - - -size_t ClickHouseParser::AliasContext::getRuleIndex() const { - return ClickHouseParser::RuleAlias; -} - -antlrcpp::Any ClickHouseParser::AliasContext::accept(tree::ParseTreeVisitor *visitor) { - if (auto parserVisitor = dynamic_cast(visitor)) - return parserVisitor->visitAlias(this); - else - return visitor->visitChildren(this); -} - 
-ClickHouseParser::AliasContext* ClickHouseParser::alias() { - AliasContext *_localctx = _tracker.createInstance(_ctx, getState()); - enterRule(_localctx, 210, ClickHouseParser::RuleAlias); - - auto onExit = finally([=] { - exitRule(); - }); - try { - setState(1900); - _errHandler->sync(this); - switch (_input->LA(1)) { - case ClickHouseParser::IDENTIFIER: { - enterOuterAlt(_localctx, 1); - setState(1898); - match(ClickHouseParser::IDENTIFIER); - break; - } - - case ClickHouseParser::DATE: - case ClickHouseParser::FIRST: - case ClickHouseParser::ID: - case ClickHouseParser::KEY: { - enterOuterAlt(_localctx, 2); - setState(1899); - keywordForAlias(); - break; - } - - default: - throw NoViableAltException(this); - } - - } - catch (RecognitionException &e) { - _errHandler->reportError(this, e); - _localctx->exception = std::current_exception(); - _errHandler->recover(this, _localctx->exception); - } - - return _localctx; -} - -//----------------- IdentifierContext ------------------------------------------------------------------ - -ClickHouseParser::IdentifierContext::IdentifierContext(ParserRuleContext *parent, size_t invokingState) - : ParserRuleContext(parent, invokingState) { -} - -tree::TerminalNode* ClickHouseParser::IdentifierContext::IDENTIFIER() { - return getToken(ClickHouseParser::IDENTIFIER, 0); -} - -ClickHouseParser::IntervalContext* ClickHouseParser::IdentifierContext::interval() { - return getRuleContext(0); -} - -ClickHouseParser::KeywordContext* ClickHouseParser::IdentifierContext::keyword() { - return getRuleContext(0); -} - - -size_t ClickHouseParser::IdentifierContext::getRuleIndex() const { - return ClickHouseParser::RuleIdentifier; -} - -antlrcpp::Any ClickHouseParser::IdentifierContext::accept(tree::ParseTreeVisitor *visitor) { - if (auto parserVisitor = dynamic_cast(visitor)) - return parserVisitor->visitIdentifier(this); - else - return visitor->visitChildren(this); -} - -ClickHouseParser::IdentifierContext* ClickHouseParser::identifier() { - IdentifierContext *_localctx = _tracker.createInstance(_ctx, getState()); - enterRule(_localctx, 212, ClickHouseParser::RuleIdentifier); - - auto onExit = finally([=] { - exitRule(); - }); - try { - setState(1905); - _errHandler->sync(this); - switch (_input->LA(1)) { - case ClickHouseParser::IDENTIFIER: { - enterOuterAlt(_localctx, 1); - setState(1902); - match(ClickHouseParser::IDENTIFIER); - break; - } - - case ClickHouseParser::DAY: - case ClickHouseParser::HOUR: - case ClickHouseParser::MINUTE: - case ClickHouseParser::MONTH: - case ClickHouseParser::QUARTER: - case ClickHouseParser::SECOND: - case ClickHouseParser::WEEK: - case ClickHouseParser::YEAR: { - enterOuterAlt(_localctx, 2); - setState(1903); - interval(); - break; - } - - case ClickHouseParser::AFTER: - case ClickHouseParser::ALIAS: - case ClickHouseParser::ALL: - case ClickHouseParser::ALTER: - case ClickHouseParser::AND: - case ClickHouseParser::ANTI: - case ClickHouseParser::ANY: - case ClickHouseParser::ARRAY: - case ClickHouseParser::AS: - case ClickHouseParser::ASCENDING: - case ClickHouseParser::ASOF: - case ClickHouseParser::AST: - case ClickHouseParser::ASYNC: - case ClickHouseParser::ATTACH: - case ClickHouseParser::BETWEEN: - case ClickHouseParser::BOTH: - case ClickHouseParser::BY: - case ClickHouseParser::CASE: - case ClickHouseParser::CAST: - case ClickHouseParser::CHECK: - case ClickHouseParser::CLEAR: - case ClickHouseParser::CLUSTER: - case ClickHouseParser::CODEC: - case ClickHouseParser::COLLATE: - case ClickHouseParser::COLUMN: - case 
ClickHouseParser::COMMENT: - case ClickHouseParser::CONSTRAINT: - case ClickHouseParser::CREATE: - case ClickHouseParser::CROSS: - case ClickHouseParser::CUBE: - case ClickHouseParser::DATABASE: - case ClickHouseParser::DATABASES: - case ClickHouseParser::DATE: - case ClickHouseParser::DEDUPLICATE: - case ClickHouseParser::DEFAULT: - case ClickHouseParser::DELAY: - case ClickHouseParser::DELETE: - case ClickHouseParser::DESC: - case ClickHouseParser::DESCENDING: - case ClickHouseParser::DESCRIBE: - case ClickHouseParser::DETACH: - case ClickHouseParser::DICTIONARIES: - case ClickHouseParser::DICTIONARY: - case ClickHouseParser::DISK: - case ClickHouseParser::DISTINCT: - case ClickHouseParser::DISTRIBUTED: - case ClickHouseParser::DROP: - case ClickHouseParser::ELSE: - case ClickHouseParser::END: - case ClickHouseParser::ENGINE: - case ClickHouseParser::EVENTS: - case ClickHouseParser::EXISTS: - case ClickHouseParser::EXPLAIN: - case ClickHouseParser::EXPRESSION: - case ClickHouseParser::EXTRACT: - case ClickHouseParser::FETCHES: - case ClickHouseParser::FINAL: - case ClickHouseParser::FIRST: - case ClickHouseParser::FLUSH: - case ClickHouseParser::FOR: - case ClickHouseParser::FORMAT: - case ClickHouseParser::FREEZE: - case ClickHouseParser::FROM: - case ClickHouseParser::FULL: - case ClickHouseParser::FUNCTION: - case ClickHouseParser::GLOBAL: - case ClickHouseParser::GRANULARITY: - case ClickHouseParser::GROUP: - case ClickHouseParser::HAVING: - case ClickHouseParser::HIERARCHICAL: - case ClickHouseParser::ID: - case ClickHouseParser::IF: - case ClickHouseParser::ILIKE: - case ClickHouseParser::IN: - case ClickHouseParser::INDEX: - case ClickHouseParser::INJECTIVE: - case ClickHouseParser::INNER: - case ClickHouseParser::INSERT: - case ClickHouseParser::INTERVAL: - case ClickHouseParser::INTO: - case ClickHouseParser::IS: - case ClickHouseParser::IS_OBJECT_ID: - case ClickHouseParser::JOIN: - case ClickHouseParser::KEY: - case ClickHouseParser::KILL: - case ClickHouseParser::LAST: - case ClickHouseParser::LAYOUT: - case ClickHouseParser::LEADING: - case ClickHouseParser::LEFT: - case ClickHouseParser::LIFETIME: - case ClickHouseParser::LIKE: - case ClickHouseParser::LIMIT: - case ClickHouseParser::LIVE: - case ClickHouseParser::LOCAL: - case ClickHouseParser::LOGS: - case ClickHouseParser::MATERIALIZE: - case ClickHouseParser::MATERIALIZED: - case ClickHouseParser::MAX: - case ClickHouseParser::MERGES: - case ClickHouseParser::MIN: - case ClickHouseParser::MODIFY: - case ClickHouseParser::MOVE: - case ClickHouseParser::MUTATION: - case ClickHouseParser::NO: - case ClickHouseParser::NOT: - case ClickHouseParser::NULLS: - case ClickHouseParser::OFFSET: - case ClickHouseParser::ON: - case ClickHouseParser::OPTIMIZE: - case ClickHouseParser::OR: - case ClickHouseParser::ORDER: - case ClickHouseParser::OUTER: - case ClickHouseParser::OUTFILE: - case ClickHouseParser::PARTITION: - case ClickHouseParser::POPULATE: - case ClickHouseParser::PREWHERE: - case ClickHouseParser::PRIMARY: - case ClickHouseParser::RANGE: - case ClickHouseParser::RELOAD: - case ClickHouseParser::REMOVE: - case ClickHouseParser::RENAME: - case ClickHouseParser::REPLACE: - case ClickHouseParser::REPLICA: - case ClickHouseParser::REPLICATED: - case ClickHouseParser::RIGHT: - case ClickHouseParser::ROLLUP: - case ClickHouseParser::SAMPLE: - case ClickHouseParser::SELECT: - case ClickHouseParser::SEMI: - case ClickHouseParser::SENDS: - case ClickHouseParser::SET: - case ClickHouseParser::SETTINGS: - case 
ClickHouseParser::SHOW: - case ClickHouseParser::SOURCE: - case ClickHouseParser::START: - case ClickHouseParser::STOP: - case ClickHouseParser::SUBSTRING: - case ClickHouseParser::SYNC: - case ClickHouseParser::SYNTAX: - case ClickHouseParser::SYSTEM: - case ClickHouseParser::TABLE: - case ClickHouseParser::TABLES: - case ClickHouseParser::TEMPORARY: - case ClickHouseParser::TEST: - case ClickHouseParser::THEN: - case ClickHouseParser::TIES: - case ClickHouseParser::TIMEOUT: - case ClickHouseParser::TIMESTAMP: - case ClickHouseParser::TO: - case ClickHouseParser::TOP: - case ClickHouseParser::TOTALS: - case ClickHouseParser::TRAILING: - case ClickHouseParser::TRIM: - case ClickHouseParser::TRUNCATE: - case ClickHouseParser::TTL: - case ClickHouseParser::TYPE: - case ClickHouseParser::UNION: - case ClickHouseParser::UPDATE: - case ClickHouseParser::USE: - case ClickHouseParser::USING: - case ClickHouseParser::UUID: - case ClickHouseParser::VALUES: - case ClickHouseParser::VIEW: - case ClickHouseParser::VOLUME: - case ClickHouseParser::WATCH: - case ClickHouseParser::WHEN: - case ClickHouseParser::WHERE: - case ClickHouseParser::WITH: - case ClickHouseParser::JSON_FALSE: - case ClickHouseParser::JSON_TRUE: { - enterOuterAlt(_localctx, 3); - setState(1904); - keyword(); - break; - } - - default: - throw NoViableAltException(this); - } - - } - catch (RecognitionException &e) { - _errHandler->reportError(this, e); - _localctx->exception = std::current_exception(); - _errHandler->recover(this, _localctx->exception); - } - - return _localctx; -} - -//----------------- IdentifierOrNullContext ------------------------------------------------------------------ - -ClickHouseParser::IdentifierOrNullContext::IdentifierOrNullContext(ParserRuleContext *parent, size_t invokingState) - : ParserRuleContext(parent, invokingState) { -} - -ClickHouseParser::IdentifierContext* ClickHouseParser::IdentifierOrNullContext::identifier() { - return getRuleContext(0); -} - -tree::TerminalNode* ClickHouseParser::IdentifierOrNullContext::NULL_SQL() { - return getToken(ClickHouseParser::NULL_SQL, 0); -} - - -size_t ClickHouseParser::IdentifierOrNullContext::getRuleIndex() const { - return ClickHouseParser::RuleIdentifierOrNull; -} - -antlrcpp::Any ClickHouseParser::IdentifierOrNullContext::accept(tree::ParseTreeVisitor *visitor) { - if (auto parserVisitor = dynamic_cast(visitor)) - return parserVisitor->visitIdentifierOrNull(this); - else - return visitor->visitChildren(this); -} - -ClickHouseParser::IdentifierOrNullContext* ClickHouseParser::identifierOrNull() { - IdentifierOrNullContext *_localctx = _tracker.createInstance(_ctx, getState()); - enterRule(_localctx, 214, ClickHouseParser::RuleIdentifierOrNull); - - auto onExit = finally([=] { - exitRule(); - }); - try { - setState(1909); - _errHandler->sync(this); - switch (_input->LA(1)) { - case ClickHouseParser::AFTER: - case ClickHouseParser::ALIAS: - case ClickHouseParser::ALL: - case ClickHouseParser::ALTER: - case ClickHouseParser::AND: - case ClickHouseParser::ANTI: - case ClickHouseParser::ANY: - case ClickHouseParser::ARRAY: - case ClickHouseParser::AS: - case ClickHouseParser::ASCENDING: - case ClickHouseParser::ASOF: - case ClickHouseParser::AST: - case ClickHouseParser::ASYNC: - case ClickHouseParser::ATTACH: - case ClickHouseParser::BETWEEN: - case ClickHouseParser::BOTH: - case ClickHouseParser::BY: - case ClickHouseParser::CASE: - case ClickHouseParser::CAST: - case ClickHouseParser::CHECK: - case ClickHouseParser::CLEAR: - case 
-      case ClickHouseParser::CODEC:
-      case ClickHouseParser::COLLATE:
-      case ClickHouseParser::COLUMN:
-      case ClickHouseParser::COMMENT:
-      case ClickHouseParser::CONSTRAINT:
-      case ClickHouseParser::CREATE:
-      case ClickHouseParser::CROSS:
-      case ClickHouseParser::CUBE:
-      case ClickHouseParser::DATABASE:
-      case ClickHouseParser::DATABASES:
-      case ClickHouseParser::DATE:
-      case ClickHouseParser::DAY:
-      case ClickHouseParser::DEDUPLICATE:
-      case ClickHouseParser::DEFAULT:
-      case ClickHouseParser::DELAY:
-      case ClickHouseParser::DELETE:
-      case ClickHouseParser::DESC:
-      case ClickHouseParser::DESCENDING:
-      case ClickHouseParser::DESCRIBE:
-      case ClickHouseParser::DETACH:
-      case ClickHouseParser::DICTIONARIES:
-      case ClickHouseParser::DICTIONARY:
-      case ClickHouseParser::DISK:
-      case ClickHouseParser::DISTINCT:
-      case ClickHouseParser::DISTRIBUTED:
-      case ClickHouseParser::DROP:
-      case ClickHouseParser::ELSE:
-      case ClickHouseParser::END:
-      case ClickHouseParser::ENGINE:
-      case ClickHouseParser::EVENTS:
-      case ClickHouseParser::EXISTS:
-      case ClickHouseParser::EXPLAIN:
-      case ClickHouseParser::EXPRESSION:
-      case ClickHouseParser::EXTRACT:
-      case ClickHouseParser::FETCHES:
-      case ClickHouseParser::FINAL:
-      case ClickHouseParser::FIRST:
-      case ClickHouseParser::FLUSH:
-      case ClickHouseParser::FOR:
-      case ClickHouseParser::FORMAT:
-      case ClickHouseParser::FREEZE:
-      case ClickHouseParser::FROM:
-      case ClickHouseParser::FULL:
-      case ClickHouseParser::FUNCTION:
-      case ClickHouseParser::GLOBAL:
-      case ClickHouseParser::GRANULARITY:
-      case ClickHouseParser::GROUP:
-      case ClickHouseParser::HAVING:
-      case ClickHouseParser::HIERARCHICAL:
-      case ClickHouseParser::HOUR:
-      case ClickHouseParser::ID:
-      case ClickHouseParser::IF:
-      case ClickHouseParser::ILIKE:
-      case ClickHouseParser::IN:
-      case ClickHouseParser::INDEX:
-      case ClickHouseParser::INJECTIVE:
-      case ClickHouseParser::INNER:
-      case ClickHouseParser::INSERT:
-      case ClickHouseParser::INTERVAL:
-      case ClickHouseParser::INTO:
-      case ClickHouseParser::IS:
-      case ClickHouseParser::IS_OBJECT_ID:
-      case ClickHouseParser::JOIN:
-      case ClickHouseParser::KEY:
-      case ClickHouseParser::KILL:
-      case ClickHouseParser::LAST:
-      case ClickHouseParser::LAYOUT:
-      case ClickHouseParser::LEADING:
-      case ClickHouseParser::LEFT:
-      case ClickHouseParser::LIFETIME:
-      case ClickHouseParser::LIKE:
-      case ClickHouseParser::LIMIT:
-      case ClickHouseParser::LIVE:
-      case ClickHouseParser::LOCAL:
-      case ClickHouseParser::LOGS:
-      case ClickHouseParser::MATERIALIZE:
-      case ClickHouseParser::MATERIALIZED:
-      case ClickHouseParser::MAX:
-      case ClickHouseParser::MERGES:
-      case ClickHouseParser::MIN:
-      case ClickHouseParser::MINUTE:
-      case ClickHouseParser::MODIFY:
-      case ClickHouseParser::MONTH:
-      case ClickHouseParser::MOVE:
-      case ClickHouseParser::MUTATION:
-      case ClickHouseParser::NO:
-      case ClickHouseParser::NOT:
-      case ClickHouseParser::NULLS:
-      case ClickHouseParser::OFFSET:
-      case ClickHouseParser::ON:
-      case ClickHouseParser::OPTIMIZE:
-      case ClickHouseParser::OR:
-      case ClickHouseParser::ORDER:
-      case ClickHouseParser::OUTER:
-      case ClickHouseParser::OUTFILE:
-      case ClickHouseParser::PARTITION:
-      case ClickHouseParser::POPULATE:
-      case ClickHouseParser::PREWHERE:
-      case ClickHouseParser::PRIMARY:
-      case ClickHouseParser::QUARTER:
-      case ClickHouseParser::RANGE:
-      case ClickHouseParser::RELOAD:
-      case ClickHouseParser::REMOVE:
-      case ClickHouseParser::RENAME:
-      case ClickHouseParser::REPLACE:
-      case ClickHouseParser::REPLICA:
-      case ClickHouseParser::REPLICATED:
-      case ClickHouseParser::RIGHT:
-      case ClickHouseParser::ROLLUP:
-      case ClickHouseParser::SAMPLE:
-      case ClickHouseParser::SECOND:
-      case ClickHouseParser::SELECT:
-      case ClickHouseParser::SEMI:
-      case ClickHouseParser::SENDS:
-      case ClickHouseParser::SET:
-      case ClickHouseParser::SETTINGS:
-      case ClickHouseParser::SHOW:
-      case ClickHouseParser::SOURCE:
-      case ClickHouseParser::START:
-      case ClickHouseParser::STOP:
-      case ClickHouseParser::SUBSTRING:
-      case ClickHouseParser::SYNC:
-      case ClickHouseParser::SYNTAX:
-      case ClickHouseParser::SYSTEM:
-      case ClickHouseParser::TABLE:
-      case ClickHouseParser::TABLES:
-      case ClickHouseParser::TEMPORARY:
-      case ClickHouseParser::TEST:
-      case ClickHouseParser::THEN:
-      case ClickHouseParser::TIES:
-      case ClickHouseParser::TIMEOUT:
-      case ClickHouseParser::TIMESTAMP:
-      case ClickHouseParser::TO:
-      case ClickHouseParser::TOP:
-      case ClickHouseParser::TOTALS:
-      case ClickHouseParser::TRAILING:
-      case ClickHouseParser::TRIM:
-      case ClickHouseParser::TRUNCATE:
-      case ClickHouseParser::TTL:
-      case ClickHouseParser::TYPE:
-      case ClickHouseParser::UNION:
-      case ClickHouseParser::UPDATE:
-      case ClickHouseParser::USE:
-      case ClickHouseParser::USING:
-      case ClickHouseParser::UUID:
-      case ClickHouseParser::VALUES:
-      case ClickHouseParser::VIEW:
-      case ClickHouseParser::VOLUME:
-      case ClickHouseParser::WATCH:
-      case ClickHouseParser::WEEK:
-      case ClickHouseParser::WHEN:
-      case ClickHouseParser::WHERE:
-      case ClickHouseParser::WITH:
-      case ClickHouseParser::YEAR:
-      case ClickHouseParser::JSON_FALSE:
-      case ClickHouseParser::JSON_TRUE:
-      case ClickHouseParser::IDENTIFIER: {
-        enterOuterAlt(_localctx, 1);
-        setState(1907);
-        identifier();
-        break;
-      }
-
-      case ClickHouseParser::NULL_SQL: {
-        enterOuterAlt(_localctx, 2);
-        setState(1908);
-        match(ClickHouseParser::NULL_SQL);
-        break;
-      }
-
-    default:
-      throw NoViableAltException(this);
-    }
-
-  }
-  catch (RecognitionException &e) {
-    _errHandler->reportError(this, e);
-    _localctx->exception = std::current_exception();
-    _errHandler->recover(this, _localctx->exception);
-  }
-
-  return _localctx;
-}
-
-//----------------- EnumValueContext ------------------------------------------------------------------
-
-ClickHouseParser::EnumValueContext::EnumValueContext(ParserRuleContext *parent, size_t invokingState)
-  : ParserRuleContext(parent, invokingState) {
-}
-
-tree::TerminalNode* ClickHouseParser::EnumValueContext::STRING_LITERAL() {
-  return getToken(ClickHouseParser::STRING_LITERAL, 0);
-}
-
-tree::TerminalNode* ClickHouseParser::EnumValueContext::EQ_SINGLE() {
-  return getToken(ClickHouseParser::EQ_SINGLE, 0);
-}
-
-ClickHouseParser::NumberLiteralContext* ClickHouseParser::EnumValueContext::numberLiteral() {
-  return getRuleContext<ClickHouseParser::NumberLiteralContext>(0);
-}
-
-
-size_t ClickHouseParser::EnumValueContext::getRuleIndex() const {
-  return ClickHouseParser::RuleEnumValue;
-}
-
-antlrcpp::Any ClickHouseParser::EnumValueContext::accept(tree::ParseTreeVisitor *visitor) {
-  if (auto parserVisitor = dynamic_cast<ClickHouseParserVisitor*>(visitor))
-    return parserVisitor->visitEnumValue(this);
-  else
-    return visitor->visitChildren(this);
-}
-
-ClickHouseParser::EnumValueContext* ClickHouseParser::enumValue() {
-  EnumValueContext *_localctx = _tracker.createInstance<EnumValueContext>(_ctx, getState());
-  enterRule(_localctx, 216, ClickHouseParser::RuleEnumValue);
-
-  auto onExit = finally([=] {
-    exitRule();
-  });
-  try {
-    enterOuterAlt(_localctx, 1);
-    setState(1911);
-    match(ClickHouseParser::STRING_LITERAL);
-    setState(1912);
-    match(ClickHouseParser::EQ_SINGLE);
-    setState(1913);
-    numberLiteral();
-
-  }
-  catch (RecognitionException &e) {
-    _errHandler->reportError(this, e);
-    _localctx->exception = std::current_exception();
-    _errHandler->recover(this, _localctx->exception);
-  }
-
-  return _localctx;
-}
-
-bool ClickHouseParser::sempred(RuleContext *context, size_t ruleIndex, size_t predicateIndex) {
-  switch (ruleIndex) {
-    case 12: return dictionaryAttrDfntSempred(dynamic_cast<DictionaryAttrDfntContext *>(context), predicateIndex);
-    case 13: return dictionaryEngineClauseSempred(dynamic_cast<DictionaryEngineClauseContext *>(context), predicateIndex);
-    case 26: return engineClauseSempred(dynamic_cast<EngineClauseContext *>(context), predicateIndex);
-    case 67: return joinExprSempred(dynamic_cast<JoinExprContext *>(context), predicateIndex);
-    case 87: return columnExprSempred(dynamic_cast<ColumnExprContext *>(context), predicateIndex);
-    case 93: return tableExprSempred(dynamic_cast<TableExprContext *>(context), predicateIndex);
-
-  default:
-    break;
-  }
-  return true;
-}
-
-bool ClickHouseParser::dictionaryAttrDfntSempred(DictionaryAttrDfntContext *_localctx, size_t predicateIndex) {
-  switch (predicateIndex) {
-    case 0: return !_localctx->attrs.count("default");
-    case 1: return !_localctx->attrs.count("expression");
-    case 2: return !_localctx->attrs.count("hierarchical");
-    case 3: return !_localctx->attrs.count("injective");
-    case 4: return !_localctx->attrs.count("is_object_id");
-
-  default:
-    break;
-  }
-  return true;
-}
-
-bool ClickHouseParser::dictionaryEngineClauseSempred(DictionaryEngineClauseContext *_localctx, size_t predicateIndex) {
-  switch (predicateIndex) {
-    case 5: return !_localctx->clauses.count("source");
-    case 6: return !_localctx->clauses.count("lifetime");
-    case 7: return !_localctx->clauses.count("layout");
-    case 8: return !_localctx->clauses.count("range");
-    case 9: return !_localctx->clauses.count("settings");
-
-  default:
-    break;
-  }
-  return true;
-}
-
-bool ClickHouseParser::engineClauseSempred(EngineClauseContext *_localctx, size_t predicateIndex) {
-  switch (predicateIndex) {
-    case 10: return !_localctx->clauses.count("orderByClause");
-    case 11: return !_localctx->clauses.count("partitionByClause");
-    case 12: return !_localctx->clauses.count("primaryKeyClause");
-    case 13: return !_localctx->clauses.count("sampleByClause");
-    case 14: return !_localctx->clauses.count("ttlClause");
-    case 15: return !_localctx->clauses.count("settingsClause");
-
-  default:
-    break;
-  }
-  return true;
-}
-
-bool ClickHouseParser::joinExprSempred(JoinExprContext *_localctx, size_t predicateIndex) {
-  switch (predicateIndex) {
-    case 16: return precpred(_ctx, 3);
-    case 17: return precpred(_ctx, 4);
-
-  default:
-    break;
-  }
-  return true;
-}
-
-bool ClickHouseParser::columnExprSempred(ColumnExprContext *_localctx, size_t predicateIndex) {
-  switch (predicateIndex) {
-    case 18: return precpred(_ctx, 16);
-    case 19: return precpred(_ctx, 15);
-    case 20: return precpred(_ctx, 14);
-    case 21: return precpred(_ctx, 11);
-    case 22: return precpred(_ctx, 10);
-    case 23: return precpred(_ctx, 9);
-    case 24: return precpred(_ctx, 8);
-    case 25: return precpred(_ctx, 19);
-    case 26: return precpred(_ctx, 18);
-    case 27: return precpred(_ctx, 13);
-    case 28: return precpred(_ctx, 7);
-
-  default:
-    break;
-  }
-  return true;
-}
-
-bool ClickHouseParser::tableExprSempred(TableExprContext *_localctx, size_t predicateIndex) {
-  switch (predicateIndex) {
-    case 29: return precpred(_ctx, 1);
-
-  default:
-    break;
-  }
-  return true;
-}
-
-// Static vars and initialization.
-std::vector<dfa::DFA> ClickHouseParser::_decisionToDFA;
-atn::PredictionContextCache ClickHouseParser::_sharedContextCache;
-
-// We own the ATN which in turn owns the ATN states.
-atn::ATN ClickHouseParser::_atn;
-std::vector<uint16_t> ClickHouseParser::_serializedATN;
-
-std::vector<std::string> ClickHouseParser::_ruleNames = {
-  "queryStmt", "query", "alterStmt", "alterTableClause", "assignmentExprList",
-  "assignmentExpr", "tableColumnPropertyType", "partitionClause", "attachStmt",
-  "checkStmt", "createStmt", "dictionarySchemaClause", "dictionaryAttrDfnt",
-  "dictionaryEngineClause", "dictionaryPrimaryKeyClause", "dictionaryArgExpr",
-  "sourceClause", "lifetimeClause", "layoutClause", "rangeClause", "dictionarySettingsClause",
-  "clusterClause", "uuidClause", "destinationClause", "subqueryClause",
-  "tableSchemaClause", "engineClause", "partitionByClause", "primaryKeyClause",
-  "sampleByClause", "ttlClause", "engineExpr", "tableElementExpr", "tableColumnDfnt",
-  "tableColumnPropertyExpr", "tableIndexDfnt", "tableProjectionDfnt", "codecExpr",
-  "codecArgExpr", "ttlExpr", "describeStmt", "dropStmt", "existsStmt", "explainStmt",
-  "insertStmt", "columnsClause", "dataClause", "killStmt", "optimizeStmt",
-  "renameStmt", "projectionSelectStmt", "selectUnionStmt", "selectStmtWithParens",
-  "selectStmt", "withClause", "topClause", "fromClause", "arrayJoinClause",
-  "prewhereClause", "whereClause", "groupByClause", "havingClause", "orderByClause",
-  "projectionOrderByClause", "limitByClause", "limitClause", "settingsClause",
-  "joinExpr", "joinOp", "joinOpCross", "joinConstraintClause", "sampleClause",
-  "limitExpr", "orderExprList", "orderExpr", "ratioExpr", "settingExprList",
-  "settingExpr", "setStmt", "showStmt", "systemStmt", "truncateStmt", "useStmt",
-  "watchStmt", "columnTypeExpr", "columnExprList", "columnsExpr", "columnExpr",
-  "columnArgList", "columnArgExpr", "columnLambdaExpr", "columnIdentifier",
-  "nestedIdentifier", "tableExpr", "tableFunctionExpr", "tableIdentifier",
-  "tableArgList", "tableArgExpr", "databaseIdentifier", "floatingLiteral",
-  "numberLiteral", "literal", "interval", "keyword", "keywordForAlias",
-  "alias", "identifier", "identifierOrNull", "enumValue"
-};
-
-std::vector<std::string> ClickHouseParser::_literalNames = {
-  "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "",
-  "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "",
-  "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "",
-  "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "",
-  "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "",
-  "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "",
-  "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "",
-  "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "",
-  "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "",
-  "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "",
-  "", "", "'false'", "'true'", "", "", "", "", "", "", "'->'", "'*'", "'`'",
-  "'\\'", "':'", "','", "'||'", "'-'", "'.'", "'=='", "'='", "'>='", "'>'",
-  "'{'", "'['", "'<='", "'('", "'<'", "", "'%'", "'+'", "'?'", "'\"'", "'''",
-  "'}'", "']'", "')'", "';'", "'/'", "'_'"
-};
-
-std::vector<std::string> ClickHouseParser::_symbolicNames = {
-  "", "ADD", "AFTER", "ALIAS", "ALL", "ALTER", "AND", "ANTI", "ANY", "ARRAY",
-  "AS", "ASCENDING", "ASOF", "AST", "ASYNC", "ATTACH", "BETWEEN", "BOTH",
-  "BY", "CASE", "CAST", "CHECK", "CLEAR", "CLUSTER", "CODEC", "COLLATE",
-  "COLUMN", "COMMENT", "CONSTRAINT", "CREATE", "CROSS", "CUBE", "DATABASE",
"COMMENT", "CONSTRAINT", "CREATE", "CROSS", "CUBE", "DATABASE", - "DATABASES", "DATE", "DAY", "DEDUPLICATE", "DEFAULT", "DELAY", "DELETE", - "DESC", "DESCENDING", "DESCRIBE", "DETACH", "DICTIONARIES", "DICTIONARY", - "DISK", "DISTINCT", "DISTRIBUTED", "DROP", "ELSE", "END", "ENGINE", "EVENTS", - "EXISTS", "EXPLAIN", "EXPRESSION", "EXTRACT", "FETCHES", "FINAL", "FIRST", - "FLUSH", "FOR", "FORMAT", "FREEZE", "FROM", "FULL", "FUNCTION", "GLOBAL", - "GRANULARITY", "GROUP", "HAVING", "HIERARCHICAL", "HOUR", "ID", "IF", - "ILIKE", "IN", "INDEX", "INF", "INJECTIVE", "INNER", "INSERT", "INTERVAL", - "INTO", "IS", "IS_OBJECT_ID", "JOIN", "KEY", "KILL", "LAST", "LAYOUT", - "LEADING", "LEFT", "LIFETIME", "LIKE", "LIMIT", "LIVE", "LOCAL", "LOGS", - "MATERIALIZE", "MATERIALIZED", "MAX", "MERGES", "MIN", "MINUTE", "MODIFY", - "MONTH", "MOVE", "MUTATION", "NAN_SQL", "NO", "NOT", "NULL_SQL", "NULLS", - "OFFSET", "ON", "OPTIMIZE", "OR", "ORDER", "OUTER", "OUTFILE", "PARTITION", - "POPULATE", "PREWHERE", "PRIMARY", "PROJECTION", "QUARTER", "RANGE", "RELOAD", - "REMOVE", "RENAME", "REPLACE", "REPLICA", "REPLICATED", "RIGHT", "ROLLUP", - "SAMPLE", "SECOND", "SELECT", "SEMI", "SENDS", "SET", "SETTINGS", "SHOW", - "SOURCE", "START", "STOP", "SUBSTRING", "SYNC", "SYNTAX", "SYSTEM", "TABLE", - "TABLES", "TEMPORARY", "TEST", "THEN", "TIES", "TIMEOUT", "TIMESTAMP", - "TO", "TOP", "TOTALS", "TRAILING", "TRIM", "TRUNCATE", "TTL", "TYPE", - "UNION", "UPDATE", "USE", "USING", "UUID", "VALUES", "VIEW", "VOLUME", - "WATCH", "WEEK", "WHEN", "WHERE", "WITH", "YEAR", "JSON_FALSE", "JSON_TRUE", - "IDENTIFIER", "FLOATING_LITERAL", "OCTAL_LITERAL", "DECIMAL_LITERAL", - "HEXADECIMAL_LITERAL", "STRING_LITERAL", "ARROW", "ASTERISK", "BACKQUOTE", - "BACKSLASH", "COLON", "COMMA", "CONCAT", "DASH", "DOT", "EQ_DOUBLE", "EQ_SINGLE", - "GE", "GT", "LBRACE", "LBRACKET", "LE", "LPAREN", "LT", "NOT_EQ", "PERCENT", - "PLUS", "QUERY", "QUOTE_DOUBLE", "QUOTE_SINGLE", "RBRACE", "RBRACKET", - "RPAREN", "SEMICOLON", "SLASH", "UNDERSCORE", "MULTI_LINE_COMMENT", "SINGLE_LINE_COMMENT", - "WHITESPACE" -}; - -dfa::Vocabulary ClickHouseParser::_vocabulary(_literalNames, _symbolicNames); - -std::vector ClickHouseParser::_tokenNames; - -ClickHouseParser::Initializer::Initializer() { - for (size_t i = 0; i < _symbolicNames.size(); ++i) { - std::string name = _vocabulary.getLiteralName(i); - if (name.empty()) { - name = _vocabulary.getSymbolicName(i); - } - - if (name.empty()) { - _tokenNames.push_back(""); - } else { - _tokenNames.push_back(name); - } - } - - _serializedATN = { - 0x3, 0x608b, 0xa72a, 0x8133, 0xb9ed, 0x417c, 0x3be7, 0x7786, 0x5964, - 0x3, 0xe0, 0x77e, 0x4, 0x2, 0x9, 0x2, 0x4, 0x3, 0x9, 0x3, 0x4, 0x4, - 0x9, 0x4, 0x4, 0x5, 0x9, 0x5, 0x4, 0x6, 0x9, 0x6, 0x4, 0x7, 0x9, 0x7, - 0x4, 0x8, 0x9, 0x8, 0x4, 0x9, 0x9, 0x9, 0x4, 0xa, 0x9, 0xa, 0x4, 0xb, - 0x9, 0xb, 0x4, 0xc, 0x9, 0xc, 0x4, 0xd, 0x9, 0xd, 0x4, 0xe, 0x9, 0xe, - 0x4, 0xf, 0x9, 0xf, 0x4, 0x10, 0x9, 0x10, 0x4, 0x11, 0x9, 0x11, 0x4, - 0x12, 0x9, 0x12, 0x4, 0x13, 0x9, 0x13, 0x4, 0x14, 0x9, 0x14, 0x4, 0x15, - 0x9, 0x15, 0x4, 0x16, 0x9, 0x16, 0x4, 0x17, 0x9, 0x17, 0x4, 0x18, 0x9, - 0x18, 0x4, 0x19, 0x9, 0x19, 0x4, 0x1a, 0x9, 0x1a, 0x4, 0x1b, 0x9, 0x1b, - 0x4, 0x1c, 0x9, 0x1c, 0x4, 0x1d, 0x9, 0x1d, 0x4, 0x1e, 0x9, 0x1e, 0x4, - 0x1f, 0x9, 0x1f, 0x4, 0x20, 0x9, 0x20, 0x4, 0x21, 0x9, 0x21, 0x4, 0x22, - 0x9, 0x22, 0x4, 0x23, 0x9, 0x23, 0x4, 0x24, 0x9, 0x24, 0x4, 0x25, 0x9, - 0x25, 0x4, 0x26, 0x9, 0x26, 0x4, 0x27, 0x9, 0x27, 0x4, 0x28, 0x9, 0x28, - 0x4, 0x29, 0x9, 0x29, 0x4, 0x2a, 0x9, 0x2a, 0x4, 
- 0x2, 0x2, 0x2, 0x276, 0x278, 0x5, 0x32, 0x1a, 0x2, 0x277, 0x276, 0x3, - 0x2, 0x2, 0x2, 0x277, 0x278, 0x3, 0x2, 0x2, 0x2, 0x278, 0x291, 0x3, - 0x2, 0x2, 0x2, 0x279, 0x27c, 0x9, 0x3, 0x2, 0x2, 0x27a, 0x27b, 0x7, - 0x78, 0x2, 0x2, 0x27b, 0x27d, 0x7, 0x86, 0x2, 0x2, 0x27c, 0x27a, 0x3, - 0x2, 0x2, 0x2, 0x27c, 0x27d, 0x3, 0x2, 0x2, 0x2, 0x27d, 0x27e, 0x3, - 0x2, 0x2, 0x2, 0x27e, 0x282, 0x7, 0xb0, 0x2, 0x2, 0x27f, 0x280, 0x7, - 0x4d, 0x2, 0x2, 0x280, 0x281, 0x7, 0x72, 0x2, 0x2, 0x281, 0x283, 0x7, - 0x38, 0x2, 0x2, 0x282, 0x27f, 0x3, 0x2, 0x2, 0x2, 0x282, 0x283, 0x3, - 0x2, 0x2, 0x2, 0x283, 0x284, 0x3, 0x2, 0x2, 0x2, 0x284, 0x286, 0x5, - 0xc0, 0x61, 0x2, 0x285, 0x287, 0x5, 0x2e, 0x18, 0x2, 0x286, 0x285, 0x3, - 0x2, 0x2, 0x2, 0x286, 0x287, 0x3, 0x2, 0x2, 0x2, 0x287, 0x289, 0x3, - 0x2, 0x2, 0x2, 0x288, 0x28a, 0x5, 0x2c, 0x17, 0x2, 0x289, 0x288, 0x3, - 0x2, 0x2, 0x2, 0x289, 0x28a, 0x3, 0x2, 0x2, 0x2, 0x28a, 0x28c, 0x3, - 0x2, 0x2, 0x2, 0x28b, 0x28d, 0x5, 0x34, 0x1b, 0x2, 0x28c, 0x28b, 0x3, - 0x2, 0x2, 0x2, 0x28c, 0x28d, 0x3, 0x2, 0x2, 0x2, 0x28d, 0x28e, 0x3, - 0x2, 0x2, 0x2, 0x28e, 0x28f, 0x5, 0x32, 0x1a, 0x2, 0x28f, 0x291, 0x3, - 0x2, 0x2, 0x2, 0x290, 0x207, 0x3, 0x2, 0x2, 0x2, 0x290, 0x215, 0x3, - 0x2, 0x2, 0x2, 0x290, 0x226, 0x3, 0x2, 0x2, 0x2, 0x290, 0x244, 0x3, - 0x2, 0x2, 0x2, 0x290, 0x25f, 0x3, 0x2, 0x2, 0x2, 0x290, 0x279, 0x3, - 0x2, 0x2, 0x2, 0x291, 0x17, 0x3, 0x2, 0x2, 0x2, 0x292, 0x293, 0x7, 0xd0, - 0x2, 0x2, 0x293, 0x298, 0x5, 0x1a, 0xe, 0x2, 0x294, 0x295, 0x7, 0xc5, - 0x2, 0x2, 0x295, 0x297, 0x5, 0x1a, 0xe, 0x2, 0x296, 0x294, 0x3, 0x2, - 0x2, 0x2, 0x297, 0x29a, 0x3, 0x2, 0x2, 0x2, 0x298, 0x296, 0x3, 0x2, - 0x2, 0x2, 0x298, 0x299, 0x3, 0x2, 0x2, 0x2, 0x299, 0x29b, 0x3, 0x2, - 0x2, 0x2, 0x29a, 0x298, 0x3, 0x2, 0x2, 0x2, 0x29b, 0x29c, 0x7, 0xda, - 0x2, 0x2, 0x29c, 0x19, 0x3, 0x2, 0x2, 0x2, 0x29d, 0x29e, 0x5, 0xd6, - 0x6c, 0x2, 0x29e, 0x2b4, 0x5, 0xaa, 0x56, 0x2, 0x29f, 0x2a0, 0x6, 0xe, - 0x2, 0x3, 0x2a0, 0x2a1, 0x7, 0x27, 0x2, 0x2, 0x2a1, 0x2a2, 0x5, 0xcc, - 0x67, 0x2, 0x2a2, 0x2a3, 0x8, 0xe, 0x1, 0x2, 0x2a3, 0x2b3, 0x3, 0x2, - 0x2, 0x2, 0x2a4, 0x2a5, 0x6, 0xe, 0x3, 0x3, 0x2a5, 0x2a6, 0x7, 0x3a, - 0x2, 0x2, 0x2a6, 0x2a7, 0x5, 0xb0, 0x59, 0x2, 0x2a7, 0x2a8, 0x8, 0xe, - 0x1, 0x2, 0x2a8, 0x2b3, 0x3, 0x2, 0x2, 0x2, 0x2a9, 0x2aa, 0x6, 0xe, - 0x4, 0x3, 0x2aa, 0x2ab, 0x7, 0x4a, 0x2, 0x2, 0x2ab, 0x2b3, 0x8, 0xe, - 0x1, 0x2, 0x2ac, 0x2ad, 0x6, 0xe, 0x5, 0x3, 0x2ad, 0x2ae, 0x7, 0x52, - 0x2, 0x2, 0x2ae, 0x2b3, 0x8, 0xe, 0x1, 0x2, 0x2af, 0x2b0, 0x6, 0xe, - 0x6, 0x3, 0x2b0, 0x2b1, 0x7, 0x58, 0x2, 0x2, 0x2b1, 0x2b3, 0x8, 0xe, - 0x1, 0x2, 0x2b2, 0x29f, 0x3, 0x2, 0x2, 0x2, 0x2b2, 0x2a4, 0x3, 0x2, - 0x2, 0x2, 0x2b2, 0x2a9, 0x3, 0x2, 0x2, 0x2, 0x2b2, 0x2ac, 0x3, 0x2, - 0x2, 0x2, 0x2b2, 0x2af, 0x3, 0x2, 0x2, 0x2, 0x2b3, 0x2b6, 0x3, 0x2, - 0x2, 0x2, 0x2b4, 0x2b2, 0x3, 0x2, 0x2, 0x2, 0x2b4, 0x2b5, 0x3, 0x2, - 0x2, 0x2, 0x2b5, 0x1b, 0x3, 0x2, 0x2, 0x2, 0x2b6, 0x2b4, 0x3, 0x2, 0x2, - 0x2, 0x2b7, 0x2b9, 0x5, 0x1e, 0x10, 0x2, 0x2b8, 0x2b7, 0x3, 0x2, 0x2, - 0x2, 0x2b8, 0x2b9, 0x3, 0x2, 0x2, 0x2, 0x2b9, 0x2d0, 0x3, 0x2, 0x2, - 0x2, 0x2ba, 0x2bb, 0x6, 0xf, 0x7, 0x3, 0x2bb, 0x2bc, 0x5, 0x22, 0x12, - 0x2, 0x2bc, 0x2bd, 0x8, 0xf, 0x1, 0x2, 0x2bd, 0x2cf, 0x3, 0x2, 0x2, - 0x2, 0x2be, 0x2bf, 0x6, 0xf, 0x8, 0x3, 0x2bf, 0x2c0, 0x5, 0x24, 0x13, - 0x2, 0x2c0, 0x2c1, 0x8, 0xf, 0x1, 0x2, 0x2c1, 0x2cf, 0x3, 0x2, 0x2, - 0x2, 0x2c2, 0x2c3, 0x6, 0xf, 0x9, 0x3, 0x2c3, 0x2c4, 0x5, 0x26, 0x14, - 0x2, 0x2c4, 0x2c5, 0x8, 0xf, 0x1, 0x2, 0x2c5, 0x2cf, 0x3, 0x2, 0x2, - 0x2, 0x2c6, 0x2c7, 0x6, 0xf, 0xa, 0x3, 0x2c7, 0x2c8, 0x5, 0x28, 0x15, - 
0x2, 0x2c8, 0x2c9, 0x8, 0xf, 0x1, 0x2, 0x2c9, 0x2cf, 0x3, 0x2, 0x2, - 0x2, 0x2ca, 0x2cb, 0x6, 0xf, 0xb, 0x3, 0x2cb, 0x2cc, 0x5, 0x2a, 0x16, - 0x2, 0x2cc, 0x2cd, 0x8, 0xf, 0x1, 0x2, 0x2cd, 0x2cf, 0x3, 0x2, 0x2, - 0x2, 0x2ce, 0x2ba, 0x3, 0x2, 0x2, 0x2, 0x2ce, 0x2be, 0x3, 0x2, 0x2, - 0x2, 0x2ce, 0x2c2, 0x3, 0x2, 0x2, 0x2, 0x2ce, 0x2c6, 0x3, 0x2, 0x2, - 0x2, 0x2ce, 0x2ca, 0x3, 0x2, 0x2, 0x2, 0x2cf, 0x2d2, 0x3, 0x2, 0x2, - 0x2, 0x2d0, 0x2ce, 0x3, 0x2, 0x2, 0x2, 0x2d0, 0x2d1, 0x3, 0x2, 0x2, - 0x2, 0x2d1, 0x1d, 0x3, 0x2, 0x2, 0x2, 0x2d2, 0x2d0, 0x3, 0x2, 0x2, 0x2, - 0x2d3, 0x2d4, 0x7, 0x7f, 0x2, 0x2, 0x2d4, 0x2d5, 0x7, 0x5a, 0x2, 0x2, - 0x2d5, 0x2d6, 0x5, 0xac, 0x57, 0x2, 0x2d6, 0x1f, 0x3, 0x2, 0x2, 0x2, - 0x2d7, 0x2de, 0x5, 0xd6, 0x6c, 0x2, 0x2d8, 0x2db, 0x5, 0xd6, 0x6c, 0x2, - 0x2d9, 0x2da, 0x7, 0xd0, 0x2, 0x2, 0x2da, 0x2dc, 0x7, 0xda, 0x2, 0x2, - 0x2db, 0x2d9, 0x3, 0x2, 0x2, 0x2, 0x2db, 0x2dc, 0x3, 0x2, 0x2, 0x2, - 0x2dc, 0x2df, 0x3, 0x2, 0x2, 0x2, 0x2dd, 0x2df, 0x5, 0xcc, 0x67, 0x2, - 0x2de, 0x2d8, 0x3, 0x2, 0x2, 0x2, 0x2de, 0x2dd, 0x3, 0x2, 0x2, 0x2, - 0x2df, 0x21, 0x3, 0x2, 0x2, 0x2, 0x2e0, 0x2e1, 0x7, 0x93, 0x2, 0x2, - 0x2e1, 0x2e2, 0x7, 0xd0, 0x2, 0x2, 0x2e2, 0x2e3, 0x5, 0xd6, 0x6c, 0x2, - 0x2e3, 0x2e7, 0x7, 0xd0, 0x2, 0x2, 0x2e4, 0x2e6, 0x5, 0x20, 0x11, 0x2, - 0x2e5, 0x2e4, 0x3, 0x2, 0x2, 0x2, 0x2e6, 0x2e9, 0x3, 0x2, 0x2, 0x2, - 0x2e7, 0x2e5, 0x3, 0x2, 0x2, 0x2, 0x2e7, 0x2e8, 0x3, 0x2, 0x2, 0x2, - 0x2e8, 0x2ea, 0x3, 0x2, 0x2, 0x2, 0x2e9, 0x2e7, 0x3, 0x2, 0x2, 0x2, - 0x2ea, 0x2eb, 0x7, 0xda, 0x2, 0x2, 0x2eb, 0x2ec, 0x7, 0xda, 0x2, 0x2, - 0x2ec, 0x23, 0x3, 0x2, 0x2, 0x2, 0x2ed, 0x2ee, 0x7, 0x60, 0x2, 0x2, - 0x2ee, 0x2f8, 0x7, 0xd0, 0x2, 0x2, 0x2ef, 0x2f9, 0x7, 0xbd, 0x2, 0x2, - 0x2f0, 0x2f1, 0x7, 0x6a, 0x2, 0x2, 0x2f1, 0x2f2, 0x7, 0xbd, 0x2, 0x2, - 0x2f2, 0x2f3, 0x7, 0x68, 0x2, 0x2, 0x2f3, 0x2f9, 0x7, 0xbd, 0x2, 0x2, - 0x2f4, 0x2f5, 0x7, 0x68, 0x2, 0x2, 0x2f5, 0x2f6, 0x7, 0xbd, 0x2, 0x2, - 0x2f6, 0x2f7, 0x7, 0x6a, 0x2, 0x2, 0x2f7, 0x2f9, 0x7, 0xbd, 0x2, 0x2, - 0x2f8, 0x2ef, 0x3, 0x2, 0x2, 0x2, 0x2f8, 0x2f0, 0x3, 0x2, 0x2, 0x2, - 0x2f8, 0x2f4, 0x3, 0x2, 0x2, 0x2, 0x2f9, 0x2fa, 0x3, 0x2, 0x2, 0x2, - 0x2fa, 0x2fb, 0x7, 0xda, 0x2, 0x2, 0x2fb, 0x25, 0x3, 0x2, 0x2, 0x2, - 0x2fc, 0x2fd, 0x7, 0x5d, 0x2, 0x2, 0x2fd, 0x2fe, 0x7, 0xd0, 0x2, 0x2, - 0x2fe, 0x2ff, 0x5, 0xd6, 0x6c, 0x2, 0x2ff, 0x303, 0x7, 0xd0, 0x2, 0x2, - 0x300, 0x302, 0x5, 0x20, 0x11, 0x2, 0x301, 0x300, 0x3, 0x2, 0x2, 0x2, - 0x302, 0x305, 0x3, 0x2, 0x2, 0x2, 0x303, 0x301, 0x3, 0x2, 0x2, 0x2, - 0x303, 0x304, 0x3, 0x2, 0x2, 0x2, 0x304, 0x306, 0x3, 0x2, 0x2, 0x2, - 0x305, 0x303, 0x3, 0x2, 0x2, 0x2, 0x306, 0x307, 0x7, 0xda, 0x2, 0x2, - 0x307, 0x308, 0x7, 0xda, 0x2, 0x2, 0x308, 0x27, 0x3, 0x2, 0x2, 0x2, - 0x309, 0x30a, 0x7, 0x82, 0x2, 0x2, 0x30a, 0x315, 0x7, 0xd0, 0x2, 0x2, - 0x30b, 0x30c, 0x7, 0x6a, 0x2, 0x2, 0x30c, 0x30d, 0x5, 0xd6, 0x6c, 0x2, - 0x30d, 0x30e, 0x7, 0x68, 0x2, 0x2, 0x30e, 0x30f, 0x5, 0xd6, 0x6c, 0x2, - 0x30f, 0x316, 0x3, 0x2, 0x2, 0x2, 0x310, 0x311, 0x7, 0x68, 0x2, 0x2, - 0x311, 0x312, 0x5, 0xd6, 0x6c, 0x2, 0x312, 0x313, 0x7, 0x6a, 0x2, 0x2, - 0x313, 0x314, 0x5, 0xd6, 0x6c, 0x2, 0x314, 0x316, 0x3, 0x2, 0x2, 0x2, - 0x315, 0x30b, 0x3, 0x2, 0x2, 0x2, 0x315, 0x310, 0x3, 0x2, 0x2, 0x2, - 0x316, 0x317, 0x3, 0x2, 0x2, 0x2, 0x317, 0x318, 0x7, 0xda, 0x2, 0x2, - 0x318, 0x29, 0x3, 0x2, 0x2, 0x2, 0x319, 0x31a, 0x7, 0x91, 0x2, 0x2, - 0x31a, 0x31b, 0x7, 0xd0, 0x2, 0x2, 0x31b, 0x31c, 0x5, 0x9a, 0x4e, 0x2, - 0x31c, 0x31d, 0x7, 0xda, 0x2, 0x2, 0x31d, 0x2b, 0x3, 0x2, 0x2, 0x2, - 0x31e, 0x31f, 0x7, 0x76, 0x2, 0x2, 0x31f, 0x322, 0x7, 0x19, 
0x2, 0x2, - 0x320, 0x323, 0x5, 0xd6, 0x6c, 0x2, 0x321, 0x323, 0x7, 0xbf, 0x2, 0x2, - 0x322, 0x320, 0x3, 0x2, 0x2, 0x2, 0x322, 0x321, 0x3, 0x2, 0x2, 0x2, - 0x323, 0x2d, 0x3, 0x2, 0x2, 0x2, 0x324, 0x325, 0x7, 0xae, 0x2, 0x2, - 0x325, 0x326, 0x7, 0xbf, 0x2, 0x2, 0x326, 0x2f, 0x3, 0x2, 0x2, 0x2, - 0x327, 0x328, 0x7, 0xa2, 0x2, 0x2, 0x328, 0x329, 0x5, 0xc0, 0x61, 0x2, - 0x329, 0x31, 0x3, 0x2, 0x2, 0x2, 0x32a, 0x32b, 0x7, 0xc, 0x2, 0x2, 0x32b, - 0x32c, 0x5, 0x68, 0x35, 0x2, 0x32c, 0x33, 0x3, 0x2, 0x2, 0x2, 0x32d, - 0x32e, 0x7, 0xd0, 0x2, 0x2, 0x32e, 0x333, 0x5, 0x42, 0x22, 0x2, 0x32f, - 0x330, 0x7, 0xc5, 0x2, 0x2, 0x330, 0x332, 0x5, 0x42, 0x22, 0x2, 0x331, - 0x32f, 0x3, 0x2, 0x2, 0x2, 0x332, 0x335, 0x3, 0x2, 0x2, 0x2, 0x333, - 0x331, 0x3, 0x2, 0x2, 0x2, 0x333, 0x334, 0x3, 0x2, 0x2, 0x2, 0x334, - 0x336, 0x3, 0x2, 0x2, 0x2, 0x335, 0x333, 0x3, 0x2, 0x2, 0x2, 0x336, - 0x337, 0x7, 0xda, 0x2, 0x2, 0x337, 0x33d, 0x3, 0x2, 0x2, 0x2, 0x338, - 0x339, 0x7, 0xc, 0x2, 0x2, 0x339, 0x33d, 0x5, 0xc0, 0x61, 0x2, 0x33a, - 0x33b, 0x7, 0xc, 0x2, 0x2, 0x33b, 0x33d, 0x5, 0xbe, 0x60, 0x2, 0x33c, - 0x32d, 0x3, 0x2, 0x2, 0x2, 0x33c, 0x338, 0x3, 0x2, 0x2, 0x2, 0x33c, - 0x33a, 0x3, 0x2, 0x2, 0x2, 0x33d, 0x35, 0x3, 0x2, 0x2, 0x2, 0x33e, 0x359, - 0x5, 0x40, 0x21, 0x2, 0x33f, 0x340, 0x6, 0x1c, 0xc, 0x3, 0x340, 0x341, - 0x5, 0x7e, 0x40, 0x2, 0x341, 0x342, 0x8, 0x1c, 0x1, 0x2, 0x342, 0x358, - 0x3, 0x2, 0x2, 0x2, 0x343, 0x344, 0x6, 0x1c, 0xd, 0x3, 0x344, 0x345, - 0x5, 0x38, 0x1d, 0x2, 0x345, 0x346, 0x8, 0x1c, 0x1, 0x2, 0x346, 0x358, - 0x3, 0x2, 0x2, 0x2, 0x347, 0x348, 0x6, 0x1c, 0xe, 0x3, 0x348, 0x349, - 0x5, 0x3a, 0x1e, 0x2, 0x349, 0x34a, 0x8, 0x1c, 0x1, 0x2, 0x34a, 0x358, - 0x3, 0x2, 0x2, 0x2, 0x34b, 0x34c, 0x6, 0x1c, 0xf, 0x3, 0x34c, 0x34d, - 0x5, 0x3c, 0x1f, 0x2, 0x34d, 0x34e, 0x8, 0x1c, 0x1, 0x2, 0x34e, 0x358, - 0x3, 0x2, 0x2, 0x2, 0x34f, 0x350, 0x6, 0x1c, 0x10, 0x3, 0x350, 0x351, - 0x5, 0x3e, 0x20, 0x2, 0x351, 0x352, 0x8, 0x1c, 0x1, 0x2, 0x352, 0x358, - 0x3, 0x2, 0x2, 0x2, 0x353, 0x354, 0x6, 0x1c, 0x11, 0x3, 0x354, 0x355, - 0x5, 0x86, 0x44, 0x2, 0x355, 0x356, 0x8, 0x1c, 0x1, 0x2, 0x356, 0x358, - 0x3, 0x2, 0x2, 0x2, 0x357, 0x33f, 0x3, 0x2, 0x2, 0x2, 0x357, 0x343, - 0x3, 0x2, 0x2, 0x2, 0x357, 0x347, 0x3, 0x2, 0x2, 0x2, 0x357, 0x34b, - 0x3, 0x2, 0x2, 0x2, 0x357, 0x34f, 0x3, 0x2, 0x2, 0x2, 0x357, 0x353, - 0x3, 0x2, 0x2, 0x2, 0x358, 0x35b, 0x3, 0x2, 0x2, 0x2, 0x359, 0x357, - 0x3, 0x2, 0x2, 0x2, 0x359, 0x35a, 0x3, 0x2, 0x2, 0x2, 0x35a, 0x37, 0x3, - 0x2, 0x2, 0x2, 0x35b, 0x359, 0x3, 0x2, 0x2, 0x2, 0x35c, 0x35d, 0x7, - 0x7c, 0x2, 0x2, 0x35d, 0x35e, 0x7, 0x14, 0x2, 0x2, 0x35e, 0x35f, 0x5, - 0xb0, 0x59, 0x2, 0x35f, 0x39, 0x3, 0x2, 0x2, 0x2, 0x360, 0x361, 0x7, - 0x7f, 0x2, 0x2, 0x361, 0x362, 0x7, 0x5a, 0x2, 0x2, 0x362, 0x363, 0x5, - 0xb0, 0x59, 0x2, 0x363, 0x3b, 0x3, 0x2, 0x2, 0x2, 0x364, 0x365, 0x7, - 0x8b, 0x2, 0x2, 0x365, 0x366, 0x7, 0x14, 0x2, 0x2, 0x366, 0x367, 0x5, - 0xb0, 0x59, 0x2, 0x367, 0x3d, 0x3, 0x2, 0x2, 0x2, 0x368, 0x369, 0x7, - 0xa8, 0x2, 0x2, 0x369, 0x36e, 0x5, 0x50, 0x29, 0x2, 0x36a, 0x36b, 0x7, - 0xc5, 0x2, 0x2, 0x36b, 0x36d, 0x5, 0x50, 0x29, 0x2, 0x36c, 0x36a, 0x3, - 0x2, 0x2, 0x2, 0x36d, 0x370, 0x3, 0x2, 0x2, 0x2, 0x36e, 0x36c, 0x3, - 0x2, 0x2, 0x2, 0x36e, 0x36f, 0x3, 0x2, 0x2, 0x2, 0x36f, 0x3f, 0x3, 0x2, - 0x2, 0x2, 0x370, 0x36e, 0x3, 0x2, 0x2, 0x2, 0x371, 0x373, 0x7, 0x36, - 0x2, 0x2, 0x372, 0x374, 0x7, 0xca, 0x2, 0x2, 0x373, 0x372, 0x3, 0x2, - 0x2, 0x2, 0x373, 0x374, 0x3, 0x2, 0x2, 0x2, 0x374, 0x375, 0x3, 0x2, - 0x2, 0x2, 0x375, 0x37b, 0x5, 0xd8, 0x6d, 0x2, 0x376, 0x378, 0x7, 0xd0, - 0x2, 0x2, 0x377, 0x379, 
0x5, 0xac, 0x57, 0x2, 0x378, 0x377, 0x3, 0x2, - 0x2, 0x2, 0x378, 0x379, 0x3, 0x2, 0x2, 0x2, 0x379, 0x37a, 0x3, 0x2, - 0x2, 0x2, 0x37a, 0x37c, 0x7, 0xda, 0x2, 0x2, 0x37b, 0x376, 0x3, 0x2, - 0x2, 0x2, 0x37b, 0x37c, 0x3, 0x2, 0x2, 0x2, 0x37c, 0x41, 0x3, 0x2, 0x2, - 0x2, 0x37d, 0x388, 0x5, 0x44, 0x23, 0x2, 0x37e, 0x37f, 0x7, 0x1e, 0x2, - 0x2, 0x37f, 0x380, 0x5, 0xd6, 0x6c, 0x2, 0x380, 0x381, 0x7, 0x17, 0x2, - 0x2, 0x381, 0x382, 0x5, 0xb0, 0x59, 0x2, 0x382, 0x388, 0x3, 0x2, 0x2, - 0x2, 0x383, 0x384, 0x7, 0x50, 0x2, 0x2, 0x384, 0x388, 0x5, 0x48, 0x25, - 0x2, 0x385, 0x386, 0x7, 0x80, 0x2, 0x2, 0x386, 0x388, 0x5, 0x4a, 0x26, - 0x2, 0x387, 0x37d, 0x3, 0x2, 0x2, 0x2, 0x387, 0x37e, 0x3, 0x2, 0x2, - 0x2, 0x387, 0x383, 0x3, 0x2, 0x2, 0x2, 0x387, 0x385, 0x3, 0x2, 0x2, - 0x2, 0x388, 0x43, 0x3, 0x2, 0x2, 0x2, 0x389, 0x38a, 0x5, 0xba, 0x5e, - 0x2, 0x38a, 0x38c, 0x5, 0xaa, 0x56, 0x2, 0x38b, 0x38d, 0x5, 0x46, 0x24, - 0x2, 0x38c, 0x38b, 0x3, 0x2, 0x2, 0x2, 0x38c, 0x38d, 0x3, 0x2, 0x2, - 0x2, 0x38d, 0x390, 0x3, 0x2, 0x2, 0x2, 0x38e, 0x38f, 0x7, 0x1d, 0x2, - 0x2, 0x38f, 0x391, 0x7, 0xbf, 0x2, 0x2, 0x390, 0x38e, 0x3, 0x2, 0x2, - 0x2, 0x390, 0x391, 0x3, 0x2, 0x2, 0x2, 0x391, 0x393, 0x3, 0x2, 0x2, - 0x2, 0x392, 0x394, 0x5, 0x4c, 0x27, 0x2, 0x393, 0x392, 0x3, 0x2, 0x2, - 0x2, 0x393, 0x394, 0x3, 0x2, 0x2, 0x2, 0x394, 0x397, 0x3, 0x2, 0x2, - 0x2, 0x395, 0x396, 0x7, 0xa8, 0x2, 0x2, 0x396, 0x398, 0x5, 0xb0, 0x59, - 0x2, 0x397, 0x395, 0x3, 0x2, 0x2, 0x2, 0x397, 0x398, 0x3, 0x2, 0x2, - 0x2, 0x398, 0x3aa, 0x3, 0x2, 0x2, 0x2, 0x399, 0x39b, 0x5, 0xba, 0x5e, - 0x2, 0x39a, 0x39c, 0x5, 0xaa, 0x56, 0x2, 0x39b, 0x39a, 0x3, 0x2, 0x2, - 0x2, 0x39b, 0x39c, 0x3, 0x2, 0x2, 0x2, 0x39c, 0x39d, 0x3, 0x2, 0x2, - 0x2, 0x39d, 0x3a0, 0x5, 0x46, 0x24, 0x2, 0x39e, 0x39f, 0x7, 0x1d, 0x2, - 0x2, 0x39f, 0x3a1, 0x7, 0xbf, 0x2, 0x2, 0x3a0, 0x39e, 0x3, 0x2, 0x2, - 0x2, 0x3a0, 0x3a1, 0x3, 0x2, 0x2, 0x2, 0x3a1, 0x3a3, 0x3, 0x2, 0x2, - 0x2, 0x3a2, 0x3a4, 0x5, 0x4c, 0x27, 0x2, 0x3a3, 0x3a2, 0x3, 0x2, 0x2, - 0x2, 0x3a3, 0x3a4, 0x3, 0x2, 0x2, 0x2, 0x3a4, 0x3a7, 0x3, 0x2, 0x2, - 0x2, 0x3a5, 0x3a6, 0x7, 0xa8, 0x2, 0x2, 0x3a6, 0x3a8, 0x5, 0xb0, 0x59, - 0x2, 0x3a7, 0x3a5, 0x3, 0x2, 0x2, 0x2, 0x3a7, 0x3a8, 0x3, 0x2, 0x2, - 0x2, 0x3a8, 0x3aa, 0x3, 0x2, 0x2, 0x2, 0x3a9, 0x389, 0x3, 0x2, 0x2, - 0x2, 0x3a9, 0x399, 0x3, 0x2, 0x2, 0x2, 0x3aa, 0x45, 0x3, 0x2, 0x2, 0x2, - 0x3ab, 0x3ac, 0x9, 0x4, 0x2, 0x2, 0x3ac, 0x3ad, 0x5, 0xb0, 0x59, 0x2, - 0x3ad, 0x47, 0x3, 0x2, 0x2, 0x2, 0x3ae, 0x3af, 0x5, 0xba, 0x5e, 0x2, - 0x3af, 0x3b0, 0x5, 0xb0, 0x59, 0x2, 0x3b0, 0x3b1, 0x7, 0xa9, 0x2, 0x2, - 0x3b1, 0x3b2, 0x5, 0xaa, 0x56, 0x2, 0x3b2, 0x3b3, 0x7, 0x47, 0x2, 0x2, - 0x3b3, 0x3b4, 0x7, 0xbd, 0x2, 0x2, 0x3b4, 0x49, 0x3, 0x2, 0x2, 0x2, - 0x3b5, 0x3b6, 0x5, 0xba, 0x5e, 0x2, 0x3b6, 0x3b7, 0x5, 0x66, 0x34, 0x2, - 0x3b7, 0x4b, 0x3, 0x2, 0x2, 0x2, 0x3b8, 0x3b9, 0x7, 0x1a, 0x2, 0x2, - 0x3b9, 0x3ba, 0x7, 0xd0, 0x2, 0x2, 0x3ba, 0x3bf, 0x5, 0x4e, 0x28, 0x2, - 0x3bb, 0x3bc, 0x7, 0xc5, 0x2, 0x2, 0x3bc, 0x3be, 0x5, 0x4e, 0x28, 0x2, - 0x3bd, 0x3bb, 0x3, 0x2, 0x2, 0x2, 0x3be, 0x3c1, 0x3, 0x2, 0x2, 0x2, - 0x3bf, 0x3bd, 0x3, 0x2, 0x2, 0x2, 0x3bf, 0x3c0, 0x3, 0x2, 0x2, 0x2, - 0x3c0, 0x3c2, 0x3, 0x2, 0x2, 0x2, 0x3c1, 0x3bf, 0x3, 0x2, 0x2, 0x2, - 0x3c2, 0x3c3, 0x7, 0xda, 0x2, 0x2, 0x3c3, 0x4d, 0x3, 0x2, 0x2, 0x2, - 0x3c4, 0x3ca, 0x5, 0xd6, 0x6c, 0x2, 0x3c5, 0x3c7, 0x7, 0xd0, 0x2, 0x2, - 0x3c6, 0x3c8, 0x5, 0xac, 0x57, 0x2, 0x3c7, 0x3c6, 0x3, 0x2, 0x2, 0x2, - 0x3c7, 0x3c8, 0x3, 0x2, 0x2, 0x2, 0x3c8, 0x3c9, 0x3, 0x2, 0x2, 0x2, - 0x3c9, 0x3cb, 0x7, 0xda, 0x2, 0x2, 0x3ca, 0x3c5, 0x3, 0x2, 0x2, 0x2, - 
0x3ca, 0x3cb, 0x3, 0x2, 0x2, 0x2, 0x3cb, 0x4f, 0x3, 0x2, 0x2, 0x2, 0x3cc, - 0x3d4, 0x5, 0xb0, 0x59, 0x2, 0x3cd, 0x3d5, 0x7, 0x29, 0x2, 0x2, 0x3ce, - 0x3cf, 0x7, 0xa2, 0x2, 0x2, 0x3cf, 0x3d0, 0x7, 0x30, 0x2, 0x2, 0x3d0, - 0x3d5, 0x7, 0xbf, 0x2, 0x2, 0x3d1, 0x3d2, 0x7, 0xa2, 0x2, 0x2, 0x3d2, - 0x3d3, 0x7, 0xb1, 0x2, 0x2, 0x3d3, 0x3d5, 0x7, 0xbf, 0x2, 0x2, 0x3d4, - 0x3cd, 0x3, 0x2, 0x2, 0x2, 0x3d4, 0x3ce, 0x3, 0x2, 0x2, 0x2, 0x3d4, - 0x3d1, 0x3, 0x2, 0x2, 0x2, 0x3d4, 0x3d5, 0x3, 0x2, 0x2, 0x2, 0x3d5, - 0x51, 0x3, 0x2, 0x2, 0x2, 0x3d6, 0x3d8, 0x9, 0x5, 0x2, 0x2, 0x3d7, 0x3d9, - 0x7, 0x9a, 0x2, 0x2, 0x3d8, 0x3d7, 0x3, 0x2, 0x2, 0x2, 0x3d8, 0x3d9, - 0x3, 0x2, 0x2, 0x2, 0x3d9, 0x3da, 0x3, 0x2, 0x2, 0x2, 0x3da, 0x3db, - 0x5, 0xbc, 0x5f, 0x2, 0x3db, 0x53, 0x3, 0x2, 0x2, 0x2, 0x3dc, 0x3dd, - 0x9, 0x6, 0x2, 0x2, 0x3dd, 0x3e0, 0x7, 0x22, 0x2, 0x2, 0x3de, 0x3df, - 0x7, 0x4d, 0x2, 0x2, 0x3df, 0x3e1, 0x7, 0x38, 0x2, 0x2, 0x3e0, 0x3de, - 0x3, 0x2, 0x2, 0x2, 0x3e0, 0x3e1, 0x3, 0x2, 0x2, 0x2, 0x3e1, 0x3e2, - 0x3, 0x2, 0x2, 0x2, 0x3e2, 0x3e4, 0x5, 0xc6, 0x64, 0x2, 0x3e3, 0x3e5, - 0x5, 0x2c, 0x17, 0x2, 0x3e4, 0x3e3, 0x3, 0x2, 0x2, 0x2, 0x3e4, 0x3e5, - 0x3, 0x2, 0x2, 0x2, 0x3e5, 0x3fc, 0x3, 0x2, 0x2, 0x2, 0x3e6, 0x3ed, - 0x9, 0x6, 0x2, 0x2, 0x3e7, 0x3ee, 0x7, 0x2f, 0x2, 0x2, 0x3e8, 0x3ea, - 0x7, 0x9c, 0x2, 0x2, 0x3e9, 0x3e8, 0x3, 0x2, 0x2, 0x2, 0x3e9, 0x3ea, - 0x3, 0x2, 0x2, 0x2, 0x3ea, 0x3eb, 0x3, 0x2, 0x2, 0x2, 0x3eb, 0x3ee, - 0x7, 0x9a, 0x2, 0x2, 0x3ec, 0x3ee, 0x7, 0xb0, 0x2, 0x2, 0x3ed, 0x3e7, - 0x3, 0x2, 0x2, 0x2, 0x3ed, 0x3e9, 0x3, 0x2, 0x2, 0x2, 0x3ed, 0x3ec, - 0x3, 0x2, 0x2, 0x2, 0x3ee, 0x3f1, 0x3, 0x2, 0x2, 0x2, 0x3ef, 0x3f0, - 0x7, 0x4d, 0x2, 0x2, 0x3f0, 0x3f2, 0x7, 0x38, 0x2, 0x2, 0x3f1, 0x3ef, - 0x3, 0x2, 0x2, 0x2, 0x3f1, 0x3f2, 0x3, 0x2, 0x2, 0x2, 0x3f2, 0x3f3, - 0x3, 0x2, 0x2, 0x2, 0x3f3, 0x3f5, 0x5, 0xc0, 0x61, 0x2, 0x3f4, 0x3f6, - 0x5, 0x2c, 0x17, 0x2, 0x3f5, 0x3f4, 0x3, 0x2, 0x2, 0x2, 0x3f5, 0x3f6, - 0x3, 0x2, 0x2, 0x2, 0x3f6, 0x3f9, 0x3, 0x2, 0x2, 0x2, 0x3f7, 0x3f8, - 0x7, 0x71, 0x2, 0x2, 0x3f8, 0x3fa, 0x7, 0x28, 0x2, 0x2, 0x3f9, 0x3f7, - 0x3, 0x2, 0x2, 0x2, 0x3f9, 0x3fa, 0x3, 0x2, 0x2, 0x2, 0x3fa, 0x3fc, - 0x3, 0x2, 0x2, 0x2, 0x3fb, 0x3dc, 0x3, 0x2, 0x2, 0x2, 0x3fb, 0x3e6, - 0x3, 0x2, 0x2, 0x2, 0x3fc, 0x55, 0x3, 0x2, 0x2, 0x2, 0x3fd, 0x3fe, 0x7, - 0x38, 0x2, 0x2, 0x3fe, 0x3ff, 0x7, 0x22, 0x2, 0x2, 0x3ff, 0x40b, 0x5, - 0xc6, 0x64, 0x2, 0x400, 0x407, 0x7, 0x38, 0x2, 0x2, 0x401, 0x408, 0x7, - 0x2f, 0x2, 0x2, 0x402, 0x404, 0x7, 0x9c, 0x2, 0x2, 0x403, 0x402, 0x3, - 0x2, 0x2, 0x2, 0x403, 0x404, 0x3, 0x2, 0x2, 0x2, 0x404, 0x405, 0x3, - 0x2, 0x2, 0x2, 0x405, 0x408, 0x7, 0x9a, 0x2, 0x2, 0x406, 0x408, 0x7, - 0xb0, 0x2, 0x2, 0x407, 0x401, 0x3, 0x2, 0x2, 0x2, 0x407, 0x403, 0x3, - 0x2, 0x2, 0x2, 0x407, 0x406, 0x3, 0x2, 0x2, 0x2, 0x407, 0x408, 0x3, - 0x2, 0x2, 0x2, 0x408, 0x409, 0x3, 0x2, 0x2, 0x2, 0x409, 0x40b, 0x5, - 0xc0, 0x61, 0x2, 0x40a, 0x3fd, 0x3, 0x2, 0x2, 0x2, 0x40a, 0x400, 0x3, - 0x2, 0x2, 0x2, 0x40b, 0x57, 0x3, 0x2, 0x2, 0x2, 0x40c, 0x40d, 0x7, 0x39, - 0x2, 0x2, 0x40d, 0x40e, 0x7, 0xf, 0x2, 0x2, 0x40e, 0x413, 0x5, 0x4, - 0x3, 0x2, 0x40f, 0x410, 0x7, 0x39, 0x2, 0x2, 0x410, 0x411, 0x7, 0x98, - 0x2, 0x2, 0x411, 0x413, 0x5, 0x4, 0x3, 0x2, 0x412, 0x40c, 0x3, 0x2, - 0x2, 0x2, 0x412, 0x40f, 0x3, 0x2, 0x2, 0x2, 0x413, 0x59, 0x3, 0x2, 0x2, - 0x2, 0x414, 0x415, 0x7, 0x54, 0x2, 0x2, 0x415, 0x417, 0x7, 0x56, 0x2, - 0x2, 0x416, 0x418, 0x7, 0x9a, 0x2, 0x2, 0x417, 0x416, 0x3, 0x2, 0x2, - 0x2, 0x417, 0x418, 0x3, 0x2, 0x2, 0x2, 0x418, 0x41c, 0x3, 0x2, 0x2, - 0x2, 0x419, 0x41d, 0x5, 0xc0, 0x61, 0x2, 0x41a, 
0x41b, 0x7, 0x45, 0x2, - 0x2, 0x41b, 0x41d, 0x5, 0xbe, 0x60, 0x2, 0x41c, 0x419, 0x3, 0x2, 0x2, - 0x2, 0x41c, 0x41a, 0x3, 0x2, 0x2, 0x2, 0x41d, 0x41f, 0x3, 0x2, 0x2, - 0x2, 0x41e, 0x420, 0x5, 0x5c, 0x2f, 0x2, 0x41f, 0x41e, 0x3, 0x2, 0x2, - 0x2, 0x41f, 0x420, 0x3, 0x2, 0x2, 0x2, 0x420, 0x421, 0x3, 0x2, 0x2, - 0x2, 0x421, 0x422, 0x5, 0x5e, 0x30, 0x2, 0x422, 0x5b, 0x3, 0x2, 0x2, - 0x2, 0x423, 0x424, 0x7, 0xd0, 0x2, 0x2, 0x424, 0x429, 0x5, 0xba, 0x5e, - 0x2, 0x425, 0x426, 0x7, 0xc5, 0x2, 0x2, 0x426, 0x428, 0x5, 0xba, 0x5e, - 0x2, 0x427, 0x425, 0x3, 0x2, 0x2, 0x2, 0x428, 0x42b, 0x3, 0x2, 0x2, - 0x2, 0x429, 0x427, 0x3, 0x2, 0x2, 0x2, 0x429, 0x42a, 0x3, 0x2, 0x2, - 0x2, 0x42a, 0x42c, 0x3, 0x2, 0x2, 0x2, 0x42b, 0x429, 0x3, 0x2, 0x2, - 0x2, 0x42c, 0x42d, 0x7, 0xda, 0x2, 0x2, 0x42d, 0x5d, 0x3, 0x2, 0x2, - 0x2, 0x42e, 0x42f, 0x7, 0x41, 0x2, 0x2, 0x42f, 0x438, 0x5, 0xd6, 0x6c, - 0x2, 0x430, 0x438, 0x7, 0xaf, 0x2, 0x2, 0x431, 0x433, 0x5, 0x68, 0x35, - 0x2, 0x432, 0x434, 0x7, 0xdb, 0x2, 0x2, 0x433, 0x432, 0x3, 0x2, 0x2, - 0x2, 0x433, 0x434, 0x3, 0x2, 0x2, 0x2, 0x434, 0x435, 0x3, 0x2, 0x2, - 0x2, 0x435, 0x436, 0x7, 0x2, 0x2, 0x3, 0x436, 0x438, 0x3, 0x2, 0x2, - 0x2, 0x437, 0x42e, 0x3, 0x2, 0x2, 0x2, 0x437, 0x430, 0x3, 0x2, 0x2, - 0x2, 0x437, 0x431, 0x3, 0x2, 0x2, 0x2, 0x438, 0x5f, 0x3, 0x2, 0x2, 0x2, - 0x439, 0x43a, 0x7, 0x5b, 0x2, 0x2, 0x43a, 0x43c, 0x7, 0x6f, 0x2, 0x2, - 0x43b, 0x43d, 0x5, 0x2c, 0x17, 0x2, 0x43c, 0x43b, 0x3, 0x2, 0x2, 0x2, - 0x43c, 0x43d, 0x3, 0x2, 0x2, 0x2, 0x43d, 0x43e, 0x3, 0x2, 0x2, 0x2, - 0x43e, 0x440, 0x5, 0x78, 0x3d, 0x2, 0x43f, 0x441, 0x9, 0x7, 0x2, 0x2, - 0x440, 0x43f, 0x3, 0x2, 0x2, 0x2, 0x440, 0x441, 0x3, 0x2, 0x2, 0x2, - 0x441, 0x61, 0x3, 0x2, 0x2, 0x2, 0x442, 0x443, 0x7, 0x77, 0x2, 0x2, - 0x443, 0x444, 0x7, 0x9a, 0x2, 0x2, 0x444, 0x446, 0x5, 0xc0, 0x61, 0x2, - 0x445, 0x447, 0x5, 0x2c, 0x17, 0x2, 0x446, 0x445, 0x3, 0x2, 0x2, 0x2, - 0x446, 0x447, 0x3, 0x2, 0x2, 0x2, 0x447, 0x449, 0x3, 0x2, 0x2, 0x2, - 0x448, 0x44a, 0x5, 0x10, 0x9, 0x2, 0x449, 0x448, 0x3, 0x2, 0x2, 0x2, - 0x449, 0x44a, 0x3, 0x2, 0x2, 0x2, 0x44a, 0x44c, 0x3, 0x2, 0x2, 0x2, - 0x44b, 0x44d, 0x7, 0x3d, 0x2, 0x2, 0x44c, 0x44b, 0x3, 0x2, 0x2, 0x2, - 0x44c, 0x44d, 0x3, 0x2, 0x2, 0x2, 0x44d, 0x44f, 0x3, 0x2, 0x2, 0x2, - 0x44e, 0x450, 0x7, 0x26, 0x2, 0x2, 0x44f, 0x44e, 0x3, 0x2, 0x2, 0x2, - 0x44f, 0x450, 0x3, 0x2, 0x2, 0x2, 0x450, 0x63, 0x3, 0x2, 0x2, 0x2, 0x451, - 0x452, 0x7, 0x85, 0x2, 0x2, 0x452, 0x453, 0x7, 0x9a, 0x2, 0x2, 0x453, - 0x454, 0x5, 0xc0, 0x61, 0x2, 0x454, 0x455, 0x7, 0xa2, 0x2, 0x2, 0x455, - 0x45d, 0x5, 0xc0, 0x61, 0x2, 0x456, 0x457, 0x7, 0xc5, 0x2, 0x2, 0x457, - 0x458, 0x5, 0xc0, 0x61, 0x2, 0x458, 0x459, 0x7, 0xa2, 0x2, 0x2, 0x459, - 0x45a, 0x5, 0xc0, 0x61, 0x2, 0x45a, 0x45c, 0x3, 0x2, 0x2, 0x2, 0x45b, - 0x456, 0x3, 0x2, 0x2, 0x2, 0x45c, 0x45f, 0x3, 0x2, 0x2, 0x2, 0x45d, - 0x45b, 0x3, 0x2, 0x2, 0x2, 0x45d, 0x45e, 0x3, 0x2, 0x2, 0x2, 0x45e, - 0x461, 0x3, 0x2, 0x2, 0x2, 0x45f, 0x45d, 0x3, 0x2, 0x2, 0x2, 0x460, - 0x462, 0x5, 0x2c, 0x17, 0x2, 0x461, 0x460, 0x3, 0x2, 0x2, 0x2, 0x461, - 0x462, 0x3, 0x2, 0x2, 0x2, 0x462, 0x65, 0x3, 0x2, 0x2, 0x2, 0x463, 0x465, - 0x7, 0xd0, 0x2, 0x2, 0x464, 0x466, 0x5, 0x6e, 0x38, 0x2, 0x465, 0x464, - 0x3, 0x2, 0x2, 0x2, 0x465, 0x466, 0x3, 0x2, 0x2, 0x2, 0x466, 0x467, - 0x3, 0x2, 0x2, 0x2, 0x467, 0x468, 0x7, 0x8d, 0x2, 0x2, 0x468, 0x46a, - 0x5, 0xac, 0x57, 0x2, 0x469, 0x46b, 0x5, 0x7a, 0x3e, 0x2, 0x46a, 0x469, - 0x3, 0x2, 0x2, 0x2, 0x46a, 0x46b, 0x3, 0x2, 0x2, 0x2, 0x46b, 0x46d, - 0x3, 0x2, 0x2, 0x2, 0x46c, 0x46e, 0x5, 0x80, 0x41, 0x2, 0x46d, 0x46c, - 0x3, 0x2, 0x2, 0x2, 0x46d, 
0x46e, 0x3, 0x2, 0x2, 0x2, 0x46e, 0x46f, - 0x3, 0x2, 0x2, 0x2, 0x46f, 0x470, 0x7, 0xda, 0x2, 0x2, 0x470, 0x67, - 0x3, 0x2, 0x2, 0x2, 0x471, 0x477, 0x5, 0x6a, 0x36, 0x2, 0x472, 0x473, - 0x7, 0xaa, 0x2, 0x2, 0x473, 0x474, 0x7, 0x6, 0x2, 0x2, 0x474, 0x476, - 0x5, 0x6a, 0x36, 0x2, 0x475, 0x472, 0x3, 0x2, 0x2, 0x2, 0x476, 0x479, - 0x3, 0x2, 0x2, 0x2, 0x477, 0x475, 0x3, 0x2, 0x2, 0x2, 0x477, 0x478, - 0x3, 0x2, 0x2, 0x2, 0x478, 0x69, 0x3, 0x2, 0x2, 0x2, 0x479, 0x477, 0x3, - 0x2, 0x2, 0x2, 0x47a, 0x480, 0x5, 0x6c, 0x37, 0x2, 0x47b, 0x47c, 0x7, - 0xd0, 0x2, 0x2, 0x47c, 0x47d, 0x5, 0x68, 0x35, 0x2, 0x47d, 0x47e, 0x7, - 0xda, 0x2, 0x2, 0x47e, 0x480, 0x3, 0x2, 0x2, 0x2, 0x47f, 0x47a, 0x3, - 0x2, 0x2, 0x2, 0x47f, 0x47b, 0x3, 0x2, 0x2, 0x2, 0x480, 0x6b, 0x3, 0x2, - 0x2, 0x2, 0x481, 0x483, 0x5, 0x6e, 0x38, 0x2, 0x482, 0x481, 0x3, 0x2, - 0x2, 0x2, 0x482, 0x483, 0x3, 0x2, 0x2, 0x2, 0x483, 0x484, 0x3, 0x2, - 0x2, 0x2, 0x484, 0x486, 0x7, 0x8d, 0x2, 0x2, 0x485, 0x487, 0x7, 0x31, - 0x2, 0x2, 0x486, 0x485, 0x3, 0x2, 0x2, 0x2, 0x486, 0x487, 0x3, 0x2, - 0x2, 0x2, 0x487, 0x489, 0x3, 0x2, 0x2, 0x2, 0x488, 0x48a, 0x5, 0x70, - 0x39, 0x2, 0x489, 0x488, 0x3, 0x2, 0x2, 0x2, 0x489, 0x48a, 0x3, 0x2, - 0x2, 0x2, 0x48a, 0x48b, 0x3, 0x2, 0x2, 0x2, 0x48b, 0x48d, 0x5, 0xac, - 0x57, 0x2, 0x48c, 0x48e, 0x5, 0x72, 0x3a, 0x2, 0x48d, 0x48c, 0x3, 0x2, - 0x2, 0x2, 0x48d, 0x48e, 0x3, 0x2, 0x2, 0x2, 0x48e, 0x490, 0x3, 0x2, - 0x2, 0x2, 0x48f, 0x491, 0x5, 0x74, 0x3b, 0x2, 0x490, 0x48f, 0x3, 0x2, - 0x2, 0x2, 0x490, 0x491, 0x3, 0x2, 0x2, 0x2, 0x491, 0x493, 0x3, 0x2, - 0x2, 0x2, 0x492, 0x494, 0x5, 0x76, 0x3c, 0x2, 0x493, 0x492, 0x3, 0x2, - 0x2, 0x2, 0x493, 0x494, 0x3, 0x2, 0x2, 0x2, 0x494, 0x496, 0x3, 0x2, - 0x2, 0x2, 0x495, 0x497, 0x5, 0x78, 0x3d, 0x2, 0x496, 0x495, 0x3, 0x2, - 0x2, 0x2, 0x496, 0x497, 0x3, 0x2, 0x2, 0x2, 0x497, 0x499, 0x3, 0x2, - 0x2, 0x2, 0x498, 0x49a, 0x5, 0x7a, 0x3e, 0x2, 0x499, 0x498, 0x3, 0x2, - 0x2, 0x2, 0x499, 0x49a, 0x3, 0x2, 0x2, 0x2, 0x49a, 0x49d, 0x3, 0x2, - 0x2, 0x2, 0x49b, 0x49c, 0x7, 0xb6, 0x2, 0x2, 0x49c, 0x49e, 0x9, 0x8, - 0x2, 0x2, 0x49d, 0x49b, 0x3, 0x2, 0x2, 0x2, 0x49d, 0x49e, 0x3, 0x2, - 0x2, 0x2, 0x49e, 0x4a1, 0x3, 0x2, 0x2, 0x2, 0x49f, 0x4a0, 0x7, 0xb6, - 0x2, 0x2, 0x4a0, 0x4a2, 0x7, 0xa4, 0x2, 0x2, 0x4a1, 0x49f, 0x3, 0x2, - 0x2, 0x2, 0x4a1, 0x4a2, 0x3, 0x2, 0x2, 0x2, 0x4a2, 0x4a4, 0x3, 0x2, - 0x2, 0x2, 0x4a3, 0x4a5, 0x5, 0x7c, 0x3f, 0x2, 0x4a4, 0x4a3, 0x3, 0x2, - 0x2, 0x2, 0x4a4, 0x4a5, 0x3, 0x2, 0x2, 0x2, 0x4a5, 0x4a7, 0x3, 0x2, - 0x2, 0x2, 0x4a6, 0x4a8, 0x5, 0x7e, 0x40, 0x2, 0x4a7, 0x4a6, 0x3, 0x2, - 0x2, 0x2, 0x4a7, 0x4a8, 0x3, 0x2, 0x2, 0x2, 0x4a8, 0x4aa, 0x3, 0x2, - 0x2, 0x2, 0x4a9, 0x4ab, 0x5, 0x82, 0x42, 0x2, 0x4aa, 0x4a9, 0x3, 0x2, - 0x2, 0x2, 0x4aa, 0x4ab, 0x3, 0x2, 0x2, 0x2, 0x4ab, 0x4ad, 0x3, 0x2, - 0x2, 0x2, 0x4ac, 0x4ae, 0x5, 0x84, 0x43, 0x2, 0x4ad, 0x4ac, 0x3, 0x2, - 0x2, 0x2, 0x4ad, 0x4ae, 0x3, 0x2, 0x2, 0x2, 0x4ae, 0x4b0, 0x3, 0x2, - 0x2, 0x2, 0x4af, 0x4b1, 0x5, 0x86, 0x44, 0x2, 0x4b0, 0x4af, 0x3, 0x2, - 0x2, 0x2, 0x4b0, 0x4b1, 0x3, 0x2, 0x2, 0x2, 0x4b1, 0x6d, 0x3, 0x2, 0x2, - 0x2, 0x4b2, 0x4b3, 0x7, 0xb6, 0x2, 0x2, 0x4b3, 0x4b4, 0x5, 0xac, 0x57, - 0x2, 0x4b4, 0x6f, 0x3, 0x2, 0x2, 0x2, 0x4b5, 0x4b6, 0x7, 0xa3, 0x2, - 0x2, 0x4b6, 0x4b9, 0x7, 0xbd, 0x2, 0x2, 0x4b7, 0x4b8, 0x7, 0xb6, 0x2, - 0x2, 0x4b8, 0x4ba, 0x7, 0x9f, 0x2, 0x2, 0x4b9, 0x4b7, 0x3, 0x2, 0x2, - 0x2, 0x4b9, 0x4ba, 0x3, 0x2, 0x2, 0x2, 0x4ba, 0x71, 0x3, 0x2, 0x2, 0x2, - 0x4bb, 0x4bc, 0x7, 0x43, 0x2, 0x2, 0x4bc, 0x4bd, 0x5, 0x88, 0x45, 0x2, - 0x4bd, 0x73, 0x3, 0x2, 0x2, 0x2, 0x4be, 0x4c0, 0x9, 0x9, 0x2, 0x2, 0x4bf, - 0x4be, 
0x3, 0x2, 0x2, 0x2, 0x4bf, 0x4c0, 0x3, 0x2, 0x2, 0x2, 0x4c0, - 0x4c1, 0x3, 0x2, 0x2, 0x2, 0x4c1, 0x4c2, 0x7, 0xb, 0x2, 0x2, 0x4c2, - 0x4c3, 0x7, 0x59, 0x2, 0x2, 0x4c3, 0x4c4, 0x5, 0xac, 0x57, 0x2, 0x4c4, - 0x75, 0x3, 0x2, 0x2, 0x2, 0x4c5, 0x4c6, 0x7, 0x7e, 0x2, 0x2, 0x4c6, - 0x4c7, 0x5, 0xb0, 0x59, 0x2, 0x4c7, 0x77, 0x3, 0x2, 0x2, 0x2, 0x4c8, - 0x4c9, 0x7, 0xb5, 0x2, 0x2, 0x4c9, 0x4ca, 0x5, 0xb0, 0x59, 0x2, 0x4ca, - 0x79, 0x3, 0x2, 0x2, 0x2, 0x4cb, 0x4cc, 0x7, 0x48, 0x2, 0x2, 0x4cc, - 0x4d3, 0x7, 0x14, 0x2, 0x2, 0x4cd, 0x4ce, 0x9, 0x8, 0x2, 0x2, 0x4ce, - 0x4cf, 0x7, 0xd0, 0x2, 0x2, 0x4cf, 0x4d0, 0x5, 0xac, 0x57, 0x2, 0x4d0, - 0x4d1, 0x7, 0xda, 0x2, 0x2, 0x4d1, 0x4d4, 0x3, 0x2, 0x2, 0x2, 0x4d2, - 0x4d4, 0x5, 0xac, 0x57, 0x2, 0x4d3, 0x4cd, 0x3, 0x2, 0x2, 0x2, 0x4d3, - 0x4d2, 0x3, 0x2, 0x2, 0x2, 0x4d4, 0x7b, 0x3, 0x2, 0x2, 0x2, 0x4d5, 0x4d6, - 0x7, 0x49, 0x2, 0x2, 0x4d6, 0x4d7, 0x5, 0xb0, 0x59, 0x2, 0x4d7, 0x7d, - 0x3, 0x2, 0x2, 0x2, 0x4d8, 0x4d9, 0x7, 0x79, 0x2, 0x2, 0x4d9, 0x4da, - 0x7, 0x14, 0x2, 0x2, 0x4da, 0x4db, 0x5, 0x94, 0x4b, 0x2, 0x4db, 0x7f, - 0x3, 0x2, 0x2, 0x2, 0x4dc, 0x4dd, 0x7, 0x79, 0x2, 0x2, 0x4dd, 0x4de, - 0x7, 0x14, 0x2, 0x2, 0x4de, 0x4df, 0x5, 0xac, 0x57, 0x2, 0x4df, 0x81, - 0x3, 0x2, 0x2, 0x2, 0x4e0, 0x4e1, 0x7, 0x62, 0x2, 0x2, 0x4e1, 0x4e2, - 0x5, 0x92, 0x4a, 0x2, 0x4e2, 0x4e3, 0x7, 0x14, 0x2, 0x2, 0x4e3, 0x4e4, - 0x5, 0xac, 0x57, 0x2, 0x4e4, 0x83, 0x3, 0x2, 0x2, 0x2, 0x4e5, 0x4e6, - 0x7, 0x62, 0x2, 0x2, 0x4e6, 0x4e9, 0x5, 0x92, 0x4a, 0x2, 0x4e7, 0x4e8, - 0x7, 0xb6, 0x2, 0x2, 0x4e8, 0x4ea, 0x7, 0x9f, 0x2, 0x2, 0x4e9, 0x4e7, - 0x3, 0x2, 0x2, 0x2, 0x4e9, 0x4ea, 0x3, 0x2, 0x2, 0x2, 0x4ea, 0x85, 0x3, - 0x2, 0x2, 0x2, 0x4eb, 0x4ec, 0x7, 0x91, 0x2, 0x2, 0x4ec, 0x4ed, 0x5, - 0x9a, 0x4e, 0x2, 0x4ed, 0x87, 0x3, 0x2, 0x2, 0x2, 0x4ee, 0x4ef, 0x8, - 0x45, 0x1, 0x2, 0x4ef, 0x4f1, 0x5, 0xbc, 0x5f, 0x2, 0x4f0, 0x4f2, 0x7, - 0x3d, 0x2, 0x2, 0x4f1, 0x4f0, 0x3, 0x2, 0x2, 0x2, 0x4f1, 0x4f2, 0x3, - 0x2, 0x2, 0x2, 0x4f2, 0x4f4, 0x3, 0x2, 0x2, 0x2, 0x4f3, 0x4f5, 0x5, - 0x90, 0x49, 0x2, 0x4f4, 0x4f3, 0x3, 0x2, 0x2, 0x2, 0x4f4, 0x4f5, 0x3, - 0x2, 0x2, 0x2, 0x4f5, 0x4fb, 0x3, 0x2, 0x2, 0x2, 0x4f6, 0x4f7, 0x7, - 0xd0, 0x2, 0x2, 0x4f7, 0x4f8, 0x5, 0x88, 0x45, 0x2, 0x4f8, 0x4f9, 0x7, - 0xda, 0x2, 0x2, 0x4f9, 0x4fb, 0x3, 0x2, 0x2, 0x2, 0x4fa, 0x4ee, 0x3, - 0x2, 0x2, 0x2, 0x4fa, 0x4f6, 0x3, 0x2, 0x2, 0x2, 0x4fb, 0x50d, 0x3, - 0x2, 0x2, 0x2, 0x4fc, 0x4fd, 0xc, 0x5, 0x2, 0x2, 0x4fd, 0x4fe, 0x5, - 0x8c, 0x47, 0x2, 0x4fe, 0x4ff, 0x5, 0x88, 0x45, 0x6, 0x4ff, 0x50c, 0x3, - 0x2, 0x2, 0x2, 0x500, 0x502, 0xc, 0x6, 0x2, 0x2, 0x501, 0x503, 0x9, - 0xa, 0x2, 0x2, 0x502, 0x501, 0x3, 0x2, 0x2, 0x2, 0x502, 0x503, 0x3, - 0x2, 0x2, 0x2, 0x503, 0x505, 0x3, 0x2, 0x2, 0x2, 0x504, 0x506, 0x5, - 0x8a, 0x46, 0x2, 0x505, 0x504, 0x3, 0x2, 0x2, 0x2, 0x505, 0x506, 0x3, - 0x2, 0x2, 0x2, 0x506, 0x507, 0x3, 0x2, 0x2, 0x2, 0x507, 0x508, 0x7, - 0x59, 0x2, 0x2, 0x508, 0x509, 0x5, 0x88, 0x45, 0x2, 0x509, 0x50a, 0x5, - 0x8e, 0x48, 0x2, 0x50a, 0x50c, 0x3, 0x2, 0x2, 0x2, 0x50b, 0x4fc, 0x3, - 0x2, 0x2, 0x2, 0x50b, 0x500, 0x3, 0x2, 0x2, 0x2, 0x50c, 0x50f, 0x3, - 0x2, 0x2, 0x2, 0x50d, 0x50b, 0x3, 0x2, 0x2, 0x2, 0x50d, 0x50e, 0x3, - 0x2, 0x2, 0x2, 0x50e, 0x89, 0x3, 0x2, 0x2, 0x2, 0x50f, 0x50d, 0x3, 0x2, - 0x2, 0x2, 0x510, 0x512, 0x9, 0xb, 0x2, 0x2, 0x511, 0x510, 0x3, 0x2, - 0x2, 0x2, 0x511, 0x512, 0x3, 0x2, 0x2, 0x2, 0x512, 0x513, 0x3, 0x2, - 0x2, 0x2, 0x513, 0x51a, 0x7, 0x53, 0x2, 0x2, 0x514, 0x516, 0x7, 0x53, - 0x2, 0x2, 0x515, 0x517, 0x9, 0xb, 0x2, 0x2, 0x516, 0x515, 0x3, 0x2, - 0x2, 0x2, 0x516, 0x517, 0x3, 0x2, 0x2, 0x2, 0x517, 0x51a, 
0x3, 0x2, - 0x2, 0x2, 0x518, 0x51a, 0x9, 0xb, 0x2, 0x2, 0x519, 0x511, 0x3, 0x2, - 0x2, 0x2, 0x519, 0x514, 0x3, 0x2, 0x2, 0x2, 0x519, 0x518, 0x3, 0x2, - 0x2, 0x2, 0x51a, 0x53c, 0x3, 0x2, 0x2, 0x2, 0x51b, 0x51d, 0x9, 0xc, - 0x2, 0x2, 0x51c, 0x51b, 0x3, 0x2, 0x2, 0x2, 0x51c, 0x51d, 0x3, 0x2, - 0x2, 0x2, 0x51d, 0x51e, 0x3, 0x2, 0x2, 0x2, 0x51e, 0x520, 0x9, 0xd, - 0x2, 0x2, 0x51f, 0x521, 0x7, 0x7a, 0x2, 0x2, 0x520, 0x51f, 0x3, 0x2, - 0x2, 0x2, 0x520, 0x521, 0x3, 0x2, 0x2, 0x2, 0x521, 0x52a, 0x3, 0x2, - 0x2, 0x2, 0x522, 0x524, 0x9, 0xd, 0x2, 0x2, 0x523, 0x525, 0x7, 0x7a, - 0x2, 0x2, 0x524, 0x523, 0x3, 0x2, 0x2, 0x2, 0x524, 0x525, 0x3, 0x2, - 0x2, 0x2, 0x525, 0x527, 0x3, 0x2, 0x2, 0x2, 0x526, 0x528, 0x9, 0xc, - 0x2, 0x2, 0x527, 0x526, 0x3, 0x2, 0x2, 0x2, 0x527, 0x528, 0x3, 0x2, - 0x2, 0x2, 0x528, 0x52a, 0x3, 0x2, 0x2, 0x2, 0x529, 0x51c, 0x3, 0x2, - 0x2, 0x2, 0x529, 0x522, 0x3, 0x2, 0x2, 0x2, 0x52a, 0x53c, 0x3, 0x2, - 0x2, 0x2, 0x52b, 0x52d, 0x9, 0xe, 0x2, 0x2, 0x52c, 0x52b, 0x3, 0x2, - 0x2, 0x2, 0x52c, 0x52d, 0x3, 0x2, 0x2, 0x2, 0x52d, 0x52e, 0x3, 0x2, - 0x2, 0x2, 0x52e, 0x530, 0x7, 0x44, 0x2, 0x2, 0x52f, 0x531, 0x7, 0x7a, - 0x2, 0x2, 0x530, 0x52f, 0x3, 0x2, 0x2, 0x2, 0x530, 0x531, 0x3, 0x2, - 0x2, 0x2, 0x531, 0x53a, 0x3, 0x2, 0x2, 0x2, 0x532, 0x534, 0x7, 0x44, - 0x2, 0x2, 0x533, 0x535, 0x7, 0x7a, 0x2, 0x2, 0x534, 0x533, 0x3, 0x2, - 0x2, 0x2, 0x534, 0x535, 0x3, 0x2, 0x2, 0x2, 0x535, 0x537, 0x3, 0x2, - 0x2, 0x2, 0x536, 0x538, 0x9, 0xe, 0x2, 0x2, 0x537, 0x536, 0x3, 0x2, - 0x2, 0x2, 0x537, 0x538, 0x3, 0x2, 0x2, 0x2, 0x538, 0x53a, 0x3, 0x2, - 0x2, 0x2, 0x539, 0x52c, 0x3, 0x2, 0x2, 0x2, 0x539, 0x532, 0x3, 0x2, - 0x2, 0x2, 0x53a, 0x53c, 0x3, 0x2, 0x2, 0x2, 0x53b, 0x519, 0x3, 0x2, - 0x2, 0x2, 0x53b, 0x529, 0x3, 0x2, 0x2, 0x2, 0x53b, 0x539, 0x3, 0x2, - 0x2, 0x2, 0x53c, 0x8b, 0x3, 0x2, 0x2, 0x2, 0x53d, 0x53f, 0x9, 0xa, 0x2, - 0x2, 0x53e, 0x53d, 0x3, 0x2, 0x2, 0x2, 0x53e, 0x53f, 0x3, 0x2, 0x2, - 0x2, 0x53f, 0x540, 0x3, 0x2, 0x2, 0x2, 0x540, 0x541, 0x7, 0x20, 0x2, - 0x2, 0x541, 0x544, 0x7, 0x59, 0x2, 0x2, 0x542, 0x544, 0x7, 0xc5, 0x2, - 0x2, 0x543, 0x53e, 0x3, 0x2, 0x2, 0x2, 0x543, 0x542, 0x3, 0x2, 0x2, - 0x2, 0x544, 0x8d, 0x3, 0x2, 0x2, 0x2, 0x545, 0x546, 0x7, 0x76, 0x2, - 0x2, 0x546, 0x54f, 0x5, 0xac, 0x57, 0x2, 0x547, 0x548, 0x7, 0xad, 0x2, - 0x2, 0x548, 0x549, 0x7, 0xd0, 0x2, 0x2, 0x549, 0x54a, 0x5, 0xac, 0x57, - 0x2, 0x54a, 0x54b, 0x7, 0xda, 0x2, 0x2, 0x54b, 0x54f, 0x3, 0x2, 0x2, - 0x2, 0x54c, 0x54d, 0x7, 0xad, 0x2, 0x2, 0x54d, 0x54f, 0x5, 0xac, 0x57, - 0x2, 0x54e, 0x545, 0x3, 0x2, 0x2, 0x2, 0x54e, 0x547, 0x3, 0x2, 0x2, - 0x2, 0x54e, 0x54c, 0x3, 0x2, 0x2, 0x2, 0x54f, 0x8f, 0x3, 0x2, 0x2, 0x2, - 0x550, 0x551, 0x7, 0x8b, 0x2, 0x2, 0x551, 0x554, 0x5, 0x98, 0x4d, 0x2, - 0x552, 0x553, 0x7, 0x75, 0x2, 0x2, 0x553, 0x555, 0x5, 0x98, 0x4d, 0x2, - 0x554, 0x552, 0x3, 0x2, 0x2, 0x2, 0x554, 0x555, 0x3, 0x2, 0x2, 0x2, - 0x555, 0x91, 0x3, 0x2, 0x2, 0x2, 0x556, 0x559, 0x5, 0xb0, 0x59, 0x2, - 0x557, 0x558, 0x9, 0xf, 0x2, 0x2, 0x558, 0x55a, 0x5, 0xb0, 0x59, 0x2, - 0x559, 0x557, 0x3, 0x2, 0x2, 0x2, 0x559, 0x55a, 0x3, 0x2, 0x2, 0x2, - 0x55a, 0x93, 0x3, 0x2, 0x2, 0x2, 0x55b, 0x560, 0x5, 0x96, 0x4c, 0x2, - 0x55c, 0x55d, 0x7, 0xc5, 0x2, 0x2, 0x55d, 0x55f, 0x5, 0x96, 0x4c, 0x2, - 0x55e, 0x55c, 0x3, 0x2, 0x2, 0x2, 0x55f, 0x562, 0x3, 0x2, 0x2, 0x2, - 0x560, 0x55e, 0x3, 0x2, 0x2, 0x2, 0x560, 0x561, 0x3, 0x2, 0x2, 0x2, - 0x561, 0x95, 0x3, 0x2, 0x2, 0x2, 0x562, 0x560, 0x3, 0x2, 0x2, 0x2, 0x563, - 0x565, 0x5, 0xb0, 0x59, 0x2, 0x564, 0x566, 0x9, 0x10, 0x2, 0x2, 0x565, - 0x564, 0x3, 0x2, 0x2, 0x2, 0x565, 0x566, 0x3, 0x2, 0x2, 0x2, 
0x566, - 0x569, 0x3, 0x2, 0x2, 0x2, 0x567, 0x568, 0x7, 0x74, 0x2, 0x2, 0x568, - 0x56a, 0x9, 0x11, 0x2, 0x2, 0x569, 0x567, 0x3, 0x2, 0x2, 0x2, 0x569, - 0x56a, 0x3, 0x2, 0x2, 0x2, 0x56a, 0x56d, 0x3, 0x2, 0x2, 0x2, 0x56b, - 0x56c, 0x7, 0x1b, 0x2, 0x2, 0x56c, 0x56e, 0x7, 0xbf, 0x2, 0x2, 0x56d, - 0x56b, 0x3, 0x2, 0x2, 0x2, 0x56d, 0x56e, 0x3, 0x2, 0x2, 0x2, 0x56e, - 0x97, 0x3, 0x2, 0x2, 0x2, 0x56f, 0x572, 0x5, 0xca, 0x66, 0x2, 0x570, - 0x571, 0x7, 0xdc, 0x2, 0x2, 0x571, 0x573, 0x5, 0xca, 0x66, 0x2, 0x572, - 0x570, 0x3, 0x2, 0x2, 0x2, 0x572, 0x573, 0x3, 0x2, 0x2, 0x2, 0x573, - 0x99, 0x3, 0x2, 0x2, 0x2, 0x574, 0x579, 0x5, 0x9c, 0x4f, 0x2, 0x575, - 0x576, 0x7, 0xc5, 0x2, 0x2, 0x576, 0x578, 0x5, 0x9c, 0x4f, 0x2, 0x577, - 0x575, 0x3, 0x2, 0x2, 0x2, 0x578, 0x57b, 0x3, 0x2, 0x2, 0x2, 0x579, - 0x577, 0x3, 0x2, 0x2, 0x2, 0x579, 0x57a, 0x3, 0x2, 0x2, 0x2, 0x57a, - 0x9b, 0x3, 0x2, 0x2, 0x2, 0x57b, 0x579, 0x3, 0x2, 0x2, 0x2, 0x57c, 0x57d, - 0x5, 0xd6, 0x6c, 0x2, 0x57d, 0x57e, 0x7, 0xca, 0x2, 0x2, 0x57e, 0x57f, - 0x5, 0xcc, 0x67, 0x2, 0x57f, 0x9d, 0x3, 0x2, 0x2, 0x2, 0x580, 0x581, - 0x7, 0x90, 0x2, 0x2, 0x581, 0x582, 0x5, 0x9a, 0x4e, 0x2, 0x582, 0x9f, - 0x3, 0x2, 0x2, 0x2, 0x583, 0x584, 0x7, 0x92, 0x2, 0x2, 0x584, 0x585, - 0x7, 0x1f, 0x2, 0x2, 0x585, 0x586, 0x7, 0x22, 0x2, 0x2, 0x586, 0x5ae, - 0x5, 0xc6, 0x64, 0x2, 0x587, 0x588, 0x7, 0x92, 0x2, 0x2, 0x588, 0x589, - 0x7, 0x1f, 0x2, 0x2, 0x589, 0x58a, 0x7, 0x2f, 0x2, 0x2, 0x58a, 0x5ae, - 0x5, 0xc0, 0x61, 0x2, 0x58b, 0x58c, 0x7, 0x92, 0x2, 0x2, 0x58c, 0x58e, - 0x7, 0x1f, 0x2, 0x2, 0x58d, 0x58f, 0x7, 0x9c, 0x2, 0x2, 0x58e, 0x58d, - 0x3, 0x2, 0x2, 0x2, 0x58e, 0x58f, 0x3, 0x2, 0x2, 0x2, 0x58f, 0x591, - 0x3, 0x2, 0x2, 0x2, 0x590, 0x592, 0x7, 0x9a, 0x2, 0x2, 0x591, 0x590, - 0x3, 0x2, 0x2, 0x2, 0x591, 0x592, 0x3, 0x2, 0x2, 0x2, 0x592, 0x593, - 0x3, 0x2, 0x2, 0x2, 0x593, 0x5ae, 0x5, 0xc0, 0x61, 0x2, 0x594, 0x595, - 0x7, 0x92, 0x2, 0x2, 0x595, 0x5ae, 0x7, 0x23, 0x2, 0x2, 0x596, 0x597, - 0x7, 0x92, 0x2, 0x2, 0x597, 0x59a, 0x7, 0x2e, 0x2, 0x2, 0x598, 0x599, - 0x7, 0x43, 0x2, 0x2, 0x599, 0x59b, 0x5, 0xc6, 0x64, 0x2, 0x59a, 0x598, - 0x3, 0x2, 0x2, 0x2, 0x59a, 0x59b, 0x3, 0x2, 0x2, 0x2, 0x59b, 0x5ae, - 0x3, 0x2, 0x2, 0x2, 0x59c, 0x59e, 0x7, 0x92, 0x2, 0x2, 0x59d, 0x59f, - 0x7, 0x9c, 0x2, 0x2, 0x59e, 0x59d, 0x3, 0x2, 0x2, 0x2, 0x59e, 0x59f, - 0x3, 0x2, 0x2, 0x2, 0x59f, 0x5a0, 0x3, 0x2, 0x2, 0x2, 0x5a0, 0x5a3, - 0x7, 0x9b, 0x2, 0x2, 0x5a1, 0x5a2, 0x9, 0x12, 0x2, 0x2, 0x5a2, 0x5a4, - 0x5, 0xc6, 0x64, 0x2, 0x5a3, 0x5a1, 0x3, 0x2, 0x2, 0x2, 0x5a3, 0x5a4, - 0x3, 0x2, 0x2, 0x2, 0x5a4, 0x5a8, 0x3, 0x2, 0x2, 0x2, 0x5a5, 0x5a6, - 0x7, 0x61, 0x2, 0x2, 0x5a6, 0x5a9, 0x7, 0xbf, 0x2, 0x2, 0x5a7, 0x5a9, - 0x5, 0x78, 0x3d, 0x2, 0x5a8, 0x5a5, 0x3, 0x2, 0x2, 0x2, 0x5a8, 0x5a7, - 0x3, 0x2, 0x2, 0x2, 0x5a8, 0x5a9, 0x3, 0x2, 0x2, 0x2, 0x5a9, 0x5ab, - 0x3, 0x2, 0x2, 0x2, 0x5aa, 0x5ac, 0x5, 0x84, 0x43, 0x2, 0x5ab, 0x5aa, - 0x3, 0x2, 0x2, 0x2, 0x5ab, 0x5ac, 0x3, 0x2, 0x2, 0x2, 0x5ac, 0x5ae, - 0x3, 0x2, 0x2, 0x2, 0x5ad, 0x583, 0x3, 0x2, 0x2, 0x2, 0x5ad, 0x587, - 0x3, 0x2, 0x2, 0x2, 0x5ad, 0x58b, 0x3, 0x2, 0x2, 0x2, 0x5ad, 0x594, - 0x3, 0x2, 0x2, 0x2, 0x5ad, 0x596, 0x3, 0x2, 0x2, 0x2, 0x5ad, 0x59c, - 0x3, 0x2, 0x2, 0x2, 0x5ae, 0xa1, 0x3, 0x2, 0x2, 0x2, 0x5af, 0x5b0, 0x7, - 0x99, 0x2, 0x2, 0x5b0, 0x5b1, 0x7, 0x3f, 0x2, 0x2, 0x5b1, 0x5b2, 0x7, - 0x32, 0x2, 0x2, 0x5b2, 0x5d2, 0x5, 0xc0, 0x61, 0x2, 0x5b3, 0x5b4, 0x7, - 0x99, 0x2, 0x2, 0x5b4, 0x5b5, 0x7, 0x3f, 0x2, 0x2, 0x5b5, 0x5d2, 0x7, - 0x65, 0x2, 0x2, 0x5b6, 0x5b7, 0x7, 0x99, 0x2, 0x2, 0x5b7, 0x5b8, 0x7, - 0x83, 0x2, 0x2, 0x5b8, 0x5d2, 0x7, 0x2e, 
0x2, 0x2, 0x5b9, 0x5ba, 0x7, - 0x99, 0x2, 0x2, 0x5ba, 0x5bb, 0x7, 0x83, 0x2, 0x2, 0x5bb, 0x5bc, 0x7, - 0x2f, 0x2, 0x2, 0x5bc, 0x5d2, 0x5, 0xc0, 0x61, 0x2, 0x5bd, 0x5be, 0x7, - 0x99, 0x2, 0x2, 0x5be, 0x5c6, 0x9, 0x13, 0x2, 0x2, 0x5bf, 0x5c0, 0x7, - 0x32, 0x2, 0x2, 0x5c0, 0x5c7, 0x7, 0x8f, 0x2, 0x2, 0x5c1, 0x5c7, 0x7, - 0x3c, 0x2, 0x2, 0x5c2, 0x5c4, 0x7, 0xa8, 0x2, 0x2, 0x5c3, 0x5c2, 0x3, - 0x2, 0x2, 0x2, 0x5c3, 0x5c4, 0x3, 0x2, 0x2, 0x2, 0x5c4, 0x5c5, 0x3, - 0x2, 0x2, 0x2, 0x5c5, 0x5c7, 0x7, 0x69, 0x2, 0x2, 0x5c6, 0x5bf, 0x3, - 0x2, 0x2, 0x2, 0x5c6, 0x5c1, 0x3, 0x2, 0x2, 0x2, 0x5c6, 0x5c3, 0x3, - 0x2, 0x2, 0x2, 0x5c7, 0x5c8, 0x3, 0x2, 0x2, 0x2, 0x5c8, 0x5d2, 0x5, - 0xc0, 0x61, 0x2, 0x5c9, 0x5ca, 0x7, 0x99, 0x2, 0x2, 0x5ca, 0x5cb, 0x9, - 0x13, 0x2, 0x2, 0x5cb, 0x5cc, 0x7, 0x88, 0x2, 0x2, 0x5cc, 0x5d2, 0x7, - 0x8f, 0x2, 0x2, 0x5cd, 0x5ce, 0x7, 0x99, 0x2, 0x2, 0x5ce, 0x5cf, 0x7, - 0x97, 0x2, 0x2, 0x5cf, 0x5d0, 0x7, 0x87, 0x2, 0x2, 0x5d0, 0x5d2, 0x5, - 0xc0, 0x61, 0x2, 0x5d1, 0x5af, 0x3, 0x2, 0x2, 0x2, 0x5d1, 0x5b3, 0x3, - 0x2, 0x2, 0x2, 0x5d1, 0x5b6, 0x3, 0x2, 0x2, 0x2, 0x5d1, 0x5b9, 0x3, - 0x2, 0x2, 0x2, 0x5d1, 0x5bd, 0x3, 0x2, 0x2, 0x2, 0x5d1, 0x5c9, 0x3, - 0x2, 0x2, 0x2, 0x5d1, 0x5cd, 0x3, 0x2, 0x2, 0x2, 0x5d2, 0xa3, 0x3, 0x2, - 0x2, 0x2, 0x5d3, 0x5d5, 0x7, 0xa7, 0x2, 0x2, 0x5d4, 0x5d6, 0x7, 0x9c, - 0x2, 0x2, 0x5d5, 0x5d4, 0x3, 0x2, 0x2, 0x2, 0x5d5, 0x5d6, 0x3, 0x2, - 0x2, 0x2, 0x5d6, 0x5d8, 0x3, 0x2, 0x2, 0x2, 0x5d7, 0x5d9, 0x7, 0x9a, - 0x2, 0x2, 0x5d8, 0x5d7, 0x3, 0x2, 0x2, 0x2, 0x5d8, 0x5d9, 0x3, 0x2, - 0x2, 0x2, 0x5d9, 0x5dc, 0x3, 0x2, 0x2, 0x2, 0x5da, 0x5db, 0x7, 0x4d, - 0x2, 0x2, 0x5db, 0x5dd, 0x7, 0x38, 0x2, 0x2, 0x5dc, 0x5da, 0x3, 0x2, - 0x2, 0x2, 0x5dc, 0x5dd, 0x3, 0x2, 0x2, 0x2, 0x5dd, 0x5de, 0x3, 0x2, - 0x2, 0x2, 0x5de, 0x5e0, 0x5, 0xc0, 0x61, 0x2, 0x5df, 0x5e1, 0x5, 0x2c, - 0x17, 0x2, 0x5e0, 0x5df, 0x3, 0x2, 0x2, 0x2, 0x5e0, 0x5e1, 0x3, 0x2, - 0x2, 0x2, 0x5e1, 0xa5, 0x3, 0x2, 0x2, 0x2, 0x5e2, 0x5e3, 0x7, 0xac, - 0x2, 0x2, 0x5e3, 0x5e4, 0x5, 0xc6, 0x64, 0x2, 0x5e4, 0xa7, 0x3, 0x2, - 0x2, 0x2, 0x5e5, 0x5e6, 0x7, 0xb2, 0x2, 0x2, 0x5e6, 0x5e8, 0x5, 0xc0, - 0x61, 0x2, 0x5e7, 0x5e9, 0x7, 0x37, 0x2, 0x2, 0x5e8, 0x5e7, 0x3, 0x2, - 0x2, 0x2, 0x5e8, 0x5e9, 0x3, 0x2, 0x2, 0x2, 0x5e9, 0x5ec, 0x3, 0x2, - 0x2, 0x2, 0x5ea, 0x5eb, 0x7, 0x62, 0x2, 0x2, 0x5eb, 0x5ed, 0x7, 0xbd, - 0x2, 0x2, 0x5ec, 0x5ea, 0x3, 0x2, 0x2, 0x2, 0x5ec, 0x5ed, 0x3, 0x2, - 0x2, 0x2, 0x5ed, 0xa9, 0x3, 0x2, 0x2, 0x2, 0x5ee, 0x61e, 0x5, 0xd6, - 0x6c, 0x2, 0x5ef, 0x5f0, 0x5, 0xd6, 0x6c, 0x2, 0x5f0, 0x5f1, 0x7, 0xd0, - 0x2, 0x2, 0x5f1, 0x5f2, 0x5, 0xd6, 0x6c, 0x2, 0x5f2, 0x5f9, 0x5, 0xaa, - 0x56, 0x2, 0x5f3, 0x5f4, 0x7, 0xc5, 0x2, 0x2, 0x5f4, 0x5f5, 0x5, 0xd6, - 0x6c, 0x2, 0x5f5, 0x5f6, 0x5, 0xaa, 0x56, 0x2, 0x5f6, 0x5f8, 0x3, 0x2, - 0x2, 0x2, 0x5f7, 0x5f3, 0x3, 0x2, 0x2, 0x2, 0x5f8, 0x5fb, 0x3, 0x2, - 0x2, 0x2, 0x5f9, 0x5f7, 0x3, 0x2, 0x2, 0x2, 0x5f9, 0x5fa, 0x3, 0x2, - 0x2, 0x2, 0x5fa, 0x5fc, 0x3, 0x2, 0x2, 0x2, 0x5fb, 0x5f9, 0x3, 0x2, - 0x2, 0x2, 0x5fc, 0x5fd, 0x7, 0xda, 0x2, 0x2, 0x5fd, 0x61e, 0x3, 0x2, - 0x2, 0x2, 0x5fe, 0x5ff, 0x5, 0xd6, 0x6c, 0x2, 0x5ff, 0x600, 0x7, 0xd0, - 0x2, 0x2, 0x600, 0x605, 0x5, 0xda, 0x6e, 0x2, 0x601, 0x602, 0x7, 0xc5, - 0x2, 0x2, 0x602, 0x604, 0x5, 0xda, 0x6e, 0x2, 0x603, 0x601, 0x3, 0x2, - 0x2, 0x2, 0x604, 0x607, 0x3, 0x2, 0x2, 0x2, 0x605, 0x603, 0x3, 0x2, - 0x2, 0x2, 0x605, 0x606, 0x3, 0x2, 0x2, 0x2, 0x606, 0x608, 0x3, 0x2, - 0x2, 0x2, 0x607, 0x605, 0x3, 0x2, 0x2, 0x2, 0x608, 0x609, 0x7, 0xda, - 0x2, 0x2, 0x609, 0x61e, 0x3, 0x2, 0x2, 0x2, 0x60a, 0x60b, 0x5, 0xd6, - 0x6c, 0x2, 0x60b, 0x60c, 
0x7, 0xd0, 0x2, 0x2, 0x60c, 0x611, 0x5, 0xaa, - 0x56, 0x2, 0x60d, 0x60e, 0x7, 0xc5, 0x2, 0x2, 0x60e, 0x610, 0x5, 0xaa, - 0x56, 0x2, 0x60f, 0x60d, 0x3, 0x2, 0x2, 0x2, 0x610, 0x613, 0x3, 0x2, - 0x2, 0x2, 0x611, 0x60f, 0x3, 0x2, 0x2, 0x2, 0x611, 0x612, 0x3, 0x2, - 0x2, 0x2, 0x612, 0x614, 0x3, 0x2, 0x2, 0x2, 0x613, 0x611, 0x3, 0x2, - 0x2, 0x2, 0x614, 0x615, 0x7, 0xda, 0x2, 0x2, 0x615, 0x61e, 0x3, 0x2, - 0x2, 0x2, 0x616, 0x617, 0x5, 0xd6, 0x6c, 0x2, 0x617, 0x619, 0x7, 0xd0, - 0x2, 0x2, 0x618, 0x61a, 0x5, 0xac, 0x57, 0x2, 0x619, 0x618, 0x3, 0x2, - 0x2, 0x2, 0x619, 0x61a, 0x3, 0x2, 0x2, 0x2, 0x61a, 0x61b, 0x3, 0x2, - 0x2, 0x2, 0x61b, 0x61c, 0x7, 0xda, 0x2, 0x2, 0x61c, 0x61e, 0x3, 0x2, - 0x2, 0x2, 0x61d, 0x5ee, 0x3, 0x2, 0x2, 0x2, 0x61d, 0x5ef, 0x3, 0x2, - 0x2, 0x2, 0x61d, 0x5fe, 0x3, 0x2, 0x2, 0x2, 0x61d, 0x60a, 0x3, 0x2, - 0x2, 0x2, 0x61d, 0x616, 0x3, 0x2, 0x2, 0x2, 0x61e, 0xab, 0x3, 0x2, 0x2, - 0x2, 0x61f, 0x624, 0x5, 0xae, 0x58, 0x2, 0x620, 0x621, 0x7, 0xc5, 0x2, - 0x2, 0x621, 0x623, 0x5, 0xae, 0x58, 0x2, 0x622, 0x620, 0x3, 0x2, 0x2, - 0x2, 0x623, 0x626, 0x3, 0x2, 0x2, 0x2, 0x624, 0x622, 0x3, 0x2, 0x2, - 0x2, 0x624, 0x625, 0x3, 0x2, 0x2, 0x2, 0x625, 0xad, 0x3, 0x2, 0x2, 0x2, - 0x626, 0x624, 0x3, 0x2, 0x2, 0x2, 0x627, 0x628, 0x5, 0xc0, 0x61, 0x2, - 0x628, 0x629, 0x7, 0xc8, 0x2, 0x2, 0x629, 0x62b, 0x3, 0x2, 0x2, 0x2, - 0x62a, 0x627, 0x3, 0x2, 0x2, 0x2, 0x62a, 0x62b, 0x3, 0x2, 0x2, 0x2, - 0x62b, 0x62c, 0x3, 0x2, 0x2, 0x2, 0x62c, 0x633, 0x7, 0xc1, 0x2, 0x2, - 0x62d, 0x62e, 0x7, 0xd0, 0x2, 0x2, 0x62e, 0x62f, 0x5, 0x68, 0x35, 0x2, - 0x62f, 0x630, 0x7, 0xda, 0x2, 0x2, 0x630, 0x633, 0x3, 0x2, 0x2, 0x2, - 0x631, 0x633, 0x5, 0xb0, 0x59, 0x2, 0x632, 0x62a, 0x3, 0x2, 0x2, 0x2, - 0x632, 0x62d, 0x3, 0x2, 0x2, 0x2, 0x632, 0x631, 0x3, 0x2, 0x2, 0x2, - 0x633, 0xaf, 0x3, 0x2, 0x2, 0x2, 0x634, 0x635, 0x8, 0x59, 0x1, 0x2, - 0x635, 0x637, 0x7, 0x15, 0x2, 0x2, 0x636, 0x638, 0x5, 0xb0, 0x59, 0x2, - 0x637, 0x636, 0x3, 0x2, 0x2, 0x2, 0x637, 0x638, 0x3, 0x2, 0x2, 0x2, - 0x638, 0x63e, 0x3, 0x2, 0x2, 0x2, 0x639, 0x63a, 0x7, 0xb4, 0x2, 0x2, - 0x63a, 0x63b, 0x5, 0xb0, 0x59, 0x2, 0x63b, 0x63c, 0x7, 0x9e, 0x2, 0x2, - 0x63c, 0x63d, 0x5, 0xb0, 0x59, 0x2, 0x63d, 0x63f, 0x3, 0x2, 0x2, 0x2, - 0x63e, 0x639, 0x3, 0x2, 0x2, 0x2, 0x63f, 0x640, 0x3, 0x2, 0x2, 0x2, - 0x640, 0x63e, 0x3, 0x2, 0x2, 0x2, 0x640, 0x641, 0x3, 0x2, 0x2, 0x2, - 0x641, 0x644, 0x3, 0x2, 0x2, 0x2, 0x642, 0x643, 0x7, 0x34, 0x2, 0x2, - 0x643, 0x645, 0x5, 0xb0, 0x59, 0x2, 0x644, 0x642, 0x3, 0x2, 0x2, 0x2, - 0x644, 0x645, 0x3, 0x2, 0x2, 0x2, 0x645, 0x646, 0x3, 0x2, 0x2, 0x2, - 0x646, 0x647, 0x7, 0x35, 0x2, 0x2, 0x647, 0x6a0, 0x3, 0x2, 0x2, 0x2, - 0x648, 0x649, 0x7, 0x16, 0x2, 0x2, 0x649, 0x64a, 0x7, 0xd0, 0x2, 0x2, - 0x64a, 0x64b, 0x5, 0xb0, 0x59, 0x2, 0x64b, 0x64c, 0x7, 0xc, 0x2, 0x2, - 0x64c, 0x64d, 0x5, 0xaa, 0x56, 0x2, 0x64d, 0x64e, 0x7, 0xda, 0x2, 0x2, - 0x64e, 0x6a0, 0x3, 0x2, 0x2, 0x2, 0x64f, 0x650, 0x7, 0x24, 0x2, 0x2, - 0x650, 0x6a0, 0x7, 0xbf, 0x2, 0x2, 0x651, 0x652, 0x7, 0x3b, 0x2, 0x2, - 0x652, 0x653, 0x7, 0xd0, 0x2, 0x2, 0x653, 0x654, 0x5, 0xce, 0x68, 0x2, - 0x654, 0x655, 0x7, 0x43, 0x2, 0x2, 0x655, 0x656, 0x5, 0xb0, 0x59, 0x2, - 0x656, 0x657, 0x7, 0xda, 0x2, 0x2, 0x657, 0x6a0, 0x3, 0x2, 0x2, 0x2, - 0x658, 0x659, 0x7, 0x55, 0x2, 0x2, 0x659, 0x65a, 0x5, 0xb0, 0x59, 0x2, - 0x65a, 0x65b, 0x5, 0xce, 0x68, 0x2, 0x65b, 0x6a0, 0x3, 0x2, 0x2, 0x2, - 0x65c, 0x65d, 0x7, 0x96, 0x2, 0x2, 0x65d, 0x65e, 0x7, 0xd0, 0x2, 0x2, - 0x65e, 0x65f, 0x5, 0xb0, 0x59, 0x2, 0x65f, 0x660, 0x7, 0x43, 0x2, 0x2, - 0x660, 0x663, 0x5, 0xb0, 0x59, 0x2, 0x661, 0x662, 0x7, 0x40, 0x2, 0x2, - 
0x662, 0x664, 0x5, 0xb0, 0x59, 0x2, 0x663, 0x661, 0x3, 0x2, 0x2, 0x2, - 0x663, 0x664, 0x3, 0x2, 0x2, 0x2, 0x664, 0x665, 0x3, 0x2, 0x2, 0x2, - 0x665, 0x666, 0x7, 0xda, 0x2, 0x2, 0x666, 0x6a0, 0x3, 0x2, 0x2, 0x2, - 0x667, 0x668, 0x7, 0xa1, 0x2, 0x2, 0x668, 0x6a0, 0x7, 0xbf, 0x2, 0x2, - 0x669, 0x66a, 0x7, 0xa6, 0x2, 0x2, 0x66a, 0x66b, 0x7, 0xd0, 0x2, 0x2, - 0x66b, 0x66c, 0x9, 0x14, 0x2, 0x2, 0x66c, 0x66d, 0x7, 0xbf, 0x2, 0x2, - 0x66d, 0x66e, 0x7, 0x43, 0x2, 0x2, 0x66e, 0x66f, 0x5, 0xb0, 0x59, 0x2, - 0x66f, 0x670, 0x7, 0xda, 0x2, 0x2, 0x670, 0x6a0, 0x3, 0x2, 0x2, 0x2, - 0x671, 0x677, 0x5, 0xd6, 0x6c, 0x2, 0x672, 0x674, 0x7, 0xd0, 0x2, 0x2, - 0x673, 0x675, 0x5, 0xac, 0x57, 0x2, 0x674, 0x673, 0x3, 0x2, 0x2, 0x2, - 0x674, 0x675, 0x3, 0x2, 0x2, 0x2, 0x675, 0x676, 0x3, 0x2, 0x2, 0x2, - 0x676, 0x678, 0x7, 0xda, 0x2, 0x2, 0x677, 0x672, 0x3, 0x2, 0x2, 0x2, - 0x677, 0x678, 0x3, 0x2, 0x2, 0x2, 0x678, 0x679, 0x3, 0x2, 0x2, 0x2, - 0x679, 0x67b, 0x7, 0xd0, 0x2, 0x2, 0x67a, 0x67c, 0x7, 0x31, 0x2, 0x2, - 0x67b, 0x67a, 0x3, 0x2, 0x2, 0x2, 0x67b, 0x67c, 0x3, 0x2, 0x2, 0x2, - 0x67c, 0x67e, 0x3, 0x2, 0x2, 0x2, 0x67d, 0x67f, 0x5, 0xb2, 0x5a, 0x2, - 0x67e, 0x67d, 0x3, 0x2, 0x2, 0x2, 0x67e, 0x67f, 0x3, 0x2, 0x2, 0x2, - 0x67f, 0x680, 0x3, 0x2, 0x2, 0x2, 0x680, 0x681, 0x7, 0xda, 0x2, 0x2, - 0x681, 0x6a0, 0x3, 0x2, 0x2, 0x2, 0x682, 0x6a0, 0x5, 0xcc, 0x67, 0x2, - 0x683, 0x684, 0x7, 0xc7, 0x2, 0x2, 0x684, 0x6a0, 0x5, 0xb0, 0x59, 0x13, - 0x685, 0x686, 0x7, 0x72, 0x2, 0x2, 0x686, 0x6a0, 0x5, 0xb0, 0x59, 0xe, - 0x687, 0x688, 0x5, 0xc0, 0x61, 0x2, 0x688, 0x689, 0x7, 0xc8, 0x2, 0x2, - 0x689, 0x68b, 0x3, 0x2, 0x2, 0x2, 0x68a, 0x687, 0x3, 0x2, 0x2, 0x2, - 0x68a, 0x68b, 0x3, 0x2, 0x2, 0x2, 0x68b, 0x68c, 0x3, 0x2, 0x2, 0x2, - 0x68c, 0x6a0, 0x7, 0xc1, 0x2, 0x2, 0x68d, 0x68e, 0x7, 0xd0, 0x2, 0x2, - 0x68e, 0x68f, 0x5, 0x68, 0x35, 0x2, 0x68f, 0x690, 0x7, 0xda, 0x2, 0x2, - 0x690, 0x6a0, 0x3, 0x2, 0x2, 0x2, 0x691, 0x692, 0x7, 0xd0, 0x2, 0x2, - 0x692, 0x693, 0x5, 0xb0, 0x59, 0x2, 0x693, 0x694, 0x7, 0xda, 0x2, 0x2, - 0x694, 0x6a0, 0x3, 0x2, 0x2, 0x2, 0x695, 0x696, 0x7, 0xd0, 0x2, 0x2, - 0x696, 0x697, 0x5, 0xac, 0x57, 0x2, 0x697, 0x698, 0x7, 0xda, 0x2, 0x2, - 0x698, 0x6a0, 0x3, 0x2, 0x2, 0x2, 0x699, 0x69b, 0x7, 0xce, 0x2, 0x2, - 0x69a, 0x69c, 0x5, 0xac, 0x57, 0x2, 0x69b, 0x69a, 0x3, 0x2, 0x2, 0x2, - 0x69b, 0x69c, 0x3, 0x2, 0x2, 0x2, 0x69c, 0x69d, 0x3, 0x2, 0x2, 0x2, - 0x69d, 0x6a0, 0x7, 0xd9, 0x2, 0x2, 0x69e, 0x6a0, 0x5, 0xb8, 0x5d, 0x2, - 0x69f, 0x634, 0x3, 0x2, 0x2, 0x2, 0x69f, 0x648, 0x3, 0x2, 0x2, 0x2, - 0x69f, 0x64f, 0x3, 0x2, 0x2, 0x2, 0x69f, 0x651, 0x3, 0x2, 0x2, 0x2, - 0x69f, 0x658, 0x3, 0x2, 0x2, 0x2, 0x69f, 0x65c, 0x3, 0x2, 0x2, 0x2, - 0x69f, 0x667, 0x3, 0x2, 0x2, 0x2, 0x69f, 0x669, 0x3, 0x2, 0x2, 0x2, - 0x69f, 0x671, 0x3, 0x2, 0x2, 0x2, 0x69f, 0x682, 0x3, 0x2, 0x2, 0x2, - 0x69f, 0x683, 0x3, 0x2, 0x2, 0x2, 0x69f, 0x685, 0x3, 0x2, 0x2, 0x2, - 0x69f, 0x68a, 0x3, 0x2, 0x2, 0x2, 0x69f, 0x68d, 0x3, 0x2, 0x2, 0x2, - 0x69f, 0x691, 0x3, 0x2, 0x2, 0x2, 0x69f, 0x695, 0x3, 0x2, 0x2, 0x2, - 0x69f, 0x699, 0x3, 0x2, 0x2, 0x2, 0x69f, 0x69e, 0x3, 0x2, 0x2, 0x2, - 0x6a0, 0x6e8, 0x3, 0x2, 0x2, 0x2, 0x6a1, 0x6a2, 0xc, 0x12, 0x2, 0x2, - 0x6a2, 0x6a3, 0x9, 0x15, 0x2, 0x2, 0x6a3, 0x6e7, 0x5, 0xb0, 0x59, 0x13, - 0x6a4, 0x6a5, 0xc, 0x11, 0x2, 0x2, 0x6a5, 0x6a6, 0x9, 0x16, 0x2, 0x2, - 0x6a6, 0x6e7, 0x5, 0xb0, 0x59, 0x12, 0x6a7, 0x6ba, 0xc, 0x10, 0x2, 0x2, - 0x6a8, 0x6bb, 0x7, 0xc9, 0x2, 0x2, 0x6a9, 0x6bb, 0x7, 0xca, 0x2, 0x2, - 0x6aa, 0x6bb, 0x7, 0xd2, 0x2, 0x2, 0x6ab, 0x6bb, 0x7, 0xcf, 0x2, 0x2, - 0x6ac, 0x6bb, 0x7, 0xcb, 0x2, 0x2, 0x6ad, 0x6bb, 0x7, 
0xd1, 0x2, 0x2, - 0x6ae, 0x6bb, 0x7, 0xcc, 0x2, 0x2, 0x6af, 0x6b1, 0x7, 0x46, 0x2, 0x2, - 0x6b0, 0x6af, 0x3, 0x2, 0x2, 0x2, 0x6b0, 0x6b1, 0x3, 0x2, 0x2, 0x2, - 0x6b1, 0x6b3, 0x3, 0x2, 0x2, 0x2, 0x6b2, 0x6b4, 0x7, 0x72, 0x2, 0x2, - 0x6b3, 0x6b2, 0x3, 0x2, 0x2, 0x2, 0x6b3, 0x6b4, 0x3, 0x2, 0x2, 0x2, - 0x6b4, 0x6b5, 0x3, 0x2, 0x2, 0x2, 0x6b5, 0x6bb, 0x7, 0x4f, 0x2, 0x2, - 0x6b6, 0x6b8, 0x7, 0x72, 0x2, 0x2, 0x6b7, 0x6b6, 0x3, 0x2, 0x2, 0x2, - 0x6b7, 0x6b8, 0x3, 0x2, 0x2, 0x2, 0x6b8, 0x6b9, 0x3, 0x2, 0x2, 0x2, - 0x6b9, 0x6bb, 0x9, 0x17, 0x2, 0x2, 0x6ba, 0x6a8, 0x3, 0x2, 0x2, 0x2, - 0x6ba, 0x6a9, 0x3, 0x2, 0x2, 0x2, 0x6ba, 0x6aa, 0x3, 0x2, 0x2, 0x2, - 0x6ba, 0x6ab, 0x3, 0x2, 0x2, 0x2, 0x6ba, 0x6ac, 0x3, 0x2, 0x2, 0x2, - 0x6ba, 0x6ad, 0x3, 0x2, 0x2, 0x2, 0x6ba, 0x6ae, 0x3, 0x2, 0x2, 0x2, - 0x6ba, 0x6b0, 0x3, 0x2, 0x2, 0x2, 0x6ba, 0x6b7, 0x3, 0x2, 0x2, 0x2, - 0x6bb, 0x6bc, 0x3, 0x2, 0x2, 0x2, 0x6bc, 0x6e7, 0x5, 0xb0, 0x59, 0x11, - 0x6bd, 0x6be, 0xc, 0xd, 0x2, 0x2, 0x6be, 0x6bf, 0x7, 0x8, 0x2, 0x2, - 0x6bf, 0x6e7, 0x5, 0xb0, 0x59, 0xe, 0x6c0, 0x6c1, 0xc, 0xc, 0x2, 0x2, - 0x6c1, 0x6c2, 0x7, 0x78, 0x2, 0x2, 0x6c2, 0x6e7, 0x5, 0xb0, 0x59, 0xd, - 0x6c3, 0x6c5, 0xc, 0xb, 0x2, 0x2, 0x6c4, 0x6c6, 0x7, 0x72, 0x2, 0x2, - 0x6c5, 0x6c4, 0x3, 0x2, 0x2, 0x2, 0x6c5, 0x6c6, 0x3, 0x2, 0x2, 0x2, - 0x6c6, 0x6c7, 0x3, 0x2, 0x2, 0x2, 0x6c7, 0x6c8, 0x7, 0x12, 0x2, 0x2, - 0x6c8, 0x6c9, 0x5, 0xb0, 0x59, 0x2, 0x6c9, 0x6ca, 0x7, 0x8, 0x2, 0x2, - 0x6ca, 0x6cb, 0x5, 0xb0, 0x59, 0xc, 0x6cb, 0x6e7, 0x3, 0x2, 0x2, 0x2, - 0x6cc, 0x6cd, 0xc, 0xa, 0x2, 0x2, 0x6cd, 0x6ce, 0x7, 0xd5, 0x2, 0x2, - 0x6ce, 0x6cf, 0x5, 0xb0, 0x59, 0x2, 0x6cf, 0x6d0, 0x7, 0xc4, 0x2, 0x2, - 0x6d0, 0x6d1, 0x5, 0xb0, 0x59, 0xa, 0x6d1, 0x6e7, 0x3, 0x2, 0x2, 0x2, - 0x6d2, 0x6d3, 0xc, 0x15, 0x2, 0x2, 0x6d3, 0x6d4, 0x7, 0xce, 0x2, 0x2, - 0x6d4, 0x6d5, 0x5, 0xb0, 0x59, 0x2, 0x6d5, 0x6d6, 0x7, 0xd9, 0x2, 0x2, - 0x6d6, 0x6e7, 0x3, 0x2, 0x2, 0x2, 0x6d7, 0x6d8, 0xc, 0x14, 0x2, 0x2, - 0x6d8, 0x6d9, 0x7, 0xc8, 0x2, 0x2, 0x6d9, 0x6e7, 0x7, 0xbd, 0x2, 0x2, - 0x6da, 0x6db, 0xc, 0xf, 0x2, 0x2, 0x6db, 0x6dd, 0x7, 0x57, 0x2, 0x2, - 0x6dc, 0x6de, 0x7, 0x72, 0x2, 0x2, 0x6dd, 0x6dc, 0x3, 0x2, 0x2, 0x2, - 0x6dd, 0x6de, 0x3, 0x2, 0x2, 0x2, 0x6de, 0x6df, 0x3, 0x2, 0x2, 0x2, - 0x6df, 0x6e7, 0x7, 0x73, 0x2, 0x2, 0x6e0, 0x6e4, 0xc, 0x9, 0x2, 0x2, - 0x6e1, 0x6e5, 0x5, 0xd4, 0x6b, 0x2, 0x6e2, 0x6e3, 0x7, 0xc, 0x2, 0x2, - 0x6e3, 0x6e5, 0x5, 0xd6, 0x6c, 0x2, 0x6e4, 0x6e1, 0x3, 0x2, 0x2, 0x2, - 0x6e4, 0x6e2, 0x3, 0x2, 0x2, 0x2, 0x6e5, 0x6e7, 0x3, 0x2, 0x2, 0x2, - 0x6e6, 0x6a1, 0x3, 0x2, 0x2, 0x2, 0x6e6, 0x6a4, 0x3, 0x2, 0x2, 0x2, - 0x6e6, 0x6a7, 0x3, 0x2, 0x2, 0x2, 0x6e6, 0x6bd, 0x3, 0x2, 0x2, 0x2, - 0x6e6, 0x6c0, 0x3, 0x2, 0x2, 0x2, 0x6e6, 0x6c3, 0x3, 0x2, 0x2, 0x2, - 0x6e6, 0x6cc, 0x3, 0x2, 0x2, 0x2, 0x6e6, 0x6d2, 0x3, 0x2, 0x2, 0x2, - 0x6e6, 0x6d7, 0x3, 0x2, 0x2, 0x2, 0x6e6, 0x6da, 0x3, 0x2, 0x2, 0x2, - 0x6e6, 0x6e0, 0x3, 0x2, 0x2, 0x2, 0x6e7, 0x6ea, 0x3, 0x2, 0x2, 0x2, - 0x6e8, 0x6e6, 0x3, 0x2, 0x2, 0x2, 0x6e8, 0x6e9, 0x3, 0x2, 0x2, 0x2, - 0x6e9, 0xb1, 0x3, 0x2, 0x2, 0x2, 0x6ea, 0x6e8, 0x3, 0x2, 0x2, 0x2, 0x6eb, - 0x6f0, 0x5, 0xb4, 0x5b, 0x2, 0x6ec, 0x6ed, 0x7, 0xc5, 0x2, 0x2, 0x6ed, - 0x6ef, 0x5, 0xb4, 0x5b, 0x2, 0x6ee, 0x6ec, 0x3, 0x2, 0x2, 0x2, 0x6ef, - 0x6f2, 0x3, 0x2, 0x2, 0x2, 0x6f0, 0x6ee, 0x3, 0x2, 0x2, 0x2, 0x6f0, - 0x6f1, 0x3, 0x2, 0x2, 0x2, 0x6f1, 0xb3, 0x3, 0x2, 0x2, 0x2, 0x6f2, 0x6f0, - 0x3, 0x2, 0x2, 0x2, 0x6f3, 0x6f6, 0x5, 0xb6, 0x5c, 0x2, 0x6f4, 0x6f6, - 0x5, 0xb0, 0x59, 0x2, 0x6f5, 0x6f3, 0x3, 0x2, 0x2, 0x2, 0x6f5, 0x6f4, - 0x3, 0x2, 0x2, 0x2, 0x6f6, 0xb5, 0x3, 0x2, 
0x2, 0x2, 0x6f7, 0x6f8, 0x7, - 0xd0, 0x2, 0x2, 0x6f8, 0x6fd, 0x5, 0xd6, 0x6c, 0x2, 0x6f9, 0x6fa, 0x7, - 0xc5, 0x2, 0x2, 0x6fa, 0x6fc, 0x5, 0xd6, 0x6c, 0x2, 0x6fb, 0x6f9, 0x3, - 0x2, 0x2, 0x2, 0x6fc, 0x6ff, 0x3, 0x2, 0x2, 0x2, 0x6fd, 0x6fb, 0x3, - 0x2, 0x2, 0x2, 0x6fd, 0x6fe, 0x3, 0x2, 0x2, 0x2, 0x6fe, 0x700, 0x3, - 0x2, 0x2, 0x2, 0x6ff, 0x6fd, 0x3, 0x2, 0x2, 0x2, 0x700, 0x701, 0x7, - 0xda, 0x2, 0x2, 0x701, 0x70b, 0x3, 0x2, 0x2, 0x2, 0x702, 0x707, 0x5, - 0xd6, 0x6c, 0x2, 0x703, 0x704, 0x7, 0xc5, 0x2, 0x2, 0x704, 0x706, 0x5, - 0xd6, 0x6c, 0x2, 0x705, 0x703, 0x3, 0x2, 0x2, 0x2, 0x706, 0x709, 0x3, - 0x2, 0x2, 0x2, 0x707, 0x705, 0x3, 0x2, 0x2, 0x2, 0x707, 0x708, 0x3, - 0x2, 0x2, 0x2, 0x708, 0x70b, 0x3, 0x2, 0x2, 0x2, 0x709, 0x707, 0x3, - 0x2, 0x2, 0x2, 0x70a, 0x6f7, 0x3, 0x2, 0x2, 0x2, 0x70a, 0x702, 0x3, - 0x2, 0x2, 0x2, 0x70b, 0x70c, 0x3, 0x2, 0x2, 0x2, 0x70c, 0x70d, 0x7, - 0xc0, 0x2, 0x2, 0x70d, 0x70e, 0x5, 0xb0, 0x59, 0x2, 0x70e, 0xb7, 0x3, - 0x2, 0x2, 0x2, 0x70f, 0x710, 0x5, 0xc0, 0x61, 0x2, 0x710, 0x711, 0x7, - 0xc8, 0x2, 0x2, 0x711, 0x713, 0x3, 0x2, 0x2, 0x2, 0x712, 0x70f, 0x3, - 0x2, 0x2, 0x2, 0x712, 0x713, 0x3, 0x2, 0x2, 0x2, 0x713, 0x714, 0x3, - 0x2, 0x2, 0x2, 0x714, 0x715, 0x5, 0xba, 0x5e, 0x2, 0x715, 0xb9, 0x3, - 0x2, 0x2, 0x2, 0x716, 0x719, 0x5, 0xd6, 0x6c, 0x2, 0x717, 0x718, 0x7, - 0xc8, 0x2, 0x2, 0x718, 0x71a, 0x5, 0xd6, 0x6c, 0x2, 0x719, 0x717, 0x3, - 0x2, 0x2, 0x2, 0x719, 0x71a, 0x3, 0x2, 0x2, 0x2, 0x71a, 0xbb, 0x3, 0x2, - 0x2, 0x2, 0x71b, 0x71c, 0x8, 0x5f, 0x1, 0x2, 0x71c, 0x723, 0x5, 0xc0, - 0x61, 0x2, 0x71d, 0x723, 0x5, 0xbe, 0x60, 0x2, 0x71e, 0x71f, 0x7, 0xd0, - 0x2, 0x2, 0x71f, 0x720, 0x5, 0x68, 0x35, 0x2, 0x720, 0x721, 0x7, 0xda, - 0x2, 0x2, 0x721, 0x723, 0x3, 0x2, 0x2, 0x2, 0x722, 0x71b, 0x3, 0x2, - 0x2, 0x2, 0x722, 0x71d, 0x3, 0x2, 0x2, 0x2, 0x722, 0x71e, 0x3, 0x2, - 0x2, 0x2, 0x723, 0x72c, 0x3, 0x2, 0x2, 0x2, 0x724, 0x728, 0xc, 0x3, - 0x2, 0x2, 0x725, 0x729, 0x5, 0xd4, 0x6b, 0x2, 0x726, 0x727, 0x7, 0xc, - 0x2, 0x2, 0x727, 0x729, 0x5, 0xd6, 0x6c, 0x2, 0x728, 0x725, 0x3, 0x2, - 0x2, 0x2, 0x728, 0x726, 0x3, 0x2, 0x2, 0x2, 0x729, 0x72b, 0x3, 0x2, - 0x2, 0x2, 0x72a, 0x724, 0x3, 0x2, 0x2, 0x2, 0x72b, 0x72e, 0x3, 0x2, - 0x2, 0x2, 0x72c, 0x72a, 0x3, 0x2, 0x2, 0x2, 0x72c, 0x72d, 0x3, 0x2, - 0x2, 0x2, 0x72d, 0xbd, 0x3, 0x2, 0x2, 0x2, 0x72e, 0x72c, 0x3, 0x2, 0x2, - 0x2, 0x72f, 0x730, 0x5, 0xd6, 0x6c, 0x2, 0x730, 0x732, 0x7, 0xd0, 0x2, - 0x2, 0x731, 0x733, 0x5, 0xc2, 0x62, 0x2, 0x732, 0x731, 0x3, 0x2, 0x2, - 0x2, 0x732, 0x733, 0x3, 0x2, 0x2, 0x2, 0x733, 0x734, 0x3, 0x2, 0x2, - 0x2, 0x734, 0x735, 0x7, 0xda, 0x2, 0x2, 0x735, 0xbf, 0x3, 0x2, 0x2, - 0x2, 0x736, 0x737, 0x5, 0xc6, 0x64, 0x2, 0x737, 0x738, 0x7, 0xc8, 0x2, - 0x2, 0x738, 0x73a, 0x3, 0x2, 0x2, 0x2, 0x739, 0x736, 0x3, 0x2, 0x2, - 0x2, 0x739, 0x73a, 0x3, 0x2, 0x2, 0x2, 0x73a, 0x73b, 0x3, 0x2, 0x2, - 0x2, 0x73b, 0x73c, 0x5, 0xd6, 0x6c, 0x2, 0x73c, 0xc1, 0x3, 0x2, 0x2, - 0x2, 0x73d, 0x742, 0x5, 0xc4, 0x63, 0x2, 0x73e, 0x73f, 0x7, 0xc5, 0x2, - 0x2, 0x73f, 0x741, 0x5, 0xc4, 0x63, 0x2, 0x740, 0x73e, 0x3, 0x2, 0x2, - 0x2, 0x741, 0x744, 0x3, 0x2, 0x2, 0x2, 0x742, 0x740, 0x3, 0x2, 0x2, - 0x2, 0x742, 0x743, 0x3, 0x2, 0x2, 0x2, 0x743, 0xc3, 0x3, 0x2, 0x2, 0x2, - 0x744, 0x742, 0x3, 0x2, 0x2, 0x2, 0x745, 0x749, 0x5, 0xba, 0x5e, 0x2, - 0x746, 0x749, 0x5, 0xbe, 0x60, 0x2, 0x747, 0x749, 0x5, 0xcc, 0x67, 0x2, - 0x748, 0x745, 0x3, 0x2, 0x2, 0x2, 0x748, 0x746, 0x3, 0x2, 0x2, 0x2, - 0x748, 0x747, 0x3, 0x2, 0x2, 0x2, 0x749, 0xc5, 0x3, 0x2, 0x2, 0x2, 0x74a, - 0x74b, 0x5, 0xd6, 0x6c, 0x2, 0x74b, 0xc7, 0x3, 0x2, 0x2, 0x2, 0x74c, - 0x755, 0x7, 0xbb, 
0x2, 0x2, 0x74d, 0x74e, 0x7, 0xc8, 0x2, 0x2, 0x74e, - 0x755, 0x9, 0x18, 0x2, 0x2, 0x74f, 0x750, 0x7, 0xbd, 0x2, 0x2, 0x750, - 0x752, 0x7, 0xc8, 0x2, 0x2, 0x751, 0x753, 0x9, 0x18, 0x2, 0x2, 0x752, - 0x751, 0x3, 0x2, 0x2, 0x2, 0x752, 0x753, 0x3, 0x2, 0x2, 0x2, 0x753, - 0x755, 0x3, 0x2, 0x2, 0x2, 0x754, 0x74c, 0x3, 0x2, 0x2, 0x2, 0x754, - 0x74d, 0x3, 0x2, 0x2, 0x2, 0x754, 0x74f, 0x3, 0x2, 0x2, 0x2, 0x755, - 0xc9, 0x3, 0x2, 0x2, 0x2, 0x756, 0x758, 0x9, 0x19, 0x2, 0x2, 0x757, - 0x756, 0x3, 0x2, 0x2, 0x2, 0x757, 0x758, 0x3, 0x2, 0x2, 0x2, 0x758, - 0x75f, 0x3, 0x2, 0x2, 0x2, 0x759, 0x760, 0x5, 0xc8, 0x65, 0x2, 0x75a, - 0x760, 0x7, 0xbc, 0x2, 0x2, 0x75b, 0x760, 0x7, 0xbd, 0x2, 0x2, 0x75c, - 0x760, 0x7, 0xbe, 0x2, 0x2, 0x75d, 0x760, 0x7, 0x51, 0x2, 0x2, 0x75e, - 0x760, 0x7, 0x70, 0x2, 0x2, 0x75f, 0x759, 0x3, 0x2, 0x2, 0x2, 0x75f, - 0x75a, 0x3, 0x2, 0x2, 0x2, 0x75f, 0x75b, 0x3, 0x2, 0x2, 0x2, 0x75f, - 0x75c, 0x3, 0x2, 0x2, 0x2, 0x75f, 0x75d, 0x3, 0x2, 0x2, 0x2, 0x75f, - 0x75e, 0x3, 0x2, 0x2, 0x2, 0x760, 0xcb, 0x3, 0x2, 0x2, 0x2, 0x761, 0x765, - 0x5, 0xca, 0x66, 0x2, 0x762, 0x765, 0x7, 0xbf, 0x2, 0x2, 0x763, 0x765, - 0x7, 0x73, 0x2, 0x2, 0x764, 0x761, 0x3, 0x2, 0x2, 0x2, 0x764, 0x762, - 0x3, 0x2, 0x2, 0x2, 0x764, 0x763, 0x3, 0x2, 0x2, 0x2, 0x765, 0xcd, 0x3, - 0x2, 0x2, 0x2, 0x766, 0x767, 0x9, 0x1a, 0x2, 0x2, 0x767, 0xcf, 0x3, - 0x2, 0x2, 0x2, 0x768, 0x769, 0x9, 0x1b, 0x2, 0x2, 0x769, 0xd1, 0x3, - 0x2, 0x2, 0x2, 0x76a, 0x76b, 0x9, 0x1c, 0x2, 0x2, 0x76b, 0xd3, 0x3, - 0x2, 0x2, 0x2, 0x76c, 0x76f, 0x7, 0xba, 0x2, 0x2, 0x76d, 0x76f, 0x5, - 0xd2, 0x6a, 0x2, 0x76e, 0x76c, 0x3, 0x2, 0x2, 0x2, 0x76e, 0x76d, 0x3, - 0x2, 0x2, 0x2, 0x76f, 0xd5, 0x3, 0x2, 0x2, 0x2, 0x770, 0x774, 0x7, 0xba, - 0x2, 0x2, 0x771, 0x774, 0x5, 0xce, 0x68, 0x2, 0x772, 0x774, 0x5, 0xd0, - 0x69, 0x2, 0x773, 0x770, 0x3, 0x2, 0x2, 0x2, 0x773, 0x771, 0x3, 0x2, - 0x2, 0x2, 0x773, 0x772, 0x3, 0x2, 0x2, 0x2, 0x774, 0xd7, 0x3, 0x2, 0x2, - 0x2, 0x775, 0x778, 0x5, 0xd6, 0x6c, 0x2, 0x776, 0x778, 0x7, 0x73, 0x2, - 0x2, 0x777, 0x775, 0x3, 0x2, 0x2, 0x2, 0x777, 0x776, 0x3, 0x2, 0x2, - 0x2, 0x778, 0xd9, 0x3, 0x2, 0x2, 0x2, 0x779, 0x77a, 0x7, 0xbf, 0x2, - 0x2, 0x77a, 0x77b, 0x7, 0xca, 0x2, 0x2, 0x77b, 0x77c, 0x5, 0xca, 0x66, - 0x2, 0x77c, 0xdb, 0x3, 0x2, 0x2, 0x2, 0x103, 0xe0, 0xe4, 0xe7, 0xea, - 0xfe, 0x104, 0x10b, 0x113, 0x118, 0x11f, 0x124, 0x12b, 0x130, 0x136, - 0x13c, 0x141, 0x147, 0x14c, 0x152, 0x157, 0x15d, 0x16b, 0x172, 0x179, - 0x180, 0x186, 0x18b, 0x191, 0x196, 0x19c, 0x1a5, 0x1af, 0x1b9, 0x1cd, - 0x1d5, 0x1e4, 0x1eb, 0x1f9, 0x1ff, 0x205, 0x20c, 0x210, 0x213, 0x21a, - 0x21e, 0x221, 0x22c, 0x230, 0x233, 0x238, 0x23a, 0x23d, 0x240, 0x24a, - 0x24e, 0x251, 0x254, 0x259, 0x25b, 0x261, 0x267, 0x26b, 0x26e, 0x271, - 0x274, 0x277, 0x27c, 0x282, 0x286, 0x289, 0x28c, 0x290, 0x298, 0x2b2, - 0x2b4, 0x2b8, 0x2ce, 0x2d0, 0x2db, 0x2de, 0x2e7, 0x2f8, 0x303, 0x315, - 0x322, 0x333, 0x33c, 0x357, 0x359, 0x36e, 0x373, 0x378, 0x37b, 0x387, - 0x38c, 0x390, 0x393, 0x397, 0x39b, 0x3a0, 0x3a3, 0x3a7, 0x3a9, 0x3bf, - 0x3c7, 0x3ca, 0x3d4, 0x3d8, 0x3e0, 0x3e4, 0x3e9, 0x3ed, 0x3f1, 0x3f5, - 0x3f9, 0x3fb, 0x403, 0x407, 0x40a, 0x412, 0x417, 0x41c, 0x41f, 0x429, - 0x433, 0x437, 0x43c, 0x440, 0x446, 0x449, 0x44c, 0x44f, 0x45d, 0x461, - 0x465, 0x46a, 0x46d, 0x477, 0x47f, 0x482, 0x486, 0x489, 0x48d, 0x490, - 0x493, 0x496, 0x499, 0x49d, 0x4a1, 0x4a4, 0x4a7, 0x4aa, 0x4ad, 0x4b0, - 0x4b9, 0x4bf, 0x4d3, 0x4e9, 0x4f1, 0x4f4, 0x4fa, 0x502, 0x505, 0x50b, - 0x50d, 0x511, 0x516, 0x519, 0x51c, 0x520, 0x524, 0x527, 0x529, 0x52c, - 0x530, 0x534, 0x537, 0x539, 0x53b, 0x53e, 0x543, 0x54e, 
0x554, 0x559,
- 0x560, 0x565, 0x569, 0x56d, 0x572, 0x579, 0x58e, 0x591, 0x59a, 0x59e,
- 0x5a3, 0x5a8, 0x5ab, 0x5ad, 0x5c3, 0x5c6, 0x5d1, 0x5d5, 0x5d8, 0x5dc,
- 0x5e0, 0x5e8, 0x5ec, 0x5f9, 0x605, 0x611, 0x619, 0x61d, 0x624, 0x62a,
- 0x632, 0x637, 0x640, 0x644, 0x663, 0x674, 0x677, 0x67b, 0x67e, 0x68a,
- 0x69b, 0x69f, 0x6b0, 0x6b3, 0x6b7, 0x6ba, 0x6c5, 0x6dd, 0x6e4, 0x6e6,
- 0x6e8, 0x6f0, 0x6f5, 0x6fd, 0x707, 0x70a, 0x712, 0x719, 0x722, 0x728,
- 0x72c, 0x732, 0x739, 0x742, 0x748, 0x752, 0x754, 0x757, 0x75f, 0x764,
- 0x76e, 0x773, 0x777,
- };
-
- atn::ATNDeserializer deserializer;
- _atn = deserializer.deserialize(_serializedATN);
-
- size_t count = _atn.getNumberOfDecisions();
- _decisionToDFA.reserve(count);
- for (size_t i = 0; i < count; i++) {
- _decisionToDFA.emplace_back(_atn.getDecisionState(i), i);
- }
-}
-
-ClickHouseParser::Initializer ClickHouseParser::_init;
diff --git a/src/Parsers/New/ClickHouseParser.h b/src/Parsers/New/ClickHouseParser.h
deleted file mode 100644
index c860932ba1c..00000000000
--- a/src/Parsers/New/ClickHouseParser.h
+++ /dev/null
@@ -1,3435 +0,0 @@
-
-// Generated from ClickHouseParser.g4 by ANTLR 4.7.2
-
-#pragma once
-
-
-#include "antlr4-runtime.h"
-
-
-namespace DB {
-
-
-class ClickHouseParser : public antlr4::Parser {
-public:
- enum {
- ADD = 1, AFTER = 2, ALIAS = 3, ALL = 4, ALTER = 5, AND = 6, ANTI = 7,
- ANY = 8, ARRAY = 9, AS = 10, ASCENDING = 11, ASOF = 12, AST = 13, ASYNC = 14,
- ATTACH = 15, BETWEEN = 16, BOTH = 17, BY = 18, CASE = 19, CAST = 20,
- CHECK = 21, CLEAR = 22, CLUSTER = 23, CODEC = 24, COLLATE = 25, COLUMN = 26,
- COMMENT = 27, CONSTRAINT = 28, CREATE = 29, CROSS = 30, CUBE = 31, DATABASE = 32,
- DATABASES = 33, DATE = 34, DAY = 35, DEDUPLICATE = 36, DEFAULT = 37,
- DELAY = 38, DELETE = 39, DESC = 40, DESCENDING = 41, DESCRIBE = 42,
- DETACH = 43, DICTIONARIES = 44, DICTIONARY = 45, DISK = 46, DISTINCT = 47,
- DISTRIBUTED = 48, DROP = 49, ELSE = 50, END = 51, ENGINE = 52, EVENTS = 53,
- EXISTS = 54, EXPLAIN = 55, EXPRESSION = 56, EXTRACT = 57, FETCHES = 58,
- FINAL = 59, FIRST = 60, FLUSH = 61, FOR = 62, FORMAT = 63, FREEZE = 64,
- FROM = 65, FULL = 66, FUNCTION = 67, GLOBAL = 68, GRANULARITY = 69,
- GROUP = 70, HAVING = 71, HIERARCHICAL = 72, HOUR = 73, ID = 74, IF = 75,
- ILIKE = 76, IN = 77, INDEX = 78, INF = 79, INJECTIVE = 80, INNER = 81,
- INSERT = 82, INTERVAL = 83, INTO = 84, IS = 85, IS_OBJECT_ID = 86, JOIN = 87,
- KEY = 88, KILL = 89, LAST = 90, LAYOUT = 91, LEADING = 92, LEFT = 93,
- LIFETIME = 94, LIKE = 95, LIMIT = 96, LIVE = 97, LOCAL = 98, LOGS = 99,
- MATERIALIZE = 100, MATERIALIZED = 101, MAX = 102, MERGES = 103, MIN = 104,
- MINUTE = 105, MODIFY = 106, MONTH = 107, MOVE = 108, MUTATION = 109,
- NAN_SQL = 110, NO = 111, NOT = 112, NULL_SQL = 113, NULLS = 114, OFFSET = 115,
- ON = 116, OPTIMIZE = 117, OR = 118, ORDER = 119, OUTER = 120, OUTFILE = 121,
- PARTITION = 122, POPULATE = 123, PREWHERE = 124, PRIMARY = 125, PROJECTION = 126,
- QUARTER = 127, RANGE = 128, RELOAD = 129, REMOVE = 130, RENAME = 131,
- REPLACE = 132, REPLICA = 133, REPLICATED = 134, RIGHT = 135, ROLLUP = 136,
- SAMPLE = 137, SECOND = 138, SELECT = 139, SEMI = 140, SENDS = 141, SET = 142,
- SETTINGS = 143, SHOW = 144, SOURCE = 145, START = 146, STOP = 147, SUBSTRING = 148,
- SYNC = 149, SYNTAX = 150, SYSTEM = 151, TABLE = 152, TABLES = 153, TEMPORARY = 154,
- TEST = 155, THEN = 156, TIES = 157, TIMEOUT = 158, TIMESTAMP = 159,
- TO = 160, TOP = 161, TOTALS = 162, TRAILING = 163, TRIM = 164, TRUNCATE = 165,
- TTL = 166, TYPE = 167, UNION = 168, UPDATE = 169, USE = 170, USING = 171,
- UUID = 172, VALUES = 173, VIEW = 174, VOLUME = 175, WATCH = 176, WEEK = 177,
- WHEN = 178, WHERE = 179, WITH = 180, YEAR = 181, JSON_FALSE = 182, JSON_TRUE = 183,
- IDENTIFIER = 184, FLOATING_LITERAL = 185, OCTAL_LITERAL = 186, DECIMAL_LITERAL = 187,
- HEXADECIMAL_LITERAL = 188, STRING_LITERAL = 189, ARROW = 190, ASTERISK = 191,
- BACKQUOTE = 192, BACKSLASH = 193, COLON = 194, COMMA = 195, CONCAT = 196,
- DASH = 197, DOT = 198, EQ_DOUBLE = 199, EQ_SINGLE = 200, GE = 201, GT = 202,
- LBRACE = 203, LBRACKET = 204, LE = 205, LPAREN = 206, LT = 207, NOT_EQ = 208,
- PERCENT = 209, PLUS = 210, QUERY = 211, QUOTE_DOUBLE = 212, QUOTE_SINGLE = 213,
- RBRACE = 214, RBRACKET = 215, RPAREN = 216, SEMICOLON = 217, SLASH = 218,
- UNDERSCORE = 219, MULTI_LINE_COMMENT = 220, SINGLE_LINE_COMMENT = 221,
- WHITESPACE = 222
- };
-
- enum {
- RuleQueryStmt = 0, RuleQuery = 1, RuleAlterStmt = 2, RuleAlterTableClause = 3,
- RuleAssignmentExprList = 4, RuleAssignmentExpr = 5, RuleTableColumnPropertyType = 6,
- RulePartitionClause = 7, RuleAttachStmt = 8, RuleCheckStmt = 9, RuleCreateStmt = 10,
- RuleDictionarySchemaClause = 11, RuleDictionaryAttrDfnt = 12, RuleDictionaryEngineClause = 13,
- RuleDictionaryPrimaryKeyClause = 14, RuleDictionaryArgExpr = 15, RuleSourceClause = 16,
- RuleLifetimeClause = 17, RuleLayoutClause = 18, RuleRangeClause = 19,
- RuleDictionarySettingsClause = 20, RuleClusterClause = 21, RuleUuidClause = 22,
- RuleDestinationClause = 23, RuleSubqueryClause = 24, RuleTableSchemaClause = 25,
- RuleEngineClause = 26, RulePartitionByClause = 27, RulePrimaryKeyClause = 28,
- RuleSampleByClause = 29, RuleTtlClause = 30, RuleEngineExpr = 31, RuleTableElementExpr = 32,
- RuleTableColumnDfnt = 33, RuleTableColumnPropertyExpr = 34, RuleTableIndexDfnt = 35,
- RuleTableProjectionDfnt = 36, RuleCodecExpr = 37, RuleCodecArgExpr = 38,
- RuleTtlExpr = 39, RuleDescribeStmt = 40, RuleDropStmt = 41, RuleExistsStmt = 42,
- RuleExplainStmt = 43, RuleInsertStmt = 44, RuleColumnsClause = 45, RuleDataClause = 46,
- RuleKillStmt = 47, RuleOptimizeStmt = 48, RuleRenameStmt = 49, RuleProjectionSelectStmt = 50,
- RuleSelectUnionStmt = 51, RuleSelectStmtWithParens = 52, RuleSelectStmt = 53,
- RuleWithClause = 54, RuleTopClause = 55, RuleFromClause = 56, RuleArrayJoinClause = 57,
- RulePrewhereClause = 58, RuleWhereClause = 59, RuleGroupByClause = 60,
- RuleHavingClause = 61, RuleOrderByClause = 62, RuleProjectionOrderByClause = 63,
- RuleLimitByClause = 64, RuleLimitClause = 65, RuleSettingsClause = 66,
- RuleJoinExpr = 67, RuleJoinOp = 68, RuleJoinOpCross = 69, RuleJoinConstraintClause = 70,
- RuleSampleClause = 71, RuleLimitExpr = 72, RuleOrderExprList = 73, RuleOrderExpr = 74,
- RuleRatioExpr = 75, RuleSettingExprList = 76, RuleSettingExpr = 77,
- RuleSetStmt = 78, RuleShowStmt = 79, RuleSystemStmt = 80, RuleTruncateStmt = 81,
- RuleUseStmt = 82, RuleWatchStmt = 83, RuleColumnTypeExpr = 84, RuleColumnExprList = 85,
- RuleColumnsExpr = 86, RuleColumnExpr = 87, RuleColumnArgList = 88, RuleColumnArgExpr = 89,
- RuleColumnLambdaExpr = 90, RuleColumnIdentifier = 91, RuleNestedIdentifier = 92,
- RuleTableExpr = 93, RuleTableFunctionExpr = 94, RuleTableIdentifier = 95,
- RuleTableArgList = 96, RuleTableArgExpr = 97, RuleDatabaseIdentifier = 98,
- RuleFloatingLiteral = 99, RuleNumberLiteral = 100, RuleLiteral = 101,
- RuleInterval = 102, RuleKeyword = 103, RuleKeywordForAlias = 104, RuleAlias = 105,
- RuleIdentifier = 106, RuleIdentifierOrNull = 107, RuleEnumValue = 108
- };
-
- ClickHouseParser(antlr4::TokenStream *input);
- ~ClickHouseParser();
-
- virtual std::string getGrammarFileName() const override;
- virtual const antlr4::atn::ATN& getATN() const override { return _atn; };
- virtual const std::vector<std::string>& getTokenNames() const override { return _tokenNames; }; // deprecated: use vocabulary instead.
- virtual const std::vector<std::string>& getRuleNames() const override;
- virtual antlr4::dfa::Vocabulary& getVocabulary() const override;
-
-
- class QueryStmtContext;
- class QueryContext;
- class AlterStmtContext;
- class AlterTableClauseContext;
- class AssignmentExprListContext;
- class AssignmentExprContext;
- class TableColumnPropertyTypeContext;
- class PartitionClauseContext;
- class AttachStmtContext;
- class CheckStmtContext;
- class CreateStmtContext;
- class DictionarySchemaClauseContext;
- class DictionaryAttrDfntContext;
- class DictionaryEngineClauseContext;
- class DictionaryPrimaryKeyClauseContext;
- class DictionaryArgExprContext;
- class SourceClauseContext;
- class LifetimeClauseContext;
- class LayoutClauseContext;
- class RangeClauseContext;
- class DictionarySettingsClauseContext;
- class ClusterClauseContext;
- class UuidClauseContext;
- class DestinationClauseContext;
- class SubqueryClauseContext;
- class TableSchemaClauseContext;
- class EngineClauseContext;
- class PartitionByClauseContext;
- class PrimaryKeyClauseContext;
- class SampleByClauseContext;
- class TtlClauseContext;
- class EngineExprContext;
- class TableElementExprContext;
- class TableColumnDfntContext;
- class TableColumnPropertyExprContext;
- class TableIndexDfntContext;
- class TableProjectionDfntContext;
- class CodecExprContext;
- class CodecArgExprContext;
- class TtlExprContext;
- class DescribeStmtContext;
- class DropStmtContext;
- class ExistsStmtContext;
- class ExplainStmtContext;
- class InsertStmtContext;
- class ColumnsClauseContext;
- class DataClauseContext;
- class KillStmtContext;
- class OptimizeStmtContext;
- class RenameStmtContext;
- class ProjectionSelectStmtContext;
- class SelectUnionStmtContext;
- class SelectStmtWithParensContext;
- class SelectStmtContext;
- class WithClauseContext;
- class TopClauseContext;
- class FromClauseContext;
- class ArrayJoinClauseContext;
- class PrewhereClauseContext;
- class WhereClauseContext;
- class GroupByClauseContext;
- class HavingClauseContext;
- class OrderByClauseContext;
- class ProjectionOrderByClauseContext;
- class LimitByClauseContext;
- class LimitClauseContext;
- class SettingsClauseContext;
- class JoinExprContext;
- class JoinOpContext;
- class JoinOpCrossContext;
- class JoinConstraintClauseContext;
- class SampleClauseContext;
- class LimitExprContext;
- class OrderExprListContext;
- class OrderExprContext;
- class RatioExprContext;
- class SettingExprListContext;
- class SettingExprContext;
- class SetStmtContext;
- class ShowStmtContext;
- class SystemStmtContext;
- class TruncateStmtContext;
- class UseStmtContext;
- class WatchStmtContext;
- class ColumnTypeExprContext;
- class ColumnExprListContext;
- class ColumnsExprContext;
- class ColumnExprContext;
- class ColumnArgListContext;
- class ColumnArgExprContext;
- class ColumnLambdaExprContext;
- class ColumnIdentifierContext;
- class NestedIdentifierContext;
- class TableExprContext;
- class TableFunctionExprContext;
- class TableIdentifierContext;
- class TableArgListContext;
- class TableArgExprContext;
- class DatabaseIdentifierContext;
- class FloatingLiteralContext;
- class NumberLiteralContext;
- class LiteralContext;
- class IntervalContext;
- class KeywordContext;
- class KeywordForAliasContext;
- class AliasContext;
- class IdentifierContext;
- class IdentifierOrNullContext;
- class EnumValueContext;
-
- class QueryStmtContext : public antlr4::ParserRuleContext {
- public:
- QueryStmtContext(antlr4::ParserRuleContext *parent, size_t invokingState);
- virtual size_t getRuleIndex() const override;
- QueryContext *query();
- antlr4::tree::TerminalNode *INTO();
- antlr4::tree::TerminalNode *OUTFILE();
- antlr4::tree::TerminalNode *STRING_LITERAL();
- antlr4::tree::TerminalNode *FORMAT();
- IdentifierOrNullContext *identifierOrNull();
- antlr4::tree::TerminalNode *SEMICOLON();
- InsertStmtContext *insertStmt();
-
- virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override;
-
- };
-
- QueryStmtContext* queryStmt();
-
- class QueryContext : public antlr4::ParserRuleContext {
- public:
- QueryContext(antlr4::ParserRuleContext *parent, size_t invokingState);
- virtual size_t getRuleIndex() const override;
- AlterStmtContext *alterStmt();
- AttachStmtContext *attachStmt();
- CheckStmtContext *checkStmt();
- CreateStmtContext *createStmt();
- DescribeStmtContext *describeStmt();
- DropStmtContext *dropStmt();
- ExistsStmtContext *existsStmt();
- ExplainStmtContext *explainStmt();
- KillStmtContext *killStmt();
- OptimizeStmtContext *optimizeStmt();
- RenameStmtContext *renameStmt();
- SelectUnionStmtContext *selectUnionStmt();
- SetStmtContext *setStmt();
- ShowStmtContext *showStmt();
- SystemStmtContext *systemStmt();
- TruncateStmtContext *truncateStmt();
- UseStmtContext *useStmt();
- WatchStmtContext *watchStmt();
-
- virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override;
-
- };
-
- QueryContext* query();
-
- class AlterStmtContext : public antlr4::ParserRuleContext {
- public:
- AlterStmtContext(antlr4::ParserRuleContext *parent, size_t invokingState);
-
- AlterStmtContext() = default;
- void copyFrom(AlterStmtContext *context);
- using antlr4::ParserRuleContext::copyFrom;
-
- virtual size_t getRuleIndex() const override;
-
-
- };
-
- class AlterTableStmtContext : public AlterStmtContext {
- public:
- AlterTableStmtContext(AlterStmtContext *ctx);
-
- antlr4::tree::TerminalNode *ALTER();
- antlr4::tree::TerminalNode *TABLE();
- TableIdentifierContext *tableIdentifier();
- std::vector<AlterTableClauseContext *> alterTableClause();
- AlterTableClauseContext* alterTableClause(size_t i);
- ClusterClauseContext *clusterClause();
- std::vector<antlr4::tree::TerminalNode *> COMMA();
- antlr4::tree::TerminalNode* COMMA(size_t i);
- virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override;
- };
-
- AlterStmtContext* alterStmt();
-
- class AlterTableClauseContext : public antlr4::ParserRuleContext {
- public:
- AlterTableClauseContext(antlr4::ParserRuleContext *parent, size_t invokingState);
-
- AlterTableClauseContext() = default;
- void copyFrom(AlterTableClauseContext *context);
- using antlr4::ParserRuleContext::copyFrom;
-
- virtual size_t getRuleIndex() const override;
-
-
- };
-
- class AlterTableClauseReplaceContext : public AlterTableClauseContext {
- public:
- AlterTableClauseReplaceContext(AlterTableClauseContext *ctx);
-
- antlr4::tree::TerminalNode *REPLACE();
- PartitionClauseContext *partitionClause();
- antlr4::tree::TerminalNode *FROM();
- TableIdentifierContext *tableIdentifier();
- virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override;
- };
-
- class AlterTableClauseModifyOrderByContext : public AlterTableClauseContext {
- public:
- AlterTableClauseModifyOrderByContext(AlterTableClauseContext *ctx);
-
- antlr4::tree::TerminalNode *MODIFY();
- antlr4::tree::TerminalNode *ORDER();
- antlr4::tree::TerminalNode *BY();
- ColumnExprContext *columnExpr();
- virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override;
- };
-
- class AlterTableClauseUpdateContext : public AlterTableClauseContext {
- public:
- AlterTableClauseUpdateContext(AlterTableClauseContext *ctx);
-
- antlr4::tree::TerminalNode *UPDATE();
- AssignmentExprListContext *assignmentExprList();
- WhereClauseContext *whereClause();
- virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override;
- };
-
- class AlterTableClauseClearProjectionContext : public AlterTableClauseContext {
- public:
- AlterTableClauseClearProjectionContext(AlterTableClauseContext *ctx);
-
- antlr4::tree::TerminalNode *CLEAR();
- antlr4::tree::TerminalNode *PROJECTION();
- NestedIdentifierContext *nestedIdentifier();
- antlr4::tree::TerminalNode *IF();
- antlr4::tree::TerminalNode *EXISTS();
- antlr4::tree::TerminalNode *IN();
- PartitionClauseContext *partitionClause();
- virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override;
- };
-
- class AlterTableClauseModifyRemoveContext : public AlterTableClauseContext {
- public:
- AlterTableClauseModifyRemoveContext(AlterTableClauseContext *ctx);
-
- antlr4::tree::TerminalNode *MODIFY();
- antlr4::tree::TerminalNode *COLUMN();
- NestedIdentifierContext *nestedIdentifier();
- antlr4::tree::TerminalNode *REMOVE();
- TableColumnPropertyTypeContext *tableColumnPropertyType();
- antlr4::tree::TerminalNode *IF();
- antlr4::tree::TerminalNode *EXISTS();
- virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override;
- };
-
- class AlterTableClauseDeleteContext : public AlterTableClauseContext {
- public:
- AlterTableClauseDeleteContext(AlterTableClauseContext *ctx);
-
- antlr4::tree::TerminalNode *DELETE();
- antlr4::tree::TerminalNode *WHERE();
- ColumnExprContext *columnExpr();
- virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override;
- };
-
- class AlterTableClauseCommentContext : public AlterTableClauseContext {
- public:
- AlterTableClauseCommentContext(AlterTableClauseContext *ctx);
-
- antlr4::tree::TerminalNode *COMMENT();
- antlr4::tree::TerminalNode *COLUMN();
- NestedIdentifierContext *nestedIdentifier();
- antlr4::tree::TerminalNode *STRING_LITERAL();
- antlr4::tree::TerminalNode *IF();
- antlr4::tree::TerminalNode *EXISTS();
- virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override;
- };
-
- class AlterTableClauseDropColumnContext : public AlterTableClauseContext {
- public:
- AlterTableClauseDropColumnContext(AlterTableClauseContext *ctx);
-
- antlr4::tree::TerminalNode *DROP();
- antlr4::tree::TerminalNode *COLUMN();
- NestedIdentifierContext *nestedIdentifier();
- antlr4::tree::TerminalNode *IF();
- antlr4::tree::TerminalNode *EXISTS();
- virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override;
- };
-
- class AlterTableClauseDetachContext : public AlterTableClauseContext {
- public:
- AlterTableClauseDetachContext(AlterTableClauseContext *ctx);
-
- antlr4::tree::TerminalNode *DETACH();
- PartitionClauseContext *partitionClause();
- virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override;
- };
-
- class AlterTableClauseAddIndexContext : public AlterTableClauseContext {
- public:
- AlterTableClauseAddIndexContext(AlterTableClauseContext *ctx);
-
- antlr4::tree::TerminalNode *ADD();
- antlr4::tree::TerminalNode *INDEX();
- TableIndexDfntContext *tableIndexDfnt();
- antlr4::tree::TerminalNode *IF();
- antlr4::tree::TerminalNode *NOT();
- antlr4::tree::TerminalNode *EXISTS();
- antlr4::tree::TerminalNode *AFTER();
- NestedIdentifierContext *nestedIdentifier();
- virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override;
- };
-
- class AlterTableClauseDropPartitionContext : public AlterTableClauseContext {
- public:
- AlterTableClauseDropPartitionContext(AlterTableClauseContext *ctx);
-
- antlr4::tree::TerminalNode *DROP();
- PartitionClauseContext *partitionClause();
- virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override;
- };
-
- class AlterTableClauseMaterializeIndexContext : public AlterTableClauseContext {
- public:
- AlterTableClauseMaterializeIndexContext(AlterTableClauseContext *ctx);
-
- antlr4::tree::TerminalNode *MATERIALIZE();
- antlr4::tree::TerminalNode *INDEX();
- NestedIdentifierContext *nestedIdentifier();
- antlr4::tree::TerminalNode *IF();
- antlr4::tree::TerminalNode *EXISTS();
- antlr4::tree::TerminalNode *IN();
- PartitionClauseContext *partitionClause();
- virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override;
- };
-
- class AlterTableClauseMaterializeProjectionContext : public AlterTableClauseContext {
- public:
- AlterTableClauseMaterializeProjectionContext(AlterTableClauseContext *ctx);
-
- antlr4::tree::TerminalNode *MATERIALIZE();
- antlr4::tree::TerminalNode *PROJECTION();
- NestedIdentifierContext *nestedIdentifier();
- antlr4::tree::TerminalNode *IF();
- antlr4::tree::TerminalNode *EXISTS();
- antlr4::tree::TerminalNode *IN();
- PartitionClauseContext *partitionClause();
- virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override;
- };
-
- class AlterTableClauseMovePartitionContext : public AlterTableClauseContext {
- public:
- AlterTableClauseMovePartitionContext(AlterTableClauseContext *ctx);
-
- antlr4::tree::TerminalNode *MOVE();
- PartitionClauseContext *partitionClause();
- antlr4::tree::TerminalNode *TO();
- antlr4::tree::TerminalNode *DISK();
- antlr4::tree::TerminalNode *STRING_LITERAL();
- antlr4::tree::TerminalNode *VOLUME();
- antlr4::tree::TerminalNode *TABLE();
- TableIdentifierContext *tableIdentifier();
- virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override;
- };
-
- class AlterTableClauseRenameContext : public AlterTableClauseContext {
- public:
- AlterTableClauseRenameContext(AlterTableClauseContext *ctx);
-
- antlr4::tree::TerminalNode *RENAME();
- antlr4::tree::TerminalNode *COLUMN();
- std::vector<NestedIdentifierContext *> nestedIdentifier();
- NestedIdentifierContext* nestedIdentifier(size_t i);
- antlr4::tree::TerminalNode *TO();
- antlr4::tree::TerminalNode *IF();
- antlr4::tree::TerminalNode *EXISTS();
- virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override;
- };
-
- class AlterTableClauseFreezePartitionContext : public AlterTableClauseContext {
- public:
- AlterTableClauseFreezePartitionContext(AlterTableClauseContext *ctx);
-
- antlr4::tree::TerminalNode *FREEZE();
- PartitionClauseContext *partitionClause();
- virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override;
- };
-
- class AlterTableClauseClearColumnContext : public AlterTableClauseContext {
- public:
- AlterTableClauseClearColumnContext(AlterTableClauseContext *ctx);
-
- antlr4::tree::TerminalNode *CLEAR();
- antlr4::tree::TerminalNode *COLUMN();
- NestedIdentifierContext *nestedIdentifier();
- antlr4::tree::TerminalNode *IF();
- antlr4::tree::TerminalNode *EXISTS();
- antlr4::tree::TerminalNode *IN();
- PartitionClauseContext *partitionClause();
- virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override;
- };
-
- class AlterTableClauseModifyContext : public AlterTableClauseContext {
- public:
- AlterTableClauseModifyContext(AlterTableClauseContext *ctx);
-
- antlr4::tree::TerminalNode *MODIFY();
- antlr4::tree::TerminalNode *COLUMN();
- TableColumnDfntContext *tableColumnDfnt();
- antlr4::tree::TerminalNode *IF();
- antlr4::tree::TerminalNode *EXISTS();
- virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override;
- };
-
- class AlterTableClauseClearIndexContext : public AlterTableClauseContext {
- public:
- AlterTableClauseClearIndexContext(AlterTableClauseContext *ctx);
-
- antlr4::tree::TerminalNode *CLEAR();
- antlr4::tree::TerminalNode *INDEX();
- NestedIdentifierContext *nestedIdentifier();
- antlr4::tree::TerminalNode *IF();
- antlr4::tree::TerminalNode *EXISTS();
- antlr4::tree::TerminalNode *IN();
- PartitionClauseContext *partitionClause();
- virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override;
- };
-
- class AlterTableClauseRemoveTTLContext : public AlterTableClauseContext {
- public:
- AlterTableClauseRemoveTTLContext(AlterTableClauseContext *ctx);
-
- antlr4::tree::TerminalNode *REMOVE();
- antlr4::tree::TerminalNode *TTL();
- virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override;
- };
-
- class AlterTableClauseModifyCodecContext : public AlterTableClauseContext {
- public:
- AlterTableClauseModifyCodecContext(AlterTableClauseContext *ctx);
-
- antlr4::tree::TerminalNode *MODIFY();
- antlr4::tree::TerminalNode *COLUMN();
- NestedIdentifierContext *nestedIdentifier();
- CodecExprContext *codecExpr();
- antlr4::tree::TerminalNode *IF();
- antlr4::tree::TerminalNode *EXISTS();
- virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override;
- };
-
- class AlterTableClauseAttachContext : public AlterTableClauseContext {
- public:
- AlterTableClauseAttachContext(AlterTableClauseContext *ctx);
-
- antlr4::tree::TerminalNode *ATTACH();
- PartitionClauseContext *partitionClause();
- antlr4::tree::TerminalNode *FROM();
- TableIdentifierContext *tableIdentifier();
- virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override;
- };
-
- class AlterTableClauseDropProjectionContext : public AlterTableClauseContext {
- public:
- AlterTableClauseDropProjectionContext(AlterTableClauseContext *ctx);
-
- antlr4::tree::TerminalNode *DROP();
- antlr4::tree::TerminalNode *PROJECTION();
- NestedIdentifierContext *nestedIdentifier();
- antlr4::tree::TerminalNode *IF();
- antlr4::tree::TerminalNode *EXISTS();
- virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override;
- };
-
- class AlterTableClauseDropIndexContext : public AlterTableClauseContext {
- public:
- AlterTableClauseDropIndexContext(AlterTableClauseContext *ctx);
-
- antlr4::tree::TerminalNode *DROP();
- antlr4::tree::TerminalNode *INDEX();
- NestedIdentifierContext *nestedIdentifier();
- antlr4::tree::TerminalNode *IF();
- antlr4::tree::TerminalNode *EXISTS();
- virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override;
- };
-
- class AlterTableClauseModifyCommentContext : public AlterTableClauseContext {
- public:
- AlterTableClauseModifyCommentContext(AlterTableClauseContext *ctx);
-
- antlr4::tree::TerminalNode *MODIFY();
- antlr4::tree::TerminalNode *COLUMN();
- NestedIdentifierContext *nestedIdentifier();
- antlr4::tree::TerminalNode *COMMENT();
- antlr4::tree::TerminalNode *STRING_LITERAL();
- antlr4::tree::TerminalNode *IF();
- antlr4::tree::TerminalNode *EXISTS();
- virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override;
- };
-
- class AlterTableClauseModifyTTLContext : public AlterTableClauseContext {
- public:
- AlterTableClauseModifyTTLContext(AlterTableClauseContext *ctx);
-
- antlr4::tree::TerminalNode *MODIFY();
- TtlClauseContext *ttlClause();
- virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override;
- };
-
- class AlterTableClauseAddProjectionContext : public AlterTableClauseContext {
- public:
- AlterTableClauseAddProjectionContext(AlterTableClauseContext *ctx);
-
- antlr4::tree::TerminalNode *ADD();
- antlr4::tree::TerminalNode *PROJECTION();
- TableProjectionDfntContext *tableProjectionDfnt();
- antlr4::tree::TerminalNode *IF();
- antlr4::tree::TerminalNode *NOT();
- antlr4::tree::TerminalNode *EXISTS();
- antlr4::tree::TerminalNode *AFTER();
- NestedIdentifierContext *nestedIdentifier();
- virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override;
- };
-
- class AlterTableClauseAddColumnContext : public AlterTableClauseContext {
- public:
- AlterTableClauseAddColumnContext(AlterTableClauseContext *ctx);
-
- antlr4::tree::TerminalNode *ADD();
- antlr4::tree::TerminalNode *COLUMN();
- TableColumnDfntContext *tableColumnDfnt();
- antlr4::tree::TerminalNode *IF();
- antlr4::tree::TerminalNode *NOT();
- antlr4::tree::TerminalNode *EXISTS();
- antlr4::tree::TerminalNode *AFTER();
- NestedIdentifierContext *nestedIdentifier();
- virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override;
- };
-
- AlterTableClauseContext* alterTableClause();
-
- class AssignmentExprListContext : public antlr4::ParserRuleContext {
- public:
- AssignmentExprListContext(antlr4::ParserRuleContext *parent, size_t invokingState);
- virtual size_t getRuleIndex() const override;
- std::vector<AssignmentExprContext *> assignmentExpr();
- AssignmentExprContext* assignmentExpr(size_t i);
- std::vector<antlr4::tree::TerminalNode *> COMMA();
- antlr4::tree::TerminalNode* COMMA(size_t i);
-
- virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override;
-
- };
-
- AssignmentExprListContext* assignmentExprList();
-
- class AssignmentExprContext : public antlr4::ParserRuleContext {
- public:
- AssignmentExprContext(antlr4::ParserRuleContext *parent, size_t invokingState);
- virtual size_t getRuleIndex() const override;
- NestedIdentifierContext *nestedIdentifier();
- antlr4::tree::TerminalNode *EQ_SINGLE();
- ColumnExprContext *columnExpr();
-
- virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override;
-
- };
-
- AssignmentExprContext* assignmentExpr();
-
- class TableColumnPropertyTypeContext : public antlr4::ParserRuleContext {
- public:
- TableColumnPropertyTypeContext(antlr4::ParserRuleContext *parent, size_t invokingState);
- virtual size_t getRuleIndex() const override;
- antlr4::tree::TerminalNode *ALIAS();
- antlr4::tree::TerminalNode *CODEC();
- antlr4::tree::TerminalNode *COMMENT();
- antlr4::tree::TerminalNode *DEFAULT();
- antlr4::tree::TerminalNode *MATERIALIZED();
- antlr4::tree::TerminalNode *TTL();
-
- virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override;
-
- };
-
- TableColumnPropertyTypeContext* tableColumnPropertyType();
-
- class PartitionClauseContext : public antlr4::ParserRuleContext {
- public:
- PartitionClauseContext(antlr4::ParserRuleContext *parent, size_t invokingState);
- virtual size_t getRuleIndex() const override;
- antlr4::tree::TerminalNode *PARTITION();
- ColumnExprContext *columnExpr();
- antlr4::tree::TerminalNode *ID();
- antlr4::tree::TerminalNode *STRING_LITERAL();
-
- virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override;
-
- };
-
- PartitionClauseContext* partitionClause();
-
- class AttachStmtContext : public antlr4::ParserRuleContext {
- public:
- AttachStmtContext(antlr4::ParserRuleContext *parent, size_t invokingState);
-
- AttachStmtContext() = default;
- void copyFrom(AttachStmtContext *context);
- using antlr4::ParserRuleContext::copyFrom;
-
- virtual size_t getRuleIndex() const override;
-
-
- };
-
- class AttachDictionaryStmtContext : public AttachStmtContext {
- public:
- AttachDictionaryStmtContext(AttachStmtContext *ctx);
-
- antlr4::tree::TerminalNode *ATTACH();
- antlr4::tree::TerminalNode *DICTIONARY();
- TableIdentifierContext *tableIdentifier();
- ClusterClauseContext *clusterClause();
- virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override;
- };
-
- AttachStmtContext* attachStmt();
-
- class CheckStmtContext : public antlr4::ParserRuleContext {
- public:
- CheckStmtContext(antlr4::ParserRuleContext *parent, size_t invokingState);
- virtual size_t getRuleIndex() const override;
- antlr4::tree::TerminalNode *CHECK();
- antlr4::tree::TerminalNode *TABLE();
- TableIdentifierContext *tableIdentifier();
- PartitionClauseContext *partitionClause();
-
- virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override;
-
- };
-
- CheckStmtContext* checkStmt();
-
- class CreateStmtContext : public antlr4::ParserRuleContext {
- public:
- CreateStmtContext(antlr4::ParserRuleContext *parent, size_t invokingState);
-
- CreateStmtContext() = default;
- void copyFrom(CreateStmtContext *context);
- using antlr4::ParserRuleContext::copyFrom;
-
- virtual size_t getRuleIndex() const override;
-
-
- };
-
- class CreateViewStmtContext : public CreateStmtContext {
- public:
- CreateViewStmtContext(CreateStmtContext *ctx);
-
- antlr4::tree::TerminalNode *VIEW();
- TableIdentifierContext *tableIdentifier();
- SubqueryClauseContext *subqueryClause();
- antlr4::tree::TerminalNode *ATTACH();
- antlr4::tree::TerminalNode *CREATE();
- antlr4::tree::TerminalNode *OR();
- antlr4::tree::TerminalNode *REPLACE();
- antlr4::tree::TerminalNode *IF();
- antlr4::tree::TerminalNode *NOT();
- antlr4::tree::TerminalNode *EXISTS();
- UuidClauseContext *uuidClause();
- ClusterClauseContext *clusterClause();
- TableSchemaClauseContext *tableSchemaClause();
- virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override;
- };
-
- class CreateDictionaryStmtContext : public CreateStmtContext {
- public:
- CreateDictionaryStmtContext(CreateStmtContext *ctx);
-
- antlr4::tree::TerminalNode *DICTIONARY();
- TableIdentifierContext *tableIdentifier();
- DictionarySchemaClauseContext *dictionarySchemaClause();
- DictionaryEngineClauseContext *dictionaryEngineClause();
- antlr4::tree::TerminalNode *ATTACH();
- antlr4::tree::TerminalNode *CREATE();
- antlr4::tree::TerminalNode *IF();
- antlr4::tree::TerminalNode *NOT();
- antlr4::tree::TerminalNode *EXISTS();
- UuidClauseContext *uuidClause();
- ClusterClauseContext *clusterClause();
- virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override;
- };
-
- class CreateDatabaseStmtContext : public CreateStmtContext {
- public:
- CreateDatabaseStmtContext(CreateStmtContext *ctx);
-
- antlr4::tree::TerminalNode *DATABASE();
- DatabaseIdentifierContext *databaseIdentifier();
- antlr4::tree::TerminalNode *ATTACH();
- antlr4::tree::TerminalNode *CREATE();
- antlr4::tree::TerminalNode *IF();
- antlr4::tree::TerminalNode *NOT();
- antlr4::tree::TerminalNode *EXISTS();
- ClusterClauseContext *clusterClause();
- EngineExprContext *engineExpr();
- virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override;
- };
-
- class CreateLiveViewStmtContext : public CreateStmtContext {
- public:
- CreateLiveViewStmtContext(CreateStmtContext *ctx);
-
- antlr4::tree::TerminalNode *LIVE();
- antlr4::tree::TerminalNode *VIEW();
- TableIdentifierContext *tableIdentifier();
- SubqueryClauseContext *subqueryClause();
- antlr4::tree::TerminalNode *ATTACH();
- antlr4::tree::TerminalNode *CREATE();
- antlr4::tree::TerminalNode *IF();
- antlr4::tree::TerminalNode *NOT();
- antlr4::tree::TerminalNode *EXISTS();
- UuidClauseContext *uuidClause();
- ClusterClauseContext *clusterClause();
- antlr4::tree::TerminalNode *WITH();
- antlr4::tree::TerminalNode *TIMEOUT();
- DestinationClauseContext *destinationClause();
- TableSchemaClauseContext *tableSchemaClause();
- antlr4::tree::TerminalNode *DECIMAL_LITERAL();
- virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override;
- };
-
- class CreateMaterializedViewStmtContext : public CreateStmtContext {
- public:
- CreateMaterializedViewStmtContext(CreateStmtContext *ctx);
-
- antlr4::tree::TerminalNode *MATERIALIZED();
- antlr4::tree::TerminalNode *VIEW();
- TableIdentifierContext *tableIdentifier();
- SubqueryClauseContext *subqueryClause();
- antlr4::tree::TerminalNode *ATTACH();
- antlr4::tree::TerminalNode *CREATE();
- DestinationClauseContext *destinationClause();
- EngineClauseContext *engineClause();
- antlr4::tree::TerminalNode *IF();
- antlr4::tree::TerminalNode *NOT();
- antlr4::tree::TerminalNode *EXISTS();
- UuidClauseContext *uuidClause();
- ClusterClauseContext *clusterClause();
- TableSchemaClauseContext *tableSchemaClause();
- antlr4::tree::TerminalNode *POPULATE();
- virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override;
- };
-
- class CreateTableStmtContext : public CreateStmtContext {
- public:
- CreateTableStmtContext(CreateStmtContext *ctx);
-
- antlr4::tree::TerminalNode *TABLE();
- TableIdentifierContext *tableIdentifier();
- antlr4::tree::TerminalNode *ATTACH();
- antlr4::tree::TerminalNode *CREATE();
- antlr4::tree::TerminalNode *TEMPORARY();
- antlr4::tree::TerminalNode *IF();
- antlr4::tree::TerminalNode *NOT();
- antlr4::tree::TerminalNode *EXISTS();
- UuidClauseContext *uuidClause();
- ClusterClauseContext *clusterClause();
- TableSchemaClauseContext *tableSchemaClause();
- EngineClauseContext *engineClause();
- SubqueryClauseContext *subqueryClause();
- virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override;
- };
-
- CreateStmtContext* createStmt();
-
- class DictionarySchemaClauseContext : public antlr4::ParserRuleContext {
- public:
- DictionarySchemaClauseContext(antlr4::ParserRuleContext *parent, size_t invokingState);
- virtual size_t getRuleIndex() const override;
- antlr4::tree::TerminalNode *LPAREN();
- std::vector<DictionaryAttrDfntContext *> dictionaryAttrDfnt();
- DictionaryAttrDfntContext* dictionaryAttrDfnt(size_t i);
- antlr4::tree::TerminalNode *RPAREN();
- std::vector<antlr4::tree::TerminalNode *> COMMA();
- antlr4::tree::TerminalNode* COMMA(size_t i);
-
- virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override;
-
- };
-
- DictionarySchemaClauseContext* dictionarySchemaClause();
-
- class DictionaryAttrDfntContext : public antlr4::ParserRuleContext {
- public:
- std::set<std::string> attrs;
- DictionaryAttrDfntContext(antlr4::ParserRuleContext *parent, size_t invokingState);
- virtual size_t getRuleIndex() const override;
- IdentifierContext *identifier();
- ColumnTypeExprContext *columnTypeExpr();
- std::vector<antlr4::tree::TerminalNode *> DEFAULT();
- antlr4::tree::TerminalNode* DEFAULT(size_t i);
- std::vector<LiteralContext *> literal();
- LiteralContext* literal(size_t i);
- std::vector<antlr4::tree::TerminalNode *> EXPRESSION();
- antlr4::tree::TerminalNode* EXPRESSION(size_t i);
- std::vector<ColumnExprContext *> columnExpr();
- ColumnExprContext* columnExpr(size_t i);
- std::vector<antlr4::tree::TerminalNode *> HIERARCHICAL();
- antlr4::tree::TerminalNode* HIERARCHICAL(size_t i);
- std::vector<antlr4::tree::TerminalNode *> INJECTIVE();
- antlr4::tree::TerminalNode* INJECTIVE(size_t i);
- std::vector<antlr4::tree::TerminalNode *> IS_OBJECT_ID();
- antlr4::tree::TerminalNode* IS_OBJECT_ID(size_t i);
-
- virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override;
-
- };
-
- DictionaryAttrDfntContext* dictionaryAttrDfnt();
-
- class DictionaryEngineClauseContext : public antlr4::ParserRuleContext {
- public:
- std::set<std::string> clauses;
- DictionaryEngineClauseContext(antlr4::ParserRuleContext *parent, size_t invokingState);
- virtual size_t getRuleIndex() const override;
- DictionaryPrimaryKeyClauseContext *dictionaryPrimaryKeyClause();
- std::vector<SourceClauseContext *> sourceClause();
- SourceClauseContext* sourceClause(size_t i);
- std::vector<LifetimeClauseContext *> lifetimeClause();
- LifetimeClauseContext* lifetimeClause(size_t i);
- std::vector<LayoutClauseContext *> layoutClause();
- LayoutClauseContext* layoutClause(size_t i);
- std::vector<RangeClauseContext *> rangeClause();
- RangeClauseContext* rangeClause(size_t i);
- std::vector<DictionarySettingsClauseContext *> dictionarySettingsClause();
- DictionarySettingsClauseContext* dictionarySettingsClause(size_t i);
-
- virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override;
-
- };
-
- DictionaryEngineClauseContext* dictionaryEngineClause();
-
- class DictionaryPrimaryKeyClauseContext : public antlr4::ParserRuleContext {
- public:
- DictionaryPrimaryKeyClauseContext(antlr4::ParserRuleContext *parent, size_t invokingState);
- virtual size_t getRuleIndex() const override;
- antlr4::tree::TerminalNode *PRIMARY();
- antlr4::tree::TerminalNode *KEY();
- ColumnExprListContext *columnExprList();
-
- virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override;
-
- };
-
- DictionaryPrimaryKeyClauseContext* dictionaryPrimaryKeyClause();
-
- class DictionaryArgExprContext : public antlr4::ParserRuleContext {
- public:
- DictionaryArgExprContext(antlr4::ParserRuleContext *parent, size_t invokingState);
- virtual size_t getRuleIndex() const override;
- std::vector<IdentifierContext *> identifier();
- IdentifierContext* identifier(size_t i);
- LiteralContext *literal();
- antlr4::tree::TerminalNode *LPAREN();
- antlr4::tree::TerminalNode *RPAREN();
-
- virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override;
-
- };
-
- DictionaryArgExprContext* dictionaryArgExpr();
-
- class SourceClauseContext : public antlr4::ParserRuleContext {
- public:
- SourceClauseContext(antlr4::ParserRuleContext *parent, size_t invokingState);
- virtual size_t getRuleIndex() const override;
- antlr4::tree::TerminalNode *SOURCE();
- std::vector<antlr4::tree::TerminalNode *> LPAREN();
- antlr4::tree::TerminalNode* LPAREN(size_t i);
- IdentifierContext *identifier();
- std::vector<antlr4::tree::TerminalNode *> RPAREN();
- antlr4::tree::TerminalNode* RPAREN(size_t i);
- std::vector<DictionaryArgExprContext *> dictionaryArgExpr();
- DictionaryArgExprContext* dictionaryArgExpr(size_t i);
-
- virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override;
-
- };
-
- SourceClauseContext* sourceClause();
-
- class LifetimeClauseContext : public antlr4::ParserRuleContext {
- public:
- LifetimeClauseContext(antlr4::ParserRuleContext *parent, size_t invokingState);
- virtual size_t getRuleIndex() const override;
- antlr4::tree::TerminalNode *LIFETIME();
- antlr4::tree::TerminalNode *LPAREN();
- antlr4::tree::TerminalNode *RPAREN();
- std::vector<antlr4::tree::TerminalNode *> DECIMAL_LITERAL();
- antlr4::tree::TerminalNode* DECIMAL_LITERAL(size_t i);
- antlr4::tree::TerminalNode *MIN();
- antlr4::tree::TerminalNode *MAX();
-
- virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override;
-
- };
-
- LifetimeClauseContext* lifetimeClause();
-
- class LayoutClauseContext : public antlr4::ParserRuleContext {
- public:
- LayoutClauseContext(antlr4::ParserRuleContext *parent, size_t invokingState);
- virtual size_t getRuleIndex() const override;
- antlr4::tree::TerminalNode *LAYOUT();
- std::vector<antlr4::tree::TerminalNode *> LPAREN();
- antlr4::tree::TerminalNode* LPAREN(size_t i);
- IdentifierContext *identifier();
- std::vector<antlr4::tree::TerminalNode *> RPAREN();
- antlr4::tree::TerminalNode* RPAREN(size_t i);
- std::vector<DictionaryArgExprContext *> dictionaryArgExpr();
- DictionaryArgExprContext* dictionaryArgExpr(size_t i);
-
- virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override;
-
- };
-
- LayoutClauseContext* layoutClause();
-
- class RangeClauseContext : public antlr4::ParserRuleContext {
- public:
- RangeClauseContext(antlr4::ParserRuleContext *parent, size_t invokingState);
- virtual size_t getRuleIndex() const override;
- antlr4::tree::TerminalNode *RANGE();
- antlr4::tree::TerminalNode *LPAREN();
- antlr4::tree::TerminalNode *RPAREN();
- antlr4::tree::TerminalNode *MIN();
- std::vector<IdentifierContext *> identifier();
- IdentifierContext* identifier(size_t i);
- antlr4::tree::TerminalNode *MAX();
-
- virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override;
-
- };
-
- RangeClauseContext* rangeClause();
-
- class DictionarySettingsClauseContext : public antlr4::ParserRuleContext {
- public:
- DictionarySettingsClauseContext(antlr4::ParserRuleContext *parent, size_t invokingState);
- virtual size_t getRuleIndex() const override;
- antlr4::tree::TerminalNode *SETTINGS();
- antlr4::tree::TerminalNode *LPAREN();
- SettingExprListContext *settingExprList();
- antlr4::tree::TerminalNode *RPAREN();
-
- virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override;
-
- };
-
- DictionarySettingsClauseContext* dictionarySettingsClause();
-
- class ClusterClauseContext : public antlr4::ParserRuleContext {
- public:
- ClusterClauseContext(antlr4::ParserRuleContext *parent, size_t invokingState);
- virtual size_t getRuleIndex() const override;
- antlr4::tree::TerminalNode *ON();
- antlr4::tree::TerminalNode *CLUSTER();
- IdentifierContext *identifier();
- antlr4::tree::TerminalNode *STRING_LITERAL();
-
- virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override;
-
- };
-
- ClusterClauseContext* clusterClause();
-
- class UuidClauseContext : public antlr4::ParserRuleContext {
- public:
- UuidClauseContext(antlr4::ParserRuleContext *parent, size_t invokingState);
- virtual size_t getRuleIndex() const override;
- antlr4::tree::TerminalNode *UUID();
- antlr4::tree::TerminalNode *STRING_LITERAL();
-
- virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override;
-
- };
-
- UuidClauseContext* uuidClause();
-
- class DestinationClauseContext : public antlr4::ParserRuleContext {
- public:
- DestinationClauseContext(antlr4::ParserRuleContext *parent, size_t invokingState);
- virtual size_t getRuleIndex() const override;
- antlr4::tree::TerminalNode *TO();
- TableIdentifierContext *tableIdentifier();
-
- virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override;
-
- };
-
- DestinationClauseContext* destinationClause();
-
- class SubqueryClauseContext : public antlr4::ParserRuleContext {
- public:
- SubqueryClauseContext(antlr4::ParserRuleContext *parent, size_t invokingState);
- virtual size_t getRuleIndex() const override;
- antlr4::tree::TerminalNode *AS();
- SelectUnionStmtContext *selectUnionStmt();
-
- virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override;
-
- };
-
- SubqueryClauseContext* subqueryClause();
-
- class TableSchemaClauseContext : public antlr4::ParserRuleContext {
- public:
- TableSchemaClauseContext(antlr4::ParserRuleContext *parent, size_t invokingState);
-
- TableSchemaClauseContext() = default;
- void copyFrom(TableSchemaClauseContext *context);
- using antlr4::ParserRuleContext::copyFrom;
-
- virtual size_t getRuleIndex() const override;
-
-
- };
-
- class SchemaAsTableClauseContext : public TableSchemaClauseContext {
- public:
- SchemaAsTableClauseContext(TableSchemaClauseContext *ctx);
-
- antlr4::tree::TerminalNode *AS();
- TableIdentifierContext *tableIdentifier();
- virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override;
- };
-
- class SchemaAsFunctionClauseContext : public TableSchemaClauseContext {
- public:
- SchemaAsFunctionClauseContext(TableSchemaClauseContext *ctx);
-
- antlr4::tree::TerminalNode *AS();
- TableFunctionExprContext *tableFunctionExpr();
- virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override;
- };
-
- class SchemaDescriptionClauseContext : public TableSchemaClauseContext {
- public:
- SchemaDescriptionClauseContext(TableSchemaClauseContext *ctx);
-
- antlr4::tree::TerminalNode *LPAREN();
- std::vector<TableElementExprContext *> tableElementExpr();
- TableElementExprContext* tableElementExpr(size_t i);
- antlr4::tree::TerminalNode *RPAREN();
- std::vector<antlr4::tree::TerminalNode *> COMMA();
- antlr4::tree::TerminalNode* COMMA(size_t i);
- virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override;
- };
-
- TableSchemaClauseContext* tableSchemaClause();
-
- class EngineClauseContext : public antlr4::ParserRuleContext {
- public:
- std::set<std::string> clauses;
- EngineClauseContext(antlr4::ParserRuleContext *parent, size_t invokingState);
- virtual size_t getRuleIndex() const override;
- EngineExprContext *engineExpr();
- std::vector<OrderByClauseContext *> orderByClause();
- OrderByClauseContext* orderByClause(size_t i);
- std::vector<PartitionByClauseContext *> partitionByClause();
- PartitionByClauseContext* partitionByClause(size_t i);
- std::vector<PrimaryKeyClauseContext *> primaryKeyClause();
- PrimaryKeyClauseContext* primaryKeyClause(size_t i);
- std::vector<SampleByClauseContext *> sampleByClause();
- SampleByClauseContext* sampleByClause(size_t i);
- std::vector<TtlClauseContext *> ttlClause();
- TtlClauseContext* ttlClause(size_t i);
- std::vector<SettingsClauseContext *> settingsClause();
- SettingsClauseContext* settingsClause(size_t i);
-
- virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override;
-
- };
-
- EngineClauseContext* engineClause();
-
- class PartitionByClauseContext : public antlr4::ParserRuleContext {
- public:
- PartitionByClauseContext(antlr4::ParserRuleContext *parent, size_t invokingState);
- virtual size_t getRuleIndex() const override;
- antlr4::tree::TerminalNode *PARTITION();
- antlr4::tree::TerminalNode *BY();
- ColumnExprContext *columnExpr();
-
- virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override;
-
- };
-
- PartitionByClauseContext* partitionByClause();
-
- class PrimaryKeyClauseContext : public antlr4::ParserRuleContext {
- public:
- PrimaryKeyClauseContext(antlr4::ParserRuleContext *parent, size_t invokingState);
- virtual size_t getRuleIndex() const override;
- antlr4::tree::TerminalNode *PRIMARY();
- antlr4::tree::TerminalNode *KEY();
- ColumnExprContext *columnExpr();
-
- virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override;
-
- };
-
- PrimaryKeyClauseContext* primaryKeyClause();
-
- class SampleByClauseContext : public antlr4::ParserRuleContext {
- public:
- SampleByClauseContext(antlr4::ParserRuleContext *parent, size_t invokingState);
- virtual size_t getRuleIndex() const override;
- antlr4::tree::TerminalNode *SAMPLE();
- antlr4::tree::TerminalNode *BY();
- ColumnExprContext *columnExpr();
-
- virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override;
-
- };
-
- SampleByClauseContext* sampleByClause();
-
- class TtlClauseContext : public antlr4::ParserRuleContext {
- public:
- TtlClauseContext(antlr4::ParserRuleContext *parent, size_t invokingState);
- virtual size_t getRuleIndex() const override;
- antlr4::tree::TerminalNode *TTL();
- std::vector<TtlExprContext *> ttlExpr();
- TtlExprContext* ttlExpr(size_t i);
- std::vector<antlr4::tree::TerminalNode *> COMMA();
- antlr4::tree::TerminalNode* COMMA(size_t i);
-
- virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override;
-
- };
-
- TtlClauseContext* ttlClause();
-
- class EngineExprContext : public antlr4::ParserRuleContext {
- public:
- EngineExprContext(antlr4::ParserRuleContext *parent, size_t invokingState);
- virtual size_t getRuleIndex() const override;
- antlr4::tree::TerminalNode *ENGINE();
- IdentifierOrNullContext *identifierOrNull();
- antlr4::tree::TerminalNode *EQ_SINGLE();
- antlr4::tree::TerminalNode *LPAREN();
- antlr4::tree::TerminalNode *RPAREN();
- ColumnExprListContext *columnExprList();
-
- virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override;
-
- };
-
- EngineExprContext* engineExpr();
-
- class TableElementExprContext : public antlr4::ParserRuleContext {
- public:
- TableElementExprContext(antlr4::ParserRuleContext *parent, size_t invokingState);
-
- TableElementExprContext() = default;
- void copyFrom(TableElementExprContext *context);
- using antlr4::ParserRuleContext::copyFrom;
-
- virtual size_t getRuleIndex() const override;
-
-
- };
-
- class TableElementExprProjectionContext : public TableElementExprContext {
- public:
- TableElementExprProjectionContext(TableElementExprContext *ctx);
-
- antlr4::tree::TerminalNode *PROJECTION();
- TableProjectionDfntContext *tableProjectionDfnt();
- virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override;
- };
-
- class TableElementExprConstraintContext : public TableElementExprContext {
- public:
- TableElementExprConstraintContext(TableElementExprContext *ctx);
-
- antlr4::tree::TerminalNode *CONSTRAINT();
- IdentifierContext *identifier();
- antlr4::tree::TerminalNode *CHECK();
- ColumnExprContext *columnExpr();
- virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override;
- };
-
- class TableElementExprColumnContext : public TableElementExprContext {
- public:
- TableElementExprColumnContext(TableElementExprContext *ctx);
-
- TableColumnDfntContext *tableColumnDfnt();
- virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override;
- };
-
- class TableElementExprIndexContext : public TableElementExprContext {
- public:
- TableElementExprIndexContext(TableElementExprContext *ctx);
-
- antlr4::tree::TerminalNode *INDEX();
- TableIndexDfntContext *tableIndexDfnt();
- virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override;
- };
-
- TableElementExprContext* tableElementExpr();
-
- class TableColumnDfntContext : public antlr4::ParserRuleContext {
- public:
- TableColumnDfntContext(antlr4::ParserRuleContext *parent, size_t invokingState);
- virtual size_t getRuleIndex() const override;
- NestedIdentifierContext *nestedIdentifier();
- ColumnTypeExprContext *columnTypeExpr();
- TableColumnPropertyExprContext *tableColumnPropertyExpr();
- antlr4::tree::TerminalNode *COMMENT();
- antlr4::tree::TerminalNode *STRING_LITERAL();
- CodecExprContext *codecExpr();
- antlr4::tree::TerminalNode *TTL();
- ColumnExprContext *columnExpr();
-
- virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override;
-
- };
-
- TableColumnDfntContext* tableColumnDfnt();
-
- class TableColumnPropertyExprContext : public antlr4::ParserRuleContext {
- public:
- TableColumnPropertyExprContext(antlr4::ParserRuleContext *parent, size_t invokingState);
- virtual size_t getRuleIndex() const override;
- ColumnExprContext *columnExpr();
- antlr4::tree::TerminalNode *DEFAULT();
- antlr4::tree::TerminalNode *MATERIALIZED();
- antlr4::tree::TerminalNode *ALIAS();
-
- virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override;
-
- };
-
- TableColumnPropertyExprContext* tableColumnPropertyExpr();
-
- class TableIndexDfntContext : public antlr4::ParserRuleContext {
- public:
- TableIndexDfntContext(antlr4::ParserRuleContext *parent, size_t invokingState);
- virtual size_t getRuleIndex() const override;
- NestedIdentifierContext *nestedIdentifier();
- ColumnExprContext *columnExpr();
- antlr4::tree::TerminalNode *TYPE();
- ColumnTypeExprContext *columnTypeExpr();
- antlr4::tree::TerminalNode *GRANULARITY();
- antlr4::tree::TerminalNode *DECIMAL_LITERAL();
-
- virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override;
-
- };
-
- TableIndexDfntContext* tableIndexDfnt();
-
- class TableProjectionDfntContext : public antlr4::ParserRuleContext {
- public:
- TableProjectionDfntContext(antlr4::ParserRuleContext *parent, size_t invokingState);
- virtual size_t getRuleIndex() const override;
- NestedIdentifierContext *nestedIdentifier();
- ProjectionSelectStmtContext *projectionSelectStmt();
-
- virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override;
-
- };
-
- TableProjectionDfntContext* tableProjectionDfnt();
-
- class CodecExprContext : public antlr4::ParserRuleContext {
- public:
- CodecExprContext(antlr4::ParserRuleContext *parent, size_t invokingState);
- virtual size_t getRuleIndex() const override;
- antlr4::tree::TerminalNode *CODEC();
- antlr4::tree::TerminalNode *LPAREN();
- std::vector<CodecArgExprContext *> codecArgExpr();
- CodecArgExprContext* codecArgExpr(size_t i);
- antlr4::tree::TerminalNode *RPAREN();
- std::vector<antlr4::tree::TerminalNode *> COMMA();
- antlr4::tree::TerminalNode* COMMA(size_t i);
-
- virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override;
-
- };
-
- CodecExprContext* codecExpr();
-
- class CodecArgExprContext : public antlr4::ParserRuleContext {
- public:
- CodecArgExprContext(antlr4::ParserRuleContext *parent, size_t invokingState);
- virtual size_t getRuleIndex() const override;
- IdentifierContext *identifier();
- antlr4::tree::TerminalNode *LPAREN();
- antlr4::tree::TerminalNode *RPAREN();
- ColumnExprListContext *columnExprList();
-
- virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override;
-
- };
-
- CodecArgExprContext* codecArgExpr();
-
- class TtlExprContext : public antlr4::ParserRuleContext {
- public:
- TtlExprContext(antlr4::ParserRuleContext *parent, size_t invokingState);
- virtual size_t getRuleIndex() const override;
- ColumnExprContext *columnExpr();
- antlr4::tree::TerminalNode *DELETE();
- antlr4::tree::TerminalNode *TO();
- antlr4::tree::TerminalNode *DISK();
- antlr4::tree::TerminalNode *STRING_LITERAL();
- antlr4::tree::TerminalNode *VOLUME();
-
- virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override;
-
- };
-
- TtlExprContext* ttlExpr();
-
- class DescribeStmtContext : public antlr4::ParserRuleContext {
- public:
- DescribeStmtContext(antlr4::ParserRuleContext *parent, size_t invokingState);
- virtual size_t getRuleIndex() const override;
- TableExprContext *tableExpr();
- antlr4::tree::TerminalNode *DESCRIBE();
- antlr4::tree::TerminalNode *DESC();
- antlr4::tree::TerminalNode *TABLE();
-
- virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override;
-
- };
-
- DescribeStmtContext* describeStmt();
-
- class DropStmtContext : public antlr4::ParserRuleContext {
- public:
- DropStmtContext(antlr4::ParserRuleContext *parent, size_t invokingState);
-
- DropStmtContext() = default;
- void copyFrom(DropStmtContext *context);
- using antlr4::ParserRuleContext::copyFrom;
-
- virtual size_t getRuleIndex() const override;
-
-
- };
-
- class DropDatabaseStmtContext : public DropStmtContext {
- public:
- DropDatabaseStmtContext(DropStmtContext *ctx);
-
- antlr4::tree::TerminalNode *DATABASE();
- DatabaseIdentifierContext *databaseIdentifier();
- antlr4::tree::TerminalNode *DETACH();
- antlr4::tree::TerminalNode *DROP();
- antlr4::tree::TerminalNode *IF();
- antlr4::tree::TerminalNode *EXISTS();
- ClusterClauseContext *clusterClause();
- virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override;
- };
-
- class DropTableStmtContext : public DropStmtContext {
- public:
- DropTableStmtContext(DropStmtContext *ctx);
-
- TableIdentifierContext *tableIdentifier();
- antlr4::tree::TerminalNode *DETACH();
- antlr4::tree::TerminalNode *DROP();
- antlr4::tree::TerminalNode *DICTIONARY();
- antlr4::tree::TerminalNode *TABLE();
- antlr4::tree::TerminalNode *VIEW();
- antlr4::tree::TerminalNode *IF();
- antlr4::tree::TerminalNode *EXISTS();
- ClusterClauseContext *clusterClause();
- antlr4::tree::TerminalNode *NO();
- antlr4::tree::TerminalNode *DELAY();
- antlr4::tree::TerminalNode *TEMPORARY();
- virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override;
- };
-
- DropStmtContext* dropStmt();
-
- class ExistsStmtContext : public antlr4::ParserRuleContext {
- public:
- ExistsStmtContext(antlr4::ParserRuleContext *parent, size_t invokingState);
-
- ExistsStmtContext() = default;
- void copyFrom(ExistsStmtContext *context);
- using antlr4::ParserRuleContext::copyFrom;
-
- virtual size_t getRuleIndex() const override;
-
-
- };
-
- class ExistsTableStmtContext : public ExistsStmtContext {
- public:
- ExistsTableStmtContext(ExistsStmtContext *ctx);
-
- antlr4::tree::TerminalNode *EXISTS();
- TableIdentifierContext *tableIdentifier();
- antlr4::tree::TerminalNode *DICTIONARY();
- antlr4::tree::TerminalNode *TABLE();
- antlr4::tree::TerminalNode *VIEW();
- antlr4::tree::TerminalNode *TEMPORARY();
- virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override;
- };
-
- class ExistsDatabaseStmtContext : public ExistsStmtContext {
- public:
- ExistsDatabaseStmtContext(ExistsStmtContext *ctx);
-
- antlr4::tree::TerminalNode *EXISTS();
- antlr4::tree::TerminalNode *DATABASE();
- DatabaseIdentifierContext *databaseIdentifier();
- virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override;
- };
-
- ExistsStmtContext* existsStmt();
-
- class ExplainStmtContext : public antlr4::ParserRuleContext {
- public:
- ExplainStmtContext(antlr4::ParserRuleContext *parent, size_t invokingState);
-
- ExplainStmtContext() = default;
- void copyFrom(ExplainStmtContext *context);
- using antlr4::ParserRuleContext::copyFrom;
-
- virtual size_t getRuleIndex() const override;
-
-
- };
-
- class ExplainSyntaxStmtContext : public ExplainStmtContext {
- public:
- ExplainSyntaxStmtContext(ExplainStmtContext *ctx);
-
- antlr4::tree::TerminalNode *EXPLAIN();
- antlr4::tree::TerminalNode *SYNTAX();
- QueryContext *query();
- virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override;
- };
-
- class ExplainASTStmtContext : public ExplainStmtContext {
- public:
- ExplainASTStmtContext(ExplainStmtContext *ctx);
-
- antlr4::tree::TerminalNode *EXPLAIN();
- antlr4::tree::TerminalNode *AST();
- QueryContext *query();
- virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override;
- };
-
- ExplainStmtContext* explainStmt();
-
- class InsertStmtContext : public antlr4::ParserRuleContext {
- public:
- InsertStmtContext(antlr4::ParserRuleContext *parent, size_t invokingState);
- virtual size_t getRuleIndex() const override;
- antlr4::tree::TerminalNode *INSERT();
- antlr4::tree::TerminalNode *INTO();
- DataClauseContext *dataClause();
- TableIdentifierContext *tableIdentifier();
- antlr4::tree::TerminalNode *FUNCTION();
- TableFunctionExprContext *tableFunctionExpr();
- antlr4::tree::TerminalNode *TABLE();
- ColumnsClauseContext *columnsClause();
-
- virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override;
-
- };
-
- InsertStmtContext* insertStmt();
-
- class ColumnsClauseContext : public antlr4::ParserRuleContext {
- public:
- ColumnsClauseContext(antlr4::ParserRuleContext *parent, size_t invokingState);
- virtual size_t getRuleIndex() const override;
- antlr4::tree::TerminalNode *LPAREN();
- std::vector<NestedIdentifierContext *> nestedIdentifier();
- NestedIdentifierContext* nestedIdentifier(size_t i);
- antlr4::tree::TerminalNode *RPAREN();
- std::vector<antlr4::tree::TerminalNode *> COMMA();
- antlr4::tree::TerminalNode* COMMA(size_t i);
-
- virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override;
-
- };
-
- ColumnsClauseContext* columnsClause();
-
- class DataClauseContext : public antlr4::ParserRuleContext {
- public:
- DataClauseContext(antlr4::ParserRuleContext *parent, size_t invokingState);
-
- DataClauseContext() = default;
- void copyFrom(DataClauseContext *context);
- using antlr4::ParserRuleContext::copyFrom;
-
- virtual size_t getRuleIndex() const override;
-
-
- };
-
- class DataClauseValuesContext : public DataClauseContext {
- public:
- DataClauseValuesContext(DataClauseContext *ctx);
-
- antlr4::tree::TerminalNode *VALUES();
- virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override;
- };
-
- class DataClauseFormatContext : public DataClauseContext {
- public:
- DataClauseFormatContext(DataClauseContext *ctx);
-
- antlr4::tree::TerminalNode *FORMAT();
- IdentifierContext *identifier();
- virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override;
- };
-
- class DataClauseSelectContext : public DataClauseContext {
: public DataClauseContext { - public: - DataClauseSelectContext(DataClauseContext *ctx); - - SelectUnionStmtContext *selectUnionStmt(); - antlr4::tree::TerminalNode *EOF(); - antlr4::tree::TerminalNode *SEMICOLON(); - virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override; - }; - - DataClauseContext* dataClause(); - - class KillStmtContext : public antlr4::ParserRuleContext { - public: - KillStmtContext(antlr4::ParserRuleContext *parent, size_t invokingState); - - KillStmtContext() = default; - void copyFrom(KillStmtContext *context); - using antlr4::ParserRuleContext::copyFrom; - - virtual size_t getRuleIndex() const override; - - - }; - - class KillMutationStmtContext : public KillStmtContext { - public: - KillMutationStmtContext(KillStmtContext *ctx); - - antlr4::tree::TerminalNode *KILL(); - antlr4::tree::TerminalNode *MUTATION(); - WhereClauseContext *whereClause(); - ClusterClauseContext *clusterClause(); - antlr4::tree::TerminalNode *SYNC(); - antlr4::tree::TerminalNode *ASYNC(); - antlr4::tree::TerminalNode *TEST(); - virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override; - }; - - KillStmtContext* killStmt(); - - class OptimizeStmtContext : public antlr4::ParserRuleContext { - public: - OptimizeStmtContext(antlr4::ParserRuleContext *parent, size_t invokingState); - virtual size_t getRuleIndex() const override; - antlr4::tree::TerminalNode *OPTIMIZE(); - antlr4::tree::TerminalNode *TABLE(); - TableIdentifierContext *tableIdentifier(); - ClusterClauseContext *clusterClause(); - PartitionClauseContext *partitionClause(); - antlr4::tree::TerminalNode *FINAL(); - antlr4::tree::TerminalNode *DEDUPLICATE(); - - virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override; - - }; - - OptimizeStmtContext* optimizeStmt(); - - class RenameStmtContext : public antlr4::ParserRuleContext { - public: - RenameStmtContext(antlr4::ParserRuleContext *parent, size_t invokingState); - virtual size_t getRuleIndex() const override; - antlr4::tree::TerminalNode *RENAME(); - antlr4::tree::TerminalNode *TABLE(); - std::vector tableIdentifier(); - TableIdentifierContext* tableIdentifier(size_t i); - std::vector TO(); - antlr4::tree::TerminalNode* TO(size_t i); - std::vector COMMA(); - antlr4::tree::TerminalNode* COMMA(size_t i); - ClusterClauseContext *clusterClause(); - - virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override; - - }; - - RenameStmtContext* renameStmt(); - - class ProjectionSelectStmtContext : public antlr4::ParserRuleContext { - public: - ProjectionSelectStmtContext(antlr4::ParserRuleContext *parent, size_t invokingState); - virtual size_t getRuleIndex() const override; - antlr4::tree::TerminalNode *LPAREN(); - antlr4::tree::TerminalNode *SELECT(); - ColumnExprListContext *columnExprList(); - antlr4::tree::TerminalNode *RPAREN(); - WithClauseContext *withClause(); - GroupByClauseContext *groupByClause(); - ProjectionOrderByClauseContext *projectionOrderByClause(); - - virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override; - - }; - - ProjectionSelectStmtContext* projectionSelectStmt(); - - class SelectUnionStmtContext : public antlr4::ParserRuleContext { - public: - SelectUnionStmtContext(antlr4::ParserRuleContext *parent, size_t invokingState); - virtual size_t getRuleIndex() const override; - std::vector selectStmtWithParens(); - SelectStmtWithParensContext* selectStmtWithParens(size_t i); - std::vector UNION(); - antlr4::tree::TerminalNode* UNION(size_t i); - std::vector 
ALL(); - antlr4::tree::TerminalNode* ALL(size_t i); - - virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override; - - }; - - SelectUnionStmtContext* selectUnionStmt(); - - class SelectStmtWithParensContext : public antlr4::ParserRuleContext { - public: - SelectStmtWithParensContext(antlr4::ParserRuleContext *parent, size_t invokingState); - virtual size_t getRuleIndex() const override; - SelectStmtContext *selectStmt(); - antlr4::tree::TerminalNode *LPAREN(); - SelectUnionStmtContext *selectUnionStmt(); - antlr4::tree::TerminalNode *RPAREN(); - - virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override; - - }; - - SelectStmtWithParensContext* selectStmtWithParens(); - - class SelectStmtContext : public antlr4::ParserRuleContext { - public: - SelectStmtContext(antlr4::ParserRuleContext *parent, size_t invokingState); - virtual size_t getRuleIndex() const override; - antlr4::tree::TerminalNode *SELECT(); - ColumnExprListContext *columnExprList(); - WithClauseContext *withClause(); - antlr4::tree::TerminalNode *DISTINCT(); - TopClauseContext *topClause(); - FromClauseContext *fromClause(); - ArrayJoinClauseContext *arrayJoinClause(); - PrewhereClauseContext *prewhereClause(); - WhereClauseContext *whereClause(); - GroupByClauseContext *groupByClause(); - std::vector WITH(); - antlr4::tree::TerminalNode* WITH(size_t i); - antlr4::tree::TerminalNode *TOTALS(); - HavingClauseContext *havingClause(); - OrderByClauseContext *orderByClause(); - LimitByClauseContext *limitByClause(); - LimitClauseContext *limitClause(); - SettingsClauseContext *settingsClause(); - antlr4::tree::TerminalNode *CUBE(); - antlr4::tree::TerminalNode *ROLLUP(); - - virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override; - - }; - - SelectStmtContext* selectStmt(); - - class WithClauseContext : public antlr4::ParserRuleContext { - public: - WithClauseContext(antlr4::ParserRuleContext *parent, size_t invokingState); - virtual size_t getRuleIndex() const override; - antlr4::tree::TerminalNode *WITH(); - ColumnExprListContext *columnExprList(); - - virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override; - - }; - - WithClauseContext* withClause(); - - class TopClauseContext : public antlr4::ParserRuleContext { - public: - TopClauseContext(antlr4::ParserRuleContext *parent, size_t invokingState); - virtual size_t getRuleIndex() const override; - antlr4::tree::TerminalNode *TOP(); - antlr4::tree::TerminalNode *DECIMAL_LITERAL(); - antlr4::tree::TerminalNode *WITH(); - antlr4::tree::TerminalNode *TIES(); - - virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override; - - }; - - TopClauseContext* topClause(); - - class FromClauseContext : public antlr4::ParserRuleContext { - public: - FromClauseContext(antlr4::ParserRuleContext *parent, size_t invokingState); - virtual size_t getRuleIndex() const override; - antlr4::tree::TerminalNode *FROM(); - JoinExprContext *joinExpr(); - - virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override; - - }; - - FromClauseContext* fromClause(); - - class ArrayJoinClauseContext : public antlr4::ParserRuleContext { - public: - ArrayJoinClauseContext(antlr4::ParserRuleContext *parent, size_t invokingState); - virtual size_t getRuleIndex() const override; - antlr4::tree::TerminalNode *ARRAY(); - antlr4::tree::TerminalNode *JOIN(); - ColumnExprListContext *columnExprList(); - antlr4::tree::TerminalNode *LEFT(); - antlr4::tree::TerminalNode *INNER(); - - virtual 
antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override; - - }; - - ArrayJoinClauseContext* arrayJoinClause(); - - class PrewhereClauseContext : public antlr4::ParserRuleContext { - public: - PrewhereClauseContext(antlr4::ParserRuleContext *parent, size_t invokingState); - virtual size_t getRuleIndex() const override; - antlr4::tree::TerminalNode *PREWHERE(); - ColumnExprContext *columnExpr(); - - virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override; - - }; - - PrewhereClauseContext* prewhereClause(); - - class WhereClauseContext : public antlr4::ParserRuleContext { - public: - WhereClauseContext(antlr4::ParserRuleContext *parent, size_t invokingState); - virtual size_t getRuleIndex() const override; - antlr4::tree::TerminalNode *WHERE(); - ColumnExprContext *columnExpr(); - - virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override; - - }; - - WhereClauseContext* whereClause(); - - class GroupByClauseContext : public antlr4::ParserRuleContext { - public: - GroupByClauseContext(antlr4::ParserRuleContext *parent, size_t invokingState); - virtual size_t getRuleIndex() const override; - antlr4::tree::TerminalNode *GROUP(); - antlr4::tree::TerminalNode *BY(); - antlr4::tree::TerminalNode *LPAREN(); - ColumnExprListContext *columnExprList(); - antlr4::tree::TerminalNode *RPAREN(); - antlr4::tree::TerminalNode *CUBE(); - antlr4::tree::TerminalNode *ROLLUP(); - - virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override; - - }; - - GroupByClauseContext* groupByClause(); - - class HavingClauseContext : public antlr4::ParserRuleContext { - public: - HavingClauseContext(antlr4::ParserRuleContext *parent, size_t invokingState); - virtual size_t getRuleIndex() const override; - antlr4::tree::TerminalNode *HAVING(); - ColumnExprContext *columnExpr(); - - virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override; - - }; - - HavingClauseContext* havingClause(); - - class OrderByClauseContext : public antlr4::ParserRuleContext { - public: - OrderByClauseContext(antlr4::ParserRuleContext *parent, size_t invokingState); - virtual size_t getRuleIndex() const override; - antlr4::tree::TerminalNode *ORDER(); - antlr4::tree::TerminalNode *BY(); - OrderExprListContext *orderExprList(); - - virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override; - - }; - - OrderByClauseContext* orderByClause(); - - class ProjectionOrderByClauseContext : public antlr4::ParserRuleContext { - public: - ProjectionOrderByClauseContext(antlr4::ParserRuleContext *parent, size_t invokingState); - virtual size_t getRuleIndex() const override; - antlr4::tree::TerminalNode *ORDER(); - antlr4::tree::TerminalNode *BY(); - ColumnExprListContext *columnExprList(); - - virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override; - - }; - - ProjectionOrderByClauseContext* projectionOrderByClause(); - - class LimitByClauseContext : public antlr4::ParserRuleContext { - public: - LimitByClauseContext(antlr4::ParserRuleContext *parent, size_t invokingState); - virtual size_t getRuleIndex() const override; - antlr4::tree::TerminalNode *LIMIT(); - LimitExprContext *limitExpr(); - antlr4::tree::TerminalNode *BY(); - ColumnExprListContext *columnExprList(); - - virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override; - - }; - - LimitByClauseContext* limitByClause(); - - class LimitClauseContext : public antlr4::ParserRuleContext { - public: - LimitClauseContext(antlr4::ParserRuleContext 
*parent, size_t invokingState); - virtual size_t getRuleIndex() const override; - antlr4::tree::TerminalNode *LIMIT(); - LimitExprContext *limitExpr(); - antlr4::tree::TerminalNode *WITH(); - antlr4::tree::TerminalNode *TIES(); - - virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override; - - }; - - LimitClauseContext* limitClause(); - - class SettingsClauseContext : public antlr4::ParserRuleContext { - public: - SettingsClauseContext(antlr4::ParserRuleContext *parent, size_t invokingState); - virtual size_t getRuleIndex() const override; - antlr4::tree::TerminalNode *SETTINGS(); - SettingExprListContext *settingExprList(); - - virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override; - - }; - - SettingsClauseContext* settingsClause(); - - class JoinExprContext : public antlr4::ParserRuleContext { - public: - JoinExprContext(antlr4::ParserRuleContext *parent, size_t invokingState); - - JoinExprContext() = default; - void copyFrom(JoinExprContext *context); - using antlr4::ParserRuleContext::copyFrom; - - virtual size_t getRuleIndex() const override; - - - }; - - class JoinExprOpContext : public JoinExprContext { - public: - JoinExprOpContext(JoinExprContext *ctx); - - std::vector joinExpr(); - JoinExprContext* joinExpr(size_t i); - antlr4::tree::TerminalNode *JOIN(); - JoinConstraintClauseContext *joinConstraintClause(); - JoinOpContext *joinOp(); - antlr4::tree::TerminalNode *GLOBAL(); - antlr4::tree::TerminalNode *LOCAL(); - virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override; - }; - - class JoinExprTableContext : public JoinExprContext { - public: - JoinExprTableContext(JoinExprContext *ctx); - - TableExprContext *tableExpr(); - antlr4::tree::TerminalNode *FINAL(); - SampleClauseContext *sampleClause(); - virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override; - }; - - class JoinExprParensContext : public JoinExprContext { - public: - JoinExprParensContext(JoinExprContext *ctx); - - antlr4::tree::TerminalNode *LPAREN(); - JoinExprContext *joinExpr(); - antlr4::tree::TerminalNode *RPAREN(); - virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override; - }; - - class JoinExprCrossOpContext : public JoinExprContext { - public: - JoinExprCrossOpContext(JoinExprContext *ctx); - - std::vector joinExpr(); - JoinExprContext* joinExpr(size_t i); - JoinOpCrossContext *joinOpCross(); - virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override; - }; - - JoinExprContext* joinExpr(); - JoinExprContext* joinExpr(int precedence); - class JoinOpContext : public antlr4::ParserRuleContext { - public: - JoinOpContext(antlr4::ParserRuleContext *parent, size_t invokingState); - - JoinOpContext() = default; - void copyFrom(JoinOpContext *context); - using antlr4::ParserRuleContext::copyFrom; - - virtual size_t getRuleIndex() const override; - - - }; - - class JoinOpFullContext : public JoinOpContext { - public: - JoinOpFullContext(JoinOpContext *ctx); - - antlr4::tree::TerminalNode *FULL(); - antlr4::tree::TerminalNode *OUTER(); - antlr4::tree::TerminalNode *ALL(); - antlr4::tree::TerminalNode *ANY(); - virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override; - }; - - class JoinOpInnerContext : public JoinOpContext { - public: - JoinOpInnerContext(JoinOpContext *ctx); - - antlr4::tree::TerminalNode *INNER(); - antlr4::tree::TerminalNode *ALL(); - antlr4::tree::TerminalNode *ANY(); - antlr4::tree::TerminalNode *ASOF(); - virtual antlrcpp::Any 
accept(antlr4::tree::ParseTreeVisitor *visitor) override; - }; - - class JoinOpLeftRightContext : public JoinOpContext { - public: - JoinOpLeftRightContext(JoinOpContext *ctx); - - antlr4::tree::TerminalNode *LEFT(); - antlr4::tree::TerminalNode *RIGHT(); - antlr4::tree::TerminalNode *OUTER(); - antlr4::tree::TerminalNode *SEMI(); - antlr4::tree::TerminalNode *ALL(); - antlr4::tree::TerminalNode *ANTI(); - antlr4::tree::TerminalNode *ANY(); - antlr4::tree::TerminalNode *ASOF(); - virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override; - }; - - JoinOpContext* joinOp(); - - class JoinOpCrossContext : public antlr4::ParserRuleContext { - public: - JoinOpCrossContext(antlr4::ParserRuleContext *parent, size_t invokingState); - virtual size_t getRuleIndex() const override; - antlr4::tree::TerminalNode *CROSS(); - antlr4::tree::TerminalNode *JOIN(); - antlr4::tree::TerminalNode *GLOBAL(); - antlr4::tree::TerminalNode *LOCAL(); - antlr4::tree::TerminalNode *COMMA(); - - virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override; - - }; - - JoinOpCrossContext* joinOpCross(); - - class JoinConstraintClauseContext : public antlr4::ParserRuleContext { - public: - JoinConstraintClauseContext(antlr4::ParserRuleContext *parent, size_t invokingState); - virtual size_t getRuleIndex() const override; - antlr4::tree::TerminalNode *ON(); - ColumnExprListContext *columnExprList(); - antlr4::tree::TerminalNode *USING(); - antlr4::tree::TerminalNode *LPAREN(); - antlr4::tree::TerminalNode *RPAREN(); - - virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override; - - }; - - JoinConstraintClauseContext* joinConstraintClause(); - - class SampleClauseContext : public antlr4::ParserRuleContext { - public: - SampleClauseContext(antlr4::ParserRuleContext *parent, size_t invokingState); - virtual size_t getRuleIndex() const override; - antlr4::tree::TerminalNode *SAMPLE(); - std::vector ratioExpr(); - RatioExprContext* ratioExpr(size_t i); - antlr4::tree::TerminalNode *OFFSET(); - - virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override; - - }; - - SampleClauseContext* sampleClause(); - - class LimitExprContext : public antlr4::ParserRuleContext { - public: - LimitExprContext(antlr4::ParserRuleContext *parent, size_t invokingState); - virtual size_t getRuleIndex() const override; - std::vector columnExpr(); - ColumnExprContext* columnExpr(size_t i); - antlr4::tree::TerminalNode *COMMA(); - antlr4::tree::TerminalNode *OFFSET(); - - virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override; - - }; - - LimitExprContext* limitExpr(); - - class OrderExprListContext : public antlr4::ParserRuleContext { - public: - OrderExprListContext(antlr4::ParserRuleContext *parent, size_t invokingState); - virtual size_t getRuleIndex() const override; - std::vector orderExpr(); - OrderExprContext* orderExpr(size_t i); - std::vector COMMA(); - antlr4::tree::TerminalNode* COMMA(size_t i); - - virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override; - - }; - - OrderExprListContext* orderExprList(); - - class OrderExprContext : public antlr4::ParserRuleContext { - public: - OrderExprContext(antlr4::ParserRuleContext *parent, size_t invokingState); - virtual size_t getRuleIndex() const override; - ColumnExprContext *columnExpr(); - antlr4::tree::TerminalNode *NULLS(); - antlr4::tree::TerminalNode *COLLATE(); - antlr4::tree::TerminalNode *STRING_LITERAL(); - antlr4::tree::TerminalNode *ASCENDING(); - 
antlr4::tree::TerminalNode *DESCENDING(); - antlr4::tree::TerminalNode *DESC(); - antlr4::tree::TerminalNode *FIRST(); - antlr4::tree::TerminalNode *LAST(); - - virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override; - - }; - - OrderExprContext* orderExpr(); - - class RatioExprContext : public antlr4::ParserRuleContext { - public: - RatioExprContext(antlr4::ParserRuleContext *parent, size_t invokingState); - virtual size_t getRuleIndex() const override; - std::vector numberLiteral(); - NumberLiteralContext* numberLiteral(size_t i); - antlr4::tree::TerminalNode *SLASH(); - - virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override; - - }; - - RatioExprContext* ratioExpr(); - - class SettingExprListContext : public antlr4::ParserRuleContext { - public: - SettingExprListContext(antlr4::ParserRuleContext *parent, size_t invokingState); - virtual size_t getRuleIndex() const override; - std::vector settingExpr(); - SettingExprContext* settingExpr(size_t i); - std::vector COMMA(); - antlr4::tree::TerminalNode* COMMA(size_t i); - - virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override; - - }; - - SettingExprListContext* settingExprList(); - - class SettingExprContext : public antlr4::ParserRuleContext { - public: - SettingExprContext(antlr4::ParserRuleContext *parent, size_t invokingState); - virtual size_t getRuleIndex() const override; - IdentifierContext *identifier(); - antlr4::tree::TerminalNode *EQ_SINGLE(); - LiteralContext *literal(); - - virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override; - - }; - - SettingExprContext* settingExpr(); - - class SetStmtContext : public antlr4::ParserRuleContext { - public: - SetStmtContext(antlr4::ParserRuleContext *parent, size_t invokingState); - virtual size_t getRuleIndex() const override; - antlr4::tree::TerminalNode *SET(); - SettingExprListContext *settingExprList(); - - virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override; - - }; - - SetStmtContext* setStmt(); - - class ShowStmtContext : public antlr4::ParserRuleContext { - public: - ShowStmtContext(antlr4::ParserRuleContext *parent, size_t invokingState); - - ShowStmtContext() = default; - void copyFrom(ShowStmtContext *context); - using antlr4::ParserRuleContext::copyFrom; - - virtual size_t getRuleIndex() const override; - - - }; - - class ShowCreateDatabaseStmtContext : public ShowStmtContext { - public: - ShowCreateDatabaseStmtContext(ShowStmtContext *ctx); - - antlr4::tree::TerminalNode *SHOW(); - antlr4::tree::TerminalNode *CREATE(); - antlr4::tree::TerminalNode *DATABASE(); - DatabaseIdentifierContext *databaseIdentifier(); - virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override; - }; - - class ShowDatabasesStmtContext : public ShowStmtContext { - public: - ShowDatabasesStmtContext(ShowStmtContext *ctx); - - antlr4::tree::TerminalNode *SHOW(); - antlr4::tree::TerminalNode *DATABASES(); - virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override; - }; - - class ShowCreateTableStmtContext : public ShowStmtContext { - public: - ShowCreateTableStmtContext(ShowStmtContext *ctx); - - antlr4::tree::TerminalNode *SHOW(); - antlr4::tree::TerminalNode *CREATE(); - TableIdentifierContext *tableIdentifier(); - antlr4::tree::TerminalNode *TEMPORARY(); - antlr4::tree::TerminalNode *TABLE(); - virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override; - }; - - class ShowTablesStmtContext : public ShowStmtContext { - 
public: - ShowTablesStmtContext(ShowStmtContext *ctx); - - antlr4::tree::TerminalNode *SHOW(); - antlr4::tree::TerminalNode *TABLES(); - antlr4::tree::TerminalNode *TEMPORARY(); - DatabaseIdentifierContext *databaseIdentifier(); - antlr4::tree::TerminalNode *LIKE(); - antlr4::tree::TerminalNode *STRING_LITERAL(); - WhereClauseContext *whereClause(); - LimitClauseContext *limitClause(); - antlr4::tree::TerminalNode *FROM(); - antlr4::tree::TerminalNode *IN(); - virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override; - }; - - class ShowDictionariesStmtContext : public ShowStmtContext { - public: - ShowDictionariesStmtContext(ShowStmtContext *ctx); - - antlr4::tree::TerminalNode *SHOW(); - antlr4::tree::TerminalNode *DICTIONARIES(); - antlr4::tree::TerminalNode *FROM(); - DatabaseIdentifierContext *databaseIdentifier(); - virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override; - }; - - class ShowCreateDictionaryStmtContext : public ShowStmtContext { - public: - ShowCreateDictionaryStmtContext(ShowStmtContext *ctx); - - antlr4::tree::TerminalNode *SHOW(); - antlr4::tree::TerminalNode *CREATE(); - antlr4::tree::TerminalNode *DICTIONARY(); - TableIdentifierContext *tableIdentifier(); - virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override; - }; - - ShowStmtContext* showStmt(); - - class SystemStmtContext : public antlr4::ParserRuleContext { - public: - SystemStmtContext(antlr4::ParserRuleContext *parent, size_t invokingState); - virtual size_t getRuleIndex() const override; - antlr4::tree::TerminalNode *SYSTEM(); - antlr4::tree::TerminalNode *FLUSH(); - antlr4::tree::TerminalNode *DISTRIBUTED(); - TableIdentifierContext *tableIdentifier(); - antlr4::tree::TerminalNode *LOGS(); - antlr4::tree::TerminalNode *RELOAD(); - antlr4::tree::TerminalNode *DICTIONARIES(); - antlr4::tree::TerminalNode *DICTIONARY(); - antlr4::tree::TerminalNode *START(); - antlr4::tree::TerminalNode *STOP(); - antlr4::tree::TerminalNode *SENDS(); - antlr4::tree::TerminalNode *FETCHES(); - antlr4::tree::TerminalNode *MERGES(); - antlr4::tree::TerminalNode *TTL(); - antlr4::tree::TerminalNode *REPLICATED(); - antlr4::tree::TerminalNode *SYNC(); - antlr4::tree::TerminalNode *REPLICA(); - - virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override; - - }; - - SystemStmtContext* systemStmt(); - - class TruncateStmtContext : public antlr4::ParserRuleContext { - public: - TruncateStmtContext(antlr4::ParserRuleContext *parent, size_t invokingState); - virtual size_t getRuleIndex() const override; - antlr4::tree::TerminalNode *TRUNCATE(); - TableIdentifierContext *tableIdentifier(); - antlr4::tree::TerminalNode *TEMPORARY(); - antlr4::tree::TerminalNode *TABLE(); - antlr4::tree::TerminalNode *IF(); - antlr4::tree::TerminalNode *EXISTS(); - ClusterClauseContext *clusterClause(); - - virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override; - - }; - - TruncateStmtContext* truncateStmt(); - - class UseStmtContext : public antlr4::ParserRuleContext { - public: - UseStmtContext(antlr4::ParserRuleContext *parent, size_t invokingState); - virtual size_t getRuleIndex() const override; - antlr4::tree::TerminalNode *USE(); - DatabaseIdentifierContext *databaseIdentifier(); - - virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override; - - }; - - UseStmtContext* useStmt(); - - class WatchStmtContext : public antlr4::ParserRuleContext { - public: - WatchStmtContext(antlr4::ParserRuleContext *parent, size_t 
invokingState); - virtual size_t getRuleIndex() const override; - antlr4::tree::TerminalNode *WATCH(); - TableIdentifierContext *tableIdentifier(); - antlr4::tree::TerminalNode *EVENTS(); - antlr4::tree::TerminalNode *LIMIT(); - antlr4::tree::TerminalNode *DECIMAL_LITERAL(); - - virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override; - - }; - - WatchStmtContext* watchStmt(); - - class ColumnTypeExprContext : public antlr4::ParserRuleContext { - public: - ColumnTypeExprContext(antlr4::ParserRuleContext *parent, size_t invokingState); - - ColumnTypeExprContext() = default; - void copyFrom(ColumnTypeExprContext *context); - using antlr4::ParserRuleContext::copyFrom; - - virtual size_t getRuleIndex() const override; - - - }; - - class ColumnTypeExprNestedContext : public ColumnTypeExprContext { - public: - ColumnTypeExprNestedContext(ColumnTypeExprContext *ctx); - - std::vector identifier(); - IdentifierContext* identifier(size_t i); - antlr4::tree::TerminalNode *LPAREN(); - std::vector columnTypeExpr(); - ColumnTypeExprContext* columnTypeExpr(size_t i); - antlr4::tree::TerminalNode *RPAREN(); - std::vector COMMA(); - antlr4::tree::TerminalNode* COMMA(size_t i); - virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override; - }; - - class ColumnTypeExprParamContext : public ColumnTypeExprContext { - public: - ColumnTypeExprParamContext(ColumnTypeExprContext *ctx); - - IdentifierContext *identifier(); - antlr4::tree::TerminalNode *LPAREN(); - antlr4::tree::TerminalNode *RPAREN(); - ColumnExprListContext *columnExprList(); - virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override; - }; - - class ColumnTypeExprSimpleContext : public ColumnTypeExprContext { - public: - ColumnTypeExprSimpleContext(ColumnTypeExprContext *ctx); - - IdentifierContext *identifier(); - virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override; - }; - - class ColumnTypeExprComplexContext : public ColumnTypeExprContext { - public: - ColumnTypeExprComplexContext(ColumnTypeExprContext *ctx); - - IdentifierContext *identifier(); - antlr4::tree::TerminalNode *LPAREN(); - std::vector columnTypeExpr(); - ColumnTypeExprContext* columnTypeExpr(size_t i); - antlr4::tree::TerminalNode *RPAREN(); - std::vector COMMA(); - antlr4::tree::TerminalNode* COMMA(size_t i); - virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override; - }; - - class ColumnTypeExprEnumContext : public ColumnTypeExprContext { - public: - ColumnTypeExprEnumContext(ColumnTypeExprContext *ctx); - - IdentifierContext *identifier(); - antlr4::tree::TerminalNode *LPAREN(); - std::vector enumValue(); - EnumValueContext* enumValue(size_t i); - antlr4::tree::TerminalNode *RPAREN(); - std::vector COMMA(); - antlr4::tree::TerminalNode* COMMA(size_t i); - virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override; - }; - - ColumnTypeExprContext* columnTypeExpr(); - - class ColumnExprListContext : public antlr4::ParserRuleContext { - public: - ColumnExprListContext(antlr4::ParserRuleContext *parent, size_t invokingState); - virtual size_t getRuleIndex() const override; - std::vector columnsExpr(); - ColumnsExprContext* columnsExpr(size_t i); - std::vector COMMA(); - antlr4::tree::TerminalNode* COMMA(size_t i); - - virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override; - - }; - - ColumnExprListContext* columnExprList(); - - class ColumnsExprContext : public antlr4::ParserRuleContext { - public: - 
ColumnsExprContext(antlr4::ParserRuleContext *parent, size_t invokingState); - - ColumnsExprContext() = default; - void copyFrom(ColumnsExprContext *context); - using antlr4::ParserRuleContext::copyFrom; - - virtual size_t getRuleIndex() const override; - - - }; - - class ColumnsExprColumnContext : public ColumnsExprContext { - public: - ColumnsExprColumnContext(ColumnsExprContext *ctx); - - ColumnExprContext *columnExpr(); - virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override; - }; - - class ColumnsExprAsteriskContext : public ColumnsExprContext { - public: - ColumnsExprAsteriskContext(ColumnsExprContext *ctx); - - antlr4::tree::TerminalNode *ASTERISK(); - TableIdentifierContext *tableIdentifier(); - antlr4::tree::TerminalNode *DOT(); - virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override; - }; - - class ColumnsExprSubqueryContext : public ColumnsExprContext { - public: - ColumnsExprSubqueryContext(ColumnsExprContext *ctx); - - antlr4::tree::TerminalNode *LPAREN(); - SelectUnionStmtContext *selectUnionStmt(); - antlr4::tree::TerminalNode *RPAREN(); - virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override; - }; - - ColumnsExprContext* columnsExpr(); - - class ColumnExprContext : public antlr4::ParserRuleContext { - public: - ColumnExprContext(antlr4::ParserRuleContext *parent, size_t invokingState); - - ColumnExprContext() = default; - void copyFrom(ColumnExprContext *context); - using antlr4::ParserRuleContext::copyFrom; - - virtual size_t getRuleIndex() const override; - - - }; - - class ColumnExprTernaryOpContext : public ColumnExprContext { - public: - ColumnExprTernaryOpContext(ColumnExprContext *ctx); - - std::vector columnExpr(); - ColumnExprContext* columnExpr(size_t i); - antlr4::tree::TerminalNode *QUERY(); - antlr4::tree::TerminalNode *COLON(); - virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override; - }; - - class ColumnExprAliasContext : public ColumnExprContext { - public: - ColumnExprAliasContext(ColumnExprContext *ctx); - - ColumnExprContext *columnExpr(); - AliasContext *alias(); - antlr4::tree::TerminalNode *AS(); - IdentifierContext *identifier(); - virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override; - }; - - class ColumnExprExtractContext : public ColumnExprContext { - public: - ColumnExprExtractContext(ColumnExprContext *ctx); - - antlr4::tree::TerminalNode *EXTRACT(); - antlr4::tree::TerminalNode *LPAREN(); - IntervalContext *interval(); - antlr4::tree::TerminalNode *FROM(); - ColumnExprContext *columnExpr(); - antlr4::tree::TerminalNode *RPAREN(); - virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override; - }; - - class ColumnExprNegateContext : public ColumnExprContext { - public: - ColumnExprNegateContext(ColumnExprContext *ctx); - - antlr4::tree::TerminalNode *DASH(); - ColumnExprContext *columnExpr(); - virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override; - }; - - class ColumnExprSubqueryContext : public ColumnExprContext { - public: - ColumnExprSubqueryContext(ColumnExprContext *ctx); - - antlr4::tree::TerminalNode *LPAREN(); - SelectUnionStmtContext *selectUnionStmt(); - antlr4::tree::TerminalNode *RPAREN(); - virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override; - }; - - class ColumnExprLiteralContext : public ColumnExprContext { - public: - ColumnExprLiteralContext(ColumnExprContext *ctx); - - LiteralContext *literal(); - virtual antlrcpp::Any 
accept(antlr4::tree::ParseTreeVisitor *visitor) override; - }; - - class ColumnExprArrayContext : public ColumnExprContext { - public: - ColumnExprArrayContext(ColumnExprContext *ctx); - - antlr4::tree::TerminalNode *LBRACKET(); - antlr4::tree::TerminalNode *RBRACKET(); - ColumnExprListContext *columnExprList(); - virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override; - }; - - class ColumnExprSubstringContext : public ColumnExprContext { - public: - ColumnExprSubstringContext(ColumnExprContext *ctx); - - antlr4::tree::TerminalNode *SUBSTRING(); - antlr4::tree::TerminalNode *LPAREN(); - std::vector columnExpr(); - ColumnExprContext* columnExpr(size_t i); - antlr4::tree::TerminalNode *FROM(); - antlr4::tree::TerminalNode *RPAREN(); - antlr4::tree::TerminalNode *FOR(); - virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override; - }; - - class ColumnExprCastContext : public ColumnExprContext { - public: - ColumnExprCastContext(ColumnExprContext *ctx); - - antlr4::tree::TerminalNode *CAST(); - antlr4::tree::TerminalNode *LPAREN(); - ColumnExprContext *columnExpr(); - antlr4::tree::TerminalNode *AS(); - ColumnTypeExprContext *columnTypeExpr(); - antlr4::tree::TerminalNode *RPAREN(); - virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override; - }; - - class ColumnExprOrContext : public ColumnExprContext { - public: - ColumnExprOrContext(ColumnExprContext *ctx); - - std::vector columnExpr(); - ColumnExprContext* columnExpr(size_t i); - antlr4::tree::TerminalNode *OR(); - virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override; - }; - - class ColumnExprPrecedence1Context : public ColumnExprContext { - public: - ColumnExprPrecedence1Context(ColumnExprContext *ctx); - - std::vector columnExpr(); - ColumnExprContext* columnExpr(size_t i); - antlr4::tree::TerminalNode *ASTERISK(); - antlr4::tree::TerminalNode *SLASH(); - antlr4::tree::TerminalNode *PERCENT(); - virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override; - }; - - class ColumnExprPrecedence2Context : public ColumnExprContext { - public: - ColumnExprPrecedence2Context(ColumnExprContext *ctx); - - std::vector columnExpr(); - ColumnExprContext* columnExpr(size_t i); - antlr4::tree::TerminalNode *PLUS(); - antlr4::tree::TerminalNode *DASH(); - antlr4::tree::TerminalNode *CONCAT(); - virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override; - }; - - class ColumnExprPrecedence3Context : public ColumnExprContext { - public: - ColumnExprPrecedence3Context(ColumnExprContext *ctx); - - std::vector columnExpr(); - ColumnExprContext* columnExpr(size_t i); - antlr4::tree::TerminalNode *EQ_DOUBLE(); - antlr4::tree::TerminalNode *EQ_SINGLE(); - antlr4::tree::TerminalNode *NOT_EQ(); - antlr4::tree::TerminalNode *LE(); - antlr4::tree::TerminalNode *GE(); - antlr4::tree::TerminalNode *LT(); - antlr4::tree::TerminalNode *GT(); - antlr4::tree::TerminalNode *IN(); - antlr4::tree::TerminalNode *LIKE(); - antlr4::tree::TerminalNode *ILIKE(); - antlr4::tree::TerminalNode *GLOBAL(); - antlr4::tree::TerminalNode *NOT(); - virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override; - }; - - class ColumnExprIntervalContext : public ColumnExprContext { - public: - ColumnExprIntervalContext(ColumnExprContext *ctx); - - antlr4::tree::TerminalNode *INTERVAL(); - ColumnExprContext *columnExpr(); - IntervalContext *interval(); - virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override; - }; - - 
class ColumnExprIsNullContext : public ColumnExprContext { - public: - ColumnExprIsNullContext(ColumnExprContext *ctx); - - ColumnExprContext *columnExpr(); - antlr4::tree::TerminalNode *IS(); - antlr4::tree::TerminalNode *NULL_SQL(); - antlr4::tree::TerminalNode *NOT(); - virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override; - }; - - class ColumnExprTrimContext : public ColumnExprContext { - public: - ColumnExprTrimContext(ColumnExprContext *ctx); - - antlr4::tree::TerminalNode *TRIM(); - antlr4::tree::TerminalNode *LPAREN(); - antlr4::tree::TerminalNode *STRING_LITERAL(); - antlr4::tree::TerminalNode *FROM(); - ColumnExprContext *columnExpr(); - antlr4::tree::TerminalNode *RPAREN(); - antlr4::tree::TerminalNode *BOTH(); - antlr4::tree::TerminalNode *LEADING(); - antlr4::tree::TerminalNode *TRAILING(); - virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override; - }; - - class ColumnExprTupleContext : public ColumnExprContext { - public: - ColumnExprTupleContext(ColumnExprContext *ctx); - - antlr4::tree::TerminalNode *LPAREN(); - ColumnExprListContext *columnExprList(); - antlr4::tree::TerminalNode *RPAREN(); - virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override; - }; - - class ColumnExprArrayAccessContext : public ColumnExprContext { - public: - ColumnExprArrayAccessContext(ColumnExprContext *ctx); - - std::vector columnExpr(); - ColumnExprContext* columnExpr(size_t i); - antlr4::tree::TerminalNode *LBRACKET(); - antlr4::tree::TerminalNode *RBRACKET(); - virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override; - }; - - class ColumnExprBetweenContext : public ColumnExprContext { - public: - ColumnExprBetweenContext(ColumnExprContext *ctx); - - std::vector columnExpr(); - ColumnExprContext* columnExpr(size_t i); - antlr4::tree::TerminalNode *BETWEEN(); - antlr4::tree::TerminalNode *AND(); - antlr4::tree::TerminalNode *NOT(); - virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override; - }; - - class ColumnExprParensContext : public ColumnExprContext { - public: - ColumnExprParensContext(ColumnExprContext *ctx); - - antlr4::tree::TerminalNode *LPAREN(); - ColumnExprContext *columnExpr(); - antlr4::tree::TerminalNode *RPAREN(); - virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override; - }; - - class ColumnExprTimestampContext : public ColumnExprContext { - public: - ColumnExprTimestampContext(ColumnExprContext *ctx); - - antlr4::tree::TerminalNode *TIMESTAMP(); - antlr4::tree::TerminalNode *STRING_LITERAL(); - virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override; - }; - - class ColumnExprAndContext : public ColumnExprContext { - public: - ColumnExprAndContext(ColumnExprContext *ctx); - - std::vector columnExpr(); - ColumnExprContext* columnExpr(size_t i); - antlr4::tree::TerminalNode *AND(); - virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override; - }; - - class ColumnExprTupleAccessContext : public ColumnExprContext { - public: - ColumnExprTupleAccessContext(ColumnExprContext *ctx); - - ColumnExprContext *columnExpr(); - antlr4::tree::TerminalNode *DOT(); - antlr4::tree::TerminalNode *DECIMAL_LITERAL(); - virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override; - }; - - class ColumnExprCaseContext : public ColumnExprContext { - public: - ColumnExprCaseContext(ColumnExprContext *ctx); - - antlr4::tree::TerminalNode *CASE(); - antlr4::tree::TerminalNode *END(); - std::vector 
columnExpr(); - ColumnExprContext* columnExpr(size_t i); - std::vector WHEN(); - antlr4::tree::TerminalNode* WHEN(size_t i); - std::vector THEN(); - antlr4::tree::TerminalNode* THEN(size_t i); - antlr4::tree::TerminalNode *ELSE(); - virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override; - }; - - class ColumnExprDateContext : public ColumnExprContext { - public: - ColumnExprDateContext(ColumnExprContext *ctx); - - antlr4::tree::TerminalNode *DATE(); - antlr4::tree::TerminalNode *STRING_LITERAL(); - virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override; - }; - - class ColumnExprNotContext : public ColumnExprContext { - public: - ColumnExprNotContext(ColumnExprContext *ctx); - - antlr4::tree::TerminalNode *NOT(); - ColumnExprContext *columnExpr(); - virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override; - }; - - class ColumnExprIdentifierContext : public ColumnExprContext { - public: - ColumnExprIdentifierContext(ColumnExprContext *ctx); - - ColumnIdentifierContext *columnIdentifier(); - virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override; - }; - - class ColumnExprFunctionContext : public ColumnExprContext { - public: - ColumnExprFunctionContext(ColumnExprContext *ctx); - - IdentifierContext *identifier(); - std::vector LPAREN(); - antlr4::tree::TerminalNode* LPAREN(size_t i); - std::vector RPAREN(); - antlr4::tree::TerminalNode* RPAREN(size_t i); - antlr4::tree::TerminalNode *DISTINCT(); - ColumnArgListContext *columnArgList(); - ColumnExprListContext *columnExprList(); - virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override; - }; - - class ColumnExprAsteriskContext : public ColumnExprContext { - public: - ColumnExprAsteriskContext(ColumnExprContext *ctx); - - antlr4::tree::TerminalNode *ASTERISK(); - TableIdentifierContext *tableIdentifier(); - antlr4::tree::TerminalNode *DOT(); - virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override; - }; - - ColumnExprContext* columnExpr(); - ColumnExprContext* columnExpr(int precedence); - class ColumnArgListContext : public antlr4::ParserRuleContext { - public: - ColumnArgListContext(antlr4::ParserRuleContext *parent, size_t invokingState); - virtual size_t getRuleIndex() const override; - std::vector columnArgExpr(); - ColumnArgExprContext* columnArgExpr(size_t i); - std::vector COMMA(); - antlr4::tree::TerminalNode* COMMA(size_t i); - - virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override; - - }; - - ColumnArgListContext* columnArgList(); - - class ColumnArgExprContext : public antlr4::ParserRuleContext { - public: - ColumnArgExprContext(antlr4::ParserRuleContext *parent, size_t invokingState); - virtual size_t getRuleIndex() const override; - ColumnLambdaExprContext *columnLambdaExpr(); - ColumnExprContext *columnExpr(); - - virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override; - - }; - - ColumnArgExprContext* columnArgExpr(); - - class ColumnLambdaExprContext : public antlr4::ParserRuleContext { - public: - ColumnLambdaExprContext(antlr4::ParserRuleContext *parent, size_t invokingState); - virtual size_t getRuleIndex() const override; - antlr4::tree::TerminalNode *ARROW(); - ColumnExprContext *columnExpr(); - antlr4::tree::TerminalNode *LPAREN(); - std::vector identifier(); - IdentifierContext* identifier(size_t i); - antlr4::tree::TerminalNode *RPAREN(); - std::vector COMMA(); - antlr4::tree::TerminalNode* COMMA(size_t i); - - virtual antlrcpp::Any 
accept(antlr4::tree::ParseTreeVisitor *visitor) override; - - }; - - ColumnLambdaExprContext* columnLambdaExpr(); - - class ColumnIdentifierContext : public antlr4::ParserRuleContext { - public: - ColumnIdentifierContext(antlr4::ParserRuleContext *parent, size_t invokingState); - virtual size_t getRuleIndex() const override; - NestedIdentifierContext *nestedIdentifier(); - TableIdentifierContext *tableIdentifier(); - antlr4::tree::TerminalNode *DOT(); - - virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override; - - }; - - ColumnIdentifierContext* columnIdentifier(); - - class NestedIdentifierContext : public antlr4::ParserRuleContext { - public: - NestedIdentifierContext(antlr4::ParserRuleContext *parent, size_t invokingState); - virtual size_t getRuleIndex() const override; - std::vector identifier(); - IdentifierContext* identifier(size_t i); - antlr4::tree::TerminalNode *DOT(); - - virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override; - - }; - - NestedIdentifierContext* nestedIdentifier(); - - class TableExprContext : public antlr4::ParserRuleContext { - public: - TableExprContext(antlr4::ParserRuleContext *parent, size_t invokingState); - - TableExprContext() = default; - void copyFrom(TableExprContext *context); - using antlr4::ParserRuleContext::copyFrom; - - virtual size_t getRuleIndex() const override; - - - }; - - class TableExprIdentifierContext : public TableExprContext { - public: - TableExprIdentifierContext(TableExprContext *ctx); - - TableIdentifierContext *tableIdentifier(); - virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override; - }; - - class TableExprSubqueryContext : public TableExprContext { - public: - TableExprSubqueryContext(TableExprContext *ctx); - - antlr4::tree::TerminalNode *LPAREN(); - SelectUnionStmtContext *selectUnionStmt(); - antlr4::tree::TerminalNode *RPAREN(); - virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override; - }; - - class TableExprAliasContext : public TableExprContext { - public: - TableExprAliasContext(TableExprContext *ctx); - - TableExprContext *tableExpr(); - AliasContext *alias(); - antlr4::tree::TerminalNode *AS(); - IdentifierContext *identifier(); - virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override; - }; - - class TableExprFunctionContext : public TableExprContext { - public: - TableExprFunctionContext(TableExprContext *ctx); - - TableFunctionExprContext *tableFunctionExpr(); - virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override; - }; - - TableExprContext* tableExpr(); - TableExprContext* tableExpr(int precedence); - class TableFunctionExprContext : public antlr4::ParserRuleContext { - public: - TableFunctionExprContext(antlr4::ParserRuleContext *parent, size_t invokingState); - virtual size_t getRuleIndex() const override; - IdentifierContext *identifier(); - antlr4::tree::TerminalNode *LPAREN(); - antlr4::tree::TerminalNode *RPAREN(); - TableArgListContext *tableArgList(); - - virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override; - - }; - - TableFunctionExprContext* tableFunctionExpr(); - - class TableIdentifierContext : public antlr4::ParserRuleContext { - public: - TableIdentifierContext(antlr4::ParserRuleContext *parent, size_t invokingState); - virtual size_t getRuleIndex() const override; - IdentifierContext *identifier(); - DatabaseIdentifierContext *databaseIdentifier(); - antlr4::tree::TerminalNode *DOT(); - - virtual antlrcpp::Any 
accept(antlr4::tree::ParseTreeVisitor *visitor) override; - - }; - - TableIdentifierContext* tableIdentifier(); - - class TableArgListContext : public antlr4::ParserRuleContext { - public: - TableArgListContext(antlr4::ParserRuleContext *parent, size_t invokingState); - virtual size_t getRuleIndex() const override; - std::vector tableArgExpr(); - TableArgExprContext* tableArgExpr(size_t i); - std::vector COMMA(); - antlr4::tree::TerminalNode* COMMA(size_t i); - - virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override; - - }; - - TableArgListContext* tableArgList(); - - class TableArgExprContext : public antlr4::ParserRuleContext { - public: - TableArgExprContext(antlr4::ParserRuleContext *parent, size_t invokingState); - virtual size_t getRuleIndex() const override; - NestedIdentifierContext *nestedIdentifier(); - TableFunctionExprContext *tableFunctionExpr(); - LiteralContext *literal(); - - virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override; - - }; - - TableArgExprContext* tableArgExpr(); - - class DatabaseIdentifierContext : public antlr4::ParserRuleContext { - public: - DatabaseIdentifierContext(antlr4::ParserRuleContext *parent, size_t invokingState); - virtual size_t getRuleIndex() const override; - IdentifierContext *identifier(); - - virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override; - - }; - - DatabaseIdentifierContext* databaseIdentifier(); - - class FloatingLiteralContext : public antlr4::ParserRuleContext { - public: - FloatingLiteralContext(antlr4::ParserRuleContext *parent, size_t invokingState); - virtual size_t getRuleIndex() const override; - antlr4::tree::TerminalNode *FLOATING_LITERAL(); - antlr4::tree::TerminalNode *DOT(); - std::vector DECIMAL_LITERAL(); - antlr4::tree::TerminalNode* DECIMAL_LITERAL(size_t i); - antlr4::tree::TerminalNode *OCTAL_LITERAL(); - - virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override; - - }; - - FloatingLiteralContext* floatingLiteral(); - - class NumberLiteralContext : public antlr4::ParserRuleContext { - public: - NumberLiteralContext(antlr4::ParserRuleContext *parent, size_t invokingState); - virtual size_t getRuleIndex() const override; - FloatingLiteralContext *floatingLiteral(); - antlr4::tree::TerminalNode *OCTAL_LITERAL(); - antlr4::tree::TerminalNode *DECIMAL_LITERAL(); - antlr4::tree::TerminalNode *HEXADECIMAL_LITERAL(); - antlr4::tree::TerminalNode *INF(); - antlr4::tree::TerminalNode *NAN_SQL(); - antlr4::tree::TerminalNode *PLUS(); - antlr4::tree::TerminalNode *DASH(); - - virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override; - - }; - - NumberLiteralContext* numberLiteral(); - - class LiteralContext : public antlr4::ParserRuleContext { - public: - LiteralContext(antlr4::ParserRuleContext *parent, size_t invokingState); - virtual size_t getRuleIndex() const override; - NumberLiteralContext *numberLiteral(); - antlr4::tree::TerminalNode *STRING_LITERAL(); - antlr4::tree::TerminalNode *NULL_SQL(); - - virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override; - - }; - - LiteralContext* literal(); - - class IntervalContext : public antlr4::ParserRuleContext { - public: - IntervalContext(antlr4::ParserRuleContext *parent, size_t invokingState); - virtual size_t getRuleIndex() const override; - antlr4::tree::TerminalNode *SECOND(); - antlr4::tree::TerminalNode *MINUTE(); - antlr4::tree::TerminalNode *HOUR(); - antlr4::tree::TerminalNode *DAY(); - antlr4::tree::TerminalNode *WEEK(); 
- antlr4::tree::TerminalNode *MONTH(); - antlr4::tree::TerminalNode *QUARTER(); - antlr4::tree::TerminalNode *YEAR(); - - virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override; - - }; - - IntervalContext* interval(); - - class KeywordContext : public antlr4::ParserRuleContext { - public: - KeywordContext(antlr4::ParserRuleContext *parent, size_t invokingState); - virtual size_t getRuleIndex() const override; - antlr4::tree::TerminalNode *AFTER(); - antlr4::tree::TerminalNode *ALIAS(); - antlr4::tree::TerminalNode *ALL(); - antlr4::tree::TerminalNode *ALTER(); - antlr4::tree::TerminalNode *AND(); - antlr4::tree::TerminalNode *ANTI(); - antlr4::tree::TerminalNode *ANY(); - antlr4::tree::TerminalNode *ARRAY(); - antlr4::tree::TerminalNode *AS(); - antlr4::tree::TerminalNode *ASCENDING(); - antlr4::tree::TerminalNode *ASOF(); - antlr4::tree::TerminalNode *AST(); - antlr4::tree::TerminalNode *ASYNC(); - antlr4::tree::TerminalNode *ATTACH(); - antlr4::tree::TerminalNode *BETWEEN(); - antlr4::tree::TerminalNode *BOTH(); - antlr4::tree::TerminalNode *BY(); - antlr4::tree::TerminalNode *CASE(); - antlr4::tree::TerminalNode *CAST(); - antlr4::tree::TerminalNode *CHECK(); - antlr4::tree::TerminalNode *CLEAR(); - antlr4::tree::TerminalNode *CLUSTER(); - antlr4::tree::TerminalNode *CODEC(); - antlr4::tree::TerminalNode *COLLATE(); - antlr4::tree::TerminalNode *COLUMN(); - antlr4::tree::TerminalNode *COMMENT(); - antlr4::tree::TerminalNode *CONSTRAINT(); - antlr4::tree::TerminalNode *CREATE(); - antlr4::tree::TerminalNode *CROSS(); - antlr4::tree::TerminalNode *CUBE(); - antlr4::tree::TerminalNode *DATABASE(); - antlr4::tree::TerminalNode *DATABASES(); - antlr4::tree::TerminalNode *DATE(); - antlr4::tree::TerminalNode *DEDUPLICATE(); - antlr4::tree::TerminalNode *DEFAULT(); - antlr4::tree::TerminalNode *DELAY(); - antlr4::tree::TerminalNode *DELETE(); - antlr4::tree::TerminalNode *DESCRIBE(); - antlr4::tree::TerminalNode *DESC(); - antlr4::tree::TerminalNode *DESCENDING(); - antlr4::tree::TerminalNode *DETACH(); - antlr4::tree::TerminalNode *DICTIONARIES(); - antlr4::tree::TerminalNode *DICTIONARY(); - antlr4::tree::TerminalNode *DISK(); - antlr4::tree::TerminalNode *DISTINCT(); - antlr4::tree::TerminalNode *DISTRIBUTED(); - antlr4::tree::TerminalNode *DROP(); - antlr4::tree::TerminalNode *ELSE(); - antlr4::tree::TerminalNode *END(); - antlr4::tree::TerminalNode *ENGINE(); - antlr4::tree::TerminalNode *EVENTS(); - antlr4::tree::TerminalNode *EXISTS(); - antlr4::tree::TerminalNode *EXPLAIN(); - antlr4::tree::TerminalNode *EXPRESSION(); - antlr4::tree::TerminalNode *EXTRACT(); - antlr4::tree::TerminalNode *FETCHES(); - antlr4::tree::TerminalNode *FINAL(); - antlr4::tree::TerminalNode *FIRST(); - antlr4::tree::TerminalNode *FLUSH(); - antlr4::tree::TerminalNode *FOR(); - antlr4::tree::TerminalNode *FORMAT(); - antlr4::tree::TerminalNode *FREEZE(); - antlr4::tree::TerminalNode *FROM(); - antlr4::tree::TerminalNode *FULL(); - antlr4::tree::TerminalNode *FUNCTION(); - antlr4::tree::TerminalNode *GLOBAL(); - antlr4::tree::TerminalNode *GRANULARITY(); - antlr4::tree::TerminalNode *GROUP(); - antlr4::tree::TerminalNode *HAVING(); - antlr4::tree::TerminalNode *HIERARCHICAL(); - antlr4::tree::TerminalNode *ID(); - antlr4::tree::TerminalNode *IF(); - antlr4::tree::TerminalNode *ILIKE(); - antlr4::tree::TerminalNode *IN(); - antlr4::tree::TerminalNode *INDEX(); - antlr4::tree::TerminalNode *INJECTIVE(); - antlr4::tree::TerminalNode *INNER(); - antlr4::tree::TerminalNode *INSERT(); - 
-    antlr4::tree::TerminalNode *INTERVAL();
-    antlr4::tree::TerminalNode *INTO();
-    antlr4::tree::TerminalNode *IS();
-    antlr4::tree::TerminalNode *IS_OBJECT_ID();
-    antlr4::tree::TerminalNode *JOIN();
-    antlr4::tree::TerminalNode *JSON_FALSE();
-    antlr4::tree::TerminalNode *JSON_TRUE();
-    antlr4::tree::TerminalNode *KEY();
-    antlr4::tree::TerminalNode *KILL();
-    antlr4::tree::TerminalNode *LAST();
-    antlr4::tree::TerminalNode *LAYOUT();
-    antlr4::tree::TerminalNode *LEADING();
-    antlr4::tree::TerminalNode *LEFT();
-    antlr4::tree::TerminalNode *LIFETIME();
-    antlr4::tree::TerminalNode *LIKE();
-    antlr4::tree::TerminalNode *LIMIT();
-    antlr4::tree::TerminalNode *LIVE();
-    antlr4::tree::TerminalNode *LOCAL();
-    antlr4::tree::TerminalNode *LOGS();
-    antlr4::tree::TerminalNode *MATERIALIZE();
-    antlr4::tree::TerminalNode *MATERIALIZED();
-    antlr4::tree::TerminalNode *MAX();
-    antlr4::tree::TerminalNode *MERGES();
-    antlr4::tree::TerminalNode *MIN();
-    antlr4::tree::TerminalNode *MODIFY();
-    antlr4::tree::TerminalNode *MOVE();
-    antlr4::tree::TerminalNode *MUTATION();
-    antlr4::tree::TerminalNode *NO();
-    antlr4::tree::TerminalNode *NOT();
-    antlr4::tree::TerminalNode *NULLS();
-    antlr4::tree::TerminalNode *OFFSET();
-    antlr4::tree::TerminalNode *ON();
-    antlr4::tree::TerminalNode *OPTIMIZE();
-    antlr4::tree::TerminalNode *OR();
-    antlr4::tree::TerminalNode *ORDER();
-    antlr4::tree::TerminalNode *OUTER();
-    antlr4::tree::TerminalNode *OUTFILE();
-    antlr4::tree::TerminalNode *PARTITION();
-    antlr4::tree::TerminalNode *POPULATE();
-    antlr4::tree::TerminalNode *PREWHERE();
-    antlr4::tree::TerminalNode *PRIMARY();
-    antlr4::tree::TerminalNode *RANGE();
-    antlr4::tree::TerminalNode *RELOAD();
-    antlr4::tree::TerminalNode *REMOVE();
-    antlr4::tree::TerminalNode *RENAME();
-    antlr4::tree::TerminalNode *REPLACE();
-    antlr4::tree::TerminalNode *REPLICA();
-    antlr4::tree::TerminalNode *REPLICATED();
-    antlr4::tree::TerminalNode *RIGHT();
-    antlr4::tree::TerminalNode *ROLLUP();
-    antlr4::tree::TerminalNode *SAMPLE();
-    antlr4::tree::TerminalNode *SELECT();
-    antlr4::tree::TerminalNode *SEMI();
-    antlr4::tree::TerminalNode *SENDS();
-    antlr4::tree::TerminalNode *SET();
-    antlr4::tree::TerminalNode *SETTINGS();
-    antlr4::tree::TerminalNode *SHOW();
-    antlr4::tree::TerminalNode *SOURCE();
-    antlr4::tree::TerminalNode *START();
-    antlr4::tree::TerminalNode *STOP();
-    antlr4::tree::TerminalNode *SUBSTRING();
-    antlr4::tree::TerminalNode *SYNC();
-    antlr4::tree::TerminalNode *SYNTAX();
-    antlr4::tree::TerminalNode *SYSTEM();
-    antlr4::tree::TerminalNode *TABLE();
-    antlr4::tree::TerminalNode *TABLES();
-    antlr4::tree::TerminalNode *TEMPORARY();
-    antlr4::tree::TerminalNode *TEST();
-    antlr4::tree::TerminalNode *THEN();
-    antlr4::tree::TerminalNode *TIES();
-    antlr4::tree::TerminalNode *TIMEOUT();
-    antlr4::tree::TerminalNode *TIMESTAMP();
-    antlr4::tree::TerminalNode *TOTALS();
-    antlr4::tree::TerminalNode *TRAILING();
-    antlr4::tree::TerminalNode *TRIM();
-    antlr4::tree::TerminalNode *TRUNCATE();
-    antlr4::tree::TerminalNode *TO();
-    antlr4::tree::TerminalNode *TOP();
-    antlr4::tree::TerminalNode *TTL();
-    antlr4::tree::TerminalNode *TYPE();
-    antlr4::tree::TerminalNode *UNION();
-    antlr4::tree::TerminalNode *UPDATE();
-    antlr4::tree::TerminalNode *USE();
-    antlr4::tree::TerminalNode *USING();
-    antlr4::tree::TerminalNode *UUID();
-    antlr4::tree::TerminalNode *VALUES();
-    antlr4::tree::TerminalNode *VIEW();
-    antlr4::tree::TerminalNode *VOLUME();
-    antlr4::tree::TerminalNode *WATCH();
-    antlr4::tree::TerminalNode *WHEN();
-    antlr4::tree::TerminalNode *WHERE();
-    antlr4::tree::TerminalNode *WITH();
-
-    virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override;
-
-  };
-
-  KeywordContext* keyword();
-
-  class KeywordForAliasContext : public antlr4::ParserRuleContext {
-  public:
-    KeywordForAliasContext(antlr4::ParserRuleContext *parent, size_t invokingState);
-    virtual size_t getRuleIndex() const override;
-    antlr4::tree::TerminalNode *DATE();
-    antlr4::tree::TerminalNode *FIRST();
-    antlr4::tree::TerminalNode *ID();
-    antlr4::tree::TerminalNode *KEY();
-
-    virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override;
-
-  };
-
-  KeywordForAliasContext* keywordForAlias();
-
-  class AliasContext : public antlr4::ParserRuleContext {
-  public:
-    AliasContext(antlr4::ParserRuleContext *parent, size_t invokingState);
-    virtual size_t getRuleIndex() const override;
-    antlr4::tree::TerminalNode *IDENTIFIER();
-    KeywordForAliasContext *keywordForAlias();
-
-    virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override;
-
-  };
-
-  AliasContext* alias();
-
-  class IdentifierContext : public antlr4::ParserRuleContext {
-  public:
-    IdentifierContext(antlr4::ParserRuleContext *parent, size_t invokingState);
-    virtual size_t getRuleIndex() const override;
-    antlr4::tree::TerminalNode *IDENTIFIER();
-    IntervalContext *interval();
-    KeywordContext *keyword();
-
-    virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override;
-
-  };
-
-  IdentifierContext* identifier();
-
-  class IdentifierOrNullContext : public antlr4::ParserRuleContext {
-  public:
-    IdentifierOrNullContext(antlr4::ParserRuleContext *parent, size_t invokingState);
-    virtual size_t getRuleIndex() const override;
-    IdentifierContext *identifier();
-    antlr4::tree::TerminalNode *NULL_SQL();
-
-    virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override;
-
-  };
-
-  IdentifierOrNullContext* identifierOrNull();
-
-  class EnumValueContext : public antlr4::ParserRuleContext {
-  public:
-    EnumValueContext(antlr4::ParserRuleContext *parent, size_t invokingState);
-    virtual size_t getRuleIndex() const override;
-    antlr4::tree::TerminalNode *STRING_LITERAL();
-    antlr4::tree::TerminalNode *EQ_SINGLE();
-    NumberLiteralContext *numberLiteral();
-
-    virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override;
-
-  };
-
-  EnumValueContext* enumValue();
-
-
-  virtual bool sempred(antlr4::RuleContext *_localctx, size_t ruleIndex, size_t predicateIndex) override;
-  bool dictionaryAttrDfntSempred(DictionaryAttrDfntContext *_localctx, size_t predicateIndex);
-  bool dictionaryEngineClauseSempred(DictionaryEngineClauseContext *_localctx, size_t predicateIndex);
-  bool engineClauseSempred(EngineClauseContext *_localctx, size_t predicateIndex);
-  bool joinExprSempred(JoinExprContext *_localctx, size_t predicateIndex);
-  bool columnExprSempred(ColumnExprContext *_localctx, size_t predicateIndex);
-  bool tableExprSempred(TableExprContext *_localctx, size_t predicateIndex);
-
-private:
-  static std::vector _decisionToDFA;
-  static antlr4::atn::PredictionContextCache _sharedContextCache;
-  static std::vector _ruleNames;
-  static std::vector _tokenNames;
-
-  static std::vector _literalNames;
-  static std::vector _symbolicNames;
-  static antlr4::dfa::Vocabulary _vocabulary;
-  static antlr4::atn::ATN _atn;
-  static std::vector _serializedATN;
-
-
-  struct Initializer {
-    Initializer();
-  };
-  static Initializer _init;
-};
-
-} // namespace DB
diff --git a/src/Parsers/New/ClickHouseParserVisitor.cpp b/src/Parsers/New/ClickHouseParserVisitor.cpp
deleted file mode 100644
index ad0990faef9..00000000000
--- a/src/Parsers/New/ClickHouseParserVisitor.cpp
+++ /dev/null
@@ -1,9 +0,0 @@
-
-// Generated from ClickHouseParser.g4 by ANTLR 4.7.2
-
-
-#include "ClickHouseParserVisitor.h"
-
-
-using namespace DB;
-
diff --git a/src/Parsers/New/ClickHouseParserVisitor.h b/src/Parsers/New/ClickHouseParserVisitor.h
deleted file mode 100644
index 088fdd7f0ca..00000000000
--- a/src/Parsers/New/ClickHouseParserVisitor.h
+++ /dev/null
@@ -1,422 +0,0 @@
-
-// Generated from ClickHouseParser.g4 by ANTLR 4.7.2
-
-#pragma once
-
-
-#include "antlr4-runtime.h"
-#include "ClickHouseParser.h"
-
-
-namespace DB {
-
-/**
- * This class defines an abstract visitor for a parse tree
- * produced by ClickHouseParser.
- */
-class ClickHouseParserVisitor : public antlr4::tree::AbstractParseTreeVisitor {
-public:
-
-  /**
-   * Visit parse trees produced by ClickHouseParser.
-   */
-    virtual antlrcpp::Any visitQueryStmt(ClickHouseParser::QueryStmtContext *context) = 0;
-
-    virtual antlrcpp::Any visitQuery(ClickHouseParser::QueryContext *context) = 0;
-
-    virtual antlrcpp::Any visitAlterTableStmt(ClickHouseParser::AlterTableStmtContext *context) = 0;
-
-    virtual antlrcpp::Any visitAlterTableClauseAddColumn(ClickHouseParser::AlterTableClauseAddColumnContext *context) = 0;
-
-    virtual antlrcpp::Any visitAlterTableClauseAddIndex(ClickHouseParser::AlterTableClauseAddIndexContext *context) = 0;
-
-    virtual antlrcpp::Any visitAlterTableClauseAddProjection(ClickHouseParser::AlterTableClauseAddProjectionContext *context) = 0;
-
-    virtual antlrcpp::Any visitAlterTableClauseAttach(ClickHouseParser::AlterTableClauseAttachContext *context) = 0;
-
-    virtual antlrcpp::Any visitAlterTableClauseClearColumn(ClickHouseParser::AlterTableClauseClearColumnContext *context) = 0;
-
-    virtual antlrcpp::Any visitAlterTableClauseClearIndex(ClickHouseParser::AlterTableClauseClearIndexContext *context) = 0;
-
-    virtual antlrcpp::Any visitAlterTableClauseClearProjection(ClickHouseParser::AlterTableClauseClearProjectionContext *context) = 0;
-
-    virtual antlrcpp::Any visitAlterTableClauseComment(ClickHouseParser::AlterTableClauseCommentContext *context) = 0;
-
-    virtual antlrcpp::Any visitAlterTableClauseDelete(ClickHouseParser::AlterTableClauseDeleteContext *context) = 0;
-
-    virtual antlrcpp::Any visitAlterTableClauseDetach(ClickHouseParser::AlterTableClauseDetachContext *context) = 0;
-
-    virtual antlrcpp::Any visitAlterTableClauseDropColumn(ClickHouseParser::AlterTableClauseDropColumnContext *context) = 0;
-
-    virtual antlrcpp::Any visitAlterTableClauseDropIndex(ClickHouseParser::AlterTableClauseDropIndexContext *context) = 0;
-
-    virtual antlrcpp::Any visitAlterTableClauseDropProjection(ClickHouseParser::AlterTableClauseDropProjectionContext *context) = 0;
-
-    virtual antlrcpp::Any visitAlterTableClauseDropPartition(ClickHouseParser::AlterTableClauseDropPartitionContext *context) = 0;
-
-    virtual antlrcpp::Any visitAlterTableClauseFreezePartition(ClickHouseParser::AlterTableClauseFreezePartitionContext *context) = 0;
-
-    virtual antlrcpp::Any visitAlterTableClauseMaterializeIndex(ClickHouseParser::AlterTableClauseMaterializeIndexContext *context) = 0;
-
-    virtual antlrcpp::Any visitAlterTableClauseMaterializeProjection(ClickHouseParser::AlterTableClauseMaterializeProjectionContext *context) = 0;
-
-    virtual antlrcpp::Any visitAlterTableClauseModifyCodec(ClickHouseParser::AlterTableClauseModifyCodecContext *context) = 0;
-
-    virtual antlrcpp::Any visitAlterTableClauseModifyComment(ClickHouseParser::AlterTableClauseModifyCommentContext *context) = 0;
-
-    virtual antlrcpp::Any visitAlterTableClauseModifyRemove(ClickHouseParser::AlterTableClauseModifyRemoveContext *context) = 0;
-
-    virtual antlrcpp::Any visitAlterTableClauseModify(ClickHouseParser::AlterTableClauseModifyContext *context) = 0;
-
-    virtual antlrcpp::Any visitAlterTableClauseModifyOrderBy(ClickHouseParser::AlterTableClauseModifyOrderByContext *context) = 0;
-
-    virtual antlrcpp::Any visitAlterTableClauseModifyTTL(ClickHouseParser::AlterTableClauseModifyTTLContext *context) = 0;
-
-    virtual antlrcpp::Any visitAlterTableClauseMovePartition(ClickHouseParser::AlterTableClauseMovePartitionContext *context) = 0;
-
-    virtual antlrcpp::Any visitAlterTableClauseRemoveTTL(ClickHouseParser::AlterTableClauseRemoveTTLContext *context) = 0;
-
-    virtual antlrcpp::Any visitAlterTableClauseRename(ClickHouseParser::AlterTableClauseRenameContext *context) = 0;
-
-    virtual antlrcpp::Any visitAlterTableClauseReplace(ClickHouseParser::AlterTableClauseReplaceContext *context) = 0;
-
-    virtual antlrcpp::Any visitAlterTableClauseUpdate(ClickHouseParser::AlterTableClauseUpdateContext *context) = 0;
-
-    virtual antlrcpp::Any visitAssignmentExprList(ClickHouseParser::AssignmentExprListContext *context) = 0;
-
-    virtual antlrcpp::Any visitAssignmentExpr(ClickHouseParser::AssignmentExprContext *context) = 0;
-
-    virtual antlrcpp::Any visitTableColumnPropertyType(ClickHouseParser::TableColumnPropertyTypeContext *context) = 0;
-
-    virtual antlrcpp::Any visitPartitionClause(ClickHouseParser::PartitionClauseContext *context) = 0;
-
-    virtual antlrcpp::Any visitAttachDictionaryStmt(ClickHouseParser::AttachDictionaryStmtContext *context) = 0;
-
-    virtual antlrcpp::Any visitCheckStmt(ClickHouseParser::CheckStmtContext *context) = 0;
-
-    virtual antlrcpp::Any visitCreateDatabaseStmt(ClickHouseParser::CreateDatabaseStmtContext *context) = 0;
-
-    virtual antlrcpp::Any visitCreateDictionaryStmt(ClickHouseParser::CreateDictionaryStmtContext *context) = 0;
-
-    virtual antlrcpp::Any visitCreateLiveViewStmt(ClickHouseParser::CreateLiveViewStmtContext *context) = 0;
-
-    virtual antlrcpp::Any visitCreateMaterializedViewStmt(ClickHouseParser::CreateMaterializedViewStmtContext *context) = 0;
-
-    virtual antlrcpp::Any visitCreateTableStmt(ClickHouseParser::CreateTableStmtContext *context) = 0;
-
-    virtual antlrcpp::Any visitCreateViewStmt(ClickHouseParser::CreateViewStmtContext *context) = 0;
-
-    virtual antlrcpp::Any visitDictionarySchemaClause(ClickHouseParser::DictionarySchemaClauseContext *context) = 0;
-
-    virtual antlrcpp::Any visitDictionaryAttrDfnt(ClickHouseParser::DictionaryAttrDfntContext *context) = 0;
-
-    virtual antlrcpp::Any visitDictionaryEngineClause(ClickHouseParser::DictionaryEngineClauseContext *context) = 0;
-
-    virtual antlrcpp::Any visitDictionaryPrimaryKeyClause(ClickHouseParser::DictionaryPrimaryKeyClauseContext *context) = 0;
-
-    virtual antlrcpp::Any visitDictionaryArgExpr(ClickHouseParser::DictionaryArgExprContext *context) = 0;
-
-    virtual antlrcpp::Any visitSourceClause(ClickHouseParser::SourceClauseContext *context) = 0;
-
-    virtual antlrcpp::Any visitLifetimeClause(ClickHouseParser::LifetimeClauseContext *context) = 0;
-
-    virtual antlrcpp::Any visitLayoutClause(ClickHouseParser::LayoutClauseContext *context) = 0;
-
-    virtual antlrcpp::Any visitRangeClause(ClickHouseParser::RangeClauseContext *context) = 0;
-
-    virtual antlrcpp::Any visitDictionarySettingsClause(ClickHouseParser::DictionarySettingsClauseContext *context) = 0;
-
-    virtual antlrcpp::Any visitClusterClause(ClickHouseParser::ClusterClauseContext *context) = 0;
-
-    virtual antlrcpp::Any visitUuidClause(ClickHouseParser::UuidClauseContext *context) = 0;
-
-    virtual antlrcpp::Any visitDestinationClause(ClickHouseParser::DestinationClauseContext *context) = 0;
-
-    virtual antlrcpp::Any visitSubqueryClause(ClickHouseParser::SubqueryClauseContext *context) = 0;
-
-    virtual antlrcpp::Any visitSchemaDescriptionClause(ClickHouseParser::SchemaDescriptionClauseContext *context) = 0;
-
-    virtual antlrcpp::Any visitSchemaAsTableClause(ClickHouseParser::SchemaAsTableClauseContext *context) = 0;
-
-    virtual antlrcpp::Any visitSchemaAsFunctionClause(ClickHouseParser::SchemaAsFunctionClauseContext *context) = 0;
-
-    virtual antlrcpp::Any visitEngineClause(ClickHouseParser::EngineClauseContext *context) = 0;
-
-    virtual antlrcpp::Any visitPartitionByClause(ClickHouseParser::PartitionByClauseContext *context) = 0;
-
-    virtual antlrcpp::Any visitPrimaryKeyClause(ClickHouseParser::PrimaryKeyClauseContext *context) = 0;
-
-    virtual antlrcpp::Any visitSampleByClause(ClickHouseParser::SampleByClauseContext *context) = 0;
-
-    virtual antlrcpp::Any visitTtlClause(ClickHouseParser::TtlClauseContext *context) = 0;
-
-    virtual antlrcpp::Any visitEngineExpr(ClickHouseParser::EngineExprContext *context) = 0;
-
-    virtual antlrcpp::Any visitTableElementExprColumn(ClickHouseParser::TableElementExprColumnContext *context) = 0;
-
-    virtual antlrcpp::Any visitTableElementExprConstraint(ClickHouseParser::TableElementExprConstraintContext *context) = 0;
-
-    virtual antlrcpp::Any visitTableElementExprIndex(ClickHouseParser::TableElementExprIndexContext *context) = 0;
-
-    virtual antlrcpp::Any visitTableElementExprProjection(ClickHouseParser::TableElementExprProjectionContext *context) = 0;
-
-    virtual antlrcpp::Any visitTableColumnDfnt(ClickHouseParser::TableColumnDfntContext *context) = 0;
-
-    virtual antlrcpp::Any visitTableColumnPropertyExpr(ClickHouseParser::TableColumnPropertyExprContext *context) = 0;
-
-    virtual antlrcpp::Any visitTableIndexDfnt(ClickHouseParser::TableIndexDfntContext *context) = 0;
-
-    virtual antlrcpp::Any visitTableProjectionDfnt(ClickHouseParser::TableProjectionDfntContext *context) = 0;
-
-    virtual antlrcpp::Any visitCodecExpr(ClickHouseParser::CodecExprContext *context) = 0;
-
-    virtual antlrcpp::Any visitCodecArgExpr(ClickHouseParser::CodecArgExprContext *context) = 0;
-
-    virtual antlrcpp::Any visitTtlExpr(ClickHouseParser::TtlExprContext *context) = 0;
-
-    virtual antlrcpp::Any visitDescribeStmt(ClickHouseParser::DescribeStmtContext *context) = 0;
-
-    virtual antlrcpp::Any visitDropDatabaseStmt(ClickHouseParser::DropDatabaseStmtContext *context) = 0;
-
-    virtual antlrcpp::Any visitDropTableStmt(ClickHouseParser::DropTableStmtContext *context) = 0;
-
-    virtual antlrcpp::Any visitExistsDatabaseStmt(ClickHouseParser::ExistsDatabaseStmtContext *context) = 0;
-
-    virtual antlrcpp::Any visitExistsTableStmt(ClickHouseParser::ExistsTableStmtContext *context) = 0;
-
-    virtual antlrcpp::Any visitExplainASTStmt(ClickHouseParser::ExplainASTStmtContext *context) = 0;
-
-    virtual antlrcpp::Any visitExplainSyntaxStmt(ClickHouseParser::ExplainSyntaxStmtContext *context) = 0;
-
-    virtual antlrcpp::Any visitInsertStmt(ClickHouseParser::InsertStmtContext *context) = 0;
-
-    virtual antlrcpp::Any visitColumnsClause(ClickHouseParser::ColumnsClauseContext *context) = 0;
-
-    virtual antlrcpp::Any visitDataClauseFormat(ClickHouseParser::DataClauseFormatContext *context) = 0;
-
-    virtual antlrcpp::Any visitDataClauseValues(ClickHouseParser::DataClauseValuesContext *context) = 0;
-
-    virtual antlrcpp::Any visitDataClauseSelect(ClickHouseParser::DataClauseSelectContext *context) = 0;
-
-    virtual antlrcpp::Any visitKillMutationStmt(ClickHouseParser::KillMutationStmtContext *context) = 0;
-
-    virtual antlrcpp::Any visitOptimizeStmt(ClickHouseParser::OptimizeStmtContext *context) = 0;
-
-    virtual antlrcpp::Any visitRenameStmt(ClickHouseParser::RenameStmtContext *context) = 0;
-
-    virtual antlrcpp::Any visitProjectionSelectStmt(ClickHouseParser::ProjectionSelectStmtContext *context) = 0;
-
-    virtual antlrcpp::Any visitSelectUnionStmt(ClickHouseParser::SelectUnionStmtContext *context) = 0;
-
-    virtual antlrcpp::Any visitSelectStmtWithParens(ClickHouseParser::SelectStmtWithParensContext *context) = 0;
-
-    virtual antlrcpp::Any visitSelectStmt(ClickHouseParser::SelectStmtContext *context) = 0;
-
-    virtual antlrcpp::Any visitWithClause(ClickHouseParser::WithClauseContext *context) = 0;
-
-    virtual antlrcpp::Any visitTopClause(ClickHouseParser::TopClauseContext *context) = 0;
-
-    virtual antlrcpp::Any visitFromClause(ClickHouseParser::FromClauseContext *context) = 0;
-
-    virtual antlrcpp::Any visitArrayJoinClause(ClickHouseParser::ArrayJoinClauseContext *context) = 0;
-
-    virtual antlrcpp::Any visitPrewhereClause(ClickHouseParser::PrewhereClauseContext *context) = 0;
-
-    virtual antlrcpp::Any visitWhereClause(ClickHouseParser::WhereClauseContext *context) = 0;
-
-    virtual antlrcpp::Any visitGroupByClause(ClickHouseParser::GroupByClauseContext *context) = 0;
-
-    virtual antlrcpp::Any visitHavingClause(ClickHouseParser::HavingClauseContext *context) = 0;
-
-    virtual antlrcpp::Any visitOrderByClause(ClickHouseParser::OrderByClauseContext *context) = 0;
-
-    virtual antlrcpp::Any visitProjectionOrderByClause(ClickHouseParser::ProjectionOrderByClauseContext *context) = 0;
-
-    virtual antlrcpp::Any visitLimitByClause(ClickHouseParser::LimitByClauseContext *context) = 0;
-
-    virtual antlrcpp::Any visitLimitClause(ClickHouseParser::LimitClauseContext *context) = 0;
-
-    virtual antlrcpp::Any visitSettingsClause(ClickHouseParser::SettingsClauseContext *context) = 0;
-
-    virtual antlrcpp::Any visitJoinExprOp(ClickHouseParser::JoinExprOpContext *context) = 0;
-
-    virtual antlrcpp::Any visitJoinExprTable(ClickHouseParser::JoinExprTableContext *context) = 0;
-
-    virtual antlrcpp::Any visitJoinExprParens(ClickHouseParser::JoinExprParensContext *context) = 0;
-
-    virtual antlrcpp::Any visitJoinExprCrossOp(ClickHouseParser::JoinExprCrossOpContext *context) = 0;
-
-    virtual antlrcpp::Any visitJoinOpInner(ClickHouseParser::JoinOpInnerContext *context) = 0;
-
-    virtual antlrcpp::Any visitJoinOpLeftRight(ClickHouseParser::JoinOpLeftRightContext *context) = 0;
-
-    virtual antlrcpp::Any visitJoinOpFull(ClickHouseParser::JoinOpFullContext *context) = 0;
-
-    virtual antlrcpp::Any visitJoinOpCross(ClickHouseParser::JoinOpCrossContext *context) = 0;
-
-    virtual antlrcpp::Any visitJoinConstraintClause(ClickHouseParser::JoinConstraintClauseContext *context) = 0;
-
-    virtual antlrcpp::Any visitSampleClause(ClickHouseParser::SampleClauseContext *context) = 0;
-
-    virtual antlrcpp::Any visitLimitExpr(ClickHouseParser::LimitExprContext *context) = 0;
-
-    virtual antlrcpp::Any visitOrderExprList(ClickHouseParser::OrderExprListContext *context) = 0;
-
-    virtual antlrcpp::Any visitOrderExpr(ClickHouseParser::OrderExprContext *context) = 0;
-
-    virtual antlrcpp::Any visitRatioExpr(ClickHouseParser::RatioExprContext *context) = 0;
-
-    virtual antlrcpp::Any visitSettingExprList(ClickHouseParser::SettingExprListContext *context) = 0;
-
-    virtual antlrcpp::Any visitSettingExpr(ClickHouseParser::SettingExprContext *context) = 0;
-
-    virtual antlrcpp::Any visitSetStmt(ClickHouseParser::SetStmtContext *context) = 0;
-
-    virtual antlrcpp::Any visitShowCreateDatabaseStmt(ClickHouseParser::ShowCreateDatabaseStmtContext *context) = 0;
-
-    virtual antlrcpp::Any visitShowCreateDictionaryStmt(ClickHouseParser::ShowCreateDictionaryStmtContext *context) = 0;
-
-    virtual antlrcpp::Any visitShowCreateTableStmt(ClickHouseParser::ShowCreateTableStmtContext *context) = 0;
-
-    virtual antlrcpp::Any visitShowDatabasesStmt(ClickHouseParser::ShowDatabasesStmtContext *context) = 0;
-
-    virtual antlrcpp::Any visitShowDictionariesStmt(ClickHouseParser::ShowDictionariesStmtContext *context) = 0;
-
-    virtual antlrcpp::Any visitShowTablesStmt(ClickHouseParser::ShowTablesStmtContext *context) = 0;
-
-    virtual antlrcpp::Any visitSystemStmt(ClickHouseParser::SystemStmtContext *context) = 0;
-
-    virtual antlrcpp::Any visitTruncateStmt(ClickHouseParser::TruncateStmtContext *context) = 0;
-
-    virtual antlrcpp::Any visitUseStmt(ClickHouseParser::UseStmtContext *context) = 0;
-
-    virtual antlrcpp::Any visitWatchStmt(ClickHouseParser::WatchStmtContext *context) = 0;
-
-    virtual antlrcpp::Any visitColumnTypeExprSimple(ClickHouseParser::ColumnTypeExprSimpleContext *context) = 0;
-
-    virtual antlrcpp::Any visitColumnTypeExprNested(ClickHouseParser::ColumnTypeExprNestedContext *context) = 0;
-
-    virtual antlrcpp::Any visitColumnTypeExprEnum(ClickHouseParser::ColumnTypeExprEnumContext *context) = 0;
-
-    virtual antlrcpp::Any visitColumnTypeExprComplex(ClickHouseParser::ColumnTypeExprComplexContext *context) = 0;
-
-    virtual antlrcpp::Any visitColumnTypeExprParam(ClickHouseParser::ColumnTypeExprParamContext *context) = 0;
-
-    virtual antlrcpp::Any visitColumnExprList(ClickHouseParser::ColumnExprListContext *context) = 0;
-
-    virtual antlrcpp::Any visitColumnsExprAsterisk(ClickHouseParser::ColumnsExprAsteriskContext *context) = 0;
-
-    virtual antlrcpp::Any visitColumnsExprSubquery(ClickHouseParser::ColumnsExprSubqueryContext *context) = 0;
-
-    virtual antlrcpp::Any visitColumnsExprColumn(ClickHouseParser::ColumnsExprColumnContext *context) = 0;
-
-    virtual antlrcpp::Any visitColumnExprTernaryOp(ClickHouseParser::ColumnExprTernaryOpContext *context) = 0;
-
-    virtual antlrcpp::Any visitColumnExprAlias(ClickHouseParser::ColumnExprAliasContext *context) = 0;
-
-    virtual antlrcpp::Any visitColumnExprExtract(ClickHouseParser::ColumnExprExtractContext *context) = 0;
-
-    virtual antlrcpp::Any visitColumnExprNegate(ClickHouseParser::ColumnExprNegateContext *context) = 0;
-
-    virtual antlrcpp::Any visitColumnExprSubquery(ClickHouseParser::ColumnExprSubqueryContext *context) = 0;
-
-    virtual antlrcpp::Any visitColumnExprLiteral(ClickHouseParser::ColumnExprLiteralContext *context) = 0;
-
-    virtual antlrcpp::Any visitColumnExprArray(ClickHouseParser::ColumnExprArrayContext *context) = 0;
-
-    virtual antlrcpp::Any visitColumnExprSubstring(ClickHouseParser::ColumnExprSubstringContext *context) = 0;
-
-    virtual antlrcpp::Any visitColumnExprCast(ClickHouseParser::ColumnExprCastContext *context) = 0;
-
-    virtual antlrcpp::Any visitColumnExprOr(ClickHouseParser::ColumnExprOrContext *context) = 0;
-
-    virtual antlrcpp::Any visitColumnExprPrecedence1(ClickHouseParser::ColumnExprPrecedence1Context *context) = 0;
-
-    virtual antlrcpp::Any visitColumnExprPrecedence2(ClickHouseParser::ColumnExprPrecedence2Context *context) = 0;
-
-    virtual antlrcpp::Any visitColumnExprPrecedence3(ClickHouseParser::ColumnExprPrecedence3Context *context) = 0;
-
-    virtual antlrcpp::Any visitColumnExprInterval(ClickHouseParser::ColumnExprIntervalContext *context) = 0;
-
-    virtual antlrcpp::Any visitColumnExprIsNull(ClickHouseParser::ColumnExprIsNullContext *context) = 0;
-
-    virtual antlrcpp::Any visitColumnExprTrim(ClickHouseParser::ColumnExprTrimContext *context) = 0;
-
-    virtual antlrcpp::Any visitColumnExprTuple(ClickHouseParser::ColumnExprTupleContext *context) = 0;
-
-    virtual antlrcpp::Any visitColumnExprArrayAccess(ClickHouseParser::ColumnExprArrayAccessContext *context) = 0;
-
-    virtual antlrcpp::Any visitColumnExprBetween(ClickHouseParser::ColumnExprBetweenContext *context) = 0;
-
-    virtual antlrcpp::Any visitColumnExprParens(ClickHouseParser::ColumnExprParensContext *context) = 0;
-
-    virtual antlrcpp::Any visitColumnExprTimestamp(ClickHouseParser::ColumnExprTimestampContext *context) = 0;
-
-    virtual antlrcpp::Any visitColumnExprAnd(ClickHouseParser::ColumnExprAndContext *context) = 0;
-
-    virtual antlrcpp::Any visitColumnExprTupleAccess(ClickHouseParser::ColumnExprTupleAccessContext *context) = 0;
-
-    virtual antlrcpp::Any visitColumnExprCase(ClickHouseParser::ColumnExprCaseContext *context) = 0;
-
-    virtual antlrcpp::Any visitColumnExprDate(ClickHouseParser::ColumnExprDateContext *context) = 0;
-
-    virtual antlrcpp::Any visitColumnExprNot(ClickHouseParser::ColumnExprNotContext *context) = 0;
-
-    virtual antlrcpp::Any visitColumnExprIdentifier(ClickHouseParser::ColumnExprIdentifierContext *context) = 0;
-
-    virtual antlrcpp::Any visitColumnExprFunction(ClickHouseParser::ColumnExprFunctionContext *context) = 0;
-
-    virtual antlrcpp::Any visitColumnExprAsterisk(ClickHouseParser::ColumnExprAsteriskContext *context) = 0;
-
-    virtual antlrcpp::Any visitColumnArgList(ClickHouseParser::ColumnArgListContext *context) = 0;
-
-    virtual antlrcpp::Any visitColumnArgExpr(ClickHouseParser::ColumnArgExprContext *context) = 0;
-
-    virtual antlrcpp::Any visitColumnLambdaExpr(ClickHouseParser::ColumnLambdaExprContext *context) = 0;
-
-    virtual antlrcpp::Any visitColumnIdentifier(ClickHouseParser::ColumnIdentifierContext *context) = 0;
-
-    virtual antlrcpp::Any visitNestedIdentifier(ClickHouseParser::NestedIdentifierContext *context) = 0;
-
-    virtual antlrcpp::Any visitTableExprIdentifier(ClickHouseParser::TableExprIdentifierContext *context) = 0;
-
-    virtual antlrcpp::Any visitTableExprSubquery(ClickHouseParser::TableExprSubqueryContext *context) = 0;
-
-    virtual antlrcpp::Any visitTableExprAlias(ClickHouseParser::TableExprAliasContext *context) = 0;
-
-    virtual antlrcpp::Any visitTableExprFunction(ClickHouseParser::TableExprFunctionContext *context) = 0;
-
-    virtual antlrcpp::Any visitTableFunctionExpr(ClickHouseParser::TableFunctionExprContext *context) = 0;
-
-    virtual antlrcpp::Any visitTableIdentifier(ClickHouseParser::TableIdentifierContext *context) = 0;
-
-    virtual antlrcpp::Any visitTableArgList(ClickHouseParser::TableArgListContext *context) = 0;
-
-    virtual antlrcpp::Any visitTableArgExpr(ClickHouseParser::TableArgExprContext *context) = 0;
-
-    virtual antlrcpp::Any visitDatabaseIdentifier(ClickHouseParser::DatabaseIdentifierContext *context) = 0;
-
-    virtual antlrcpp::Any visitFloatingLiteral(ClickHouseParser::FloatingLiteralContext *context) = 0;
-
-    virtual antlrcpp::Any visitNumberLiteral(ClickHouseParser::NumberLiteralContext *context) = 0;
-
-    virtual antlrcpp::Any visitLiteral(ClickHouseParser::LiteralContext *context) = 0;
-
-    virtual antlrcpp::Any visitInterval(ClickHouseParser::IntervalContext *context) = 0;
-
-    virtual antlrcpp::Any visitKeyword(ClickHouseParser::KeywordContext *context) = 0;
-
-    virtual antlrcpp::Any visitKeywordForAlias(ClickHouseParser::KeywordForAliasContext *context) = 0;
-
-    virtual antlrcpp::Any visitAlias(ClickHouseParser::AliasContext *context) = 0;
-
-    virtual antlrcpp::Any visitIdentifier(ClickHouseParser::IdentifierContext *context) = 0;
-
-    virtual antlrcpp::Any visitIdentifierOrNull(ClickHouseParser::IdentifierOrNullContext *context) = 0;
-
-    virtual antlrcpp::Any visitEnumValue(ClickHouseParser::EnumValueContext *context) = 0;
-
-
-};
-
-} // namespace DB
diff --git a/src/Parsers/New/LexerErrorListener.cpp b/src/Parsers/New/LexerErrorListener.cpp
deleted file mode 100644
index ed6dc358c52..00000000000
--- a/src/Parsers/New/LexerErrorListener.cpp
+++ /dev/null
@@ -1,26 +0,0 @@
-#include
-#include
-
-#include
-
-
-using namespace antlr4;
-
-namespace DB
-{
-
-namespace ErrorCodes
-{
-
-extern int SYNTAX_ERROR;
-
-}
-
-void LexerErrorListener::syntaxError(Recognizer *, Token *, size_t, size_t, const std::string & message, std::exception_ptr)
-{
-    LOG_ERROR(&Poco::Logger::get("ClickHouseLexer"), "Lexer error: {}", message);
-
-    throw DB::Exception("Can't recognize input: " + message, ErrorCodes::SYNTAX_ERROR);
-}
-
-}
diff --git a/src/Parsers/New/LexerErrorListener.h b/src/Parsers/New/LexerErrorListener.h
deleted file mode 100644
index 62445ffb166..00000000000
--- a/src/Parsers/New/LexerErrorListener.h
+++ /dev/null
@@ -1,21 +0,0 @@
-#pragma once
-
-#include
-
-
-namespace DB
-{
-
-class LexerErrorListener : public antlr4::BaseErrorListener
-{
-public:
-    void syntaxError(
-        antlr4::Recognizer * recognizer,
-        antlr4::Token * offending_symbol,
-        size_t line,
-        size_t pos,
-        const std::string & message,
-        std::exception_ptr e) override;
-};
-
-}
diff --git a/src/Parsers/New/ParseTreeVisitor.cpp b/src/Parsers/New/ParseTreeVisitor.cpp
deleted file mode 100644
index a7c7a2758eb..00000000000
--- a/src/Parsers/New/ParseTreeVisitor.cpp
+++ /dev/null
@@ -1,150 +0,0 @@
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-
-// Include last, because antlr-runtime undefines EOF macros, which is required in boost multiprecision numbers.
-#include
-
-namespace DB
-{
-
-using namespace AST;
-
-antlrcpp::Any ParseTreeVisitor::visitQueryStmt(ClickHouseParser::QueryStmtContext *ctx)
-{
-    if (ctx->insertStmt()) return std::static_pointer_cast(visit(ctx->insertStmt()).as>());
-
-    auto query = visit(ctx->query()).as>();
-
-    if (ctx->OUTFILE()) query->setOutFile(Literal::createString(ctx->STRING_LITERAL()));
-    if (ctx->FORMAT()) query->setFormat(visit(ctx->identifierOrNull()));
-
-    return query;
-}
-
-antlrcpp::Any ParseTreeVisitor::visitQuery(ClickHouseParser::QueryContext *ctx)
-{
-    auto query = visit(ctx->children[0]);
-
-#define TRY_POINTER_CAST(TYPE) if (query.is>()) return std::static_pointer_cast(query.as>());
-    TRY_POINTER_CAST(AlterTableQuery)
-    TRY_POINTER_CAST(AttachQuery)
-    TRY_POINTER_CAST(CheckQuery)
-    TRY_POINTER_CAST(CreateDatabaseQuery)
-    TRY_POINTER_CAST(CreateDictionaryQuery)
-    TRY_POINTER_CAST(CreateLiveViewQuery)
-    TRY_POINTER_CAST(CreateMaterializedViewQuery)
-    TRY_POINTER_CAST(CreateTableQuery)
-    TRY_POINTER_CAST(CreateViewQuery)
-    TRY_POINTER_CAST(DescribeQuery)
-    TRY_POINTER_CAST(DropQuery)
-    TRY_POINTER_CAST(ExistsQuery)
-    TRY_POINTER_CAST(ExplainQuery)
-    TRY_POINTER_CAST(KillQuery)
-    TRY_POINTER_CAST(OptimizeQuery)
-    TRY_POINTER_CAST(RenameQuery)
-    TRY_POINTER_CAST(SelectUnionQuery)
-    TRY_POINTER_CAST(SetQuery)
-    TRY_POINTER_CAST(ShowQuery)
-    TRY_POINTER_CAST(ShowCreateQuery)
-    TRY_POINTER_CAST(SystemQuery)
-    TRY_POINTER_CAST(TruncateQuery)
-    TRY_POINTER_CAST(UseQuery)
-    TRY_POINTER_CAST(WatchQuery)
-#undef TRY_POINTER_CAST
-
-    throw std::runtime_error("Query is unknown: " + ctx->children[0]->getText());
-
-    __builtin_unreachable();
-}
-
-antlrcpp::Any ParseTreeVisitor::visitShowDatabasesStmt(ClickHouseParser::ShowDatabasesStmtContext *)
-{
-    auto database_name = std::make_shared(nullptr, std::make_shared("name"));
-    auto expr_list = PtrTo(new ColumnExprList{ColumnExpr::createIdentifier(database_name)});
-    auto select_stmt = std::make_shared(false, SelectStmt::ModifierType::NONE, false, expr_list);
-
-    auto system = std::make_shared(std::make_shared("system"));
-    auto databases = std::make_shared(system, std::make_shared("databases"));
-    auto system_tables = JoinExpr::createTableExpr(TableExpr::createIdentifier(databases), nullptr, false);
-
-    select_stmt->setFromClause(std::make_shared(system_tables));
-
-    return PtrTo(
-        new SelectUnionQuery(std::make_shared>(std::initializer_list>{select_stmt})));
-}
-
-antlrcpp::Any ParseTreeVisitor::visitShowTablesStmt(ClickHouseParser::ShowTablesStmtContext *ctx)
-{
-    // TODO: don't forget to convert TEMPORARY into 'is_temporary=1' condition.
-
-    auto table_name = std::make_shared(nullptr, std::make_shared("name"));
-    auto expr_list = PtrTo(new ColumnExprList{ColumnExpr::createIdentifier(table_name)});
-    auto select_stmt = std::make_shared(false, SelectStmt::ModifierType::NONE, false, expr_list);
-
-    auto and_args = PtrTo(new ColumnExprList{ColumnExpr::createLiteral(Literal::createNumber("1"))});
-
-    auto current_database = ColumnExpr::createLiteral(Literal::createString(current_database_name));
-    if (ctx->databaseIdentifier())
-    {
-        current_database = ColumnExpr::createLiteral(Literal::createString(visit(ctx->databaseIdentifier()).as>()->getName()));
-    }
-    auto database = std::make_shared(nullptr, std::make_shared("database"));
-    auto equals_args = PtrTo(new ColumnExprList{
-        ColumnExpr::createIdentifier(database),
-        current_database
-    });
-    and_args->push(ColumnExpr::createFunction(std::make_shared("equals"), nullptr, equals_args));
-
-    if (ctx->LIKE())
-    {
-        auto args = PtrTo(new ColumnExprList{
-            ColumnExpr::createIdentifier(table_name), ColumnExpr::createLiteral(Literal::createString(ctx->STRING_LITERAL()))});
-        and_args->push(ColumnExpr::createFunction(std::make_shared("like"), nullptr, args));
-    }
-    else if (ctx->whereClause())
-        and_args->push(visit(ctx->whereClause()->columnExpr()));
-
-    auto system = std::make_shared(std::make_shared("system"));
-    auto tables = std::make_shared(system, std::make_shared("tables"));
-    auto system_tables = JoinExpr::createTableExpr(TableExpr::createIdentifier(tables), nullptr, false);
-
-    select_stmt->setFromClause(std::make_shared(system_tables));
-    select_stmt->setWhereClause(
-        std::make_shared(ColumnExpr::createFunction(std::make_shared("and"), nullptr, and_args)));
-    select_stmt->setLimitClause(ctx->limitClause() ? visit(ctx->limitClause()).as>() : nullptr);
-
-    return PtrTo(
-        new SelectUnionQuery(std::make_shared>(std::initializer_list>{select_stmt})));
-}
-
-}
diff --git a/src/Parsers/New/ParseTreeVisitor.h b/src/Parsers/New/ParseTreeVisitor.h
deleted file mode 100644
index 35d5ae9b12e..00000000000
--- a/src/Parsers/New/ParseTreeVisitor.h
+++ /dev/null
@@ -1,304 +0,0 @@
-#pragma once
-
-#include
-
-
-namespace DB {
-
-class ParseTreeVisitor : public ClickHouseParserVisitor
-{
-    const String & current_database_name;
-public:
-    explicit ParseTreeVisitor(const String & database_name) : ClickHouseParserVisitor(), current_database_name(database_name) {}
-    virtual ~ParseTreeVisitor() override = default;
-
-    // Top-level statements
-    antlrcpp::Any visitQueryStmt(ClickHouseParser::QueryStmtContext * ctx) override;
-    antlrcpp::Any visitQuery(ClickHouseParser::QueryContext * ctx) override;
-
-    // AlterTableQuery
-    antlrcpp::Any visitAlterTableClauseAddColumn(ClickHouseParser::AlterTableClauseAddColumnContext * ctx) override;
-    antlrcpp::Any visitAlterTableClauseAddIndex(ClickHouseParser::AlterTableClauseAddIndexContext * ctx) override;
-    antlrcpp::Any visitAlterTableClauseAddProjection(ClickHouseParser::AlterTableClauseAddProjectionContext * ctx) override;
-    antlrcpp::Any visitAlterTableClauseAttach(ClickHouseParser::AlterTableClauseAttachContext * ctx) override;
-    antlrcpp::Any visitAlterTableClauseClearColumn(ClickHouseParser::AlterTableClauseClearColumnContext * ctx) override;
-    antlrcpp::Any visitAlterTableClauseClearIndex(ClickHouseParser::AlterTableClauseClearIndexContext * ctx) override;
-    antlrcpp::Any visitAlterTableClauseClearProjection(ClickHouseParser::AlterTableClauseClearProjectionContext * ctx) override;
-    antlrcpp::Any visitAlterTableClauseComment(ClickHouseParser::AlterTableClauseCommentContext * ctx) override;
-    antlrcpp::Any visitAlterTableClauseDelete(ClickHouseParser::AlterTableClauseDeleteContext * ctx) override;
-    antlrcpp::Any visitAlterTableClauseDetach(ClickHouseParser::AlterTableClauseDetachContext * ctx) override;
-    antlrcpp::Any visitAlterTableClauseDropColumn(ClickHouseParser::AlterTableClauseDropColumnContext * ctx) override;
-    antlrcpp::Any visitAlterTableClauseDropIndex(ClickHouseParser::AlterTableClauseDropIndexContext * ctx) override;
-    antlrcpp::Any visitAlterTableClauseDropProjection(ClickHouseParser::AlterTableClauseDropProjectionContext * ctx) override;
-    antlrcpp::Any visitAlterTableClauseDropPartition(ClickHouseParser::AlterTableClauseDropPartitionContext * ctx) override;
-    antlrcpp::Any visitAlterTableClauseFreezePartition(ClickHouseParser::AlterTableClauseFreezePartitionContext * ctx) override;
-    antlrcpp::Any visitAlterTableClauseMaterializeIndex(ClickHouseParser::AlterTableClauseMaterializeIndexContext * ctx) override;
-    antlrcpp::Any visitAlterTableClauseMaterializeProjection(ClickHouseParser::AlterTableClauseMaterializeProjectionContext * ctx) override;
-    antlrcpp::Any visitAlterTableClauseModify(ClickHouseParser::AlterTableClauseModifyContext * ctx) override;
-    antlrcpp::Any visitAlterTableClauseModifyCodec(ClickHouseParser::AlterTableClauseModifyCodecContext * ctx) override;
-    antlrcpp::Any visitAlterTableClauseModifyComment(ClickHouseParser::AlterTableClauseModifyCommentContext * ctx) override;
-    antlrcpp::Any visitAlterTableClauseModifyOrderBy(ClickHouseParser::AlterTableClauseModifyOrderByContext * ctx) override;
-    antlrcpp::Any visitAlterTableClauseModifyRemove(ClickHouseParser::AlterTableClauseModifyRemoveContext * ctx) override;
-    antlrcpp::Any visitAlterTableClauseModifyTTL(ClickHouseParser::AlterTableClauseModifyTTLContext * ctx) override;
-    antlrcpp::Any visitAlterTableClauseMovePartition(ClickHouseParser::AlterTableClauseMovePartitionContext * ctx) override;
-    antlrcpp::Any visitAlterTableClauseRemoveTTL(ClickHouseParser::AlterTableClauseRemoveTTLContext * ctx) override;
-    antlrcpp::Any visitAlterTableClauseRename(ClickHouseParser::AlterTableClauseRenameContext * ctx) override;
-    antlrcpp::Any visitAlterTableClauseReplace(ClickHouseParser::AlterTableClauseReplaceContext * ctx) override;
-    antlrcpp::Any visitAlterTableClauseUpdate(ClickHouseParser::AlterTableClauseUpdateContext * ctx) override;
-    antlrcpp::Any visitAlterTableStmt(ClickHouseParser::AlterTableStmtContext * ctx) override;
-    antlrcpp::Any visitAssignmentExpr(ClickHouseParser::AssignmentExprContext * ctx) override;
-    antlrcpp::Any visitAssignmentExprList(ClickHouseParser::AssignmentExprListContext * ctx) override;
-    antlrcpp::Any visitTableColumnPropertyType(ClickHouseParser::TableColumnPropertyTypeContext * ctx) override;
-
-    // AttachQuery
-    antlrcpp::Any visitAttachDictionaryStmt(ClickHouseParser::AttachDictionaryStmtContext * ctx) override;
-
-    // CheckQuery
-    antlrcpp::Any visitCheckStmt(ClickHouseParser::CheckStmtContext * ctx) override;
-
-    // ColumnExpr
-    antlrcpp::Any visitColumnExprAlias(ClickHouseParser::ColumnExprAliasContext * ctx) override;
-    antlrcpp::Any visitColumnExprAnd(ClickHouseParser::ColumnExprAndContext * ctx) override;
-    antlrcpp::Any visitColumnExprArray(ClickHouseParser::ColumnExprArrayContext * ctx) override;
-    antlrcpp::Any visitColumnExprArrayAccess(ClickHouseParser::ColumnExprArrayAccessContext * ctx) override;
-    antlrcpp::Any visitColumnExprAsterisk(ClickHouseParser::ColumnExprAsteriskContext * ctx) override;
-    antlrcpp::Any visitColumnExprBetween(ClickHouseParser::ColumnExprBetweenContext * ctx) override;
-    antlrcpp::Any visitColumnExprCase(ClickHouseParser::ColumnExprCaseContext * ctx) override;
-    antlrcpp::Any visitColumnExprCast(ClickHouseParser::ColumnExprCastContext * ctx) override;
-    antlrcpp::Any visitColumnExprDate(ClickHouseParser::ColumnExprDateContext * ctx) override;
-    antlrcpp::Any visitColumnExprExtract(ClickHouseParser::ColumnExprExtractContext * ctx) override;
-    antlrcpp::Any visitColumnExprFunction(ClickHouseParser::ColumnExprFunctionContext * ctx) override;
-    antlrcpp::Any visitColumnExprIdentifier(ClickHouseParser::ColumnExprIdentifierContext * ctx) override;
-    antlrcpp::Any visitColumnExprInterval(ClickHouseParser::ColumnExprIntervalContext * ctx) override;
-    antlrcpp::Any visitColumnExprIsNull(ClickHouseParser::ColumnExprIsNullContext * ctx) override;
-    antlrcpp::Any visitColumnExprList(ClickHouseParser::ColumnExprListContext * ctx) override;
-    antlrcpp::Any visitColumnExprLiteral(ClickHouseParser::ColumnExprLiteralContext * ctx) override;
-    antlrcpp::Any visitColumnExprNegate(ClickHouseParser::ColumnExprNegateContext * ctx) override;
-    antlrcpp::Any visitColumnExprNot(ClickHouseParser::ColumnExprNotContext * ctx) override;
-    antlrcpp::Any visitColumnExprOr(ClickHouseParser::ColumnExprOrContext * ctx) override;
-    antlrcpp::Any visitColumnExprParens(ClickHouseParser::ColumnExprParensContext * ctx) override;
-    antlrcpp::Any visitColumnExprPrecedence1(ClickHouseParser::ColumnExprPrecedence1Context * ctx) override;
-    antlrcpp::Any visitColumnExprPrecedence2(ClickHouseParser::ColumnExprPrecedence2Context * ctx) override;
-    antlrcpp::Any visitColumnExprPrecedence3(ClickHouseParser::ColumnExprPrecedence3Context * ctx) override;
-    antlrcpp::Any visitColumnExprSubquery(ClickHouseParser::ColumnExprSubqueryContext * ctx) override;
-    antlrcpp::Any visitColumnExprSubstring(ClickHouseParser::ColumnExprSubstringContext * ctx) override;
-    antlrcpp::Any visitColumnExprTernaryOp(ClickHouseParser::ColumnExprTernaryOpContext * ctx) override;
-    antlrcpp::Any visitColumnExprTimestamp(ClickHouseParser::ColumnExprTimestampContext * ctx) override;
-    antlrcpp::Any visitColumnExprTrim(ClickHouseParser::ColumnExprTrimContext * ctx) override;
-    antlrcpp::Any visitColumnExprTuple(ClickHouseParser::ColumnExprTupleContext * ctx) override;
-    antlrcpp::Any visitColumnExprTupleAccess(ClickHouseParser::ColumnExprTupleAccessContext * ctx) override;
-
-    // ColumnTypeExpr
-    antlrcpp::Any visitColumnTypeExprSimple(ClickHouseParser::ColumnTypeExprSimpleContext * ctx) override;
-    antlrcpp::Any visitColumnTypeExprParam(ClickHouseParser::ColumnTypeExprParamContext * ctx) override;
-    antlrcpp::Any visitColumnTypeExprEnum(ClickHouseParser::ColumnTypeExprEnumContext * ctx) override;
-    antlrcpp::Any visitColumnTypeExprComplex(ClickHouseParser::ColumnTypeExprComplexContext * ctx) override;
-    antlrcpp::Any visitColumnTypeExprNested(ClickHouseParser::ColumnTypeExprNestedContext * ctx) override;
-
-    // CreateDatabaseQuery
-    antlrcpp::Any visitCreateDatabaseStmt(ClickHouseParser::CreateDatabaseStmtContext * ctx) override;
-
-    // CreateDictionaryQuery
-    antlrcpp::Any visitCreateDictionaryStmt(ClickHouseParser::CreateDictionaryStmtContext * ctx) override;
-    antlrcpp::Any visitDictionaryArgExpr(ClickHouseParser::DictionaryArgExprContext * ctx) override;
-    antlrcpp::Any visitDictionaryAttrDfnt(ClickHouseParser::DictionaryAttrDfntContext * ctx) override;
-    antlrcpp::Any visitDictionaryEngineClause(ClickHouseParser::DictionaryEngineClauseContext * ctx) override;
-    antlrcpp::Any visitDictionaryPrimaryKeyClause(ClickHouseParser::DictionaryPrimaryKeyClauseContext * ctx) override;
-    antlrcpp::Any visitDictionarySchemaClause(ClickHouseParser::DictionarySchemaClauseContext * ctx) override;
-    antlrcpp::Any visitDictionarySettingsClause(ClickHouseParser::DictionarySettingsClauseContext * ctx) override;
-    antlrcpp::Any visitLayoutClause(ClickHouseParser::LayoutClauseContext * ctx) override;
-    antlrcpp::Any visitLifetimeClause(ClickHouseParser::LifetimeClauseContext * ctx) override;
-    antlrcpp::Any visitRangeClause(ClickHouseParser::RangeClauseContext * ctx) override;
-    antlrcpp::Any visitSourceClause(ClickHouseParser::SourceClauseContext * ctx) override;
-
-    // CreateLiveViewQuery
-    antlrcpp::Any visitCreateLiveViewStmt(ClickHouseParser::CreateLiveViewStmtContext * ctx) override;
-
-    // CreateMaterializedViewQuery
-    antlrcpp::Any visitCreateMaterializedViewStmt(ClickHouseParser::CreateMaterializedViewStmtContext * ctx) override;
-
-    // CreateTableQuery
-    antlrcpp::Any visitClusterClause(ClickHouseParser::ClusterClauseContext * ctx) override;
-    antlrcpp::Any visitCreateTableStmt(ClickHouseParser::CreateTableStmtContext * ctx) override;
-    antlrcpp::Any visitUuidClause(ClickHouseParser::UuidClauseContext * ctx) override;
-
-    // CreateViewQuery
-    antlrcpp::Any visitCreateViewStmt(ClickHouseParser::CreateViewStmtContext * ctx) override;
-
-    // DescribeQuery
-    antlrcpp::Any visitDescribeStmt(ClickHouseParser::DescribeStmtContext * ctx) override;
-
-    // DropQuery
-    antlrcpp::Any visitDropDatabaseStmt(ClickHouseParser::DropDatabaseStmtContext * ctx) override;
-    antlrcpp::Any visitDropTableStmt(ClickHouseParser::DropTableStmtContext * ctx) override;
-
-    // EngineExpr
-    antlrcpp::Any visitEngineClause(ClickHouseParser::EngineClauseContext * ctx) override;
-    antlrcpp::Any visitEngineExpr(ClickHouseParser::EngineExprContext * ctx) override;
-    antlrcpp::Any visitPartitionByClause(ClickHouseParser::PartitionByClauseContext * ctx) override;
-    antlrcpp::Any visitPrimaryKeyClause(ClickHouseParser::PrimaryKeyClauseContext * ctx) override;
-    antlrcpp::Any visitSampleByClause(ClickHouseParser::SampleByClauseContext * ctx) override;
-    antlrcpp::Any visitTtlClause(ClickHouseParser::TtlClauseContext * ctx) override;
-    antlrcpp::Any visitTtlExpr(ClickHouseParser::TtlExprContext * ctx) override;
-
-    // ExistsQuery
-    antlrcpp::Any visitExistsTableStmt(ClickHouseParser::ExistsTableStmtContext * ctx) override;
-    antlrcpp::Any visitExistsDatabaseStmt(ClickHouseParser::ExistsDatabaseStmtContext * ctx) override;
-
-    // ExplainQuery
-    antlrcpp::Any visitExplainASTStmt(ClickHouseParser::ExplainASTStmtContext * ctx) override;
-    antlrcpp::Any visitExplainSyntaxStmt(ClickHouseParser::ExplainSyntaxStmtContext * ctx) override;
-
-    // Identifier
-    antlrcpp::Any visitTableIdentifier(ClickHouseParser::TableIdentifierContext * ctx) override;
-
-    // InsertQuery
-    antlrcpp::Any visitColumnsClause(ClickHouseParser::ColumnsClauseContext * ctx) override;
-    antlrcpp::Any visitDataClauseFormat(ClickHouseParser::DataClauseFormatContext * ctx) override;
-    antlrcpp::Any visitDataClauseSelect(ClickHouseParser::DataClauseSelectContext * ctx) override;
-    antlrcpp::Any visitDataClauseValues(ClickHouseParser::DataClauseValuesContext * ctx) override;
-    antlrcpp::Any visitInsertStmt(ClickHouseParser::InsertStmtContext * ctx) override;
-
-    // KillQuery
-    antlrcpp::Any visitKillMutationStmt(ClickHouseParser::KillMutationStmtContext * ctx) override;
-
-    // OptimizeQuery
-    antlrcpp::Any visitOptimizeStmt(ClickHouseParser::OptimizeStmtContext * ctx) override;
-
-    // RenameQuery
-    antlrcpp::Any visitRenameStmt(ClickHouseParser::RenameStmtContext * ctx) override;
-
-    // SelectUnionQuery
-    antlrcpp::Any visitProjectionSelectStmt(ClickHouseParser::ProjectionSelectStmtContext * ctx) override;
-    antlrcpp::Any visitSelectStmt(ClickHouseParser::SelectStmtContext * ctx) override;
-    antlrcpp::Any visitSelectStmtWithParens(ClickHouseParser::SelectStmtWithParensContext * ctx) override;
-    antlrcpp::Any visitSelectUnionStmt(ClickHouseParser::SelectUnionStmtContext * ctx) override;
-
-    // SetQuery
-    antlrcpp::Any visitSetStmt(ClickHouseParser::SetStmtContext * ctx) override;
-
-    // ShowCreateQuery
-    antlrcpp::Any visitShowCreateDatabaseStmt(ClickHouseParser::ShowCreateDatabaseStmtContext * ctx) override;
-    antlrcpp::Any visitShowCreateDictionaryStmt(ClickHouseParser::ShowCreateDictionaryStmtContext * ctx) override;
-    antlrcpp::Any visitShowCreateTableStmt(ClickHouseParser::ShowCreateTableStmtContext * ctx) override;
-
-    // ShowQuery
-    antlrcpp::Any visitShowDatabasesStmt(ClickHouseParser::ShowDatabasesStmtContext * ctx) override;
-    antlrcpp::Any visitShowDictionariesStmt(ClickHouseParser::ShowDictionariesStmtContext * ctx) override;
-    antlrcpp::Any visitShowTablesStmt(ClickHouseParser::ShowTablesStmtContext * ctx) override;
-
-    // SystemQuery
-    antlrcpp::Any visitSystemStmt(ClickHouseParser::SystemStmtContext * ctx) override;
-
-    // TableElementExpr
-    antlrcpp::Any visitCodecArgExpr(ClickHouseParser::CodecArgExprContext * ctx) override;
-    antlrcpp::Any visitCodecExpr(ClickHouseParser::CodecExprContext * ctx) override;
-    antlrcpp::Any visitTableColumnDfnt(ClickHouseParser::TableColumnDfntContext * ctx) override;
-    antlrcpp::Any visitTableColumnPropertyExpr(ClickHouseParser::TableColumnPropertyExprContext * ctx) override;
-    antlrcpp::Any visitTableElementExprColumn(ClickHouseParser::TableElementExprColumnContext * ctx) override;
-    antlrcpp::Any visitTableElementExprConstraint(ClickHouseParser::TableElementExprConstraintContext * ctx) override;
-    antlrcpp::Any visitTableElementExprIndex(ClickHouseParser::TableElementExprIndexContext * ctx) override;
-    antlrcpp::Any visitTableElementExprProjection(ClickHouseParser::TableElementExprProjectionContext * ctx) override;
-    antlrcpp::Any visitTableIndexDfnt(ClickHouseParser::TableIndexDfntContext * ctx) override;
-    antlrcpp::Any visitTableProjectionDfnt(ClickHouseParser::TableProjectionDfntContext * ctx) override;
-
-    // TableExpr
-    antlrcpp::Any visitTableArgExpr(ClickHouseParser::TableArgExprContext * ctx) override;
-    antlrcpp::Any visitTableArgList(ClickHouseParser::TableArgListContext * ctx) override;
-    antlrcpp::Any visitTableExprAlias(ClickHouseParser::TableExprAliasContext * ctx) override;
-    antlrcpp::Any visitTableExprFunction(ClickHouseParser::TableExprFunctionContext * ctx) override;
-    antlrcpp::Any visitTableExprIdentifier(ClickHouseParser::TableExprIdentifierContext * ctx) override;
-    antlrcpp::Any visitTableExprSubquery(ClickHouseParser::TableExprSubqueryContext * ctx) override;
-    antlrcpp::Any visitTableFunctionExpr(ClickHouseParser::TableFunctionExprContext * ctx) override;
-
-    // TruncateQuery
-    antlrcpp::Any visitTruncateStmt(ClickHouseParser::TruncateStmtContext * ctx) override;
-
-    // UseQuery
-    antlrcpp::Any visitUseStmt(ClickHouseParser::UseStmtContext * ctx) override;
-
-    // WatchQuery
-    antlrcpp::Any visitWatchStmt(ClickHouseParser::WatchStmtContext * ctx) override;
-
-    // TODO: sort methods below this comment.
-
-    // CREATE clauses
-
-    antlrcpp::Any visitDestinationClause(ClickHouseParser::DestinationClauseContext *ctx) override;
-    antlrcpp::Any visitSchemaDescriptionClause(ClickHouseParser::SchemaDescriptionClauseContext *ctx) override;
-    antlrcpp::Any visitSchemaAsTableClause(ClickHouseParser::SchemaAsTableClauseContext *ctx) override;
-    antlrcpp::Any visitSchemaAsFunctionClause(ClickHouseParser::SchemaAsFunctionClauseContext *ctx) override;
-    antlrcpp::Any visitSubqueryClause(ClickHouseParser::SubqueryClauseContext *ctx) override;
-
-    // OPTIMIZE clauses
-
-    antlrcpp::Any visitPartitionClause(ClickHouseParser::PartitionClauseContext *ctx) override; // returns |PtrTo|
-
-    // SELECT clauses
-
-    antlrcpp::Any visitWithClause(ClickHouseParser::WithClauseContext *ctx) override;
-    antlrcpp::Any visitTopClause(ClickHouseParser::TopClauseContext * ctx) override;
-    antlrcpp::Any visitFromClause(ClickHouseParser::FromClauseContext *ctx) override;
-    antlrcpp::Any visitSampleClause(ClickHouseParser::SampleClauseContext *ctx) override;
-    antlrcpp::Any visitArrayJoinClause(ClickHouseParser::ArrayJoinClauseContext *ctx) override;
-    antlrcpp::Any visitPrewhereClause(ClickHouseParser::PrewhereClauseContext *ctx) override;
-    antlrcpp::Any visitWhereClause(ClickHouseParser::WhereClauseContext *ctx) override;
-    antlrcpp::Any visitGroupByClause(ClickHouseParser::GroupByClauseContext *ctx) override;
-    antlrcpp::Any visitHavingClause(ClickHouseParser::HavingClauseContext *ctx) override;
-    antlrcpp::Any visitOrderByClause(ClickHouseParser::OrderByClauseContext *ctx) override;
-    antlrcpp::Any visitProjectionOrderByClause(ClickHouseParser::ProjectionOrderByClauseContext *ctx) override;
-    antlrcpp::Any visitLimitByClause(ClickHouseParser::LimitByClauseContext *ctx) override;
-    antlrcpp::Any visitLimitClause(ClickHouseParser::LimitClauseContext *ctx) override;
-    antlrcpp::Any visitSettingsClause(ClickHouseParser::SettingsClauseContext *ctx) override;
-
-    // SELECT expressions
-
-    antlrcpp::Any visitRatioExpr(ClickHouseParser::RatioExprContext *ctx) override;
-    antlrcpp::Any visitOrderExprList(ClickHouseParser::OrderExprListContext *ctx) override;
-    antlrcpp::Any visitOrderExpr(ClickHouseParser::OrderExprContext *ctx) override;
-    antlrcpp::Any visitLimitExpr(ClickHouseParser::LimitExprContext *ctx) override;
-    antlrcpp::Any visitSettingExprList(ClickHouseParser::SettingExprListContext *ctx) override;
-    antlrcpp::Any visitSettingExpr(ClickHouseParser::SettingExprContext *ctx) override;
-
-    // Join expressions (alphabetically)
-
-    antlrcpp::Any visitJoinConstraintClause(ClickHouseParser::JoinConstraintClauseContext *ctx) override;
-    antlrcpp::Any visitJoinExprCrossOp(ClickHouseParser::JoinExprCrossOpContext *ctx) override;
-    antlrcpp::Any visitJoinExprOp(ClickHouseParser::JoinExprOpContext *ctx) override;
-    antlrcpp::Any visitJoinExprParens(ClickHouseParser::JoinExprParensContext *ctx) override;
-    antlrcpp::Any visitJoinExprTable(ClickHouseParser::JoinExprTableContext *ctx) override;
-    antlrcpp::Any visitJoinOpCross(ClickHouseParser::JoinOpCrossContext *ctx) override;
-    antlrcpp::Any visitJoinOpFull(ClickHouseParser::JoinOpFullContext *ctx) override;
-    antlrcpp::Any visitJoinOpInner(ClickHouseParser::JoinOpInnerContext *ctx) override;
-    antlrcpp::Any visitJoinOpLeftRight(ClickHouseParser::JoinOpLeftRightContext *ctx) override;
-
-    // Column expressions (alphabetically)
-
-    antlrcpp::Any visitColumnArgExpr(ClickHouseParser::ColumnArgExprContext *ctx) override;
-    antlrcpp::Any visitColumnArgList(ClickHouseParser::ColumnArgListContext *ctx) override;
-    antlrcpp::Any visitColumnIdentifier(ClickHouseParser::ColumnIdentifierContext *ctx) override;
-    antlrcpp::Any visitColumnLambdaExpr(ClickHouseParser::ColumnLambdaExprContext *ctx) override;
-    antlrcpp::Any visitColumnsExprAsterisk(ClickHouseParser::ColumnsExprAsteriskContext *ctx) override;
-    antlrcpp::Any visitColumnsExprColumn(ClickHouseParser::ColumnsExprColumnContext *ctx) override;
-    antlrcpp::Any visitColumnsExprSubquery(ClickHouseParser::ColumnsExprSubqueryContext *ctx) override;
-    antlrcpp::Any visitNestedIdentifier(ClickHouseParser::NestedIdentifierContext *ctx) override;
-
-    // Database expressions
-
-    antlrcpp::Any visitDatabaseIdentifier(ClickHouseParser::DatabaseIdentifierContext *ctx) override;
-
-    // Basic expressions (alphabetically)
-
-    antlrcpp::Any visitAlias(ClickHouseParser::AliasContext * ctx) override;
-    antlrcpp::Any visitEnumValue(ClickHouseParser::EnumValueContext *ctx) override;
-    antlrcpp::Any visitFloatingLiteral(ClickHouseParser::FloatingLiteralContext *ctx) override;
-    antlrcpp::Any visitIdentifier(ClickHouseParser::IdentifierContext *ctx) override;
-    antlrcpp::Any visitIdentifierOrNull(ClickHouseParser::IdentifierOrNullContext *ctx) override;
-    antlrcpp::Any visitInterval(ClickHouseParser::IntervalContext * ctx) override;
-    antlrcpp::Any visitKeyword(ClickHouseParser::KeywordContext *ctx) override;
-    antlrcpp::Any visitKeywordForAlias(ClickHouseParser::KeywordForAliasContext * ctx) override;
-    antlrcpp::Any visitLiteral(ClickHouseParser::LiteralContext *ctx) override;
-    antlrcpp::Any visitNumberLiteral(ClickHouseParser::NumberLiteralContext *ctx) override;
-};
-
-}
diff --git a/src/Parsers/New/ParserErrorListener.cpp b/src/Parsers/New/ParserErrorListener.cpp
deleted file mode 100644
index f6ac0f0c451..00000000000
--- a/src/Parsers/New/ParserErrorListener.cpp
+++ /dev/null
@@ -1,37 +0,0 @@
-#include
-#include
-
-#include
-
-#include
-
-#include
-
-
-using namespace antlr4;
-
-namespace DB
-{
-
-namespace ErrorCodes
-{
-
-extern int SYNTAX_ERROR;
-
-}
-
-void ParserErrorListener::syntaxError(
-    Recognizer * recognizer, Token * token, size_t, size_t, const std::string & message, std::exception_ptr)
-{
-    auto * parser = dynamic_cast(recognizer);
-    assert(parser);
-
-    LOG_ERROR(&Poco::Logger::get("ClickHouseParser"), //-V522
-        "Last element parsed so far:\n"
-        "{}\n"
-        "Parser error: (pos {}) {}", parser->getRuleContext()->toStringTree(parser, true), token->getStartIndex(), message);
-
-    throw DB::Exception("Can't parse input: " + message, ErrorCodes::SYNTAX_ERROR);
-}
-
-}
diff --git a/src/Parsers/New/ParserErrorListener.h b/src/Parsers/New/ParserErrorListener.h
deleted file mode 100644
index 1a02ff01abe..00000000000
--- a/src/Parsers/New/ParserErrorListener.h
+++ /dev/null
@@ -1,21 +0,0 @@
-#pragma once
-
-#include
-
-
-namespace DB
-{
-
-class ParserErrorListener : public antlr4::BaseErrorListener
-{
-public:
-    void syntaxError(
-        antlr4::Recognizer * recognizer,
-        antlr4::Token * token,
-        size_t line,
-        size_t pos,
-        const std::string & message,
-        std::exception_ptr e) override;
-};
-
-}
diff --git a/src/Parsers/New/parseQuery.cpp b/src/Parsers/New/parseQuery.cpp
deleted file mode 100644
index c66772385ca..00000000000
--- a/src/Parsers/New/parseQuery.cpp
+++ /dev/null
@@ -1,89 +0,0 @@
-#include
-
-#include
-
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-
-#include
-
-namespace DB
-{
-
-using namespace antlr4;
-using namespace AST;
-
-// For testing only
-PtrTo parseQuery(const String & query, const String & current_database)
-{
-    ANTLRInputStream input(query);
-    ClickHouseLexer lexer(&input);
-    CommonTokenStream tokens(&lexer);
-    ClickHouseParser parser(&tokens);
-    LexerErrorListener lexer_error_listener;
-    ParserErrorListener parser_error_listener;
-
-    lexer.removeErrorListeners();
-    parser.removeErrorListeners();
-    lexer.addErrorListener(&lexer_error_listener);
-    parser.addErrorListener(&parser_error_listener);
-
-    ParseTreeVisitor visitor { current_database };
-
-    return visitor.visit(parser.queryStmt());
-}
-
-ASTPtr parseQuery(const char * begin, const char * end, size_t, size_t, const String & current_database)
-{
-    // TODO: do not ignore |max_parser_depth|.
-
-    size_t size = end - begin;
-    std::strstreambuf buffer(begin, size);
-    std::wbuffer_convert> converter(&buffer);
-    std::wistream stream(&converter);
-
-    UnbufferedCharStream input(stream, size);
-    ClickHouseLexer lexer(&input);
-    CommonTokenStream tokens(&lexer);
-    ClickHouseParser parser(&tokens);
-    LexerErrorListener lexer_error_listener;
-    ParserErrorListener parser_error_listener;
-
-    lexer.removeErrorListeners();
-    parser.removeErrorListeners();
-    lexer.addErrorListener(&lexer_error_listener);
-    parser.addErrorListener(&parser_error_listener);
-
-    ParseTreeVisitor visitor { current_database };
-
-    PtrTo new_ast = visitor.visit(parser.queryStmt());
-    auto old_ast = new_ast->convertToOld();
-
-    if (const auto * insert = new_ast->as())
-    {
-        auto * old_insert = old_ast->as();
-
-        old_insert->end = end;
-        if (insert->hasData())
-        {
-            old_insert->data = begin + insert->getDataOffset();
-
-            // Data starts after the first newline, if there is one, or after all the whitespace characters, otherwise.
-            auto & data = old_insert->data;
-            while (data < end && (*data == ' ' || *data == '\t' || *data == '\f')) ++data;
-            if (data < end && *data == '\r') ++data;
-            if (data < end && *data == '\n') ++data;
-        }
-
-        old_insert->data = (old_insert->data != end) ? old_insert->data : nullptr;
-    }
-
-    return old_ast;
-}
-
-}
diff --git a/src/Parsers/New/parseQuery.h b/src/Parsers/New/parseQuery.h
deleted file mode 100644
index 8d9c8efd337..00000000000
--- a/src/Parsers/New/parseQuery.h
+++ /dev/null
@@ -1,14 +0,0 @@
-#pragma once
-
-#include
-#include
-#include
-
-namespace DB
-{
-
-// Compatibility interface
-AST::PtrTo parseQuery(const std::string & query, const String & current_database);
-ASTPtr parseQuery(const char * begin, const char * end, size_t max_query_size, size_t max_parser_depth, const String & current_database);
-
-}
diff --git a/tests/ci/ci_config.json b/tests/ci/ci_config.json
index 2efa6ec6fef..52a101728ea 100644
--- a/tests/ci/ci_config.json
+++ b/tests/ci/ci_config.json
@@ -393,18 +393,6 @@
             "with_coverage": false
         }
     },
-    "Functional stateless tests (ANTLR debug)": {
-        "required_build_properties": {
-            "compiler": "clang-11",
-            "package_type": "deb",
-            "build_type": "debug",
-            "sanitizer": "none",
-            "bundled": "bundled",
-            "splitted": "unsplitted",
-            "clang-tidy": "disable",
-            "with_coverage": false
-        }
-    },
     "Stress test (address)": {
         "required_build_properties": {
             "compiler": "clang-11",
diff --git a/tests/clickhouse-test b/tests/clickhouse-test
index 924e4017670..63624246190 100755
--- a/tests/clickhouse-test
+++ b/tests/clickhouse-test
@@ -647,7 +647,6 @@ class BuildFlags():
     RELEASE = 'release-build'
     DATABASE_ORDINARY = 'database-ordinary'
     POLYMORPHIC_PARTS = 'polymorphic-parts'
-    ANTLR = 'antlr'
    DATABASE_REPLICATED = 'database-replicated'
 
 
@@ -770,8 +769,6 @@ def main(args):
        Note: if you are using split build, you may have to specify -c option.")
 
     build_flags = collect_build_flags(args.client)
-    if args.antlr:
-        build_flags.append(BuildFlags.ANTLR)
     if args.replicated_database:
         build_flags.append(BuildFlags.DATABASE_REPLICATED)
 
@@ -1086,8 +1083,6 @@ if __name__ == '__main__':
     parser.add_argument('--use-skip-list', action='store_true', default=False, help="Use skip list to skip tests if found")
     parser.add_argument('--db-engine', help='Database engine name')
     parser.add_argument('--replicated-database', action='store_true', default=False, help='Run tests with Replicated database engine')
-
-    parser.add_argument('--antlr', action='store_true', default=False, dest='antlr', help='Use new ANTLR parser in tests')
     parser.add_argument('--no-stateless', action='store_true', help='Disable all stateless tests')
     parser.add_argument('--no-stateful', action='store_true', help='Disable all stateful tests')
     parser.add_argument('--skip', nargs='+', help="Skip these tests")
@@ -1180,13 +1175,6 @@
     os.environ['CLICKHOUSE_URL_PARAMS'] += get_additional_client_options_url(args)
 
-    if args.antlr:
-        if 'CLICKHOUSE_CLIENT_OPT' in os.environ:
-            os.environ['CLICKHOUSE_CLIENT_OPT'] += ' --use_antlr_parser=1'
-        else:
-            os.environ['CLICKHOUSE_CLIENT_OPT'] = '--use_antlr_parser=1'
-        args.client += ' --use_antlr_parser=1'
-
     if args.extract_from_config is None:
         if os.access(args.binary + '-extract-from-config', os.X_OK):
             args.extract_from_config = args.binary + '-extract-from-config'
 
diff --git a/tests/queries/skip_list.json b/tests/queries/skip_list.json
index 8fb95741bab..829eb8dd547 100644
--- a/tests/queries/skip_list.json
+++ b/tests/queries/skip_list.json
@@ -181,351 +181,6 @@
         "01508_partition_pruning_long", /// bug, shoud be fixed
         "01482_move_to_prewhere_and_cast" /// bug, shoud be fixed
     ],
-    "antlr": [
-        "00011_array_join_alias",
-        "00186_very_long_arrays",
-        "00233_position_function_sql_comparibilty",
-        "00417_kill_query",
-        "00534_functions_bad_arguments12",
-        "00534_functions_bad_arguments2",
-        "00534_functions_bad_arguments4",
-        "00534_functions_bad_arguments9",
-        "00564_temporary_table_management",
-        "00600_replace_running_query",
-        "00626_replace_partition_from_table_zookeeper",
-        "00652_replicated_mutations_zookeeper",
-        "00687_top_and_offset",
-        "00746_sql_fuzzy",
-        "00763_create_query_as_table_engine_bug",
-        "00765_sql_compatibility_aliases",
-        "00825_protobuf_format_array_3dim",
-        "00825_protobuf_format_array_of_arrays",
-        "00825_protobuf_format_enum_mapping",
-        "00825_protobuf_format_map",
-        "00825_protobuf_format_nested_in_nested",
-        "00825_protobuf_format_nested_optional",
-        "00825_protobuf_format_no_length_delimiter",
-        "00825_protobuf_format_persons",
-        "00825_protobuf_format_splitted_nested",
-        "00825_protobuf_format_squares",
-        "00825_protobuf_format_table_default",
-        "00826_cross_to_inner_join",
-        "00834_not_between",
-        "00855_join_with_array_join",
-        "00909_kill_not_initialized_query",
-        "00938_template_input_format",
-        "00939_limit_by_offset",
-        "00943_materialize_index",
-        "00944_clear_index_in_partition",
-        "00952_input_function",
-        "00953_constraints_operations",
-        "00954_client_prepared_statements",
-        "00956_sensitive_data_masking",
-        "00969_columns_clause",
-        "00975_indices_mutation_replicated_zookeeper_long",
-        "00975_values_list",
-        "00976_system_stop_ttl_merges",
-        "00977_int_div",
-        "00978_table_function_values_alias",
-        "00980_merge_alter_settings",
-        "00980_zookeeper_merge_tree_alter_settings",
-        "00982_array_enumerate_uniq_ranked",
-        "00984_materialized_view_to_columns",
-        "00988_constraints_replication_zookeeper",
-        "00995_order_by_with_fill",
-        "01001_enums_in_in_section",
-        "01011_group_uniq_array_memsan",
-        "01011_test_create_as_skip_indices",
-        "01014_format_custom_separated",
-        "01015_attach_part",
-        "01015_database_bad_tables",
-        "01017_uniqCombined_memory_usage",
-        "01018_ddl_dictionaries_concurrent_requrests", /// Cannot parse ATTACH DICTIONARY IF NOT EXISTS
-        "01019_alter_materialized_view_atomic",
-        "01019_alter_materialized_view_consistent",
-        "01019_alter_materialized_view_query",
-        "01021_tuple_parser",
-        "01025_array_compact_generic",
-        "01030_limit_by_with_ties_error",
-        "01033_quota_dcl",
-        "01034_with_fill_and_push_down_predicate",
-        "01035_avg_weighted_long",
-        "01039_row_policy_dcl",
-        "01039_test_setting_parse",
-        "01042_system_reload_dictionary_reloads_completely",
-        "01045_dictionaries_restrictions",
-        "01053_ssd_dictionary",
-        "01055_compact_parts_1",
-        "01056_create_table_as",
-        "01066_bit_count",
-        "01070_materialize_ttl",
-        "01070_mutations_with_dependencies",
-        "01073_grant_and_revoke",
-        "01073_show_tables_not_like",
-        "01074_partial_revokes",
-        "01075_allowed_client_hosts",
-        "01083_expressions_in_engine_arguments",
-        "01085_regexp_input_format",
-        "01086_regexp_input_format_skip_unmatched",
-        "01089_alter_settings_old_format",
-        "01095_tpch_like_smoke",
-        "01107_atomic_db_detach_attach",
-        "01109_exchange_tables",
-        "01109_sc0rp10_string_hash_map_zero_bytes",
-        "01110_dictionary_layout_without_arguments",
-        "01114_database_atomic",
-        "01114_materialize_clear_index_compact_parts",
-        "01115_join_with_dictionary",
-        "01117_comma_and_others_join_mix",
-        "01125_dict_ddl_cannot_add_column",
-        "01130_in_memory_parts",
-        "01144_multiple_joins_rewriter_v2_and_lambdas",
-        "01144_multiword_data_types",
-        "01145_with_fill_const",
-        "01149_zookeeper_mutation_stuck_after_replace_partition",
-        "01150_ddl_guard_rwr",
-        "01153_attach_mv_uuid",
-        "01155_old_mutation_parts_to_do",
-        "01155_rename_move_materialized_view",
-        "01182_materialized_view_different_structure",
-        "01185_create_or_replace_table",
-        "01187_set_profile_as_setting",
-        "01188_attach_table_from_path",
-        "01190_full_attach_syntax",
-        "01191_rename_dictionary",
-        "01192_rename_database_zookeeper",
-        "01213_alter_rename_column",
-        "01232_untuple",
-        "01240_join_get_or_null",
-        "01244_optimize_distributed_group_by_sharding_key",
-        "01254_dict_load_after_detach_attach",
-        "01256_misspell_layout_name_podshumok",
-        "01257_dictionary_mismatch_types",
-        "01267_alter_default_key_columns_zookeeper",
-        "01268_mv_scalars",
-        "01269_create_with_null",
-        "01271_show_privileges",
-        "01272_offset_without_limit",
-        "01277_alter_rename_column_constraint_zookeeper",
-        "01278_min_insert_block_size_rows_for_materialized_views",
-        "01280_min_map_max_map",
-        "01280_null_in",
-        "01280_ssd_complex_key_dictionary",
-        "01280_ttl_where_group_by_negative",
-        "01280_ttl_where_group_by",
-        "01280_unicode_whitespaces_lexer",
-        "01292_create_user",
-        "01293_create_role",
-        "01293_pretty_max_value_width",
-        "01293_show_clusters",
-        "01293_show_settings",
-        "01294_create_settings_profile",
-        "01294_lazy_database_concurrent_recreate_reattach_and_show_tables_long",
-        "01294_system_distributed_on_cluster",
-        "01295_create_row_policy",
-        "01296_create_row_policy_in_current_database",
-        "01297_create_quota",
-        "01308_row_policy_and_trivial_count_query",
-        "01317_no_password_in_command_line",
-        "01318_map_add_map_subtract",
-        "01322_any_input_optimize",
-        "01324_if_transform_strings_to_enum",
-        "01337_mysql_global_variables",
-        "01355_alter_column_with_order",
-        "01355_ilike",
-        "01373_is_zero_or_null",
-        "01374_if_nullable_filimonov",
-        "01378_alter_rename_with_ttl_zookeeper",
-        "01379_with_fill_several_columns",
-        "01397_in_bad_arguments",
-        "01412_mod_float",
-        "01415_table_function_view",
-        "01417_freeze_partition_verbose_zookeeper",
-        "01417_freeze_partition_verbose",
-        "01418_custom_settings",
-        "01419_merge_tree_settings_sanity_check",
-        "01430_modify_sample_by_zookeeper",
-        "01447_json_strings",
-        "01449_json_compact_strings",
-        "01451_detach_drop_part",
-        "01451_replicated_detach_drop_and_quorum",
-        "01451_replicated_detach_drop_part",
-        "01457_create_as_table_function_structure",
-        "01460_allow_dollar_and_number_in_identifier",
-        "01463_test_alter_live_view_refresh",
-        "01465_ttl_recompression",
-        "01470_columns_transformers",
-        "01470_columns_transformers2",
-        "01470_explain",
-        "01470_show_databases_like",
-        "01470_test_insert_select_asterisk",
-        "01475_read_subcolumns_2",
-        "01475_read_subcolumns_3",
-        "01475_read_subcolumns_storages",
-        "01475_read_subcolumns",
-        "01480_binary_operator_monotonicity",
-        "01491_nested_multiline_comments",
-        "01493_table_function_null",
-        "01495_subqueries_in_with_statement_2",
-        "01495_subqueries_in_with_statement_3",
-        "01495_subqueries_in_with_statement",
-        "01501_clickhouse_client_INSERT_exception",
-        "01504_compression_multiple_streams",
-        "01504_rocksdb",
-        "01506_ttl_same_with_order_by",
-        "01508_explain_header",
-        "01508_partition_pruning_long",
-        "01509_check_parallel_quorum_inserts_long",
-        "01509_dictionary_preallocate",
-        "01509_parallel_quorum_and_merge_long",
-        "01515_mv_and_array_join_optimisation_bag",
-        "01515_with_global_and_with_propagation",
-        "01516_create_table_primary_key",
-        "01517_drop_mv_with_inner_table",
-        "01523_client_local_queries_file_parameter",
-        "01523_interval_operator_support_string_literal",
"01525_select_with_offset_fetch_clause", - "01526_client_start_and_exit", - "01527_dist_sharding_key_dictGet_reload", - "01529_union_distinct_and_setting_union_default_mode", - "01530_drop_database_atomic_sync", - "01532_execute_merges_on_single_replica", - "01532_primary_key_without_order_by_zookeeper", - "01533_multiple_nested", - "01541_max_memory_usage_for_user_long", - "01551_mergetree_read_in_order_spread", - "01552_dict_fixedstring", - "01554_bloom_filter_index_big_integer_uuid", - "01556_explain_select_with_union_query", - "01561_aggregate_functions_of_key_with_join", - "01562_optimize_monotonous_functions_in_order_by", - "01568_window_functions_distributed", - "01571_window_functions", - "01576_alias_column_rewrite", - "01581_deduplicate_by_columns_local", - "01581_deduplicate_by_columns_replicated", - "01582_any_join_supertype", - "01582_distinct_optimization", - "01590_countSubstrings", - "01591_window_functions", - "01592_long_window_functions1", - "01592_window_functions", - "01593_insert_settings", - "01594_too_low_memory_limits", - "01596_setting_limit_offset", - "01600_log_queries_with_extensive_info", - "01600_quota_by_forwarded_ip", - "01601_detach_permanently", - "01602_show_create_view", - "01603_read_with_backoff_bug", - "01604_explain_ast_of_nonselect_query", - "01605_drop_settings_profile_while_assigned", - "01605_skip_idx_compact_parts", - "01606_git_import", - "01606_merge_from_wide_to_compact", - "01614_with_fill_with_limit", - "01622_multiple_ttls", - "01630_disallow_floating_point_as_partition_key", - "01632_max_partitions_to_read", - "01632_select_all_syntax", - "01638_div_mod_ambiguities", - "01642_if_nullable_regression", - "01643_system_suspend", - "01646_system_restart_replicas_smoke", - "01650_drop_part_and_deduplication_zookeeper", - "01650_fetch_patition_with_macro_in_zk_path", - "01651_lc_insert_tiny_log", - "01655_plan_optimizations", - "01656_test_query_log_factories_info", - "01658_values_ubsan", - "01663_quantile_weighted_overflow", - "01666_blns", - "01666_merge_tree_max_query_limit", - "01666_merge_tree_max_query_limit", - "01674_clickhouse_client_query_param_cte", - "01674_unicode_asan", - "01676_dictget_in_default_expression", - "01680_predicate_pushdown_union_distinct_subquery", - "01681_cache_dictionary_simple_key", - "01682_cache_dictionary_complex_key", - "01683_flat_dictionary", - "01684_ssd_cache_dictionary_simple_key", - "01685_ssd_cache_dictionary_complex_key", - "01686_rocksdb", - "01699_timezoneOffset", - "01702_bitmap_native_integers", - "01702_system_query_log", - "01710_projections", - "01711_cte_subquery_fix", - "01712_no_adaptive_granularity_vertical_merge", - "01715_table_function_view_fix", - "01720_dictionary_create_source_with_functions", - "01720_union_distinct_with_limit", - "01721_dictionary_decimal_p_s", - "01721_engine_file_truncate_on_insert", - "01730_distributed_group_by_no_merge_order_by_long", - "01732_explain_syntax_union_query", - "01732_union_and_union_all", - "01747_join_view_filter_dictionary", - "01748_dictionary_table_dot", - "01753_direct_dictionary_simple_key", - "01754_direct_dictionary_complex_key", - "01756_optimize_skip_unused_shards_rewrite_in", - "01757_optimize_skip_unused_shards_limit", - "01759_dictionary_unique_attribute_names", - "01760_polygon_dictionaries", - "01760_system_dictionaries", - "01763_long_ttl_group_by", - "01765_hashed_dictionary_simple_key", - "01766_hashed_dictionary_complex_key", - "01774_tuple_null_in", - "01778_hierarchical_dictionaries", - "01778_mmap_cache_infra", - 
"01780_clickhouse_dictionary_source_loop", - "01781_merge_tree_deduplication", - "01785_dictionary_element_count", - "01786_explain_merge_tree", - "01801_distinct_group_by_shard", - "01802_rank_corr_mann_whitney_over_window", - "01802_test_postgresql_protocol_with_row_policy", /// It cannot parse DROP ROW POLICY - "01818_move_partition_simple", - "01821_table_comment", - "01823_explain_json", - "01837_database_memory_ddl_dictionaries", - "01839_join_to_subqueries_rewriter_columns_matcher", - "01840_tupleElement_formatting_fuzzer", - "01851_fix_row_policy_empty_result", - "01851_hedged_connections_external_tables", - "01852_cast_operator_bad_cases", - "01852_cast_operator", - "01861_explain_pipeline", - "01868_order_by_fill_with_datetime64", - "01870_buffer_flush", - "01871_merge_tree_compile_expressions", - "01872_functions_to_subcolumns", - "01881_union_header_mismatch_bug", - "01883_subcolumns_distributed", - "01889_postgresql_protocol_null_fields", - "01889_check_row_policy_defined_using_user_function", - "01892_setting_limit_offset_distributed", - "01902_dictionary_array_type", - "01903_ssd_cache_dictionary_array_type", - "01905_to_json_string", - "01913_fix_column_transformer_replace_format", - "01913_if_int_decimal", - "01913_join_push_down_bug", - "01921_with_fill_with_totals", - "01924_argmax_bitmap_state", - "01913_replace_dictionary", - "01914_exchange_dictionaries", - "01915_create_or_replace_dictionary", - "01913_names_of_tuple_literal", - "01925_merge_prewhere_table", - "01932_null_valid_identifier", - "01934_constexpr_aggregate_function_parameters", - "01932_alter_index_with_order", - "01936_quantiles_cannot_return_null" - ], "parallel": [ /// Pessimistic list of tests which work badly in parallel. diff --git a/utils/CMakeLists.txt b/utils/CMakeLists.txt index bd6453e406b..a6bf2843e9a 100644 --- a/utils/CMakeLists.txt +++ b/utils/CMakeLists.txt @@ -12,7 +12,6 @@ include(../cmake/limit_jobs.cmake) # Utils used in package add_subdirectory (config-processor) add_subdirectory (report) -add_subdirectory (syntax-analyzer) # Not used in package if (NOT DEFINED ENABLE_UTILS OR ENABLE_UTILS) diff --git a/src/Parsers/New/ClickHouseLexer.g4 b/utils/antlr/ClickHouseLexer.g4 similarity index 100% rename from src/Parsers/New/ClickHouseLexer.g4 rename to utils/antlr/ClickHouseLexer.g4 diff --git a/src/Parsers/New/ClickHouseParser.g4 b/utils/antlr/ClickHouseParser.g4 similarity index 100% rename from src/Parsers/New/ClickHouseParser.g4 rename to utils/antlr/ClickHouseParser.g4 diff --git a/src/Parsers/New/README.md b/utils/antlr/README.md similarity index 100% rename from src/Parsers/New/README.md rename to utils/antlr/README.md diff --git a/utils/syntax-analyzer/CMakeLists.txt b/utils/syntax-analyzer/CMakeLists.txt deleted file mode 100644 index 77068f528be..00000000000 --- a/utils/syntax-analyzer/CMakeLists.txt +++ /dev/null @@ -1,3 +0,0 @@ -add_executable(syntax-analyzer main.cpp) - -target_link_libraries(syntax-analyzer PRIVATE clickhouse_parsers_new dbms) diff --git a/utils/syntax-analyzer/main.cpp b/utils/syntax-analyzer/main.cpp deleted file mode 100644 index cf264160407..00000000000 --- a/utils/syntax-analyzer/main.cpp +++ /dev/null @@ -1,63 +0,0 @@ -#include -#include -#include -#include -#include -#include - -#include - -#include -#include -#include - -using namespace DB; - -int main(int argc, const char **) -{ - if (argc > 1) - { - std::cerr << "No arguments needed. 
Reads query from input until EOF" << std::endl; - return 1; - } - - std::istreambuf_iterator begin(std::cin), end; - std::string query(begin, end); - - { - std::vector queries; - splitMultipartQuery(query, queries, 10000000, 10000); - for (const auto & q : queries) - { - std::cout << std::endl << "Query:" << std::endl; - std::cout << q << std::endl; - - ParserQuery parser(q.data() + q.size()); - ASTPtr orig_ast = parseQuery(parser, q, 10000000, 10000); - - std::cout << std::endl << "New AST:" << std::endl; - auto new_ast = parseQuery(q, ""); - new_ast->dump(); - - auto old_ast = new_ast->convertToOld(); - if (orig_ast) - { - std::cout << std::endl << "Original AST:" << std::endl; - WriteBufferFromOStream buf(std::cout, 1); - orig_ast->dumpTree(buf); - std::cout << std::endl << "Original query:" << std::endl; - orig_ast->format({buf, false}); - std::cout << std::endl; - } - if (old_ast) - { - std::cout << std::endl << "Converted AST:" << std::endl; - WriteBufferFromOStream buf(std::cout, 1); - old_ast->dumpTree(buf); - std::cout << std::endl << "Converted query:" << std::endl; - old_ast->format({buf, false}); - std::cout << std::endl; - } - } - } -} From 8a8e72b77f0fc3683477c9e43eb4d611a6e2555a Mon Sep 17 00:00:00 2001 From: kssenii Date: Sat, 3 Jul 2021 18:44:17 +0000 Subject: [PATCH 714/931] Update .reference --- tests/queries/0_stateless/01414_optimize_any_bug.reference | 1 - 1 file changed, 1 deletion(-) diff --git a/tests/queries/0_stateless/01414_optimize_any_bug.reference b/tests/queries/0_stateless/01414_optimize_any_bug.reference index 573541ac970..e69de29bb2d 100644 --- a/tests/queries/0_stateless/01414_optimize_any_bug.reference +++ b/tests/queries/0_stateless/01414_optimize_any_bug.reference @@ -1 +0,0 @@ -0 From 56d695cd121d03bc9d014056d383253ffc27c5f7 Mon Sep 17 00:00:00 2001 From: alexey-milovidov Date: Sat, 3 Jul 2021 22:10:22 +0300 Subject: [PATCH 715/931] Update Settings.h --- src/Core/Settings.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Core/Settings.h b/src/Core/Settings.h index f9765f0278f..597f59bcb71 100644 --- a/src/Core/Settings.h +++ b/src/Core/Settings.h @@ -93,7 +93,7 @@ class IColumn; M(Bool, distributed_directory_monitor_split_batch_on_failure, false, "Should StorageDistributed DirectoryMonitors try to split batch into smaller in case of failures.", 0) \ \ M(Bool, optimize_move_to_prewhere, true, "Allows disabling WHERE to PREWHERE optimization in SELECT queries from MergeTree.", 0) \ - M(Bool, optimize_move_to_prewhere_if_final, false, "If query has `final`, optimization `move_to_prewhere` is enabled only if `optimize_move_to_prewhere` and `optimize_move_to_prewhere_if_final` are enabled", 0) \ + M(Bool, optimize_move_to_prewhere_if_final, false, "If query has `FINAL`, the optimization `move_to_prewhere` is not always correct and it is enabled only if both settings `optimize_move_to_prewhere` and `optimize_move_to_prewhere_if_final` are turned on", 0) \ \ M(UInt64, replication_alter_partitions_sync, 1, "Wait for actions to manipulate the partitions. 0 - do not wait, 1 - wait for execution only of itself, 2 - wait for everyone.", 0) \ M(UInt64, replication_alter_columns_timeout, 60, "Wait for actions to change the table structure within the specified number of seconds. 
0 - wait unlimited time.", 0) \ From c762e2247d3248556996b8c262e5d790f9123c65 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sun, 4 Jul 2021 00:00:50 +0300 Subject: [PATCH 716/931] More instrumentation for network interaction: add counters for recv/send bytes; add gauges for recvs/sends. --- src/Common/CurrentMetrics.cpp | 2 ++ src/Common/ProfileEvents.cpp | 6 ++++-- src/IO/ReadBufferFromPocoSocket.cpp | 11 +++++++++++ src/IO/WriteBufferFromPocoSocket.cpp | 10 ++++++++++ 4 files changed, 27 insertions(+), 2 deletions(-) diff --git a/src/Common/CurrentMetrics.cpp b/src/Common/CurrentMetrics.cpp index 1e482361f85..e9fa13e11e6 100644 --- a/src/Common/CurrentMetrics.cpp +++ b/src/Common/CurrentMetrics.cpp @@ -30,6 +30,8 @@ M(OpenFileForWrite, "Number of files open for writing") \ M(Read, "Number of read (read, pread, io_getevents, etc.) syscalls in flight") \ M(Write, "Number of write (write, pwrite, io_getevents, etc.) syscalls in flight") \ + M(NetworkReceive, "Number of threads receiving data from network. Only ClickHouse-related network interaction is included, not by 3rd party libraries.") \ + M(NetworkSend, "Number of threads sending data to network. Only ClickHouse-related network interaction is included, not by 3rd party libraries.") \ M(SendScalars, "Number of connections that are sending data for scalars to remote servers.") \ M(SendExternalTables, "Number of connections that are sending data for external tables to remote servers. External tables are used to implement GLOBAL IN and GLOBAL JOIN operators with distributed subqueries.") \ M(QueryThread, "Number of query processing threads") \ diff --git a/src/Common/ProfileEvents.cpp b/src/Common/ProfileEvents.cpp index 86f06f27455..dffe2239e62 100644 --- a/src/Common/ProfileEvents.cpp +++ b/src/Common/ProfileEvents.cpp @@ -49,8 +49,10 @@ M(CreatedReadBufferMMapFailed, "") \ M(DiskReadElapsedMicroseconds, "Total time spent waiting for read syscall. This includes reads from page cache.") \ M(DiskWriteElapsedMicroseconds, "Total time spent waiting for write syscall. This includes writes to page cache.") \ - M(NetworkReceiveElapsedMicroseconds, "") \ - M(NetworkSendElapsedMicroseconds, "") \ + M(NetworkReceiveElapsedMicroseconds, "Total time spent waiting to receive data or receiving data from network. Only ClickHouse-related network interaction is included, not by 3rd party libraries.") \ + M(NetworkSendElapsedMicroseconds, "Total time spent waiting to send data to network or sending data to network. Only ClickHouse-related network interaction is included, not by 3rd party libraries.") \ + M(NetworkReceiveBytes, "Total number of bytes received from network. Only ClickHouse-related network interaction is included, not by 3rd party libraries.") \ + M(NetworkSendBytes, "Total number of bytes sent to network. 
Only ClickHouse-related network interaction is included, not by 3rd party libraries.") \ M(ThrottlerSleepMicroseconds, "Total time a query was sleeping to conform the 'max_network_bandwidth' setting.") \ \ M(QueryMaskingRulesMatch, "Number of times query masking rules was successfully matched.") \ diff --git a/src/IO/ReadBufferFromPocoSocket.cpp b/src/IO/ReadBufferFromPocoSocket.cpp index e043764d280..5e8e41d0c3e 100644 --- a/src/IO/ReadBufferFromPocoSocket.cpp +++ b/src/IO/ReadBufferFromPocoSocket.cpp @@ -5,11 +5,19 @@ #include #include #include +#include +#include namespace ProfileEvents { extern const Event NetworkReceiveElapsedMicroseconds; + extern const Event NetworkReceiveBytes; +} + +namespace CurrentMetrics +{ + extern const Metric NetworkReceive; } @@ -31,6 +39,8 @@ bool ReadBufferFromPocoSocket::nextImpl() /// Add more details to exceptions. try { + CurrentMetrics::Increment metric_increment(CurrentMetrics::NetworkReceive); + /// If async_callback is specified, and read will block, run async_callback and try again later. /// It is expected that file descriptor may be polled externally. /// Note that receive timeout is not checked here. External code should check it while polling. @@ -57,6 +67,7 @@ bool ReadBufferFromPocoSocket::nextImpl() /// NOTE: it is quite inaccurate on high loads since the thread could be replaced by another one ProfileEvents::increment(ProfileEvents::NetworkReceiveElapsedMicroseconds, watch.elapsedMicroseconds()); + ProfileEvents::increment(ProfileEvents::NetworkReceiveBytes, bytes_read); if (bytes_read) working_buffer.resize(bytes_read); diff --git a/src/IO/WriteBufferFromPocoSocket.cpp b/src/IO/WriteBufferFromPocoSocket.cpp index 78705857ec4..a0e4de4c831 100644 --- a/src/IO/WriteBufferFromPocoSocket.cpp +++ b/src/IO/WriteBufferFromPocoSocket.cpp @@ -6,11 +6,19 @@ #include #include #include +#include +#include namespace ProfileEvents { extern const Event NetworkSendElapsedMicroseconds; + extern const Event NetworkSendBytes; +} + +namespace CurrentMetrics +{ + extern const Metric NetworkSend; } @@ -40,6 +48,7 @@ void WriteBufferFromPocoSocket::nextImpl() /// Add more details to exceptions. 
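        /// Note (illustrative, not part of the original patch): CurrentMetrics::Increment is an
        /// RAII helper; the NetworkSend gauge incremented below is decremented automatically when
        /// the scope exits, so the metric counts threads currently blocked in sendBytes().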
try { + CurrentMetrics::Increment metric_increment(CurrentMetrics::NetworkSend); res = socket.impl()->sendBytes(working_buffer.begin() + bytes_written, offset() - bytes_written); } catch (const Poco::Net::NetException & e) @@ -62,6 +71,7 @@ void WriteBufferFromPocoSocket::nextImpl() } ProfileEvents::increment(ProfileEvents::NetworkSendElapsedMicroseconds, watch.elapsedMicroseconds()); + ProfileEvents::increment(ProfileEvents::NetworkSendBytes, bytes_written); } WriteBufferFromPocoSocket::WriteBufferFromPocoSocket(Poco::Net::Socket & socket_, size_t buf_size) From 77879f68266dbea1bde4f8df7cbdd180f05a839d Mon Sep 17 00:00:00 2001 From: kssenii Date: Sun, 4 Jul 2021 01:53:51 +0300 Subject: [PATCH 717/931] Better --- src/Interpreters/InterpreterSelectQuery.cpp | 9 ++++----- src/Interpreters/InterpreterSelectQuery.h | 2 +- 2 files changed, 5 insertions(+), 6 deletions(-) diff --git a/src/Interpreters/InterpreterSelectQuery.cpp b/src/Interpreters/InterpreterSelectQuery.cpp index 612fcac2dd0..6710d95b822 100644 --- a/src/Interpreters/InterpreterSelectQuery.cpp +++ b/src/Interpreters/InterpreterSelectQuery.cpp @@ -506,7 +506,7 @@ InterpreterSelectQuery::InterpreterSelectQuery( result_header = getSampleBlockImpl(); }; - analyze(settings.optimize_move_to_prewhere && moveToPrewhereIfFinal()); + analyze(shouldMoveToPrewhere()); bool need_analyze_again = false; if (analysis_result.prewhere_constant_filter_description.always_false || analysis_result.prewhere_constant_filter_description.always_true) @@ -1532,23 +1532,22 @@ void InterpreterSelectQuery::addEmptySourceToQueryPlan( } } -bool InterpreterSelectQuery::moveToPrewhereIfFinal() +bool InterpreterSelectQuery::shouldMoveToPrewhere() { const Settings & settings = context->getSettingsRef(); const ASTSelectQuery & query = getSelectQuery(); - return !query.final() || settings.optimize_move_to_prewhere_if_final; + return settings.optimize_move_to_prewhere && (!query.final() || settings.optimize_move_to_prewhere_if_final); } void InterpreterSelectQuery::addPrewhereAliasActions() { - const Settings & settings = context->getSettingsRef(); auto & expressions = analysis_result; if (expressions.filter_info) { if (!expressions.prewhere_info) { const bool does_storage_support_prewhere = !input && !input_pipe && storage && storage->supportsPrewhere(); - if (does_storage_support_prewhere && settings.optimize_move_to_prewhere && moveToPrewhereIfFinal()) + if (does_storage_support_prewhere && shouldMoveToPrewhere()) { /// Execute row level filter in prewhere as a part of "move to prewhere" optimization. 
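            /// Worked example (illustrative, assuming default settings): for
            ///     SET optimize_move_to_prewhere = 1, optimize_move_to_prewhere_if_final = 0;
            ///     SELECT * FROM t FINAL WHERE x = 1;
            /// shouldMoveToPrewhere() returns false and the filter stays in WHERE, because
            /// evaluating it before FINAL merges rows could change the result.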
expressions.prewhere_info = std::make_shared( diff --git a/src/Interpreters/InterpreterSelectQuery.h b/src/Interpreters/InterpreterSelectQuery.h index ae0bd5d5681..aec3b0b8bd3 100644 --- a/src/Interpreters/InterpreterSelectQuery.h +++ b/src/Interpreters/InterpreterSelectQuery.h @@ -118,7 +118,7 @@ private: ASTSelectQuery & getSelectQuery() { return query_ptr->as(); } void addPrewhereAliasActions(); - bool moveToPrewhereIfFinal(); + bool shouldMoveToPrewhere(); Block getSampleBlockImpl(); From 1960c717ed9bfe9288cfae0180f32112e900fe48 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sun, 4 Jul 2021 01:59:28 +0300 Subject: [PATCH 718/931] A couple of tests --- ...01939_network_receive_bytes_metrics.reference | 1 + .../01939_network_receive_bytes_metrics.sh | 16 ++++++++++++++++ .../01939_network_send_bytes_metrics.reference | 1 + .../01939_network_send_bytes_metrics.sh | 16 ++++++++++++++++ 4 files changed, 34 insertions(+) create mode 100644 tests/queries/0_stateless/01939_network_receive_bytes_metrics.reference create mode 100755 tests/queries/0_stateless/01939_network_receive_bytes_metrics.sh create mode 100644 tests/queries/0_stateless/01939_network_send_bytes_metrics.reference create mode 100755 tests/queries/0_stateless/01939_network_send_bytes_metrics.sh diff --git a/tests/queries/0_stateless/01939_network_receive_bytes_metrics.reference b/tests/queries/0_stateless/01939_network_receive_bytes_metrics.reference new file mode 100644 index 00000000000..d00491fd7e5 --- /dev/null +++ b/tests/queries/0_stateless/01939_network_receive_bytes_metrics.reference @@ -0,0 +1 @@ +1 diff --git a/tests/queries/0_stateless/01939_network_receive_bytes_metrics.sh b/tests/queries/0_stateless/01939_network_receive_bytes_metrics.sh new file mode 100755 index 00000000000..03babad40f3 --- /dev/null +++ b/tests/queries/0_stateless/01939_network_receive_bytes_metrics.sh @@ -0,0 +1,16 @@ +#!/usr/bin/env bash + +CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. "$CURDIR"/../shell_config.sh + +${CLICKHOUSE_CLIENT} --multiquery --query "DROP TABLE IF EXISTS t; CREATE TABLE t (x UInt64) ENGINE = Memory;" + +seq 1 1000 | ${CLICKHOUSE_CLIENT} --query "INSERT INTO t FORMAT TSV" + +${CLICKHOUSE_CLIENT} --multiquery --query "SYSTEM FLUSH LOGS; + WITH ProfileEvents['NetworkReceiveBytes'] AS bytes + SELECT bytes >= 8000 AND bytes < 9000 ? 1 : bytes FROM system.query_log + WHERE current_database = currentDatabase() AND query_kind = 'Insert' AND event_date >= yesterday() AND type = 2 ORDER BY event_time DESC LIMIT 1;" + +${CLICKHOUSE_CLIENT} --query "DROP TABLE t" diff --git a/tests/queries/0_stateless/01939_network_send_bytes_metrics.reference b/tests/queries/0_stateless/01939_network_send_bytes_metrics.reference new file mode 100644 index 00000000000..d00491fd7e5 --- /dev/null +++ b/tests/queries/0_stateless/01939_network_send_bytes_metrics.reference @@ -0,0 +1 @@ +1 diff --git a/tests/queries/0_stateless/01939_network_send_bytes_metrics.sh b/tests/queries/0_stateless/01939_network_send_bytes_metrics.sh new file mode 100755 index 00000000000..e862a273de4 --- /dev/null +++ b/tests/queries/0_stateless/01939_network_send_bytes_metrics.sh @@ -0,0 +1,16 @@ +#!/usr/bin/env bash + +CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. 
"$CURDIR"/../shell_config.sh + +${CLICKHOUSE_CLIENT} --multiquery --query "DROP TABLE IF EXISTS t; CREATE TABLE t (x UInt64) ENGINE = Memory;" + +${CLICKHOUSE_CLIENT} --query "SELECT number FROM numbers(1000)" > /dev/null + +${CLICKHOUSE_CLIENT} --multiquery --query "SYSTEM FLUSH LOGS; + WITH ProfileEvents['NetworkSendBytes'] AS bytes + SELECT bytes >= 8000 AND bytes < 9000 ? 1 : bytes FROM system.query_log + WHERE current_database = currentDatabase() AND query_kind = 'Select' AND event_date >= yesterday() AND type = 2 ORDER BY event_time DESC LIMIT 1;" + +${CLICKHOUSE_CLIENT} --query "DROP TABLE t" From 7054010cac2e0b18452fd5ae765d85b48e007df5 Mon Sep 17 00:00:00 2001 From: alexey-milovidov Date: Sun, 4 Jul 2021 02:04:43 +0300 Subject: [PATCH 719/931] Update DataTypeMap.cpp --- src/DataTypes/DataTypeMap.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/DataTypes/DataTypeMap.cpp b/src/DataTypes/DataTypeMap.cpp index 5ee5c90c59e..8fd375aa86e 100644 --- a/src/DataTypes/DataTypeMap.cpp +++ b/src/DataTypes/DataTypeMap.cpp @@ -71,7 +71,7 @@ void DataTypeMap::assertKeyType() const if (type_error) throw Exception(ErrorCodes::BAD_ARGUMENTS, - "Type of Map key must be a type, that can be represented by integer or String (possibly LowCardinality(String)) or UUID," + "Type of Map key must be a type, that can be represented by integer or String or FixedString (possibly LowCardinality) or UUID," " but {} given", key_type->getName()); } From e80a700cff3fce96578bbea3a71d4e7ba8cd4d29 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sun, 4 Jul 2021 02:13:32 +0300 Subject: [PATCH 720/931] More direct way to start ClickHouse --- docker/test/stateless/run.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docker/test/stateless/run.sh b/docker/test/stateless/run.sh index 58b1d18a681..a7fb956bf94 100755 --- a/docker/test/stateless/run.sh +++ b/docker/test/stateless/run.sh @@ -35,7 +35,7 @@ if [ "$NUM_TRIES" -gt "1" ]; then # simpliest way to forward env variables to server sudo -E -u clickhouse /usr/bin/clickhouse-server --config /etc/clickhouse-server/config.xml --daemon else - service clickhouse-server start + sudo clickhouse start fi if [[ -n "$USE_DATABASE_REPLICATED" ]] && [[ "$USE_DATABASE_REPLICATED" -eq 1 ]]; then From e866faa975013f70eb9c455a0497d66c4c73c56b Mon Sep 17 00:00:00 2001 From: kssenii Date: Sun, 4 Jul 2021 02:20:36 +0300 Subject: [PATCH 721/931] Fix --- src/Interpreters/ExpressionAnalyzer.cpp | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/Interpreters/ExpressionAnalyzer.cpp b/src/Interpreters/ExpressionAnalyzer.cpp index d4d0c0d0a9b..0897efe08fb 100644 --- a/src/Interpreters/ExpressionAnalyzer.cpp +++ b/src/Interpreters/ExpressionAnalyzer.cpp @@ -291,7 +291,8 @@ void ExpressionAnalyzer::analyzeAggregation() /// Constant expressions are already removed during first 'analyze' run. /// So for second `analyze` information is taken from select_query. 
- has_const_aggregation_keys = select_query->group_by_with_constant_keys; + if (select_query) + has_const_aggregation_keys = select_query->group_by_with_constant_keys; for (const auto & desc : aggregate_descriptions) aggregated_columns.emplace_back(desc.column_name, desc.function->getReturnType()); From 07693664413311f1a635cc8dd5298c53bce0fd8e Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sun, 4 Jul 2021 04:19:19 +0300 Subject: [PATCH 722/931] Remove part of trash --- src/Common/DiskStatisticsOS.cpp | 67 ++++++++---------------- src/Common/DiskStatisticsOS.h | 16 +++--- src/Common/MemoryInfoOS.cpp | 33 ++++++------ src/Common/MemoryInfoOS.h | 6 --- src/Interpreters/AsynchronousMetrics.cpp | 12 +++-- 5 files changed, 50 insertions(+), 84 deletions(-) diff --git a/src/Common/DiskStatisticsOS.cpp b/src/Common/DiskStatisticsOS.cpp index 69f15b30a9e..1b404be07fe 100644 --- a/src/Common/DiskStatisticsOS.cpp +++ b/src/Common/DiskStatisticsOS.cpp @@ -1,14 +1,12 @@ #if defined(OS_LINUX) -#include "DiskStatisticsOS.h" +#include +#include -#include - -#include - -#include +#include #include + namespace DB { @@ -17,61 +15,38 @@ namespace ErrorCodes extern const int CANNOT_STATVFS; } -namespace -{ - void readStringUntilWhitespaceAndSkipWhitespaceIfAny(String & s, ReadBuffer & buf) - { - readStringUntilWhitespace(s, buf); - skipWhitespaceIfAny(buf); - } -} - -static constexpr auto mounts_filename = "/proc/mounts"; - -static constexpr std::size_t READ_BUFFER_BUF_SIZE = (64 << 10); - -DiskStatisticsOS::DiskStatisticsOS() {} - -DiskStatisticsOS::~DiskStatisticsOS() {} DiskStatisticsOS::Data DiskStatisticsOS::get() { - ReadBufferFromFile mounts_in(mounts_filename, READ_BUFFER_BUF_SIZE, O_RDONLY | O_CLOEXEC); + ReadBufferFromFile mounts_in("/proc/mounts", 4096 /* arbitrary small buffer */); - DiskStatisticsOS::Data data = {0, 0}; + Data data{}; + + std::string fs_device; + std::string fs_path; while (!mounts_in.eof()) { - String filesystem = readNextFilesystem(mounts_in); + readStringUntilWhitespace(fs_device, mounts_in); + skipWhitespaceIfAny(mounts_in); + readStringUntilWhitespace(fs_path, mounts_in); + skipWhitespaceIfAny(mounts_in); - struct statvfs stat; + /// Only real devices + if (!fs_device.starts_with("/dev/") || fs_device.starts_with("/dev/loop")) + continue; - if (statvfs(filesystem.c_str(), &stat)) - throwFromErrno("Cannot statvfs", ErrorCodes::CANNOT_STATVFS); + struct statvfs stat = getStatVFS(fs_path); - uint64_t total_blocks = static_cast(stat.f_blocks); - uint64_t free_blocks = static_cast(stat.f_bfree); - uint64_t used_blocks = total_blocks - free_blocks; - uint64_t block_size = static_cast(stat.f_bsize); - - data.total += total_blocks * block_size; - data.used += used_blocks * block_size; + data.total_bytes += (stat.f_blocks) * stat.f_bsize; + data.used_bytes += (stat.f_blocks - stat.f_bfree) * stat.f_bsize; + data.total_inodes += stat.f_files; + data.used_inodes += stat.f_files - stat.f_ffree; } return data; } -String DiskStatisticsOS::readNextFilesystem(ReadBuffer & mounts_in) -{ - String filesystem, unused; - - readStringUntilWhitespaceAndSkipWhitespaceIfAny(unused, mounts_in); - readStringUntilWhitespace(filesystem, mounts_in); - skipToNextLineOrEOF(mounts_in); - - return filesystem; -} - } #endif diff --git a/src/Common/DiskStatisticsOS.h b/src/Common/DiskStatisticsOS.h index d4ec2417924..390846e4b6c 100644 --- a/src/Common/DiskStatisticsOS.h +++ b/src/Common/DiskStatisticsOS.h @@ -5,11 +5,13 @@ #include -#include namespace DB { +class ReadBuffer; + + /** Opens file 
/proc/mounts, reads all mounted filesystems and * calculates disk usage. */ @@ -19,17 +21,13 @@ public: // In bytes struct Data { - uint64_t total; - uint64_t used; + uint64_t total_bytes; + uint64_t used_bytes; + uint64_t total_inodes; + uint64_t used_inodes; }; - DiskStatisticsOS(); - ~DiskStatisticsOS(); - Data get(); - -private: - String readNextFilesystem(ReadBuffer & mounts_in); }; } diff --git a/src/Common/MemoryInfoOS.cpp b/src/Common/MemoryInfoOS.cpp index 301fcb6ad15..7b712a0bb06 100644 --- a/src/Common/MemoryInfoOS.cpp +++ b/src/Common/MemoryInfoOS.cpp @@ -28,15 +28,27 @@ namespace readStringUntilWhitespace(s, buf); skipWhitespaceIfAny(buf); } + + std::pair readField(ReadBuffer & meminfo_in) + { + String key; + uint64_t val; + + readStringUntilWhitespaceAndSkipWhitespaceIfAny(key, meminfo_in); + readIntTextAndSkipWhitespaceIfAny(val, meminfo_in); + skipToNextLineOrEOF(meminfo_in); + + // Delete the read ":" from the end + key.pop_back(); + + return std::make_pair(key, val); + } } static constexpr auto meminfo_filename = "/proc/meminfo"; static constexpr size_t READ_BUFFER_BUF_SIZE = (64 << 10); -MemoryInfoOS::MemoryInfoOS() {} - -MemoryInfoOS::~MemoryInfoOS() {} MemoryInfoOS::Data MemoryInfoOS::get() { @@ -63,21 +75,6 @@ MemoryInfoOS::Data MemoryInfoOS::get() return data; } -std::pair MemoryInfoOS::readField(ReadBuffer & meminfo_in) -{ - String key; - uint64_t val; - - readStringUntilWhitespaceAndSkipWhitespaceIfAny(key, meminfo_in); - readIntTextAndSkipWhitespaceIfAny(val, meminfo_in); - skipToNextLineOrEOF(meminfo_in); - - // Delete the read ":" from the end - key.pop_back(); - - return std::make_pair(key, val); -} - } #endif diff --git a/src/Common/MemoryInfoOS.h b/src/Common/MemoryInfoOS.h index 63cda5b5c37..4390c9d5697 100644 --- a/src/Common/MemoryInfoOS.h +++ b/src/Common/MemoryInfoOS.h @@ -33,13 +33,7 @@ public: uint64_t swap_cached; }; - MemoryInfoOS(); - ~MemoryInfoOS(); - Data get(); - -private: - std::pair readField(ReadBuffer & meminfo_in); }; } diff --git a/src/Interpreters/AsynchronousMetrics.cpp b/src/Interpreters/AsynchronousMetrics.cpp index 8a4cc508328..9d869899d6f 100644 --- a/src/Interpreters/AsynchronousMetrics.cpp +++ b/src/Interpreters/AsynchronousMetrics.cpp @@ -245,17 +245,17 @@ void AsynchronousMetrics::update() MemoryInfoOS::Data data = memory_info.get(); new_values["MemoryTotal"] = data.total; - new_values["MemoryFree"] = data.free; + new_values["MemoryFreeWithoutCached"] = data.free; new_values["MemoryBuffers"] = data.buffers; new_values["MemoryCached"] = data.cached; - new_values["MemoryFreeAndCached"] = data.free_and_cached; + new_values["MemoryFreeOrCached"] = data.free_and_cached; new_values["MemorySwapTotal"] = data.swap_total; new_values["MemorySwapFree"] = data.swap_free; new_values["MemorySwapCached"] = data.swap_cached; } #endif - /// Process processor usage according to OS + /// Process CPU usage according to OS #if defined(OS_LINUX) { ProcessorStatisticsOS::Data data = proc_stat.get(); @@ -288,8 +288,10 @@ void AsynchronousMetrics::update() { DiskStatisticsOS::Data data = disk_stat.get(); - new_values["DiskTotal"] = data.total; - new_values["DiskUsed"] = data.used; + new_values["FilesystemsTotalBytes"] = data.total_bytes; + new_values["FilesystemsUsedBytes"] = data.used_bytes; + new_values["FilesystemsTotalINodes"] = data.total_inodes; + new_values["FilesystemsUsedINodes"] = data.used_inodes; } #endif From 935e0327a52ab32440e9fa52ed196e9dec979065 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sun, 4 Jul 2021 06:03:49 +0300 Subject: 
[PATCH 723/931] Development --- src/IO/ReadBufferFromFileDescriptor.cpp | 16 ++++- src/IO/ReadBufferFromFileDescriptor.h | 3 + src/Interpreters/AsynchronousMetrics.cpp | 86 +++++++++++++++++++++--- src/Interpreters/AsynchronousMetrics.h | 62 +++++++++-------- 4 files changed, 123 insertions(+), 44 deletions(-) diff --git a/src/IO/ReadBufferFromFileDescriptor.cpp b/src/IO/ReadBufferFromFileDescriptor.cpp index babdc953514..893c2bcb5d8 100644 --- a/src/IO/ReadBufferFromFileDescriptor.cpp +++ b/src/IO/ReadBufferFromFileDescriptor.cpp @@ -149,7 +149,7 @@ off_t ReadBufferFromFileDescriptor::seek(off_t offset, int whence) off_t res = ::lseek(fd, new_pos, SEEK_SET); if (-1 == res) throwFromErrnoWithPath("Cannot seek through file " + getFileName(), getFileName(), - ErrorCodes::CANNOT_SEEK_THROUGH_FILE); + ErrorCodes::CANNOT_SEEK_THROUGH_FILE); file_offset_of_buffer_end = new_pos; watch.stop(); @@ -160,6 +160,20 @@ off_t ReadBufferFromFileDescriptor::seek(off_t offset, int whence) } +void ReadBufferFromFileDescriptor::rewind() +{ + ProfileEvents::increment(ProfileEvents::Seek); + off_t res = ::lseek(fd, 0, SEEK_SET); + if (-1 == res) + throwFromErrnoWithPath("Cannot seek through file " + getFileName(), getFileName(), + ErrorCodes::CANNOT_SEEK_THROUGH_FILE); + + /// Clearing the buffer with existing data. New data will be read on subsequent call to 'next'. + working_buffer.resize(0); + pos = working_buffer.begin(); +} + + /// Assuming file descriptor supports 'select', check that we have data to read or wait until timeout. bool ReadBufferFromFileDescriptor::poll(size_t timeout_microseconds) { diff --git a/src/IO/ReadBufferFromFileDescriptor.h b/src/IO/ReadBufferFromFileDescriptor.h index bf22bb3d4a3..1883c6802bc 100644 --- a/src/IO/ReadBufferFromFileDescriptor.h +++ b/src/IO/ReadBufferFromFileDescriptor.h @@ -39,6 +39,9 @@ public: /// If 'offset' is small enough to stay in buffer after seek, then true seek in file does not happen. off_t seek(off_t off, int whence) override; + /// Seek to the beginning, discarding already read data if any. Useful to reread file that changes on every read. + void rewind(); + off_t size(); void setProgressCallback(ContextPtr context); diff --git a/src/Interpreters/AsynchronousMetrics.cpp b/src/Interpreters/AsynchronousMetrics.cpp index 9d869899d6f..4e46cdc27f2 100644 --- a/src/Interpreters/AsynchronousMetrics.cpp +++ b/src/Interpreters/AsynchronousMetrics.cpp @@ -13,6 +13,7 @@ #include #include #include +#include #include #include @@ -35,6 +36,49 @@ namespace CurrentMetrics namespace DB { +static void openFileIfExists(const char * filename, std::optional & out) +{ + static constexpr size_t small_buffer_size = 4096; + + /// Ignoring time of check is not time of use cases, as procfs/sysfs files are fairly persistent. 
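    /// Usage sketch (illustrative): openFileIfExists("/proc/loadavg", loadavg) leaves the
    /// optional empty instead of throwing when the file is absent, e.g. on kernels built
    /// without the corresponding procfs entries.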
+ + std::error_code ec; + if (std::filesystem::is_regular_file(filename, ec)) + out.emplace(filename, small_buffer_size); +} + + +AsynchronousMetrics::AsynchronousMetrics( + ContextPtr global_context_, + int update_period_seconds, + std::shared_ptr> servers_to_start_before_tables_, + std::shared_ptr> servers_) + : WithContext(global_context_) + , update_period(update_period_seconds) + , servers_to_start_before_tables(servers_to_start_before_tables_) + , servers(servers_) +{ +#if defined(OS_LINUX) + openFileIfExists("/proc/meminfo", meminfo); + openFileIfExists("/proc/mounts", mounts); + openFileIfExists("/proc/loadavg", loadavg); + openFileIfExists("/proc/stat", proc_stat); + openFileIfExists("/proc/cpuinfo", cpuinfo); + openFileIfExists("/proc/schedstat", schedstat); + openFileIfExists("/proc/sockstat", sockstat); + openFileIfExists("/proc/netstat", netstat); + openFileIfExists("/proc/sys/fs/file-nr", file_nr); +#endif +} + +void AsynchronousMetrics::start() +{ + /// Update once right now, to make metrics available just after server start + /// (without waiting for asynchronous_metrics_update_period_s). + update(); + thread = std::make_unique([this] { run(); }); +} + AsynchronousMetrics::~AsynchronousMetrics() { try @@ -206,7 +250,7 @@ void AsynchronousMetrics::update() new_values["Uptime"] = getContext()->getUptimeSeconds(); - /// Process memory usage according to OS + /// Process process memory usage according to OS #if defined(OS_LINUX) { MemoryStatisticsOS::Data data = memory_stat.get(); @@ -239,19 +283,39 @@ void AsynchronousMetrics::update() } #endif - /// Process memory information according to OS #if defined(OS_LINUX) + if (loadavg) { - MemoryInfoOS::Data data = memory_info.get(); + loadavg->rewind(); + + Float64 loadavg1 = 0; + Float64 loadavg5 = 0; + Float64 loadavg15 = 0; + UInt64 threads_runnable = 0; + UInt64 threads_total = 0; + + readText(loadavg1, *loadavg); + skipWhitespaceIfAny(*loadavg); + readText(loadavg5, *loadavg); + skipWhitespaceIfAny(*loadavg); + readText(loadavg15, *loadavg); + skipWhitespaceIfAny(*loadavg); + readText(threads_runnable, *loadavg); + assertChar('/', *loadavg); + readText(threads_total, *loadavg); + + new_values["LoadAverage1"] = loadavg1; + new_values["LoadAverage5"] = loadavg5; + new_values["LoadAverage15"] = loadavg15; + new_values["OSThreadsRunnable"] = threads_runnable; + new_values["OSThreadsTotal"] = threads_total; + } + + if (meminfo) + { + meminfo->rewind(); + - new_values["MemoryTotal"] = data.total; - new_values["MemoryFreeWithoutCached"] = data.free; - new_values["MemoryBuffers"] = data.buffers; - new_values["MemoryCached"] = data.cached; - new_values["MemoryFreeOrCached"] = data.free_and_cached; - new_values["MemorySwapTotal"] = data.swap_total; - new_values["MemorySwapFree"] = data.swap_free; - new_values["MemorySwapCached"] = data.swap_cached; } #endif diff --git a/src/Interpreters/AsynchronousMetrics.h b/src/Interpreters/AsynchronousMetrics.h index 36e0fabd8a9..7bb281842dd 100644 --- a/src/Interpreters/AsynchronousMetrics.h +++ b/src/Interpreters/AsynchronousMetrics.h @@ -6,13 +6,16 @@ #include #include #include +#include #include #include #include #include +#include #include + namespace DB { @@ -29,6 +32,23 @@ using AsynchronousMetricValues = std::unordered_map> servers_to_start_before_tables_, + std::shared_ptr> servers_); + + ~AsynchronousMetrics(); + + /// Separate method allows to initialize the `servers` variable beforehand. + void start(); + + /// Returns copy of all values. 
+ AsynchronousMetricValues getValues() const; + #if defined(ARCADIA_BUILD) /// This constructor needs only to provide backward compatibility with some other projects (hello, Arcadia). /// Never use this in the ClickHouse codebase. @@ -41,35 +61,6 @@ public: } #endif - /// The default value of update_period_seconds is for ClickHouse-over-YT - /// in Arcadia -- it uses its own server implementation that also uses these - /// metrics. - AsynchronousMetrics( - ContextPtr global_context_, - int update_period_seconds, - std::shared_ptr> servers_to_start_before_tables_, - std::shared_ptr> servers_) - : WithContext(global_context_) - , update_period(update_period_seconds) - , servers_to_start_before_tables(servers_to_start_before_tables_) - , servers(servers_) - { - } - - ~AsynchronousMetrics(); - - /// Separate method allows to initialize the `servers` variable beforehand. - void start() - { - /// Update once right now, to make metrics available just after server start - /// (without waiting for asynchronous_metrics_update_period_s). - update(); - thread = std::make_unique([this] { run(); }); - } - - /// Returns copy of all values. - AsynchronousMetricValues getValues() const; - private: const std::chrono::seconds update_period; std::shared_ptr> servers_to_start_before_tables{nullptr}; @@ -82,9 +73,16 @@ private: #if defined(OS_LINUX) MemoryStatisticsOS memory_stat; - MemoryInfoOS memory_info; - ProcessorStatisticsOS proc_stat; - DiskStatisticsOS disk_stat; + + std::optional meminfo; + std::optional mounts; + std::optional loadavg; + std::optional proc_stat; + std::optional cpuinfo; + std::optional schedstat; + std::optional sockstat; + std::optional netstat; + std::optional file_nr; #endif std::unique_ptr thread; From 1a83c45f24b939dd5c436e580ae3988ef4e1e9b9 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sun, 4 Jul 2021 06:04:42 +0300 Subject: [PATCH 724/931] Automatically create links in Web UI --- programs/server/play.html | 25 ++++++++++++++++++++++++- 1 file changed, 24 insertions(+), 1 deletion(-) diff --git a/programs/server/play.html b/programs/server/play.html index 0c039097ce1..dbbfcb81572 100644 --- a/programs/server/play.html +++ b/programs/server/play.html @@ -44,6 +44,7 @@ --table-header-color: #F8F8F8; --table-hover-color: #FFF8EF; --null-color: #A88; + --link-color: #08F; } [data-theme="dark"] { @@ -61,6 +62,7 @@ --table-header-color: #102020; --table-hover-color: #003333; --null-color: #A88; + --link-color: #4BDAF7; } html, body @@ -275,6 +277,12 @@ font-size: 110%; color: #080; } + + a, a:visited + { + color: var(--link-color); + text-decoration: none; + } @@ -482,6 +490,7 @@ let cell = response.data[row_idx][col_idx]; let is_null = (cell === null); + let is_link = false; /// Test: SELECT number, toString(number) AS str, number % 2 ? number : NULL AS nullable, range(number) AS arr, CAST((['hello', 'world'], [number, number % 2]) AS Map(String, UInt64)) AS map FROM numbers(10) let text; @@ -491,9 +500,23 @@ text = JSON.stringify(cell); } else { text = cell; + + /// If it looks like URL, create a link. This is for convenience. 
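                /// Examples (illustrative): 'https://clickhouse.com/docs' matches the regex below
                /// and is wrapped in a link; 'clickhouse.com' (no scheme) and values containing
                /// whitespace do not match /^https?:\/\/\S+$/ and stay plain text.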
+ if (typeof(cell) == 'string' && cell.match(/^https?:\/\/\S+$/)) { + is_link = true; + } } - td.appendChild(document.createTextNode(text)); + let node = document.createTextNode(text); + if (is_link) { + let link = document.createElement('a'); + link.appendChild(node); + link.href = text; + link.setAttribute('target', '_blank'); + node = link; + } + + td.appendChild(node); td.className = column_classes[col_idx]; if (is_null) { td.className += ' null'; From 0bc58b7056a3313647a5cb7fdfcd694e1d7ab584 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sun, 4 Jul 2021 06:09:44 +0300 Subject: [PATCH 725/931] Better color --- programs/server/play.html | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/programs/server/play.html b/programs/server/play.html index dbbfcb81572..066cd09d16a 100644 --- a/programs/server/play.html +++ b/programs/server/play.html @@ -44,7 +44,7 @@ --table-header-color: #F8F8F8; --table-hover-color: #FFF8EF; --null-color: #A88; - --link-color: #08F; + --link-color: #06D; } [data-theme="dark"] { From b39888f199607d45b725f29668d48735d576507a Mon Sep 17 00:00:00 2001 From: kssenii Date: Sun, 4 Jul 2021 08:54:41 +0000 Subject: [PATCH 726/931] Do not allow inconsistency in publication --- .../DatabaseMaterializedPostgreSQL.cpp | 16 +++- .../PostgreSQLReplicationHandler.cpp | 75 ++++++++++++++++--- .../PostgreSQL/PostgreSQLReplicationHandler.h | 2 +- .../test.py | 26 +------ 4 files changed, 83 insertions(+), 36 deletions(-) diff --git a/src/Databases/PostgreSQL/DatabaseMaterializedPostgreSQL.cpp b/src/Databases/PostgreSQL/DatabaseMaterializedPostgreSQL.cpp index 4cfb5a4d137..256affc68c8 100644 --- a/src/Databases/PostgreSQL/DatabaseMaterializedPostgreSQL.cpp +++ b/src/Databases/PostgreSQL/DatabaseMaterializedPostgreSQL.cpp @@ -29,6 +29,7 @@ namespace DB namespace ErrorCodes { extern const int NOT_IMPLEMENTED; + extern const int LOGICAL_ERROR; } DatabaseMaterializedPostgreSQL::DatabaseMaterializedPostgreSQL( @@ -63,7 +64,19 @@ void DatabaseMaterializedPostgreSQL::startSynchronization() settings->materialized_postgresql_tables_list.value); postgres::Connection connection(connection_info); - std::unordered_set tables_to_replicate = replication_handler->fetchRequiredTables(connection.getRef()); + NameSet tables_to_replicate; + try + { + tables_to_replicate = replication_handler->fetchRequiredTables(connection); + } + catch (...) + { + LOG_ERROR(log, "Unable to load replicated tables list"); + throw; + } + + if (tables_to_replicate.empty()) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Got empty list of tables to replicate"); for (const auto & table_name : tables_to_replicate) { @@ -156,6 +169,7 @@ void DatabaseMaterializedPostgreSQL::createTable(ContextPtr local_context, const void DatabaseMaterializedPostgreSQL::shutdown() { stopReplication(); + DatabaseAtomic::shutdown(); } diff --git a/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp b/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp index 1d8ab04cfec..67026d345eb 100644 --- a/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp +++ b/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp @@ -20,6 +20,7 @@ static const auto BACKOFF_TRESHOLD_MS = 10000; namespace ErrorCodes { extern const int LOGICAL_ERROR; + extern const int BAD_ARGUMENTS; } PostgreSQLReplicationHandler::PostgreSQLReplicationHandler( @@ -434,25 +435,75 @@ void PostgreSQLReplicationHandler::shutdownFinal() /// Used by MaterializedPostgreSQL database engine. 
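/// Illustrative scenario for the consistency check below (not part of the original patch):
/// a database is created with materialized_postgresql_tables_list = 'a,b', so a publication
/// for {a, b} exists; after a restart the list is changed to 'a,c'. The symmetric difference
/// {b, c} is non-empty, so the stale publication is dropped and recreated from the new list.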
-NameSet PostgreSQLReplicationHandler::fetchRequiredTables(pqxx::connection & connection_) +NameSet PostgreSQLReplicationHandler::fetchRequiredTables(postgres::Connection & connection_) { - pqxx::work tx(connection_); - bool publication_exists = isPublicationExist(tx); + pqxx::work tx(connection_.getRef()); + bool publication_exists_before_startup = isPublicationExist(tx); NameSet result_tables; - if (tables_list.empty() && !publication_exists) + Strings expected_tables; + if (!tables_list.empty()) { - /// Fetch all tables list from database. Publication does not exist yet, which means - /// that no replication took place. Publication will be created in - /// startSynchronization method. - result_tables = fetchPostgreSQLTablesList(tx); + splitInto<','>(expected_tables, tables_list); + if (expected_tables.empty()) + throw Exception(ErrorCodes::BAD_ARGUMENTS, "Cannot parse tables list: {}", tables_list); + for (auto & table_name : expected_tables) + boost::trim(table_name); + } + + if (publication_exists_before_startup) + { + if (tables_list.empty()) + { + /// There is no tables list, but publication already exists, then the expected behaviour + /// is to replicate the whole database. But it could be a server restart, so we can't drop it. + LOG_WARNING(log, + "Publication {} already exists and tables list is empty. Assuming publication is correct", + publication_name); + + result_tables = fetchPostgreSQLTablesList(tx); + } + /// Check tables list from publication is the same as expected tables list. + /// If not - drop publication and return expected tables list. + else + { + result_tables = fetchTablesFromPublication(tx); + NameSet diff; + std::set_symmetric_difference(expected_tables.begin(), expected_tables.end(), + result_tables.begin(), result_tables.end(), + std::inserter(diff, diff.begin())); + if (!diff.empty()) + { + String diff_tables; + for (const auto & table_name : diff) + { + if (!diff_tables.empty()) + diff_tables += ", "; + diff_tables += table_name; + } + + LOG_WARNING(log, + "Publication {} already exists, but specified tables list differs from publication tables list in tables: {}", + publication_name, diff_tables); + + connection->execWithRetry([&](pqxx::nontransaction & tx_){ dropPublication(tx_); }); + } + } } else { - if (!publication_exists) - createPublicationIfNeeded(tx, /* create_without_check = */ true); - - result_tables = fetchTablesFromPublication(tx); + if (!tables_list.empty()) + { + tx.commit(); + return NameSet(expected_tables.begin(), expected_tables.end()); + } + else + { + /// Fetch all tables list from database. Publication does not exist yet, which means + /// that no replication took place. Publication will be created in + /// startSynchronization method. + result_tables = fetchPostgreSQLTablesList(tx); + } } tx.commit(); diff --git a/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.h b/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.h index 6ae9ec31626..4b6321338b8 100644 --- a/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.h +++ b/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.h @@ -42,7 +42,7 @@ public: void addStorage(const std::string & table_name, StorageMaterializedPostgreSQL * storage); /// Fetch list of tables which are going to be replicated. Used for database engine. - NameSet fetchRequiredTables(pqxx::connection & connection_); + NameSet fetchRequiredTables(postgres::Connection & connection_); /// Start replication setup immediately. 
void startSynchronization(bool throw_on_error); diff --git a/tests/integration/test_postgresql_replica_database_engine/test.py b/tests/integration/test_postgresql_replica_database_engine/test.py index 2d8689f31e8..3a6bb29585e 100644 --- a/tests/integration/test_postgresql_replica_database_engine/test.py +++ b/tests/integration/test_postgresql_replica_database_engine/test.py @@ -72,11 +72,8 @@ def create_materialized_db(ip, port, instance.query(create_query) assert materialized_database in instance.query('SHOW DATABASES') -def drop_materialized_db(materialized_database='test_database', sync=False): - if sync: - instance.query('DROP DATABASE IF EXISTS {} SYNC'.format(materialized_database)) - else: - instance.query('DROP DATABASE IF EXISTS {}'.format(materialized_database)) +def drop_materialized_db(materialized_database='test_database'): + instance.query('DROP DATABASE IF EXISTS {}'.format(materialized_database)) assert materialized_database not in instance.query('SHOW DATABASES') def create_postgres_table(cursor, table_name, replica_identity_full=False, template=postgres_table_template): @@ -108,7 +105,6 @@ queries = [ ] -@pytest.mark.timeout(30) def assert_nested_table_is_created(table_name, materialized_database='test_database'): database_tables = instance.query('SHOW TABLES FROM {}'.format(materialized_database)) while table_name not in database_tables: @@ -117,7 +113,6 @@ def assert_nested_table_is_created(table_name, materialized_database='test_datab assert(table_name in database_tables) -@pytest.mark.timeout(30) def check_tables_are_synchronized(table_name, order_by='key', postgres_database='postgres_database', materialized_database='test_database'): assert_nested_table_is_created(table_name, materialized_database) @@ -149,9 +144,8 @@ def started_cluster(): cluster.shutdown() -@pytest.mark.timeout(120) def test_load_and_sync_all_database_tables(started_cluster): - drop_materialized_db(sync=True) + drop_materialized_db() conn = get_postgres_conn(ip=started_cluster.postgres_ip, port=started_cluster.postgres_port, database=True) @@ -177,7 +171,6 @@ def test_load_and_sync_all_database_tables(started_cluster): drop_materialized_db() -@pytest.mark.timeout(120) def test_replicating_dml(started_cluster): drop_materialized_db() conn = get_postgres_conn(ip=started_cluster.postgres_ip, @@ -221,7 +214,6 @@ def test_replicating_dml(started_cluster): drop_materialized_db() -@pytest.mark.timeout(120) def test_different_data_types(started_cluster): drop_materialized_db() conn = get_postgres_conn(ip=started_cluster.postgres_ip, @@ -307,9 +299,8 @@ def test_different_data_types(started_cluster): drop_materialized_db() -@pytest.mark.timeout(120) def test_load_and_sync_subset_of_database_tables(started_cluster): - drop_materialized_db(sync=True) + drop_materialized_db() conn = get_postgres_conn(ip=started_cluster.postgres_ip, port=started_cluster.postgres_port, database=True) @@ -358,7 +349,6 @@ def test_load_and_sync_subset_of_database_tables(started_cluster): drop_materialized_db() -@pytest.mark.timeout(120) def test_changing_replica_identity_value(started_cluster): drop_materialized_db() conn = get_postgres_conn(ip=started_cluster.postgres_ip, @@ -378,7 +368,6 @@ def test_changing_replica_identity_value(started_cluster): drop_materialized_db() -@pytest.mark.timeout(320) def test_clickhouse_restart(started_cluster): drop_materialized_db() conn = get_postgres_conn(ip=started_cluster.postgres_ip, @@ -407,7 +396,6 @@ def test_clickhouse_restart(started_cluster): drop_materialized_db() 
-@pytest.mark.timeout(120) def test_replica_identity_index(started_cluster): drop_materialized_db() conn = get_postgres_conn(ip=started_cluster.postgres_ip, @@ -436,7 +424,6 @@ def test_replica_identity_index(started_cluster): drop_materialized_db() -@pytest.mark.timeout(320) def test_table_schema_changes(started_cluster): drop_materialized_db() conn = get_postgres_conn(ip=started_cluster.postgres_ip, @@ -492,7 +479,6 @@ def test_table_schema_changes(started_cluster): instance.query("DROP DATABASE test_database") -@pytest.mark.timeout(120) def test_many_concurrent_queries(started_cluster): drop_materialized_db() conn = get_postgres_conn(ip=started_cluster.postgres_ip, @@ -572,7 +558,6 @@ def test_many_concurrent_queries(started_cluster): drop_materialized_db() -@pytest.mark.timeout(120) def test_single_transaction(started_cluster): drop_materialized_db() conn = get_postgres_conn(ip=started_cluster.postgres_ip, @@ -692,7 +677,6 @@ def test_multiple_databases(started_cluster): drop_materialized_db('test_database_2') -@pytest.mark.timeout(320) def test_concurrent_transactions(started_cluster): drop_materialized_db() conn = get_postgres_conn(ip=started_cluster.postgres_ip, @@ -737,7 +721,6 @@ def test_concurrent_transactions(started_cluster): drop_materialized_db() -@pytest.mark.timeout(320) def test_abrupt_connection_loss_while_heavy_replication(started_cluster): drop_materialized_db() conn = get_postgres_conn(ip=started_cluster.postgres_ip, @@ -839,7 +822,6 @@ def test_restart_server_while_replication_startup_not_finished(started_cluster): drop_materialized_db() -@pytest.mark.timeout(320) def test_abrupt_server_restart_while_heavy_replication(started_cluster): drop_materialized_db() conn = get_postgres_conn(ip=started_cluster.postgres_ip, From 520c4a8f8a0ed65649fce49d9eea99af657a584c Mon Sep 17 00:00:00 2001 From: l1tsolaiki Date: Sun, 4 Jul 2021 12:10:16 +0300 Subject: [PATCH 727/931] Fix according to review --- src/Functions/FunctionSQLJSON.h | 4 +++- src/Functions/JSONPath/CMakeLists.txt | 4 ++-- src/Functions/JSONPath/Generator/CMakeLists.txt | 8 ++++++++ .../{Generators => Generator}/GeneratorJSONPath.h | 12 ++++++------ .../JSONPath/{Generators => Generator}/IGenerator.h | 4 ++-- .../{Generators => Generator}/IGenerator_fwd.h | 2 +- .../JSONPath/{Generators => Generator}/IVisitor.h | 2 +- .../VisitorJSONPathMemberAccess.h | 8 +++----- .../{Generators => Generator}/VisitorJSONPathRange.h | 4 ++-- .../{Generators => Generator}/VisitorJSONPathRoot.h | 4 ++-- .../{Generators => Generator}/VisitorJSONPathStar.h | 4 ++-- .../{Generators => Generator}/VisitorStatus.h | 0 src/Functions/JSONPath/Generators/CMakeLists.txt | 8 -------- .../JSONPath/Parsers/ParserJSONPathMemberAccess.cpp | 6 +----- src/Functions/JSONPath/Parsers/ParserJSONPathQuery.h | 5 +---- .../0_stateless/01889_sql_json_functions.reference | 1 + .../queries/0_stateless/01889_sql_json_functions.sql | 1 + 17 files changed, 36 insertions(+), 41 deletions(-) create mode 100644 src/Functions/JSONPath/Generator/CMakeLists.txt rename src/Functions/JSONPath/{Generators => Generator}/GeneratorJSONPath.h (90%) rename src/Functions/JSONPath/{Generators => Generator}/IGenerator.h (81%) rename src/Functions/JSONPath/{Generators => Generator}/IGenerator_fwd.h (83%) rename src/Functions/JSONPath/{Generators => Generator}/IVisitor.h (94%) rename src/Functions/JSONPath/{Generators => Generator}/VisitorJSONPathMemberAccess.h (87%) rename src/Functions/JSONPath/{Generators => Generator}/VisitorJSONPathRange.h (94%) rename 
src/Functions/JSONPath/{Generators => Generator}/VisitorJSONPathRoot.h (87%) rename src/Functions/JSONPath/{Generators => Generator}/VisitorJSONPathStar.h (92%) rename src/Functions/JSONPath/{Generators => Generator}/VisitorStatus.h (100%) delete mode 100644 src/Functions/JSONPath/Generators/CMakeLists.txt diff --git a/src/Functions/FunctionSQLJSON.h b/src/Functions/FunctionSQLJSON.h index a6024a27e95..9e469c4ebac 100644 --- a/src/Functions/FunctionSQLJSON.h +++ b/src/Functions/FunctionSQLJSON.h @@ -11,7 +11,7 @@ #include #include #include -#include +#include #include #include #include @@ -257,6 +257,8 @@ public: else if (status == VisitorStatus::Error) { /// ON ERROR + /// Here it is possible to handle errors with ON ERROR (as described in ISO/IEC TR 19075-6), + /// however this functionality is not implemented yet } current_element = root; } diff --git a/src/Functions/JSONPath/CMakeLists.txt b/src/Functions/JSONPath/CMakeLists.txt index 8e65f7c8c6d..a1f5bf9bf2c 100644 --- a/src/Functions/JSONPath/CMakeLists.txt +++ b/src/Functions/JSONPath/CMakeLists.txt @@ -1,8 +1,8 @@ add_subdirectory(ASTs) target_link_libraries(clickhouse_functions PRIVATE clickhouse_functions_jsonpath_asts) -add_subdirectory(Generators) -target_link_libraries(clickhouse_functions PRIVATE clickhouse_functions_jsonpath_generators) +add_subdirectory(Generator) +target_link_libraries(clickhouse_functions PRIVATE clickhouse_functions_jsonpath_generator) add_subdirectory(Parsers) target_link_libraries(clickhouse_functions PRIVATE clickhouse_functions_jsonpath_parsers) diff --git a/src/Functions/JSONPath/Generator/CMakeLists.txt b/src/Functions/JSONPath/Generator/CMakeLists.txt new file mode 100644 index 00000000000..11215ba1078 --- /dev/null +++ b/src/Functions/JSONPath/Generator/CMakeLists.txt @@ -0,0 +1,8 @@ +include("${ClickHouse_SOURCE_DIR}/cmake/dbms_glob_sources.cmake") +add_headers_and_sources(clickhouse_functions_jsonpath_generator .) 
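The new comment in FunctionSQLJSON.h above means that, for now, any evaluation error simply falls back to the function's default result; no explicit ON ERROR clause can be given. The 01889 tests later in this series pin the current behaviour down, for example:

-- Invalid JSON, or an empty document, yields the default value (an empty string):
SELECT JSON_QUERY('$.hello', '{hello:{"world":"!"}}}');
SELECT JSON_QUERY('$.hello', '');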
+add_library(clickhouse_functions_jsonpath_generator ${clickhouse_functions_jsonpath_generator_sources} ${clickhouse_functions_jsonpath_generator_headers}) +target_link_libraries(clickhouse_functions_jsonpath_generator PRIVATE dbms) + +if (STRIP_DEBUG_SYMBOLS_FUNCTIONS) + target_compile_options(clickhouse_functions_jsonpath_generator PRIVATE "-g0") +endif() diff --git a/src/Functions/JSONPath/Generators/GeneratorJSONPath.h b/src/Functions/JSONPath/Generator/GeneratorJSONPath.h similarity index 90% rename from src/Functions/JSONPath/Generators/GeneratorJSONPath.h rename to src/Functions/JSONPath/Generator/GeneratorJSONPath.h index b918ceac003..291150f6df4 100644 --- a/src/Functions/JSONPath/Generators/GeneratorJSONPath.h +++ b/src/Functions/JSONPath/Generator/GeneratorJSONPath.h @@ -1,11 +1,11 @@ #pragma once -#include -#include -#include -#include -#include -#include +#include +#include +#include +#include +#include +#include #include diff --git a/src/Functions/JSONPath/Generators/IGenerator.h b/src/Functions/JSONPath/Generator/IGenerator.h similarity index 81% rename from src/Functions/JSONPath/Generators/IGenerator.h rename to src/Functions/JSONPath/Generator/IGenerator.h index d2cef9fe27b..323145e07e1 100644 --- a/src/Functions/JSONPath/Generators/IGenerator.h +++ b/src/Functions/JSONPath/Generator/IGenerator.h @@ -1,7 +1,7 @@ #pragma once -#include -#include +#include +#include #include namespace DB diff --git a/src/Functions/JSONPath/Generators/IGenerator_fwd.h b/src/Functions/JSONPath/Generator/IGenerator_fwd.h similarity index 83% rename from src/Functions/JSONPath/Generators/IGenerator_fwd.h rename to src/Functions/JSONPath/Generator/IGenerator_fwd.h index 57ed04d0f6f..bb5f64cd6f9 100644 --- a/src/Functions/JSONPath/Generators/IGenerator_fwd.h +++ b/src/Functions/JSONPath/Generator/IGenerator_fwd.h @@ -1,6 +1,6 @@ #pragma once -#include +#include namespace DB { diff --git a/src/Functions/JSONPath/Generators/IVisitor.h b/src/Functions/JSONPath/Generator/IVisitor.h similarity index 94% rename from src/Functions/JSONPath/Generators/IVisitor.h rename to src/Functions/JSONPath/Generator/IVisitor.h index 1461b842829..1a94106a435 100644 --- a/src/Functions/JSONPath/Generators/IVisitor.h +++ b/src/Functions/JSONPath/Generator/IVisitor.h @@ -1,6 +1,6 @@ #pragma once -#include +#include namespace DB { diff --git a/src/Functions/JSONPath/Generators/VisitorJSONPathMemberAccess.h b/src/Functions/JSONPath/Generator/VisitorJSONPathMemberAccess.h similarity index 87% rename from src/Functions/JSONPath/Generators/VisitorJSONPathMemberAccess.h rename to src/Functions/JSONPath/Generator/VisitorJSONPathMemberAccess.h index b0c601458b6..5fe35e75a84 100644 --- a/src/Functions/JSONPath/Generators/VisitorJSONPathMemberAccess.h +++ b/src/Functions/JSONPath/Generator/VisitorJSONPathMemberAccess.h @@ -1,8 +1,8 @@ #pragma once #include -#include -#include +#include +#include namespace DB { @@ -25,19 +25,17 @@ public: VisitorStatus visit(typename JSONParser::Element & element) override { + this->setExhausted(true); if (!element.isObject()) { - this->setExhausted(true); return VisitorStatus::Error; } typename JSONParser::Element result; if (!element.getObject().find(std::string_view(member_access_ptr->member_name), result)) { - this->setExhausted(true); return VisitorStatus::Error; } apply(element); - this->setExhausted(true); return VisitorStatus::Ok; } diff --git a/src/Functions/JSONPath/Generators/VisitorJSONPathRange.h b/src/Functions/JSONPath/Generator/VisitorJSONPathRange.h similarity index 94% rename 
from src/Functions/JSONPath/Generators/VisitorJSONPathRange.h rename to src/Functions/JSONPath/Generator/VisitorJSONPathRange.h index 57e208271d0..40d4f6ad95e 100644 --- a/src/Functions/JSONPath/Generators/VisitorJSONPathRange.h +++ b/src/Functions/JSONPath/Generator/VisitorJSONPathRange.h @@ -1,8 +1,8 @@ #pragma once #include -#include -#include +#include +#include namespace DB { diff --git a/src/Functions/JSONPath/Generators/VisitorJSONPathRoot.h b/src/Functions/JSONPath/Generator/VisitorJSONPathRoot.h similarity index 87% rename from src/Functions/JSONPath/Generators/VisitorJSONPathRoot.h rename to src/Functions/JSONPath/Generator/VisitorJSONPathRoot.h index d8b88ce0255..5c48c12782f 100644 --- a/src/Functions/JSONPath/Generators/VisitorJSONPathRoot.h +++ b/src/Functions/JSONPath/Generator/VisitorJSONPathRoot.h @@ -1,8 +1,8 @@ #pragma once #include -#include -#include +#include +#include namespace DB { diff --git a/src/Functions/JSONPath/Generators/VisitorJSONPathStar.h b/src/Functions/JSONPath/Generator/VisitorJSONPathStar.h similarity index 92% rename from src/Functions/JSONPath/Generators/VisitorJSONPathStar.h rename to src/Functions/JSONPath/Generator/VisitorJSONPathStar.h index bc840597f2a..4a54a76c199 100644 --- a/src/Functions/JSONPath/Generators/VisitorJSONPathStar.h +++ b/src/Functions/JSONPath/Generator/VisitorJSONPathStar.h @@ -1,8 +1,8 @@ #pragma once #include -#include -#include +#include +#include namespace DB { diff --git a/src/Functions/JSONPath/Generators/VisitorStatus.h b/src/Functions/JSONPath/Generator/VisitorStatus.h similarity index 100% rename from src/Functions/JSONPath/Generators/VisitorStatus.h rename to src/Functions/JSONPath/Generator/VisitorStatus.h diff --git a/src/Functions/JSONPath/Generators/CMakeLists.txt b/src/Functions/JSONPath/Generators/CMakeLists.txt deleted file mode 100644 index 76a116132fd..00000000000 --- a/src/Functions/JSONPath/Generators/CMakeLists.txt +++ /dev/null @@ -1,8 +0,0 @@ -include("${ClickHouse_SOURCE_DIR}/cmake/dbms_glob_sources.cmake") -add_headers_and_sources(clickhouse_functions_jsonpath_generators .) 
-add_library(clickhouse_functions_jsonpath_generators ${clickhouse_functions_jsonpath_generators_sources} ${clickhouse_functions_jsonpath_generators_headers}) -target_link_libraries(clickhouse_functions_jsonpath_generators PRIVATE dbms) - -if (STRIP_DEBUG_SYMBOLS_FUNCTIONS) - target_compile_options(clickhouse_functions_jsonpath_generators PRIVATE "-g0") -endif() diff --git a/src/Functions/JSONPath/Parsers/ParserJSONPathMemberAccess.cpp b/src/Functions/JSONPath/Parsers/ParserJSONPathMemberAccess.cpp index 85b43217867..c7f047eb8fb 100644 --- a/src/Functions/JSONPath/Parsers/ParserJSONPathMemberAccess.cpp +++ b/src/Functions/JSONPath/Parsers/ParserJSONPathMemberAccess.cpp @@ -36,11 +36,7 @@ bool ParserJSONPathMemberAccess::parseImpl(Pos & pos, ASTPtr & node, Expected & auto member_access = std::make_shared(); node = member_access; - if (!tryGetIdentifierNameInto(member_name, member_access->member_name)) - { - return false; - } - return true; + return tryGetIdentifierNameInto(member_name, member_access->member_name); } } diff --git a/src/Functions/JSONPath/Parsers/ParserJSONPathQuery.h b/src/Functions/JSONPath/Parsers/ParserJSONPathQuery.h index cffec125c70..fbe7321562e 100644 --- a/src/Functions/JSONPath/Parsers/ParserJSONPathQuery.h +++ b/src/Functions/JSONPath/Parsers/ParserJSONPathQuery.h @@ -9,9 +9,6 @@ class ParserJSONPathQuery : public IParserBase { protected: const char * getName() const override { return "ParserJSONPathQuery"; } - bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected) override; - -private: - /// backlog: strict or lax mode + bool parseImpl(Pos & pos, ASTPtr & query, Expected & expected) override; }; } diff --git a/tests/queries/0_stateless/01889_sql_json_functions.reference b/tests/queries/0_stateless/01889_sql_json_functions.reference index e38058ffc50..8ab94781237 100644 --- a/tests/queries/0_stateless/01889_sql_json_functions.reference +++ b/tests/queries/0_stateless/01889_sql_json_functions.reference @@ -19,6 +19,7 @@ null [["world","world2"]] [{"world":"!"}] +[0, 1, 4, 0, -1, -4] --JSON_EXISTS-- 1 diff --git a/tests/queries/0_stateless/01889_sql_json_functions.sql b/tests/queries/0_stateless/01889_sql_json_functions.sql index a1749b3be24..98378a0090c 100644 --- a/tests/queries/0_stateless/01889_sql_json_functions.sql +++ b/tests/queries/0_stateless/01889_sql_json_functions.sql @@ -21,6 +21,7 @@ SELECT JSON_QUERY('$.hello', '{"hello":["world","world2"]}'); SELECT JSON_QUERY('$.hello', '{"hello":{"world":"!"}}'); SELECT JSON_QUERY('$.hello', '{hello:{"world":"!"}}}'); -- invalid json => default value (empty string) SELECT JSON_QUERY('$.hello', ''); +SELECT JSON_QUERY('$.array[*][0 to 2, 4]', '{"array":[[0, 1, 2, 3, 4, 5], [0, -1, -2, -3, -4, -5]]}'); SELECT '--JSON_EXISTS--'; SELECT JSON_EXISTS('$', '{"hello":1}'); From c060963875e673d9db54134fab3384b92c9dbca6 Mon Sep 17 00:00:00 2001 From: kssenii Date: Sun, 4 Jul 2021 13:31:44 +0300 Subject: [PATCH 728/931] Better --- src/Functions/FunctionsJSON.h | 8 ++------ tests/queries/0_stateless/00918_json_functions.reference | 4 +++- tests/queries/0_stateless/00918_json_functions.sql | 4 +++- 3 files changed, 8 insertions(+), 8 deletions(-) diff --git a/src/Functions/FunctionsJSON.h b/src/Functions/FunctionsJSON.h index 4b087feac9c..a8b1f014383 100644 --- a/src/Functions/FunctionsJSON.h +++ b/src/Functions/FunctionsJSON.h @@ -30,7 +30,6 @@ #include #include #include -#include #include #include #include @@ -679,11 +678,8 @@ struct JSONExtractTree return false; const auto * type = assert_cast *>(data_type.get()); - 
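The new 01889 test case above exercises JSONPath subscript lists with ranges. Judging by the reference output added alongside it, a range written as 0 to 2 selects indices 0 and 1, that is, the upper bound is treated as exclusive in this implementation, so [0 to 2, 4] picks elements 0, 1 and 4 from every array matched by [*]. Note also that the JSONPath argument comes first in this version of the functions:

SELECT JSON_QUERY('$.array[*][0 to 2, 4]', '{"array":[[0, 1, 2, 3, 4, 5], [0, -1, -2, -3, -4, -5]]}');
-- Result per the reference file: [0, 1, 4, 0, -1, -4]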
std::stringstream ss; // STYLE_CHECK_ALLOW_STD_STRING_STREAM - ss << std::setprecision(type->getPrecision()) << element.getDouble(); - auto str = ss.str(); - ReadBufferFromString res(str); - assert_cast *>(type->getDefaultSerialization().get())->deserializeText(dest, res, {}); + auto result = convertToDecimal, DataTypeDecimal>(element.getDouble(), type->getScale()); + assert_cast &>(dest).insert(result); return true; } private: diff --git a/tests/queries/0_stateless/00918_json_functions.reference b/tests/queries/0_stateless/00918_json_functions.reference index acf53b1df19..d9b34df8bf8 100644 --- a/tests/queries/0_stateless/00918_json_functions.reference +++ b/tests/queries/0_stateless/00918_json_functions.reference @@ -63,7 +63,9 @@ hello (3333.6,'test') (3333.6333333333,'test') 123456.1234 Decimal(20, 4) -123456789012345.1250 +123456789012345.1136 123456789012345.1136 +1234567890.12345677879616925706 (1234567890.12345677879616925706,'test') +1234567890.123456695758468374595199311875 (1234567890.123456695758468374595199311875,'test') --JSONExtractKeysAndValues-- [('a','hello')] [('b',[-100,200,300])] diff --git a/tests/queries/0_stateless/00918_json_functions.sql b/tests/queries/0_stateless/00918_json_functions.sql index 86fad885e9b..f548b9e5e66 100644 --- a/tests/queries/0_stateless/00918_json_functions.sql +++ b/tests/queries/0_stateless/00918_json_functions.sql @@ -72,7 +72,9 @@ SELECT JSONExtract('{"a": "hello", "b": [-100, 200.0, 300]}', 'a', 'LowCardinali SELECT JSONExtract('{"a":3333.6333333333333333333333, "b":"test"}', 'Tuple(a Decimal(10,1), b LowCardinality(String))'); SELECT JSONExtract('{"a":3333.6333333333333333333333, "b":"test"}', 'Tuple(a Decimal(20,10), b LowCardinality(String))'); SELECT JSONExtract('{"a":123456.123456}', 'a', 'Decimal(20, 4)') as a, toTypeName(a); -SELECT JSONExtract('{"a":123456789012345.12}', 'a', 'Decimal(30, 4)'); +SELECT toDecimal64(123456789012345.12, 4), JSONExtract('{"a":123456789012345.12}', 'a', 'Decimal(30, 4)'); +SELECT toDecimal128(1234567890.12345678901234567890, 20), JSONExtract('{"a":1234567890.12345678901234567890, "b":"test"}', 'Tuple(a Decimal(35,20), b LowCardinality(String))'); +SELECT toDecimal256(1234567890.123456789012345678901234567890, 30), JSONExtract('{"a":1234567890.12345678901234567890, "b":"test"}', 'Tuple(a Decimal(45,30), b LowCardinality(String))'); SELECT '--JSONExtractKeysAndValues--'; SELECT JSONExtractKeysAndValues('{"a": "hello", "b": [-100, 200.0, 300]}', 'String'); From ae5cb2c8bfb12e42536f3832cc5c99f72331b17e Mon Sep 17 00:00:00 2001 From: l1tsolaiki Date: Sun, 4 Jul 2021 13:43:22 +0300 Subject: [PATCH 729/931] Fix test (empty string) --- tests/queries/0_stateless/01889_sql_json_functions.reference | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/queries/0_stateless/01889_sql_json_functions.reference b/tests/queries/0_stateless/01889_sql_json_functions.reference index 8ab94781237..1ae1fccdd56 100644 --- a/tests/queries/0_stateless/01889_sql_json_functions.reference +++ b/tests/queries/0_stateless/01889_sql_json_functions.reference @@ -19,8 +19,8 @@ null [["world","world2"]] [{"world":"!"}] -[0, 1, 4, 0, -1, -4] +[0, 1, 4, 0, -1, -4] --JSON_EXISTS-- 1 0 From 28d56460afb8f50e05ea0f1c8cae35d3b17f1a90 Mon Sep 17 00:00:00 2001 From: robot-clickhouse Date: Sun, 4 Jul 2021 15:36:45 +0300 Subject: [PATCH 730/931] Update version_date.tsv after release 21.6.6.51 --- utils/list-versions/version_date.tsv | 1 + 1 file changed, 1 insertion(+) diff --git a/utils/list-versions/version_date.tsv 
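The Decimal change above drops the stringstream round-trip in favour of a direct convertToDecimal() call on the parsed double, and the updated 00918 expectations now compare JSONExtract against the corresponding toDecimal* cast of the same literal:

SELECT toDecimal64(123456789012345.12, 4), JSONExtract('{"a":123456789012345.12}', 'a', 'Decimal(30, 4)');
-- 123456789012345.1136   123456789012345.1136

Both columns agree because both now go through the same double-to-Decimal conversion; the .1136 tail is ordinary double rounding at this magnitude.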
b/utils/list-versions/version_date.tsv index 3df6b8e6616..e617f436dfa 100644 --- a/utils/list-versions/version_date.tsv +++ b/utils/list-versions/version_date.tsv @@ -1,3 +1,4 @@ +v21.6.6.51-stable 2021-07-02 v21.6.5.37-stable 2021-06-19 v21.6.4.26-stable 2021-06-11 v21.6.3.14-stable 2021-06-04 From 9ee9592168400b726c8e161df95554f79a6d6af4 Mon Sep 17 00:00:00 2001 From: robot-clickhouse Date: Sun, 4 Jul 2021 15:55:23 +0300 Subject: [PATCH 731/931] Update version_date.tsv after release 21.5.8.21 --- utils/list-versions/version_date.tsv | 1 + 1 file changed, 1 insertion(+) diff --git a/utils/list-versions/version_date.tsv b/utils/list-versions/version_date.tsv index e617f436dfa..096c575a9f3 100644 --- a/utils/list-versions/version_date.tsv +++ b/utils/list-versions/version_date.tsv @@ -2,6 +2,7 @@ v21.6.6.51-stable 2021-07-02 v21.6.5.37-stable 2021-06-19 v21.6.4.26-stable 2021-06-11 v21.6.3.14-stable 2021-06-04 +v21.5.8.21-stable 2021-07-02 v21.5.7.9-stable 2021-06-22 v21.5.6.6-stable 2021-05-29 v21.5.5.12-stable 2021-05-20 From 0955d001cdca38473d32330444e2dbf284ab66f4 Mon Sep 17 00:00:00 2001 From: Evgeniia Sudarikova Date: Sun, 4 Jul 2021 16:11:29 +0300 Subject: [PATCH 732/931] added array to types --- .../external-dictionaries/external-dicts-dict-structure.md | 2 +- .../external-dictionaries/external-dicts-dict-structure.md | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/en/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-structure.md b/docs/en/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-structure.md index a7ab23da7cb..bee77a382d7 100644 --- a/docs/en/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-structure.md +++ b/docs/en/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-structure.md @@ -159,7 +159,7 @@ Configuration fields: | Tag | Description | Required | |------------------------------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|----------| | `name` | Column name. | Yes | -| `type` | ClickHouse data type: [UInt8](../../../sql-reference/data-types/int-uint.md), [UInt16](../../../sql-reference/data-types/int-uint.md), [UInt32](../../../sql-reference/data-types/int-uint.md), [UInt64](../../../sql-reference/data-types/int-uint.md), [Int8](../../../sql-reference/data-types/int-uint.md), [Int16](../../../sql-reference/data-types/int-uint.md), [Int32](../../../sql-reference/data-types/int-uint.md), [Int64](../../../sql-reference/data-types/int-uint.md), [Float32](../../../sql-reference/data-types/float.md), [Float64](../../../sql-reference/data-types/float.md), [UUID](../../../sql-reference/data-types/uuid.md), [Decimal32](../../../sql-reference/data-types/decimal.md), [Decimal64](../../../sql-reference/data-types/decimal.md), [Decimal128](../../../sql-reference/data-types/decimal.md), [Decimal256](../../../sql-reference/data-types/decimal.md), [String](../../../sql-reference/data-types/string.md).
ClickHouse tries to cast value from dictionary to the specified data type. For example, for MySQL, the field might be `TEXT`, `VARCHAR`, or `BLOB` in the MySQL source table, but it can be uploaded as `String` in ClickHouse.
[Nullable](../../../sql-reference/data-types/nullable.md) is currently supported for [Flat](external-dicts-dict-layout.md#flat), [Hashed](external-dicts-dict-layout.md#dicts-external_dicts_dict_layout-hashed), [ComplexKeyHashed](external-dicts-dict-layout.md#complex-key-hashed), [Direct](external-dicts-dict-layout.md#direct), [ComplexKeyDirect](external-dicts-dict-layout.md#complex-key-direct), [RangeHashed](external-dicts-dict-layout.md#range-hashed), [Polygon](external-dicts-dict-polygon.md), [Cache](external-dicts-dict-layout.md#cache), [ComplexKeyCache](external-dicts-dict-layout.md#complex-key-cache), [SSDCache](external-dicts-dict-layout.md#ssd-cache), [SSDComplexKeyCache](external-dicts-dict-layout.md#complex-key-ssd-cache) dictionaries. In [IPTrie](external-dicts-dict-layout.md#ip-trie) dictionaries `Nullable` types are not supported. | Yes | +| `type` | ClickHouse data type: [UInt8](../../../sql-reference/data-types/int-uint.md), [UInt16](../../../sql-reference/data-types/int-uint.md), [UInt32](../../../sql-reference/data-types/int-uint.md), [UInt64](../../../sql-reference/data-types/int-uint.md), [Int8](../../../sql-reference/data-types/int-uint.md), [Int16](../../../sql-reference/data-types/int-uint.md), [Int32](../../../sql-reference/data-types/int-uint.md), [Int64](../../../sql-reference/data-types/int-uint.md), [Float32](../../../sql-reference/data-types/float.md), [Float64](../../../sql-reference/data-types/float.md), [UUID](../../../sql-reference/data-types/uuid.md), [Decimal32](../../../sql-reference/data-types/decimal.md), [Decimal64](../../../sql-reference/data-types/decimal.md), [Decimal128](../../../sql-reference/data-types/decimal.md), [Decimal256](../../../sql-reference/data-types/decimal.md), [String](../../../sql-reference/data-types/string.md), [Array](../../../sql-reference/data-types/array.md).
ClickHouse tries to cast value from dictionary to the specified data type. For example, for MySQL, the field might be `TEXT`, `VARCHAR`, or `BLOB` in the MySQL source table, but it can be uploaded as `String` in ClickHouse.
[Nullable](../../../sql-reference/data-types/nullable.md) is currently supported for [Flat](external-dicts-dict-layout.md#flat), [Hashed](external-dicts-dict-layout.md#dicts-external_dicts_dict_layout-hashed), [ComplexKeyHashed](external-dicts-dict-layout.md#complex-key-hashed), [Direct](external-dicts-dict-layout.md#direct), [ComplexKeyDirect](external-dicts-dict-layout.md#complex-key-direct), [RangeHashed](external-dicts-dict-layout.md#range-hashed), [Polygon](external-dicts-dict-polygon.md), [Cache](external-dicts-dict-layout.md#cache), [ComplexKeyCache](external-dicts-dict-layout.md#complex-key-cache), [SSDCache](external-dicts-dict-layout.md#ssd-cache), [SSDComplexKeyCache](external-dicts-dict-layout.md#complex-key-ssd-cache) dictionaries. In [IPTrie](external-dicts-dict-layout.md#ip-trie) dictionaries `Nullable` types are not supported. | Yes | | `null_value` | Default value for a non-existing element.
In the example, it is an empty string. [NULL](../../syntax.md#null-literal) value can be used only for the `Nullable` types (see the previous line with types description). | Yes | | `expression` | [Expression](../../../sql-reference/syntax.md#syntax-expressions) that ClickHouse executes on the value.
The expression can be a column name in the remote SQL database. Thus, you can use it to create an alias for the remote column.

Default value: no expression. | No | | `hierarchical` | If `true`, the attribute contains the value of a parent key for the current key. See [Hierarchical Dictionaries](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-hierarchical.md).

Default value: `false`. | No | diff --git a/docs/ru/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-structure.md b/docs/ru/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-structure.md index 2feb088b4d9..197fde71279 100644 --- a/docs/ru/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-structure.md +++ b/docs/ru/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-structure.md @@ -159,7 +159,7 @@ CREATE DICTIONARY somename ( | Тег | Описание | Обязательный | |------------------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|--------------| | `name` | Имя столбца. | Да | -| `type` | Тип данных ClickHouse: [UInt8](../../../sql-reference/data-types/int-uint.md), [UInt16](../../../sql-reference/data-types/int-uint.md), [UInt32](../../../sql-reference/data-types/int-uint.md), [UInt64](../../../sql-reference/data-types/int-uint.md), [Int8](../../../sql-reference/data-types/int-uint.md), [Int16](../../../sql-reference/data-types/int-uint.md), [Int32](../../../sql-reference/data-types/int-uint.md), [Int64](../../../sql-reference/data-types/int-uint.md), [Float32](../../../sql-reference/data-types/float.md), [Float64](../../../sql-reference/data-types/float.md), [UUID](../../../sql-reference/data-types/uuid.md), [Decimal32](../../../sql-reference/data-types/decimal.md), [Decimal64](../../../sql-reference/data-types/decimal.md), [Decimal128](../../../sql-reference/data-types/decimal.md), [Decimal256](../../../sql-reference/data-types/decimal.md), [String](../../../sql-reference/data-types/string.md).
ClickHouse пытается привести значение из словаря к заданному типу данных. Например, в случае MySQL, в таблице-источнике поле может быть `TEXT`, `VARCHAR`, `BLOB`, но загружено может быть как `String`.
[Nullable](../../../sql-reference/data-types/nullable.md) в настоящее время поддерживается для словарей [Flat](external-dicts-dict-layout.md#flat), [Hashed](external-dicts-dict-layout.md#dicts-external_dicts_dict_layout-hashed), [ComplexKeyHashed](external-dicts-dict-layout.md#complex-key-hashed), [Direct](external-dicts-dict-layout.md#direct), [ComplexKeyDirect](external-dicts-dict-layout.md#complex-key-direct), [RangeHashed](external-dicts-dict-layout.md#range-hashed), [Polygon](external-dicts-dict-polygon.md), [Cache](external-dicts-dict-layout.md#cache), [ComplexKeyCache](external-dicts-dict-layout.md#complex-key-cache), [SSDCache](external-dicts-dict-layout.md#ssd-cache), [SSDComplexKeyCache](external-dicts-dict-layout.md#complex-key-ssd-cache). Для словарей [IPTrie](external-dicts-dict-layout.md#ip-trie) `Nullable`-типы не поддерживаются. | Да | +| `type` | Тип данных ClickHouse: [UInt8](../../../sql-reference/data-types/int-uint.md), [UInt16](../../../sql-reference/data-types/int-uint.md), [UInt32](../../../sql-reference/data-types/int-uint.md), [UInt64](../../../sql-reference/data-types/int-uint.md), [Int8](../../../sql-reference/data-types/int-uint.md), [Int16](../../../sql-reference/data-types/int-uint.md), [Int32](../../../sql-reference/data-types/int-uint.md), [Int64](../../../sql-reference/data-types/int-uint.md), [Float32](../../../sql-reference/data-types/float.md), [Float64](../../../sql-reference/data-types/float.md), [UUID](../../../sql-reference/data-types/uuid.md), [Decimal32](../../../sql-reference/data-types/decimal.md), [Decimal64](../../../sql-reference/data-types/decimal.md), [Decimal128](../../../sql-reference/data-types/decimal.md), [Decimal256](../../../sql-reference/data-types/decimal.md), [String](../../../sql-reference/data-types/string.md), [Array](../../../sql-reference/data-types/array.md).
ClickHouse пытается привести значение из словаря к заданному типу данных. Например, в случае MySQL, в таблице-источнике поле может быть `TEXT`, `VARCHAR`, `BLOB`, но загружено может быть как `String`.
[Nullable](../../../sql-reference/data-types/nullable.md) в настоящее время поддерживается для словарей [Flat](external-dicts-dict-layout.md#flat), [Hashed](external-dicts-dict-layout.md#dicts-external_dicts_dict_layout-hashed), [ComplexKeyHashed](external-dicts-dict-layout.md#complex-key-hashed), [Direct](external-dicts-dict-layout.md#direct), [ComplexKeyDirect](external-dicts-dict-layout.md#complex-key-direct), [RangeHashed](external-dicts-dict-layout.md#range-hashed), [Polygon](external-dicts-dict-polygon.md), [Cache](external-dicts-dict-layout.md#cache), [ComplexKeyCache](external-dicts-dict-layout.md#complex-key-cache), [SSDCache](external-dicts-dict-layout.md#ssd-cache), [SSDComplexKeyCache](external-dicts-dict-layout.md#complex-key-ssd-cache). Для словарей [IPTrie](external-dicts-dict-layout.md#ip-trie) `Nullable`-типы не поддерживаются. | Да | | `null_value` | Значение по умолчанию для несуществующего элемента.
В примере это пустая строка. Значение [NULL](../../syntax.md#null-literal) можно указывать только для типов `Nullable` (см. предыдущую строку с описанием типов). | Да | | `expression` | [Выражение](../../syntax.md#syntax-expressions), которое ClickHouse выполняет со значением.
Выражением может быть имя столбца в удаленной SQL базе. Таким образом, вы можете использовать его для создания псевдонима удаленного столбца.

Значение по умолчанию: нет выражения. | Нет | | `hierarchical` | Если `true`, то атрибут содержит ключ предка для текущего элемента. Смотрите [Иерархические словари](external-dicts-dict-hierarchical.md).

Значение по умолчанию: `false`. | Нет | From 0dda5b67c40fe6e0ff456afed673dbba8fd7b0ca Mon Sep 17 00:00:00 2001 From: kssenii Date: Sun, 4 Jul 2021 16:14:27 +0300 Subject: [PATCH 733/931] Fix set SQL_SELECT_LIMIT --- src/Server/MySQLHandler.cpp | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/src/Server/MySQLHandler.cpp b/src/Server/MySQLHandler.cpp index f06ae2cb8f1..beace5dd576 100644 --- a/src/Server/MySQLHandler.cpp +++ b/src/Server/MySQLHandler.cpp @@ -66,6 +66,7 @@ static const size_t SSL_REQUEST_PAYLOAD_SIZE = 32; static String selectEmptyReplacementQuery(const String & query); static String showTableStatusReplacementQuery(const String & query); static String killConnectionIdReplacementQuery(const String & query); +static String selectLimitReplacementQuery(const String & query); MySQLHandler::MySQLHandler(IServer & server_, const Poco::Net::StreamSocket & socket_, bool ssl_enabled, size_t connection_id_) @@ -83,6 +84,7 @@ MySQLHandler::MySQLHandler(IServer & server_, const Poco::Net::StreamSocket & so replacements.emplace("KILL QUERY", killConnectionIdReplacementQuery); replacements.emplace("SHOW TABLE STATUS LIKE", showTableStatusReplacementQuery); replacements.emplace("SHOW VARIABLES", selectEmptyReplacementQuery); + replacements.emplace("SET SQL_SELECT_LIMIT", selectLimitReplacementQuery); } void MySQLHandler::run() @@ -461,6 +463,14 @@ static String showTableStatusReplacementQuery(const String & query) return query; } +static String selectLimitReplacementQuery(const String & query) +{ + const String prefix = "SET SQL_SELECT_LIMIT"; + if (query.starts_with(prefix)) + return "SET limit" + std::string(query.data() + prefix.length()); + return query; +} + /// Replace "KILL QUERY [connection_id]" into "KILL QUERY WHERE query_id = 'mysql:[connection_id]'". 
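The replacement above is a plain prefix substitution: everything after SQL_SELECT_LIMIT is carried over verbatim onto ClickHouse's native limit setting, and statements that do not start with the prefix are returned unchanged. A sketch of the effect for a MySQL-protocol session:

-- Statement sent by a MySQL client:
SET SQL_SELECT_LIMIT = 10;
-- Statement ClickHouse actually executes after the rewrite:
SET limit = 10;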
static String killConnectionIdReplacementQuery(const String & query) { From e22e12beb672b02e7bdc6ba6088815e05ac894e8 Mon Sep 17 00:00:00 2001 From: robot-clickhouse Date: Sun, 4 Jul 2021 16:51:51 +0300 Subject: [PATCH 740/931] Update version_date.tsv after release 21.3.14.1 --- utils/list-versions/version_date.tsv | 1 + 1 file changed, 1 insertion(+) diff --git a/utils/list-versions/version_date.tsv b/utils/list-versions/version_date.tsv index 096c575a9f3..541dea23698 100644 --- a/utils/list-versions/version_date.tsv +++ b/utils/list-versions/version_date.tsv @@ -11,6 +11,7 @@ v21.4.6.55-stable 2021-04-30 v21.4.5.46-stable 2021-04-24 v21.4.4.30-stable 2021-04-16 v21.4.3.21-stable 2021-04-12 +v21.3.14.1-lts 2021-07-01 v21.3.13.9-lts 2021-06-22 v21.3.12.2-lts 2021-05-25 v21.3.11.5-lts 2021-05-14 From 15a36af4d6f331dc448bc3e7a231584a639ca12b Mon Sep 17 00:00:00 2001 From: zxc111 Date: Sun, 4 Jul 2021 22:26:09 +0800 Subject: [PATCH 735/931] Add empty/notEmpty support UUID #3446 --- src/DataTypes/IDataType.h | 1 + src/Functions/CRC.cpp | 5 +++++ src/Functions/EmptyImpl.h | 6 ++++++ src/Functions/FunctionStringOrArrayToT.h | 15 +++++++++++++-- src/Functions/array/length.cpp | 5 +++++ src/Functions/isValidUTF8.cpp | 5 +++++ src/Functions/lengthUTF8.cpp | 5 +++++ 7 files changed, 40 insertions(+), 2 deletions(-) diff --git a/src/DataTypes/IDataType.h b/src/DataTypes/IDataType.h index 8e54036a21c..fb410336dcb 100644 --- a/src/DataTypes/IDataType.h +++ b/src/DataTypes/IDataType.h @@ -357,6 +357,7 @@ inline bool isTuple(const DataTypePtr & data_type) { return WhichDataType(data_t inline bool isArray(const DataTypePtr & data_type) { return WhichDataType(data_type).isArray(); } inline bool isMap(const DataTypePtr & data_type) { return WhichDataType(data_type).isMap(); } inline bool isNothing(const DataTypePtr & data_type) { return WhichDataType(data_type).isNothing(); } +inline bool isUUID(const DataTypePtr & data_type) { return WhichDataType(data_type).isUUID(); } template inline bool isUInt8(const T & data_type) diff --git a/src/Functions/CRC.cpp b/src/Functions/CRC.cpp index 6083e5ef16f..00aa631c85b 100644 --- a/src/Functions/CRC.cpp +++ b/src/Functions/CRC.cpp @@ -110,6 +110,11 @@ struct CRCFunctionWrapper throw Exception("Cannot apply function " + std::string(Impl::name) + " to Array argument", ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT); } + [[noreturn]] static void uuid(const ColumnUUID::Container & /*container*/, size_t /*n*/, PaddedPODArray & /*res*/) + { + throw Exception("Cannot apply function " + std::string(Impl::name) + " to UUID argument", ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT); + } + private: static ReturnType doCRC(const ColumnString::Chars & buf, size_t offset, size_t size) { diff --git a/src/Functions/EmptyImpl.h b/src/Functions/EmptyImpl.h index 164441135bd..c3117e0e52d 100644 --- a/src/Functions/EmptyImpl.h +++ b/src/Functions/EmptyImpl.h @@ -54,6 +54,12 @@ struct EmptyImpl prev_offset = offsets[i]; } } + + /// The all-zero UUID is considered empty; check each row, not just the first element. + static void uuid(const ColumnUUID::Container & container, size_t n, PaddedPODArray & res) + { + for (size_t i = 0; i < n; ++i) + res[i] = negative ^ (container[i].toUnderType() == 0); + } }; } diff --git a/src/Functions/FunctionStringOrArrayToT.h b/src/Functions/FunctionStringOrArrayToT.h index 158179fffe9..69f0741a741 100644 --- a/src/Functions/FunctionStringOrArrayToT.h +++ b/src/Functions/FunctionStringOrArrayToT.h @@ -8,6 +8,7 @@ #include #include #include +#include namespace DB @@ -43,7 +44,9 @@ public: DataTypePtr getReturnTypeImpl(const DataTypes & arguments) const override {
if (!isStringOrFixedString(arguments[0]) - && !isArray(arguments[0]) && !isMap(arguments[0])) + && !isArray(arguments[0]) + && !isMap(arguments[0]) + && !isUUID(arguments[0])) throw Exception("Illegal type " + arguments[0]->getName() + " of argument of function " + getName(), ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT); return std::make_shared>(); @@ -51,7 +54,7 @@ public: bool useDefaultImplementationForConstants() const override { return true; } - ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr & result_type, size_t /*input_rows_count*/) const override + ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr & result_type, size_t input_rows_count) const override { const ColumnPtr column = arguments[0].column; if (const ColumnString * col = checkAndGetColumn(column.get())) @@ -104,6 +107,14 @@ public: Impl::array(col_nested.getOffsets(), vec_res); return col_res; } + else if (const ColumnUUID * col_uuid = checkAndGetColumn(column.get())) + { + auto col_res = ColumnVector::create(); + typename ColumnVector::Container & vec_res = col_res->getData(); + vec_res.resize(col_uuid->size()); + Impl::uuid(col_uuid->getData(), input_rows_count, vec_res); + return col_res; + } else throw Exception("Illegal column " + arguments[0].column->getName() + " of argument of function " + getName(), ErrorCodes::ILLEGAL_COLUMN); diff --git a/src/Functions/array/length.cpp b/src/Functions/array/length.cpp index a7fe2fb0662..d11364577a4 100644 --- a/src/Functions/array/length.cpp +++ b/src/Functions/array/length.cpp @@ -35,6 +35,11 @@ struct LengthImpl for (size_t i = 0; i < size; ++i) res[i] = offsets[i] - offsets[i - 1]; } + + [[noreturn]] static void uuid(const ColumnUUID ::Container & , size_t &, PaddedPODArray & ) + { + throw Exception("Cannot apply function length to UUID argument", ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT); + } }; diff --git a/src/Functions/isValidUTF8.cpp b/src/Functions/isValidUTF8.cpp index e3158bb709c..abdda53990d 100644 --- a/src/Functions/isValidUTF8.cpp +++ b/src/Functions/isValidUTF8.cpp @@ -317,6 +317,11 @@ SOFTWARE. 
{ throw Exception("Cannot apply function isValidUTF8 to Array argument", ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT); } + + [[noreturn]] static void uuid(const ColumnUUID::Container &, size_t &, PaddedPODArray &) + { + throw Exception("Cannot apply function isValidUTF8 to UUID argument", ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT); + } }; struct NameIsValidUTF8 diff --git a/src/Functions/lengthUTF8.cpp b/src/Functions/lengthUTF8.cpp index c067fd4db3c..455f66ea95d 100644 --- a/src/Functions/lengthUTF8.cpp +++ b/src/Functions/lengthUTF8.cpp @@ -53,6 +53,11 @@ struct LengthUTF8Impl { throw Exception("Cannot apply function lengthUTF8 to Array argument", ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT); } + + [[noreturn]] static void uuid(const ColumnUUID ::Container &, size_t &, PaddedPODArray &) + { + throw Exception("Cannot apply function lengthUTF8 to UUID argument", ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT); + } }; struct NameLengthUTF8 From faa174c6c43cd152323ecb76ab243d5d81b1e4bb Mon Sep 17 00:00:00 2001 From: zxc111 Date: Sun, 4 Jul 2021 22:40:09 +0800 Subject: [PATCH 736/931] Add test for function empty support uuid --- ...1936_empty_function_support_uuid.reference | 3 ++ .../01936_empty_function_support_uuid.sql | 28 +++++++++++++++++++ 2 files changed, 31 insertions(+) create mode 100644 tests/queries/0_stateless/01936_empty_function_support_uuid.reference create mode 100644 tests/queries/0_stateless/01936_empty_function_support_uuid.sql diff --git a/tests/queries/0_stateless/01936_empty_function_support_uuid.reference b/tests/queries/0_stateless/01936_empty_function_support_uuid.reference new file mode 100644 index 00000000000..8bfb10ca779 --- /dev/null +++ b/tests/queries/0_stateless/01936_empty_function_support_uuid.reference @@ -0,0 +1,3 @@ +1 +0 +2 1 diff --git a/tests/queries/0_stateless/01936_empty_function_support_uuid.sql b/tests/queries/0_stateless/01936_empty_function_support_uuid.sql new file mode 100644 index 00000000000..f969211e014 --- /dev/null +++ b/tests/queries/0_stateless/01936_empty_function_support_uuid.sql @@ -0,0 +1,28 @@ +SELECT empty(toUUID('00000000-0000-0000-0000-000000000000')); +SELECT uniqIf(uuid, empty(uuid)) +FROM +( + SELECT toUUID('00000000-0000-0000-0000-000000000002') AS uuid + UNION ALL + SELECT toUUID('00000000-0000-0000-0000-000000000001') AS uuid +); + +CREATE DATABASE uuid_empty; +CREATE TABLE uuid_empty.users (user_id UUID) ENGINE = Memory; +CREATE TABLE uuid_empty.orders (order_id UUID, user_id UUID) ENGINE = Memory; +INSERT INTO uuid_empty.users VALUES ('00000000-0000-0000-0000-000000000001'); +INSERT INTO uuid_empty.users VALUES ('00000000-0000-0000-0000-000000000002'); +INSERT INTO uuid_empty.orders VALUES ('00000000-0000-0000-0000-000000000003', '00000000-0000-0000-0000-000000000001'); + +SET joined_subquery_requires_alias = 0; +SELECT + uniq(user_id) AS users, + uniqIf(order_id, notEmpty(order_id)) AS orders +FROM +( + SELECT * FROM uuid_empty.users +) ALL LEFT JOIN ( + SELECT * FROM uuid_empty.orders +) USING (user_id); + +DROP DATABASE uuid_empty; From 91aa1e6c7bb1cdb895b75dcdc0a9e6fb83cf9ba4 Mon Sep 17 00:00:00 2001 From: alexey-milovidov Date: Sun, 4 Jul 2021 18:13:20 +0300 Subject: [PATCH 737/931] Update 01936_empty_function_support_uuid.sql --- .../0_stateless/01936_empty_function_support_uuid.sql | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/tests/queries/0_stateless/01936_empty_function_support_uuid.sql b/tests/queries/0_stateless/01936_empty_function_support_uuid.sql index f969211e014..753d0b43b4a 100644 --- 
a/tests/queries/0_stateless/01936_empty_function_support_uuid.sql +++ b/tests/queries/0_stateless/01936_empty_function_support_uuid.sql @@ -14,15 +14,14 @@ INSERT INTO uuid_empty.users VALUES ('00000000-0000-0000-0000-000000000001'); INSERT INTO uuid_empty.users VALUES ('00000000-0000-0000-0000-000000000002'); INSERT INTO uuid_empty.orders VALUES ('00000000-0000-0000-0000-000000000003', '00000000-0000-0000-0000-000000000001'); -SET joined_subquery_requires_alias = 0; SELECT uniq(user_id) AS users, uniqIf(order_id, notEmpty(order_id)) AS orders FROM ( SELECT * FROM uuid_empty.users -) ALL LEFT JOIN ( +) t1 ALL LEFT JOIN ( SELECT * FROM uuid_empty.orders -) USING (user_id); +) t2 USING (user_id); DROP DATABASE uuid_empty; From a51067e7ff248ebb134bb47d778ad7027dfdb70e Mon Sep 17 00:00:00 2001 From: zxc111 Date: Sun, 4 Jul 2021 23:25:51 +0800 Subject: [PATCH 738/931] update 01936_empty_function_support_uuid --- .../0_stateless/01936_empty_function_support_uuid.reference | 1 + .../0_stateless/01936_empty_function_support_uuid.sql | 5 ++++- 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/tests/queries/0_stateless/01936_empty_function_support_uuid.reference b/tests/queries/0_stateless/01936_empty_function_support_uuid.reference index 8bfb10ca779..30373074c1f 100644 --- a/tests/queries/0_stateless/01936_empty_function_support_uuid.reference +++ b/tests/queries/0_stateless/01936_empty_function_support_uuid.reference @@ -1,3 +1,4 @@ 1 0 +1 2 2 1 diff --git a/tests/queries/0_stateless/01936_empty_function_support_uuid.sql b/tests/queries/0_stateless/01936_empty_function_support_uuid.sql index 753d0b43b4a..e3a409ee06e 100644 --- a/tests/queries/0_stateless/01936_empty_function_support_uuid.sql +++ b/tests/queries/0_stateless/01936_empty_function_support_uuid.sql @@ -1,9 +1,12 @@ SELECT empty(toUUID('00000000-0000-0000-0000-000000000000')); -SELECT uniqIf(uuid, empty(uuid)) +SELECT notEmpty(toUUID('00000000-0000-0000-0000-000000000000')); +SELECT uniqIf(uuid, empty(uuid)), uniqIf(uuid, notEmpty(uuid)) FROM ( SELECT toUUID('00000000-0000-0000-0000-000000000002') AS uuid UNION ALL + SELECT toUUID('00000000-0000-0000-0000-000000000000') AS uuid + UNION ALL SELECT toUUID('00000000-0000-0000-0000-000000000001') AS uuid ); From faac204a1f262154254ab8bcc44c7f2e11993e0f Mon Sep 17 00:00:00 2001 From: zxc111 Date: Sun, 4 Jul 2021 23:55:22 +0800 Subject: [PATCH 739/931] fix style --- src/Functions/array/length.cpp | 7 +++++-- src/Functions/lengthUTF8.cpp | 2 +- 2 files changed, 6 insertions(+), 3 deletions(-) diff --git a/src/Functions/array/length.cpp b/src/Functions/array/length.cpp index d11364577a4..768590c6313 100644 --- a/src/Functions/array/length.cpp +++ b/src/Functions/array/length.cpp @@ -5,7 +5,10 @@ namespace DB { - +namespace ErrorCodes +{ + extern const int ILLEGAL_TYPE_OF_ARGUMENT; +} /** Calculates the length of a string in bytes. 
*/ @@ -36,7 +39,7 @@ struct LengthImpl res[i] = offsets[i] - offsets[i - 1]; } - [[noreturn]] static void uuid(const ColumnUUID ::Container & , size_t &, PaddedPODArray & ) + [[noreturn]] static void uuid(const ColumnUUID::Container &, size_t &, PaddedPODArray &) { throw Exception("Cannot apply function length to UUID argument", ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT); } diff --git a/src/Functions/lengthUTF8.cpp b/src/Functions/lengthUTF8.cpp index 455f66ea95d..349635160a6 100644 --- a/src/Functions/lengthUTF8.cpp +++ b/src/Functions/lengthUTF8.cpp @@ -54,7 +54,7 @@ struct LengthUTF8Impl throw Exception("Cannot apply function lengthUTF8 to Array argument", ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT); } - [[noreturn]] static void uuid(const ColumnUUID ::Container &, size_t &, PaddedPODArray &) + [[noreturn]] static void uuid(const ColumnUUID::Container &, size_t &, PaddedPODArray &) { throw Exception("Cannot apply function lengthUTF8 to UUID argument", ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT); } From e73a908dd37ca4beba8de3d2dffd76ca15293a0e Mon Sep 17 00:00:00 2001 From: feng lv Date: Sun, 4 Jul 2021 15:41:33 +0000 Subject: [PATCH 740/931] fix test fix style --- src/TableFunctions/TableFunctionMerge.cpp | 11 +++++++---- tests/queries/skip_list.json | 3 ++- 2 files changed, 9 insertions(+), 5 deletions(-) diff --git a/src/TableFunctions/TableFunctionMerge.cpp b/src/TableFunctions/TableFunctionMerge.cpp index 40d0d1b7921..81dde4a12a4 100644 --- a/src/TableFunctions/TableFunctionMerge.cpp +++ b/src/TableFunctions/TableFunctionMerge.cpp @@ -18,7 +18,7 @@ namespace DB namespace ErrorCodes { extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH; - extern const int UNKNOWN_TABLE; + extern const int BAD_ARGUMENTS; } namespace @@ -26,9 +26,12 @@ namespace [[noreturn]] void throwNoTablesMatchRegexp(const String & source_database_regexp, const String & source_table_regexp) { throw Exception( - "Error while executing table function merge. Neither no one database matches regular expression " + source_database_regexp - + " nor in database matches " + source_database_regexp + " no one table matches regular expression: " + source_table_regexp, - ErrorCodes::UNKNOWN_TABLE); + ErrorCodes::BAD_ARGUMENTS, + "Error while executing table function merge. 
Either there is no database matching the regular expression `{}`, or there are " + "no tables in the databases matching `{}` that fit the tables expression: {}", + source_database_regexp, + source_database_regexp, + source_table_regexp); } } diff --git a/tests/queries/skip_list.json b/tests/queries/skip_list.json index be52bee71b1..a3e4c32c08e 100644 --- a/tests/queries/skip_list.json +++ b/tests/queries/skip_list.json @@ -854,6 +854,7 @@ "01914_exchange_dictionaries", "01915_create_or_replace_dictionary", "01925_test_storage_merge_aliases", - "01933_client_replxx_convert_history" /// Uses non unique history file + "01933_client_replxx_convert_history", /// Uses non unique history file + "01902_table_function_merge_db_repr" ] } From df1dba5a7d02dce71d90c08975dd1826ed2c0481 Mon Sep 17 00:00:00 2001 From: Maksim Kita Date: Sun, 4 Jul 2021 21:05:33 +0300 Subject: [PATCH 741/931] Fixed Arcadia --- src/Interpreters/JIT/CHJIT.h | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/Interpreters/JIT/CHJIT.h b/src/Interpreters/JIT/CHJIT.h index 3e53f83b92d..0a553be6bb3 100644 --- a/src/Interpreters/JIT/CHJIT.h +++ b/src/Interpreters/JIT/CHJIT.h @@ -9,9 +9,9 @@ #include #include -#include -#include -#include +#include // Y_IGNORE +#include // Y_IGNORE +#include // Y_IGNORE namespace DB { From 15b43e1e45e21a1dbe5e4ab6862cb3b9919dc489 Mon Sep 17 00:00:00 2001 From: Maksim Kita Date: Sun, 4 Jul 2021 21:09:55 +0300 Subject: [PATCH 742/931] Fix libunwind lock free stack --- contrib/libunwind | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/contrib/libunwind b/contrib/libunwind index cdcc3d8c6f6..6b816d2fba3 160000 --- a/contrib/libunwind +++ b/contrib/libunwind @@ -1 +1 @@ -Subproject commit cdcc3d8c6f6e80a0886082704a0902d61d8d3ffe +Subproject commit 6b816d2fba3991f8fd6aaec17d92f68947eab667 From 7e17290acddc9710a8b4daba5e4e79a0fd055773 Mon Sep 17 00:00:00 2001 From: Azat Khuzhin Date: Tue, 29 Jun 2021 21:03:39 +0300 Subject: [PATCH 743/931] Fix sharding_key from column w/o function for remote() P.S. that code looks redundant, and I'm not even sure that it was required when it was first added in 325cc47ca54142b7d4018c740286b71ea7c0b278. --- src/TableFunctions/TableFunctionRemote.cpp | 5 ----- .../01932_remote_sharding_key_column.reference | 0 .../01932_remote_sharding_key_column.sql | 15 +++++++++++++++ 3 files changed, 15 insertions(+), 5 deletions(-) create mode 100644 tests/queries/0_stateless/01932_remote_sharding_key_column.reference create mode 100644 tests/queries/0_stateless/01932_remote_sharding_key_column.sql diff --git a/src/TableFunctions/TableFunctionRemote.cpp b/src/TableFunctions/TableFunctionRemote.cpp index 4d3524c7563..40bfa2cbb6b 100644 --- a/src/TableFunctions/TableFunctionRemote.cpp +++ b/src/TableFunctions/TableFunctionRemote.cpp @@ -153,11 +153,6 @@ void TableFunctionRemote::parseArguments(const ASTPtr & ast_function, ContextPtr if (arg_num < args.size()) throw Exception(help_message, ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH); - /// ExpressionAnalyzer will be created in InterpreterSelectQuery that will meet these `Identifier` when processing the request. /// We need to mark them as the name of the database or table, because the default value is column.
- for (auto ast : args) - setIdentifierSpecial(ast); - if (!cluster_name.empty()) { /// Use an existing cluster from the main config diff --git a/tests/queries/0_stateless/01932_remote_sharding_key_column.reference b/tests/queries/0_stateless/01932_remote_sharding_key_column.reference new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/queries/0_stateless/01932_remote_sharding_key_column.sql b/tests/queries/0_stateless/01932_remote_sharding_key_column.sql new file mode 100644 index 00000000000..ded2f187821 --- /dev/null +++ b/tests/queries/0_stateless/01932_remote_sharding_key_column.sql @@ -0,0 +1,15 @@ +-- regression test for the following query: +-- +-- select * from remote('127.1', system.one, dummy) +-- +-- that produce the following error before: +-- +-- Unknown column: dummy, there are only columns . +-- +-- NOTE: that wrapping column into any function works before. +select * from remote('127.1', system.one, dummy) format Null; +select * from remote('127.1', system.one, identity(dummy)) format Null; +select * from remote('127.1', view(select * from system.one), identity(dummy)) format Null; +select * from remote('127.{1,2}', view(select * from system.one), identity(dummy)) format Null; +select * from remote('127.1', view(select * from system.one), dummy) format Null; +select * from remote('127.{1,2}', view(select * from system.one), dummy) format Null; From 7bcb57afe1f8f011d3a2e1bf6e3b6526ae958ff4 Mon Sep 17 00:00:00 2001 From: l1tsolaiki Date: Sun, 4 Jul 2021 22:10:01 +0300 Subject: [PATCH 744/931] Fix failing special builds (probably) --- src/Functions/JSONPath/Parsers/CMakeLists.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/src/Functions/JSONPath/Parsers/CMakeLists.txt b/src/Functions/JSONPath/Parsers/CMakeLists.txt index ecabe5cc13b..e9cdbbfea32 100644 --- a/src/Functions/JSONPath/Parsers/CMakeLists.txt +++ b/src/Functions/JSONPath/Parsers/CMakeLists.txt @@ -2,6 +2,7 @@ include("${ClickHouse_SOURCE_DIR}/cmake/dbms_glob_sources.cmake") add_headers_and_sources(clickhouse_functions_jsonpath_parsers .) 
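Returning to the remote() fix a few hunks above: with the setIdentifierSpecial() loop removed, a bare column name passed as the sharding key is resolved as a column again instead of being misread as a database or table name. The new 01932 test pins down both forms:

-- Used to fail with "Unknown column: dummy, there are only columns .":
SELECT * FROM remote('127.{1,2}', view(SELECT * FROM system.one), dummy);
-- Wrapping the column into a function already worked before the fix:
SELECT * FROM remote('127.{1,2}', view(SELECT * FROM system.one), identity(dummy));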
add_library(clickhouse_functions_jsonpath_parsers ${clickhouse_functions_jsonpath_parsers_sources} ${clickhouse_functions_jsonpath_parsers_headers}) target_link_libraries(clickhouse_functions_jsonpath_parsers PRIVATE dbms) +target_link_libraries(clickhouse_functions_jsonpath_parsers PRIVATE clickhouse_parsers) if (STRIP_DEBUG_SYMBOLS_FUNCTIONS) target_compile_options(clickhouse_functions_jsonpath_parsers PRIVATE "-g0") From 3f5ef4afe0594bf97fd9e995ebf01d229508c556 Mon Sep 17 00:00:00 2001 From: alexey-milovidov Date: Sun, 4 Jul 2021 22:14:22 +0300 Subject: [PATCH 745/931] Update 01936_empty_function_support_uuid.sql --- .../01936_empty_function_support_uuid.sql | 23 +++++++++++-------- 1 file changed, 14 insertions(+), 9 deletions(-) diff --git a/tests/queries/0_stateless/01936_empty_function_support_uuid.sql b/tests/queries/0_stateless/01936_empty_function_support_uuid.sql index e3a409ee06e..c67f38b776a 100644 --- a/tests/queries/0_stateless/01936_empty_function_support_uuid.sql +++ b/tests/queries/0_stateless/01936_empty_function_support_uuid.sql @@ -10,21 +10,26 @@ FROM SELECT toUUID('00000000-0000-0000-0000-000000000001') AS uuid ); -CREATE DATABASE uuid_empty; -CREATE TABLE uuid_empty.users (user_id UUID) ENGINE = Memory; -CREATE TABLE uuid_empty.orders (order_id UUID, user_id UUID) ENGINE = Memory; -INSERT INTO uuid_empty.users VALUES ('00000000-0000-0000-0000-000000000001'); -INSERT INTO uuid_empty.users VALUES ('00000000-0000-0000-0000-000000000002'); -INSERT INTO uuid_empty.orders VALUES ('00000000-0000-0000-0000-000000000003', '00000000-0000-0000-0000-000000000001'); +DROP TABLE IF EXISTS users; +DROP TABLE IF EXISTS orders; + +CREATE TABLE users (user_id UUID) ENGINE = Memory; +CREATE TABLE orders (order_id UUID, user_id UUID) ENGINE = Memory; + +INSERT INTO users VALUES ('00000000-0000-0000-0000-000000000001'); +INSERT INTO users VALUES ('00000000-0000-0000-0000-000000000002'); +INSERT INTO orders VALUES ('00000000-0000-0000-0000-000000000003', '00000000-0000-0000-0000-000000000001'); SELECT uniq(user_id) AS users, uniqIf(order_id, notEmpty(order_id)) AS orders FROM ( - SELECT * FROM uuid_empty.users + SELECT * FROM users ) t1 ALL LEFT JOIN ( - SELECT * FROM uuid_empty.orders + SELECT * FROM orders ) t2 USING (user_id); -DROP DATABASE uuid_empty; +DROP TABLE users; +DROP TABLE orders; + From 618a77fafaa52912c37d652bbe3ade7f1d6ac306 Mon Sep 17 00:00:00 2001 From: Vitaly Baranov Date: Thu, 1 Jul 2021 17:41:59 +0300 Subject: [PATCH 746/931] Improve logging in integration tests. 
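Before moving on to the integration-test logging changes, a short summary of the UUID semantics that the empty/notEmpty commits and the final 01936 test above settle on: the all-zero UUID is the empty value, and anything else is non-empty.

SELECT empty(toUUID('00000000-0000-0000-0000-000000000000'));    -- 1
SELECT notEmpty(toUUID('00000000-0000-0000-0000-000000000000')); -- 0
SELECT empty(toUUID('00000000-0000-0000-0000-000000000001'));    -- 0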
--- tests/integration/ci-runner.py | 103 ++++++++++++------ tests/integration/helpers/cluster.py | 37 +++++-- .../pytest_xdist_logging_to_separate_files.py | 28 +++++ tests/integration/pytest.ini | 12 +- tests/integration/runner | 6 +- 5 files changed, 134 insertions(+), 52 deletions(-) create mode 100644 tests/integration/helpers/pytest_xdist_logging_to_separate_files.py diff --git a/tests/integration/ci-runner.py b/tests/integration/ci-runner.py index 0af76fe2648..97d076f698e 100755 --- a/tests/integration/ci-runner.py +++ b/tests/integration/ci-runner.py @@ -3,6 +3,7 @@ import logging import subprocess import os +import glob import time import shutil from collections import defaultdict @@ -17,7 +18,6 @@ SLEEP_BETWEEN_RETRIES = 5 PARALLEL_GROUP_SIZE = 100 CLICKHOUSE_BINARY_PATH = "/usr/bin/clickhouse" CLICKHOUSE_ODBC_BRIDGE_BINARY_PATH = "/usr/bin/clickhouse-odbc-bridge" -DOCKERD_LOGS_PATH = "/ClickHouse/tests/integration/dockerd.log" CLICKHOUSE_LIBRARY_BRIDGE_BINARY_PATH = "/usr/bin/clickhouse-library-bridge" TRIES_COUNT = 10 @@ -256,8 +256,8 @@ class ClickhouseIntegrationTestsRunner: shutil.copy(CLICKHOUSE_LIBRARY_BRIDGE_BINARY_PATH, result_path_library_bridge) return None, None - def _compress_logs(self, path, result_path): - subprocess.check_call("tar czf {} -C {} .".format(result_path, path), shell=True) # STYLE_CHECK_ALLOW_SUBPROCESS_CHECK_CALL + def _compress_logs(self, dir, relpaths, result_path): + subprocess.check_call("tar czf {} -C {} {}".format(result_path, dir, ' '.join(relpaths)), shell=True) # STYLE_CHECK_ALLOW_SUBPROCESS_CHECK_CALL def _get_all_tests(self, repo_path): image_cmd = self._get_runner_image_cmd(repo_path) @@ -336,6 +336,27 @@ class ClickhouseIntegrationTestsRunner: logging.info("Cannot run with custom docker image version :(") return image_cmd + def _find_test_data_dirs(self, repo_path, test_names): + relpaths = {} + for test_name in test_names: + if '/' in test_name: + test_dir = test_name[:test_name.find('/')] + else: + test_dir = test_name + if os.path.isdir(os.path.join(repo_path, "tests/integration", test_dir)): + for name in os.listdir(os.path.join(repo_path, "tests/integration", test_dir)): + relpath = os.path.join(os.path.join(test_dir, name)) + mtime = os.path.getmtime(os.path.join(repo_path, "tests/integration", relpath)) + relpaths[relpath] = mtime + return relpaths + + def _get_test_data_dirs_difference(self, new_snapshot, old_snapshot): + res = set() + for path in new_snapshot: + if (not path in old_snapshot) or (old_snapshot[path] != new_snapshot[path]): + res.add(path) + return res + def run_test_group(self, repo_path, test_group, tests_in_group, num_tries, num_workers): counters = { "ERROR": [], @@ -355,18 +376,14 @@ class ClickhouseIntegrationTestsRunner: image_cmd = self._get_runner_image_cmd(repo_path) test_group_str = test_group.replace('/', '_').replace('.', '_') + log_paths = [] + test_data_dirs = {} for i in range(num_tries): logging.info("Running test group %s for the %s retry", test_group, i) clear_ip_tables_and_restart_daemons() - output_path = os.path.join(str(self.path()), "test_output_" + test_group_str + "_" + str(i) + ".log") - log_name = "integration_run_" + test_group_str + "_" + str(i) + ".txt" - log_path = os.path.join(str(self.path()), log_name) - log_paths.append(log_path) - logging.info("Will wait output inside %s", output_path) - test_names = set([]) for test_name in tests_in_group: if test_name not in counters["PASSED"]: @@ -375,11 +392,19 @@ class ClickhouseIntegrationTestsRunner: else: test_names.add(test_name) + if i 
== 0: + test_data_dirs = self._find_test_data_dirs(repo_path, test_names) + + info_basename = test_group_str + "_" + str(i) + ".nfo" + info_path = os.path.join(repo_path, "tests/integration", info_basename) + test_cmd = ' '.join([test for test in sorted(test_names)]) parallel_cmd = " --parallel {} ".format(num_workers) if num_workers > 0 else "" - cmd = "cd {}/tests/integration && ./runner --tmpfs {} -t {} {} '-ss -rfEp --run-id={} --color=no --durations=0 {}' | tee {}".format( - repo_path, image_cmd, test_cmd, parallel_cmd, i, _get_deselect_option(self.should_skip_tests()), output_path) + cmd = "cd {}/tests/integration && ./runner --tmpfs {} -t {} {} '-rfEp --run-id={} --color=no --durations=0 {}' | tee {}".format( + repo_path, image_cmd, test_cmd, parallel_cmd, i, _get_deselect_option(self.should_skip_tests()), info_path) + log_basename = test_group_str + "_" + str(i) + ".log" + log_path = os.path.join(repo_path, "tests/integration", log_basename) with open(log_path, 'w') as log: logging.info("Executing cmd: %s", cmd) retcode = subprocess.Popen(cmd, shell=True, stderr=log, stdout=log).wait() @@ -388,15 +413,41 @@ class ClickhouseIntegrationTestsRunner: else: logging.info("Some tests failed") - if os.path.exists(output_path): - lines = parse_test_results_output(output_path) + extra_logs_names = [log_basename] + log_result_path = os.path.join(str(self.path()), 'integration_run_' + log_basename) + shutil.copy(log_path, log_result_path) + log_paths.append(log_result_path) + + for pytest_log_path in glob.glob(os.path.join(repo_path, "tests/integration/pytest*.log")): + new_name = test_group_str + "_" + str(i) + "_" + os.path.basename(pytest_log_path) + os.rename(pytest_log_path, os.path.join(repo_path, "tests/integration", new_name)) + extra_logs_names.append(new_name) + + dockerd_log_path = os.path.join(repo_path, "tests/integration/dockerd.log") + if os.path.exists(dockerd_log_path): + new_name = test_group_str + "_" + str(i) + "_" + os.path.basename(dockerd_log_path) + os.rename(dockerd_log_path, os.path.join(repo_path, "tests/integration", new_name)) + extra_logs_names.append(new_name) + + if os.path.exists(info_path): + extra_logs_names.append(info_basename) + lines = parse_test_results_output(info_path) new_counters = get_counters(lines) - times_lines = parse_test_times(output_path) + times_lines = parse_test_times(info_path) new_tests_times = get_test_times(times_lines) self._update_counters(counters, new_counters) for test_name, test_time in new_tests_times.items(): tests_times[test_name] = test_time - os.remove(output_path) + + test_data_dirs_new = self._find_test_data_dirs(repo_path, test_names) + test_data_dirs_diff = self._get_test_data_dirs_difference(test_data_dirs_new, test_data_dirs) + test_data_dirs = test_data_dirs_new + + if extra_logs_names or test_data_dirs_diff: + extras_result_path = os.path.join(str(self.path()), "integration_run_" + test_group_str + "_" + str(i) + ".tar.gz") + self._compress_logs(os.path.join(repo_path, "tests/integration"), extra_logs_names + list(test_data_dirs_diff), extras_result_path) + log_paths.append(extras_result_path) + if len(counters["PASSED"]) + len(counters["FLAKY"]) == len(tests_in_group): logging.info("All tests from group %s passed", test_group) break @@ -459,15 +510,6 @@ class ClickhouseIntegrationTestsRunner: break time.sleep(5) - logging.info("Finally all tests done, going to compress test dir") - test_logs = os.path.join(str(self.path()), "./test_dir.tar.gz") - self._compress_logs("{}/tests/integration".format(repo_path), 
test_logs) - logging.info("Compression finished") - - result_path_dockerd_logs = os.path.join(str(self.path()), "dockerd.log") - if os.path.exists(result_path_dockerd_logs): - shutil.copy(DOCKERD_LOGS_PATH, result_path_dockerd_logs) - test_result = [] for state in ("ERROR", "FAILED", "PASSED", "SKIPPED", "FLAKY"): if state == "PASSED": @@ -479,7 +521,7 @@ class ClickhouseIntegrationTestsRunner: test_result += [(c + ' (✕' + str(final_retry) + ')', text_state, "{:.2f}".format(tests_times[c])) for c in counters[state]] status_text = description_prefix + ', '.join([str(n).lower().replace('failed', 'fail') + ': ' + str(len(c)) for n, c in counters.items()]) - return result_state, status_text, test_result, [test_logs] + logs + return result_state, status_text, test_result, logs def run_impl(self, repo_path, build_path): if self.flaky_check: @@ -539,15 +581,6 @@ class ClickhouseIntegrationTestsRunner: logging.info("Collected more than 20 failed/error tests, stopping") break - logging.info("Finally all tests done, going to compress test dir") - test_logs = os.path.join(str(self.path()), "./test_dir.tar.gz") - self._compress_logs("{}/tests/integration".format(repo_path), test_logs) - logging.info("Compression finished") - - result_path_dockerd_logs = os.path.join(str(self.path()), "dockerd.log") - if os.path.exists(result_path_dockerd_logs): - shutil.copy(DOCKERD_LOGS_PATH, result_path_dockerd_logs) - if counters["FAILED"] or counters["ERROR"]: logging.info("Overall status failure, because we have tests in FAILED or ERROR state") result_state = "failure" @@ -580,7 +613,7 @@ class ClickhouseIntegrationTestsRunner: if '(memory)' in self.params['context_name']: result_state = "success" - return result_state, status_text, test_result, [test_logs] + return result_state, status_text, test_result, [] def write_results(results_file, status_file, results, status): with open(results_file, 'w') as f: diff --git a/tests/integration/helpers/cluster.py b/tests/integration/helpers/cluster.py index 8863492dd12..1db9e07a69e 100644 --- a/tests/integration/helpers/cluster.py +++ b/tests/integration/helpers/cluster.py @@ -30,6 +30,7 @@ from kazoo.client import KazooClient from kazoo.exceptions import KazooException from minio import Minio from helpers.test_tools import assert_eq_with_retry +from helpers import pytest_xdist_logging_to_separate_files import docker @@ -56,22 +57,22 @@ def run_and_check(args, env=None, shell=False, stdout=subprocess.PIPE, stderr=su subprocess.Popen(args, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL, env=env, shell=shell) return + logging.debug(f"Command:{args}") res = subprocess.run(args, stdout=stdout, stderr=stderr, env=env, shell=shell, timeout=timeout) out = res.stdout.decode('utf-8') err = res.stderr.decode('utf-8') - if res.returncode != 0: - # check_call(...) from subprocess does not print stderr, so we do it manually - logging.debug(f"Command:{args}") - logging.debug(f"Stderr:{err}") + # check_call(...) 
from subprocess does not print stderr, so we do it manually + if out: logging.debug(f"Stdout:{out}") - logging.debug(f"Env: {env}") + if err: + logging.debug(f"Stderr:{err}") + if res.returncode != 0: + logging.debug(f"Exitcode:{res.returncode}") + if env: + logging.debug(f"Env:{env}") if not nothrow: raise Exception(f"Command {args} return non-zero code {res.returncode}: {res.stderr.decode('utf-8')}") - else: - logging.debug(f"Command:{args}") - logging.debug(f"Stderr: {err}") - logging.debug(f"Stdout: {out}") - return out + return out # Based on https://stackoverflow.com/questions/2838244/get-open-tcp-port-in-python/2838309#2838309 def get_free_port(): @@ -192,6 +193,7 @@ class ClickHouseCluster: zookeeper_keyfile=None, zookeeper_certfile=None): for param in list(os.environ.keys()): logging.debug("ENV %40s %s" % (param, os.environ[param])) + self.base_path = base_path self.base_dir = p.dirname(base_path) self.name = name if name is not None else '' @@ -1290,6 +1292,9 @@ class ClickHouseCluster: raise Exception("Can't wait Cassandra to start") def start(self, destroy_dirs=True): + pytest_xdist_logging_to_separate_files.setup() + logging.info("Running tests in {}".format(self.base_path)) + logging.debug("Cluster start called. is_up={}, destroy_dirs={}".format(self.is_up, destroy_dirs)) if self.is_up: return @@ -1771,12 +1776,14 @@ class ClickHouseInstance: # Connects to the instance via clickhouse-client, sends a query (1st argument) and returns the answer def query(self, sql, stdin=None, timeout=None, settings=None, user=None, password=None, database=None, ignore_error=False): + logging.debug(f"Executing query {sql} on {self.name}") return self.client.query(sql, stdin=stdin, timeout=timeout, settings=settings, user=user, password=password, database=database, ignore_error=ignore_error) def query_with_retry(self, sql, stdin=None, timeout=None, settings=None, user=None, password=None, database=None, ignore_error=False, retry_count=20, sleep_time=0.5, check_callback=lambda x: True): + logging.debug(f"Executing query {sql} on {self.name}") result = None for i in range(retry_count): try: @@ -1794,23 +1801,27 @@ class ClickHouseInstance: raise Exception("Can't execute query {}".format(sql)) # As query() but doesn't wait response and returns response handler - def get_query_request(self, *args, **kwargs): - return self.client.get_query_request(*args, **kwargs) + def get_query_request(self, sql, *args, **kwargs): + logging.debug(f"Executing query {sql} on {self.name}") + return self.client.get_query_request(sql, *args, **kwargs) # Connects to the instance via clickhouse-client, sends a query (1st argument), expects an error and return its code def query_and_get_error(self, sql, stdin=None, timeout=None, settings=None, user=None, password=None, database=None): + logging.debug(f"Executing query {sql} on {self.name}") return self.client.query_and_get_error(sql, stdin=stdin, timeout=timeout, settings=settings, user=user, password=password, database=database) # The same as query_and_get_error but ignores successful query. 
def query_and_get_answer_with_error(self, sql, stdin=None, timeout=None, settings=None, user=None, password=None, database=None): + logging.debug(f"Executing query {sql} on {self.name}") return self.client.query_and_get_answer_with_error(sql, stdin=stdin, timeout=timeout, settings=settings, user=user, password=password, database=database) # Connects to the instance via HTTP interface, sends a query and returns the answer def http_query(self, sql, data=None, params=None, user=None, password=None, expect_fail_and_get_error=False): + logging.debug(f"Executing query {sql} on {self.name} via HTTP interface") if params is None: params = {} else: @@ -1845,11 +1856,13 @@ class ClickHouseInstance: # Connects to the instance via HTTP interface, sends a query and returns the answer def http_request(self, url, method='GET', params=None, data=None, headers=None): + logging.debug(f"Sending HTTP request {url} to {self.name}") url = "http://" + self.ip_address + ":8123/" + url return requests.request(method=method, url=url, params=params, data=data, headers=headers) # Connects to the instance via HTTP interface, sends a query, expects an error and return the error message def http_query_and_get_error(self, sql, data=None, params=None, user=None, password=None): + logging.debug(f"Executing query {sql} on {self.name} via HTTP interface") return self.http_query(sql=sql, data=data, params=params, user=user, password=password, expect_fail_and_get_error=True) diff --git a/tests/integration/helpers/pytest_xdist_logging_to_separate_files.py b/tests/integration/helpers/pytest_xdist_logging_to_separate_files.py new file mode 100644 index 00000000000..ee9a52e042c --- /dev/null +++ b/tests/integration/helpers/pytest_xdist_logging_to_separate_files.py @@ -0,0 +1,28 @@ +import logging +import os.path + +# Makes the parallel workers of pytest-xdist to log to separate files. +# Without this function all workers will log to the same log file +# and mix everything together making it much more difficult for troubleshooting. 
+def setup(): + worker_name = os.environ.get('PYTEST_XDIST_WORKER', 'master') + if worker_name == 'master': + return + logger = logging.getLogger('') + new_handlers = [] + handlers_to_remove = [] + for handler in logger.handlers: + if isinstance(handler, logging.FileHandler): + filename, ext = os.path.splitext(handler.baseFilename) + if not filename.endswith('-' + worker_name): + new_filename = filename + '-' + worker_name + new_handler = logging.FileHandler(new_filename + ext) + new_handler.setFormatter(handler.formatter) + new_handler.setLevel(handler.level) + new_handlers.append(new_handler) + handlers_to_remove.append(handler) + for new_handler in new_handlers: + logger.addHandler(new_handler) + for handler in handlers_to_remove: + handler.flush() + logger.removeHandler(handler) diff --git a/tests/integration/pytest.ini b/tests/integration/pytest.ini index 6d451adf7eb..9a6ddd1be4b 100644 --- a/tests/integration/pytest.ini +++ b/tests/integration/pytest.ini @@ -4,10 +4,14 @@ norecursedirs = _instances* timeout = 1800 junit_duration_report = call junit_suite_name = integration -log_cli = 1 +log_level = DEBUG +log_format = %(asctime)s %(levelname)s : %(message)s (%(filename)s:%(lineno)s, %(funcName)s) +log_date_format=%Y-%m-%d %H:%M:%S +log_cli = true log_cli_level = CRITICAL -log_cli_format = %%(asctime)s [%(levelname)8s] %(funcName)s %(message)s (%(filename)s:%(lineno)s) +log_cli_format = %(asctime)s %(levelname)s : %(message)s (%(filename)s:%(lineno)s, %(funcName)s) +log_cli_date_format=%Y-%m-%d %H:%M:%S log_file = pytest.log log_file_level = DEBUG -log_file_format = %(asctime)s [%(levelname)8s] %(funcName)s %(message)s (%(filename)s:%(lineno)s) -log_file_date_format=%Y-%m-%d %H:%M:%S +log_file_format = %(asctime)s %(levelname)s : %(message)s (%(filename)s:%(lineno)s, %(funcName)s) +log_file_date_format = %Y-%m-%d %H:%M:%S diff --git a/tests/integration/runner b/tests/integration/runner index 160c4a23652..1bef8f60db9 100755 --- a/tests/integration/runner +++ b/tests/integration/runner @@ -3,6 +3,7 @@ import subprocess import os import getpass +import glob import argparse import logging import signal @@ -99,7 +100,7 @@ signal.signal(signal.SIGINT, docker_kill_handler_handler) # 2) path of runner script is used to determine paths for trivial case, when we run it from repository if __name__ == "__main__": - logging.basicConfig(level=logging.INFO, format='%(asctime)s %(message)s') + logging.basicConfig(level=logging.INFO, format='%(asctime)s %(levelname)s : %(message)s (%(filename)s:%(lineno)s, %(funcName)s)') parser = argparse.ArgumentParser(description="ClickHouse integration tests runner") parser.add_argument( @@ -257,6 +258,9 @@ if __name__ == "__main__": if sys.stdout.isatty() and sys.stdin.isatty(): tty = "-it" + # Remove old logs. 
+ for old_log_path in glob.glob(args.cases_dir + "/pytest*.log"): + os.remove(old_log_path) cmd = "docker run {net} {tty} --rm --name {name} --privileged \ --volume={odbc_bridge_bin}:/clickhouse-odbc-bridge --volume={bin}:/clickhouse \ From a48f500956b769e3b6fd70f7d658e6484d927d94 Mon Sep 17 00:00:00 2001 From: l1tsolaiki Date: Sun, 4 Jul 2021 22:28:05 +0300 Subject: [PATCH 747/931] Add tests with multiple rows --- .../0_stateless/01889_sql_json_functions.reference | 4 ++++ .../queries/0_stateless/01889_sql_json_functions.sql | 11 ++++++++++- 2 files changed, 14 insertions(+), 1 deletion(-) diff --git a/tests/queries/0_stateless/01889_sql_json_functions.reference b/tests/queries/0_stateless/01889_sql_json_functions.reference index 1ae1fccdd56..fae7418b818 100644 --- a/tests/queries/0_stateless/01889_sql_json_functions.reference +++ b/tests/queries/0_stateless/01889_sql_json_functions.reference @@ -37,3 +37,7 @@ null 1 0 1 +--MANY ROWS-- +["Vasily", "Kostya"] +["Katya", "Anatoliy"] +["Tihon", "Ernest"] diff --git a/tests/queries/0_stateless/01889_sql_json_functions.sql b/tests/queries/0_stateless/01889_sql_json_functions.sql index 98378a0090c..9ee4ee1b95c 100644 --- a/tests/queries/0_stateless/01889_sql_json_functions.sql +++ b/tests/queries/0_stateless/01889_sql_json_functions.sql @@ -38,4 +38,13 @@ SELECT JSON_EXISTS('$.hello[0]', '{"hello":["world"]}'); SELECT JSON_EXISTS('$.hello[1]', '{"hello":["world"]}'); SELECT JSON_EXISTS('$.a[*].b', '{"a":[{"b":1},{"c":2}]}'); SELECT JSON_EXISTS('$.a[*].f', '{"a":[{"b":1},{"c":2}]}'); -SELECT JSON_EXISTS('$.a[*][0].h', '{"a":[[{"b":1}, {"g":1}],[{"h":1},{"y":1}]]}'); \ No newline at end of file +SELECT JSON_EXISTS('$.a[*][0].h', '{"a":[[{"b":1}, {"g":1}],[{"h":1},{"y":1}]]}'); + +SELECT '--MANY ROWS--' +DROP TABLE IF EXISTS 01889_sql_json; +CREATE TABLE 01889_sql_json (json String) ENGINE = MergeTree ORDER BY json; +INSERT INTO 01889_sql_json(json) VALUES('{"name":"Vitali","surname":"Brown","friends":["Katya","Anatoliy","Ivan","Oleg"]}') +INSERT INTO 01889_sql_json(json) VALUES('{"name":"Ivan","surname":"Ivanov","friends":["Vasily","Kostya","Artyom"]}') +INSERT INTO 01889_sql_json(json) VALUES('{"name":"Katya","surname":"Baltica","friends":["Tihon","Ernest","Innokentiy"]}') +SELECT JSON_QUERY('$.friends[0 to 2]', json) +DROP TABLE 01889_sql_json From 9f05b387e541f03301fb240728a75064bbbf8747 Mon Sep 17 00:00:00 2001 From: l1tsolaiki Date: Sun, 4 Jul 2021 22:33:13 +0300 Subject: [PATCH 748/931] Add colons --- .../queries/0_stateless/01889_sql_json_functions.sql | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/tests/queries/0_stateless/01889_sql_json_functions.sql b/tests/queries/0_stateless/01889_sql_json_functions.sql index 9ee4ee1b95c..9b978c2223d 100644 --- a/tests/queries/0_stateless/01889_sql_json_functions.sql +++ b/tests/queries/0_stateless/01889_sql_json_functions.sql @@ -40,11 +40,11 @@ SELECT JSON_EXISTS('$.a[*].b', '{"a":[{"b":1},{"c":2}]}'); SELECT JSON_EXISTS('$.a[*].f', '{"a":[{"b":1},{"c":2}]}'); SELECT JSON_EXISTS('$.a[*][0].h', '{"a":[[{"b":1}, {"g":1}],[{"h":1},{"y":1}]]}'); -SELECT '--MANY ROWS--' +SELECT '--MANY ROWS--'; DROP TABLE IF EXISTS 01889_sql_json; CREATE TABLE 01889_sql_json (json String) ENGINE = MergeTree ORDER BY json; -INSERT INTO 01889_sql_json(json) VALUES('{"name":"Vitali","surname":"Brown","friends":["Katya","Anatoliy","Ivan","Oleg"]}') -INSERT INTO 01889_sql_json(json) VALUES('{"name":"Ivan","surname":"Ivanov","friends":["Vasily","Kostya","Artyom"]}') -INSERT INTO 
01889_sql_json(json) VALUES('{"name":"Katya","surname":"Baltica","friends":["Tihon","Ernest","Innokentiy"]}') -SELECT JSON_QUERY('$.friends[0 to 2]', json) -DROP TABLE 01889_sql_json +INSERT INTO 01889_sql_json(json) VALUES('{"name":"Vitali","surname":"Brown","friends":["Katya","Anatoliy","Ivan","Oleg"]}'); +INSERT INTO 01889_sql_json(json) VALUES('{"name":"Ivan","surname":"Ivanov","friends":["Vasily","Kostya","Artyom"]}'); +INSERT INTO 01889_sql_json(json) VALUES('{"name":"Katya","surname":"Baltica","friends":["Tihon","Ernest","Innokentiy"]}'); +SELECT JSON_QUERY('$.friends[0 to 2]', json); +DROP TABLE 01889_sql_json; From 3f8a22c35d23e4c1f7b7b1eea6fc95b8d797a16f Mon Sep 17 00:00:00 2001 From: l1tsolaiki Date: Sun, 4 Jul 2021 22:34:34 +0300 Subject: [PATCH 749/931] Fix syntax --- tests/queries/0_stateless/01889_sql_json_functions.sql | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/queries/0_stateless/01889_sql_json_functions.sql b/tests/queries/0_stateless/01889_sql_json_functions.sql index 9b978c2223d..0589a330280 100644 --- a/tests/queries/0_stateless/01889_sql_json_functions.sql +++ b/tests/queries/0_stateless/01889_sql_json_functions.sql @@ -46,5 +46,5 @@ CREATE TABLE 01889_sql_json (json String) ENGINE = MergeTree ORDER BY json; INSERT INTO 01889_sql_json(json) VALUES('{"name":"Vitali","surname":"Brown","friends":["Katya","Anatoliy","Ivan","Oleg"]}'); INSERT INTO 01889_sql_json(json) VALUES('{"name":"Ivan","surname":"Ivanov","friends":["Vasily","Kostya","Artyom"]}'); INSERT INTO 01889_sql_json(json) VALUES('{"name":"Katya","surname":"Baltica","friends":["Tihon","Ernest","Innokentiy"]}'); -SELECT JSON_QUERY('$.friends[0 to 2]', json); +SELECT JSON_QUERY('$.friends[0 to 2]', json) FROM 01889_sql_json; DROP TABLE 01889_sql_json; From 4ef27cfc2d20c8ab32f6024e6e536bb51fd6cca9 Mon Sep 17 00:00:00 2001 From: l1tsolaiki Date: Sun, 4 Jul 2021 22:36:35 +0300 Subject: [PATCH 750/931] Fix ans order --- tests/queries/0_stateless/01889_sql_json_functions.reference | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/queries/0_stateless/01889_sql_json_functions.reference b/tests/queries/0_stateless/01889_sql_json_functions.reference index fae7418b818..d361dea6b18 100644 --- a/tests/queries/0_stateless/01889_sql_json_functions.reference +++ b/tests/queries/0_stateless/01889_sql_json_functions.reference @@ -38,6 +38,6 @@ null 0 1 --MANY ROWS-- -["Vasily", "Kostya"] ["Katya", "Anatoliy"] +["Vasily", "Kostya"] ["Tihon", "Ernest"] From 3d1e2fe55078b807f214cbea06e982dcc86a3d22 Mon Sep 17 00:00:00 2001 From: l1tsolaiki Date: Sun, 4 Jul 2021 22:37:44 +0300 Subject: [PATCH 751/931] Fix order again --- tests/queries/0_stateless/01889_sql_json_functions.reference | 2 +- tests/queries/0_stateless/01889_sql_json_functions.sql | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/queries/0_stateless/01889_sql_json_functions.reference b/tests/queries/0_stateless/01889_sql_json_functions.reference index d361dea6b18..bf70961b1f8 100644 --- a/tests/queries/0_stateless/01889_sql_json_functions.reference +++ b/tests/queries/0_stateless/01889_sql_json_functions.reference @@ -38,6 +38,6 @@ null 0 1 --MANY ROWS-- -["Katya", "Anatoliy"] ["Vasily", "Kostya"] ["Tihon", "Ernest"] +["Katya", "Anatoliy"] diff --git a/tests/queries/0_stateless/01889_sql_json_functions.sql b/tests/queries/0_stateless/01889_sql_json_functions.sql index 0589a330280..fe37a3dc53e 100644 --- a/tests/queries/0_stateless/01889_sql_json_functions.sql +++ 
b/tests/queries/0_stateless/01889_sql_json_functions.sql @@ -43,8 +43,8 @@ SELECT JSON_EXISTS('$.a[*][0].h', '{"a":[[{"b":1}, {"g":1}],[{"h":1},{"y":1}]]}' SELECT '--MANY ROWS--'; DROP TABLE IF EXISTS 01889_sql_json; CREATE TABLE 01889_sql_json (json String) ENGINE = MergeTree ORDER BY json; -INSERT INTO 01889_sql_json(json) VALUES('{"name":"Vitali","surname":"Brown","friends":["Katya","Anatoliy","Ivan","Oleg"]}'); INSERT INTO 01889_sql_json(json) VALUES('{"name":"Ivan","surname":"Ivanov","friends":["Vasily","Kostya","Artyom"]}'); INSERT INTO 01889_sql_json(json) VALUES('{"name":"Katya","surname":"Baltica","friends":["Tihon","Ernest","Innokentiy"]}'); +INSERT INTO 01889_sql_json(json) VALUES('{"name":"Vitali","surname":"Brown","friends":["Katya","Anatoliy","Ivan","Oleg"]}'); SELECT JSON_QUERY('$.friends[0 to 2]', json) FROM 01889_sql_json; DROP TABLE 01889_sql_json; From a0b6281790e5e72b3ed839aa1b0e25da0a22c3a7 Mon Sep 17 00:00:00 2001 From: l1tsolaiki Date: Sun, 4 Jul 2021 22:38:43 +0300 Subject: [PATCH 752/931] Final fix order --- tests/queries/0_stateless/01889_sql_json_functions.reference | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/queries/0_stateless/01889_sql_json_functions.reference b/tests/queries/0_stateless/01889_sql_json_functions.reference index bf70961b1f8..8fb2f9430e0 100644 --- a/tests/queries/0_stateless/01889_sql_json_functions.reference +++ b/tests/queries/0_stateless/01889_sql_json_functions.reference @@ -38,6 +38,6 @@ null 0 1 --MANY ROWS-- -["Vasily", "Kostya"] ["Tihon", "Ernest"] ["Katya", "Anatoliy"] +["Vasily", "Kostya"] From 7965c638d2ba2d2db783a88ef63f6bb9975f825a Mon Sep 17 00:00:00 2001 From: l1tsolaiki Date: Sun, 4 Jul 2021 22:44:18 +0300 Subject: [PATCH 753/931] Another try --- .../0_stateless/01889_sql_json_functions.reference | 2 +- tests/queries/0_stateless/01889_sql_json_functions.sql | 8 ++++---- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/tests/queries/0_stateless/01889_sql_json_functions.reference b/tests/queries/0_stateless/01889_sql_json_functions.reference index 8fb2f9430e0..bf70961b1f8 100644 --- a/tests/queries/0_stateless/01889_sql_json_functions.reference +++ b/tests/queries/0_stateless/01889_sql_json_functions.reference @@ -38,6 +38,6 @@ null 0 1 --MANY ROWS-- +["Vasily", "Kostya"] ["Tihon", "Ernest"] ["Katya", "Anatoliy"] -["Vasily", "Kostya"] diff --git a/tests/queries/0_stateless/01889_sql_json_functions.sql b/tests/queries/0_stateless/01889_sql_json_functions.sql index fe37a3dc53e..79af60a15a4 100644 --- a/tests/queries/0_stateless/01889_sql_json_functions.sql +++ b/tests/queries/0_stateless/01889_sql_json_functions.sql @@ -42,9 +42,9 @@ SELECT JSON_EXISTS('$.a[*][0].h', '{"a":[[{"b":1}, {"g":1}],[{"h":1},{"y":1}]]}' SELECT '--MANY ROWS--'; DROP TABLE IF EXISTS 01889_sql_json; -CREATE TABLE 01889_sql_json (json String) ENGINE = MergeTree ORDER BY json; -INSERT INTO 01889_sql_json(json) VALUES('{"name":"Ivan","surname":"Ivanov","friends":["Vasily","Kostya","Artyom"]}'); -INSERT INTO 01889_sql_json(json) VALUES('{"name":"Katya","surname":"Baltica","friends":["Tihon","Ernest","Innokentiy"]}'); -INSERT INTO 01889_sql_json(json) VALUES('{"name":"Vitali","surname":"Brown","friends":["Katya","Anatoliy","Ivan","Oleg"]}'); +CREATE TABLE 01889_sql_json (id UInt8, json String) ENGINE = MergeTree ORDER BY id; +INSERT INTO 01889_sql_json(json) VALUES(0, '{"name":"Ivan","surname":"Ivanov","friends":["Vasily","Kostya","Artyom"]}'); +INSERT INTO 01889_sql_json(json) VALUES(1, 
'{"name":"Katya","surname":"Baltica","friends":["Tihon","Ernest","Innokentiy"]}'); +INSERT INTO 01889_sql_json(json) VALUES(2, '{"name":"Vitali","surname":"Brown","friends":["Katya","Anatoliy","Ivan","Oleg"]}'); SELECT JSON_QUERY('$.friends[0 to 2]', json) FROM 01889_sql_json; DROP TABLE 01889_sql_json; From fd07dbe1f7379f7e7511576d8c339005b551e523 Mon Sep 17 00:00:00 2001 From: l1tsolaiki Date: Sun, 4 Jul 2021 22:46:45 +0300 Subject: [PATCH 754/931] Please just work --- tests/queries/0_stateless/01889_sql_json_functions.sql | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tests/queries/0_stateless/01889_sql_json_functions.sql b/tests/queries/0_stateless/01889_sql_json_functions.sql index 79af60a15a4..77cb32352cf 100644 --- a/tests/queries/0_stateless/01889_sql_json_functions.sql +++ b/tests/queries/0_stateless/01889_sql_json_functions.sql @@ -43,8 +43,8 @@ SELECT JSON_EXISTS('$.a[*][0].h', '{"a":[[{"b":1}, {"g":1}],[{"h":1},{"y":1}]]}' SELECT '--MANY ROWS--'; DROP TABLE IF EXISTS 01889_sql_json; CREATE TABLE 01889_sql_json (id UInt8, json String) ENGINE = MergeTree ORDER BY id; -INSERT INTO 01889_sql_json(json) VALUES(0, '{"name":"Ivan","surname":"Ivanov","friends":["Vasily","Kostya","Artyom"]}'); -INSERT INTO 01889_sql_json(json) VALUES(1, '{"name":"Katya","surname":"Baltica","friends":["Tihon","Ernest","Innokentiy"]}'); -INSERT INTO 01889_sql_json(json) VALUES(2, '{"name":"Vitali","surname":"Brown","friends":["Katya","Anatoliy","Ivan","Oleg"]}'); +INSERT INTO 01889_sql_json(id, json) VALUES(0, '{"name":"Ivan","surname":"Ivanov","friends":["Vasily","Kostya","Artyom"]}'); +INSERT INTO 01889_sql_json(id, json) VALUES(1, '{"name":"Katya","surname":"Baltica","friends":["Tihon","Ernest","Innokentiy"]}'); +INSERT INTO 01889_sql_json(id, json) VALUES(2, '{"name":"Vitali","surname":"Brown","friends":["Katya","Anatoliy","Ivan","Oleg"]}'); SELECT JSON_QUERY('$.friends[0 to 2]', json) FROM 01889_sql_json; DROP TABLE 01889_sql_json; From 32b7d7b75037547a49da30a68e2fda4cd677769c Mon Sep 17 00:00:00 2001 From: kssenii Date: Sun, 4 Jul 2021 14:56:31 +0000 Subject: [PATCH 755/931] More correct startup on create query --- src/Core/PostgreSQL/Connection.cpp | 1 + src/Databases/DatabaseFactory.cpp | 2 +- .../DatabaseMaterializedPostgreSQL.cpp | 3 + .../DatabaseMaterializedPostgreSQL.h | 2 + .../PostgreSQLReplicationHandler.cpp | 107 +++++++++++------- .../PostgreSQL/PostgreSQLReplicationHandler.h | 14 ++- .../StorageMaterializedPostgreSQL.cpp | 1 + tests/integration/helpers/cluster.py | 2 +- 8 files changed, 80 insertions(+), 52 deletions(-) diff --git a/src/Core/PostgreSQL/Connection.cpp b/src/Core/PostgreSQL/Connection.cpp index ad54bbe9dca..c423d75981e 100644 --- a/src/Core/PostgreSQL/Connection.cpp +++ b/src/Core/PostgreSQL/Connection.cpp @@ -23,6 +23,7 @@ void Connection::execWithRetry(const std::function { pqxx::nontransaction tx(getRef()); exec(tx); + break; } catch (const pqxx::broken_connection & e) { diff --git a/src/Databases/DatabaseFactory.cpp b/src/Databases/DatabaseFactory.cpp index 802d50d11c2..48b923c4756 100644 --- a/src/Databases/DatabaseFactory.cpp +++ b/src/Databases/DatabaseFactory.cpp @@ -293,7 +293,7 @@ DatabasePtr DatabaseFactory::getImpl(const ASTCreateQuery & create, const String postgresql_replica_settings->loadFromQuery(*engine_define); return std::make_shared( - context, metadata_path, uuid, engine_define, + context, metadata_path, uuid, engine_define, create.attach, database_name, postgres_database_name, connection_info, 
std::move(postgresql_replica_settings)); } diff --git a/src/Databases/PostgreSQL/DatabaseMaterializedPostgreSQL.cpp b/src/Databases/PostgreSQL/DatabaseMaterializedPostgreSQL.cpp index 256affc68c8..742eb28c7a4 100644 --- a/src/Databases/PostgreSQL/DatabaseMaterializedPostgreSQL.cpp +++ b/src/Databases/PostgreSQL/DatabaseMaterializedPostgreSQL.cpp @@ -37,12 +37,14 @@ DatabaseMaterializedPostgreSQL::DatabaseMaterializedPostgreSQL( const String & metadata_path_, UUID uuid_, const ASTStorage * database_engine_define_, + bool is_attach_, const String & database_name_, const String & postgres_database_name, const postgres::ConnectionInfo & connection_info_, std::unique_ptr settings_) : DatabaseAtomic(database_name_, metadata_path_, uuid_, "DatabaseMaterializedPostgreSQL (" + database_name_ + ")", context_) , database_engine_define(database_engine_define_->clone()) + , is_attach(is_attach_) , remote_database_name(postgres_database_name) , connection_info(connection_info_) , settings(std::move(settings_)) @@ -58,6 +60,7 @@ void DatabaseMaterializedPostgreSQL::startSynchronization() database_name, connection_info, getContext(), + is_attach, settings->materialized_postgresql_max_block_size.value, settings->materialized_postgresql_allow_automatic_update, /* is_materialized_postgresql_database = */ true, diff --git a/src/Databases/PostgreSQL/DatabaseMaterializedPostgreSQL.h b/src/Databases/PostgreSQL/DatabaseMaterializedPostgreSQL.h index f998a0c54de..7ca84f079ed 100644 --- a/src/Databases/PostgreSQL/DatabaseMaterializedPostgreSQL.h +++ b/src/Databases/PostgreSQL/DatabaseMaterializedPostgreSQL.h @@ -33,6 +33,7 @@ public: const String & metadata_path_, UUID uuid_, const ASTStorage * database_engine_define_, + bool is_attach_, const String & database_name_, const String & postgres_database_name, const postgres::ConnectionInfo & connection_info, @@ -63,6 +64,7 @@ private: void startSynchronization(); ASTPtr database_engine_define; + bool is_attach; String remote_database_name; postgres::ConnectionInfo connection_info; std::unique_ptr settings; diff --git a/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp b/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp index 67026d345eb..4c614d8fd5a 100644 --- a/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp +++ b/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp @@ -29,12 +29,14 @@ PostgreSQLReplicationHandler::PostgreSQLReplicationHandler( const String & current_database_name_, const postgres::ConnectionInfo & connection_info_, ContextPtr context_, + bool is_attach_, const size_t max_block_size_, bool allow_automatic_update_, bool is_materialized_postgresql_database_, const String tables_list_) : log(&Poco::Logger::get("PostgreSQLReplicationHandler")) , context(context_) + , is_attach(is_attach_) , remote_database_name(remote_database_name_) , current_database_name(current_database_name_) , connection_info(connection_info_) @@ -145,10 +147,8 @@ void PostgreSQLReplicationHandler::startSynchronization(bool throw_on_error) { initial_sync(); } - /// Replication slot depends on publication, so if replication slot exists and new - /// publication was just created - drop that replication slot and start from scratch. - /// TODO: tests - else if (new_publication_created) + /// Always drop replication slot if it is CREATE query and not ATTACH. 
+ else if (!is_attach || new_publication) { dropReplicationSlot(tx); initial_sync(); @@ -285,22 +285,25 @@ bool PostgreSQLReplicationHandler::isPublicationExist(pqxx::work & tx) std::string query_str = fmt::format("SELECT exists (SELECT 1 FROM pg_publication WHERE pubname = '{}')", publication_name); pqxx::result result{tx.exec(query_str)}; assert(!result.empty()); - bool publication_exists = (result[0][0].as() == "t"); - - if (publication_exists) - LOG_INFO(log, "Publication {} already exists. Using existing version", publication_name); - - return publication_exists; + return result[0][0].as() == "t"; } -void PostgreSQLReplicationHandler::createPublicationIfNeeded(pqxx::work & tx, bool create_without_check) +void PostgreSQLReplicationHandler::createPublicationIfNeeded(pqxx::work & tx) { - /// For database engine a publication can be created earlier than in startReplication(). - if (new_publication_created) - return; + auto publication_exists = isPublicationExist(tx); - if (create_without_check || !isPublicationExist(tx)) + if (!is_attach && publication_exists) + { + /// This is a case for single Materialized storage. In case of database engine this check is done in advance. + LOG_WARNING(log, + "Publication {} already exists, but it is a CREATE query, not ATTACH. Publication will be dropped", + publication_name); + + connection->execWithRetry([&](pqxx::nontransaction & tx_){ dropPublication(tx_); }); + } + + if (!is_attach || !publication_exists) { if (tables_list.empty()) { @@ -320,8 +323,8 @@ void PostgreSQLReplicationHandler::createPublicationIfNeeded(pqxx::work & tx, bo try { tx.exec(query_str); - new_publication_created = true; LOG_TRACE(log, "Created publication {} with tables list: {}", publication_name, tables_list); + new_publication = true; } catch (Exception & e) { @@ -329,6 +332,10 @@ void PostgreSQLReplicationHandler::createPublicationIfNeeded(pqxx::work & tx, bo throw; } } + else + { + LOG_TRACE(log, "Using existing publication ({}) version", publication_name); + } } @@ -401,6 +408,7 @@ void PostgreSQLReplicationHandler::dropPublication(pqxx::nontransaction & tx) { std::string query_str = fmt::format("DROP PUBLICATION IF EXISTS {}", publication_name); tx.exec(query_str); + LOG_TRACE(log, "Dropped publication: {}", publication_name); } @@ -438,9 +446,11 @@ void PostgreSQLReplicationHandler::shutdownFinal() NameSet PostgreSQLReplicationHandler::fetchRequiredTables(postgres::Connection & connection_) { pqxx::work tx(connection_.getRef()); - bool publication_exists_before_startup = isPublicationExist(tx); NameSet result_tables; + bool publication_exists_before_startup = isPublicationExist(tx); + LOG_DEBUG(log, "Publication exists: {}, is attach: {}", publication_exists_before_startup, is_attach); + Strings expected_tables; if (!tables_list.empty()) { @@ -453,49 +463,58 @@ NameSet PostgreSQLReplicationHandler::fetchRequiredTables(postgres::Connection & if (publication_exists_before_startup) { - if (tables_list.empty()) + if (!is_attach) { - /// There is no tables list, but publication already exists, then the expected behaviour - /// is to replicate the whole database. But it could be a server restart, so we can't drop it. LOG_WARNING(log, - "Publication {} already exists and tables list is empty. Assuming publication is correct", + "Publication {} already exists, but it is a CREATE query, not ATTACH. 
Publication will be dropped", publication_name); - result_tables = fetchPostgreSQLTablesList(tx); + connection->execWithRetry([&](pqxx::nontransaction & tx_){ dropPublication(tx_); }); } - /// Check tables list from publication is the same as expected tables list. - /// If not - drop publication and return expected tables list. else { - result_tables = fetchTablesFromPublication(tx); - NameSet diff; - std::set_symmetric_difference(expected_tables.begin(), expected_tables.end(), - result_tables.begin(), result_tables.end(), - std::inserter(diff, diff.begin())); - if (!diff.empty()) + if (tables_list.empty()) { - String diff_tables; - for (const auto & table_name : diff) - { - if (!diff_tables.empty()) - diff_tables += ", "; - diff_tables += table_name; - } - LOG_WARNING(log, - "Publication {} already exists, but specified tables list differs from publication tables list in tables: {}", - publication_name, diff_tables); + "Publication {} already exists and tables list is empty. Assuming publication is correct.", + publication_name); - connection->execWithRetry([&](pqxx::nontransaction & tx_){ dropPublication(tx_); }); + result_tables = fetchPostgreSQLTablesList(tx); + } + /// Check tables list from publication is the same as expected tables list. + /// If not - drop publication and return expected tables list. + else + { + result_tables = fetchTablesFromPublication(tx); + NameSet diff; + std::set_symmetric_difference(expected_tables.begin(), expected_tables.end(), + result_tables.begin(), result_tables.end(), + std::inserter(diff, diff.begin())); + if (!diff.empty()) + { + String diff_tables; + for (const auto & table_name : diff) + { + if (!diff_tables.empty()) + diff_tables += ", "; + diff_tables += table_name; + } + + LOG_WARNING(log, + "Publication {} already exists, but specified tables list differs from publication tables list in tables: {}.", + publication_name, diff_tables); + + connection->execWithRetry([&](pqxx::nontransaction & tx_){ dropPublication(tx_); }); + } } } } - else + + if (result_tables.empty()) { if (!tables_list.empty()) { - tx.commit(); - return NameSet(expected_tables.begin(), expected_tables.end()); + result_tables = NameSet(expected_tables.begin(), expected_tables.end()); } else { diff --git a/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.h b/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.h index 4b6321338b8..95ac12b3786 100644 --- a/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.h +++ b/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.h @@ -24,6 +24,7 @@ public: const String & current_database_name_, const postgres::ConnectionInfo & connection_info_, ContextPtr context_, + bool is_attach_, const size_t max_block_size_, bool allow_automatic_update_, bool is_materialized_postgresql_database_, @@ -54,7 +55,7 @@ private: bool isPublicationExist(pqxx::work & tx); - void createPublicationIfNeeded(pqxx::work & tx, bool create_without_check = false); + void createPublicationIfNeeded(pqxx::work & tx); NameSet fetchTablesFromPublication(pqxx::work & tx); @@ -83,6 +84,12 @@ private: Poco::Logger * log; ContextPtr context; + /// If it is not attach, i.e. a create query, then if publication already exists - always drop it. + bool is_attach; + + /// If new publication is created at start up - always drop replication slot if it exists. + bool new_publication = false; + const String remote_database_name, current_database_name; /// Connection string and address for logs. 
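[Editorial aside, not part of the patch above] To make the publication reconciliation concrete: fetchRequiredTables() compares the expected table list against the tables already attached to an existing publication, and any mismatch forces a drop-and-recreate. The following is a minimal standalone C++ sketch of that set_symmetric_difference check; the table names are hypothetical, and the real handler performs the drop inside a pqxx transaction via execWithRetry rather than printing a message.

#include <algorithm>
#include <iostream>
#include <iterator>
#include <set>
#include <string>

int main()
{
    /// Hypothetical table lists: what the engine was asked to replicate
    /// vs. what an already existing publication currently covers.
    std::set<std::string> expected = {"orders", "users"};
    std::set<std::string> published = {"orders", "payments"};

    /// Tables present in exactly one of the two sets. A non-empty
    /// difference means the old publication cannot simply be reused.
    std::set<std::string> diff;
    std::set_symmetric_difference(
        expected.begin(), expected.end(),
        published.begin(), published.end(),
        std::inserter(diff, diff.begin()));

    if (diff.empty())
    {
        std::cout << "reuse existing publication\n";
    }
    else
    {
        std::cout << "drop and recreate publication, mismatched tables:";
        for (const auto & table : diff)
            std::cout << ' ' << table;
        std::cout << '\n';
    }
    return 0;
}

Both std::set ranges iterate in sorted order, which is exactly what std::set_symmetric_difference requires; the patch builds the same diff from the sorted expected_tables and the publication's table set.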
@@ -113,11 +120,6 @@ private: std::atomic stop_synchronization = false; - /// For database engine there are 2 places where it is checked for publication: - /// 1. to fetch tables list from already created publication when database is loaded - /// 2. at replication startup - bool new_publication_created = false; - /// MaterializedPostgreSQL tables. Used for managing all operations with its internal nested tables. MaterializedStorages materialized_storages; diff --git a/src/Storages/PostgreSQL/StorageMaterializedPostgreSQL.cpp b/src/Storages/PostgreSQL/StorageMaterializedPostgreSQL.cpp index 252059f606d..78f7fefd5dc 100644 --- a/src/Storages/PostgreSQL/StorageMaterializedPostgreSQL.cpp +++ b/src/Storages/PostgreSQL/StorageMaterializedPostgreSQL.cpp @@ -70,6 +70,7 @@ StorageMaterializedPostgreSQL::StorageMaterializedPostgreSQL( table_id_.database_name, connection_info, getContext(), + is_attach, replication_settings->materialized_postgresql_max_block_size.value, /* allow_automatic_update */ false, /* is_materialized_postgresql_database */false); } diff --git a/tests/integration/helpers/cluster.py b/tests/integration/helpers/cluster.py index 2617a7ade40..162ffc53e20 100644 --- a/tests/integration/helpers/cluster.py +++ b/tests/integration/helpers/cluster.py @@ -1068,7 +1068,7 @@ class ClickHouseCluster: logging.error("Can't connect to MySQL:{}".format(errors)) raise Exception("Cannot wait MySQL container") - def wait_postgres_to_start(self, timeout=180): + def wait_postgres_to_start(self, timeout=260): self.postgres_ip = self.get_instance_ip(self.postgres_host) start = time.time() while time.time() - start < timeout: From 57972410c794a504fe407a5ec293a8484007ee6e Mon Sep 17 00:00:00 2001 From: l1tsolaiki Date: Sun, 4 Jul 2021 22:54:43 +0300 Subject: [PATCH 756/931] =?UTF-8?q?=F0=9F=98=A4?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../queries/0_stateless/01889_sql_json_functions.reference | 6 +++--- tests/queries/0_stateless/01889_sql_json_functions.sql | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/tests/queries/0_stateless/01889_sql_json_functions.reference b/tests/queries/0_stateless/01889_sql_json_functions.reference index bf70961b1f8..7d400b4f6e7 100644 --- a/tests/queries/0_stateless/01889_sql_json_functions.reference +++ b/tests/queries/0_stateless/01889_sql_json_functions.reference @@ -38,6 +38,6 @@ null 0 1 --MANY ROWS-- -["Vasily", "Kostya"] -["Tihon", "Ernest"] -["Katya", "Anatoliy"] +0 ["Vasily", "Kostya"] +1 ["Tihon", "Ernest"] +2 ["Katya", "Anatoliy"] diff --git a/tests/queries/0_stateless/01889_sql_json_functions.sql b/tests/queries/0_stateless/01889_sql_json_functions.sql index 77cb32352cf..1c5069ccfde 100644 --- a/tests/queries/0_stateless/01889_sql_json_functions.sql +++ b/tests/queries/0_stateless/01889_sql_json_functions.sql @@ -46,5 +46,5 @@ CREATE TABLE 01889_sql_json (id UInt8, json String) ENGINE = MergeTree ORDER BY INSERT INTO 01889_sql_json(id, json) VALUES(0, '{"name":"Ivan","surname":"Ivanov","friends":["Vasily","Kostya","Artyom"]}'); INSERT INTO 01889_sql_json(id, json) VALUES(1, '{"name":"Katya","surname":"Baltica","friends":["Tihon","Ernest","Innokentiy"]}'); INSERT INTO 01889_sql_json(id, json) VALUES(2, '{"name":"Vitali","surname":"Brown","friends":["Katya","Anatoliy","Ivan","Oleg"]}'); -SELECT JSON_QUERY('$.friends[0 to 2]', json) FROM 01889_sql_json; +SELECT id, JSON_QUERY('$.friends[0 to 2]', json) FROM 01889_sql_json ORDER BY id; DROP TABLE 01889_sql_json; From 
7d1c561a7b37634f70ba36c97d14dde51d79d0f4 Mon Sep 17 00:00:00 2001 From: l1tsolaiki Date: Sun, 4 Jul 2021 22:57:16 +0300 Subject: [PATCH 757/931] =?UTF-8?q?=F0=9F=98=B0?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../queries/0_stateless/01889_sql_json_functions.reference | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tests/queries/0_stateless/01889_sql_json_functions.reference b/tests/queries/0_stateless/01889_sql_json_functions.reference index 7d400b4f6e7..593f2fb2d20 100644 --- a/tests/queries/0_stateless/01889_sql_json_functions.reference +++ b/tests/queries/0_stateless/01889_sql_json_functions.reference @@ -38,6 +38,6 @@ null 0 1 --MANY ROWS-- -0 ["Vasily", "Kostya"] -1 ["Tihon", "Ernest"] -2 ["Katya", "Anatoliy"] +0 ["Vasily", "Kostya"] +1 ["Tihon", "Ernest"] +2 ["Katya", "Anatoliy"] From c4675285bffd547a9cf328fdcfb99fff19681ba4 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sun, 4 Jul 2021 23:49:36 +0300 Subject: [PATCH 758/931] Development --- src/Common/ErrorCodes.cpp | 1 + src/Interpreters/AsynchronousMetrics.cpp | 368 ++++++++++++++++++----- src/Interpreters/AsynchronousMetrics.h | 40 ++- 3 files changed, 340 insertions(+), 69 deletions(-) diff --git a/src/Common/ErrorCodes.cpp b/src/Common/ErrorCodes.cpp index f4ceef2896a..8301ea656bf 100644 --- a/src/Common/ErrorCodes.cpp +++ b/src/Common/ErrorCodes.cpp @@ -557,6 +557,7 @@ M(587, CONCURRENT_ACCESS_NOT_SUPPORTED) \ M(588, DISTRIBUTED_BROKEN_BATCH_INFO) \ M(589, DISTRIBUTED_BROKEN_BATCH_FILES) \ + M(590, CANNOT_SYSCONF) \ \ M(998, POSTGRESQL_CONNECTION_FAILURE) \ M(999, KEEPER_EXCEPTION) \ diff --git a/src/Interpreters/AsynchronousMetrics.cpp b/src/Interpreters/AsynchronousMetrics.cpp index 4e46cdc27f2..89196b5e25f 100644 --- a/src/Interpreters/AsynchronousMetrics.cpp +++ b/src/Interpreters/AsynchronousMetrics.cpp @@ -36,10 +36,16 @@ namespace CurrentMetrics namespace DB { +namespace ErrorCodes +{ + extern const int CORRUPTED_DATA; + extern const int CANNOT_SYSCONF; +} + +static constexpr size_t small_buffer_size = 4096; + static void openFileIfExists(const char * filename, std::optional & out) { - static constexpr size_t small_buffer_size = 4096; - /// Ignoring time of check is not time of use cases, as procfs/sysfs files are fairly persistent. 
std::error_code ec; @@ -47,6 +53,15 @@ static void openFileIfExists(const char * filename, std::optional openFileIfExists(const std::string & filename) +{ + std::error_code ec; + if (std::filesystem::is_regular_file(filename, ec)) + return std::make_unique(filename, small_buffer_size); + return {}; +} + + AsynchronousMetrics::AsynchronousMetrics( ContextPtr global_context_, @@ -60,7 +75,6 @@ AsynchronousMetrics::AsynchronousMetrics( { #if defined(OS_LINUX) openFileIfExists("/proc/meminfo", meminfo); - openFileIfExists("/proc/mounts", mounts); openFileIfExists("/proc/loadavg", loadavg); openFileIfExists("/proc/stat", proc_stat); openFileIfExists("/proc/cpuinfo", cpuinfo); @@ -68,6 +82,17 @@ AsynchronousMetrics::AsynchronousMetrics( openFileIfExists("/proc/sockstat", sockstat); openFileIfExists("/proc/netstat", netstat); openFileIfExists("/proc/sys/fs/file-nr", file_nr); + openFileIfExists("/proc/uptime", uptime); + + size_t thermal_device_index = 0; + while (true) + { + std::unique_ptr file = openFileIfExists(fmt::format("/sys/class/thermal/thermal_zone{}/temp", thermal_device_index)); + if (!file) + break; + thermal.emplace_back(std::move(file)); + ++thermal_device_index; + } #endif } @@ -211,6 +236,63 @@ static void saveAllArenasMetric(AsynchronousMetricValues & values, } #endif + +#if defined(OS_LINUX) + +void AsynchronousMetrics::ProcStatValuesCPU::read(ReadBuffer & in) +{ + readText(user, in); + skipWhitespaceIfAny(in); + readText(nice, in); + skipWhitespaceIfAny(in); + readText(system, in); + skipWhitespaceIfAny(in); + readText(idle, in); + skipWhitespaceIfAny(in); + readText(iowait, in); + skipWhitespaceIfAny(in); + readText(irq, in); + skipWhitespaceIfAny(in); + readText(softirq, in); + skipWhitespaceIfAny(in); + readText(steal, in); + skipWhitespaceIfAny(in); + readText(guest, in); + skipWhitespaceIfAny(in); + readText(guest_nice, in); + skipToNextLineOrEOF(in); +} + +AsynchronousMetrics::ProcStatValuesCPU +AsynchronousMetrics::ProcStatValuesCPU::operator-(const AsynchronousMetrics::ProcStatValuesCPU & other) const +{ + ProcStatValuesCPU res{}; + res.user = user - other.user; + res.nice = nice - other.nice; + res.system = system - other.system; + res.idle = idle - other.idle; + res.iowait = iowait - other.iowait; + res.irq = irq - other.irq; + res.softirq = softirq - other.softirq; + res.steal = steal - other.steal; + res.guest = guest - other.guest; + res.guest_nice = guest_nice - other.guest_nice; + return res; +} + +AsynchronousMetrics::ProcStatValuesOther +AsynchronousMetrics::ProcStatValuesOther::operator-(const AsynchronousMetrics::ProcStatValuesOther & other) const +{ + ProcStatValuesOther res{}; + res.interrupts = interrupts - other.interrupts; + res.context_switches = context_switches - other.context_switches; + res.processes_created = processes_created - other.processes_created; + return res; +} + +#endif + + void AsynchronousMetrics::update() { AsynchronousMetricValues new_values; @@ -311,42 +393,234 @@ void AsynchronousMetrics::update() new_values["OSThreadsTotal"] = threads_total; } + if (uptime) + { + uptime->rewind(); + + Float64 uptime_seconds = 0; + readText(uptime_seconds, *uptime); + + new_values["OSUptime"] = uptime_seconds; + } + + if (proc_stat) + { + proc_stat->rewind(); + + int64_t hz = sysconf(_SC_CLK_TCK); + if (-1 == hz) + throwFromErrno("Cannot call 'sysconf' to obtain system HZ", ErrorCodes::CANNOT_SYSCONF); + + double multiplier = 1.0 / hz / update_period.count(); + + ProcStatValuesOther current_other_values{}; + + while (!proc_stat->eof()) + { + 
String name; + readStringUntilWhitespace(name, *proc_stat); + skipWhitespaceIfAny(*proc_stat); + + if (name.starts_with("cpu")) + { + String cpu_num_str = name.substr(strlen("cpu")); + UInt64 cpu_num = 0; + if (!cpu_num_str.empty()) + { + cpu_num = parse(cpu_num_str); + + if (cpu_num > 1000000) /// Safety check, arbitrary large number, suitable for supercomputing applications. + throw Exception(ErrorCodes::CORRUPTED_DATA, "Too many CPUs (at least {}) in '/proc/stat' file", cpu_num); + + if (proc_stat_values_per_cpu.size() <= cpu_num) + proc_stat_values_per_cpu.resize(cpu_num + 1); + } + + ProcStatValuesCPU current_values{}; + current_values.read(*proc_stat); + + ProcStatValuesCPU & prev_values = !cpu_num_str.empty() ? proc_stat_values_per_cpu[cpu_num] : proc_stat_values_all_cpus; + + if (!first_run) + { + ProcStatValuesCPU delta_values = current_values - prev_values; + + String cpu_suffix; + if (!cpu_num_str.empty()) + cpu_suffix = "CPU" + cpu_num_str; + + new_values["OSUserTime" + cpu_suffix] = delta_values.user * multiplier; + new_values["OSNiceTime" + cpu_suffix] = delta_values.nice * multiplier; + new_values["OSSystemTime" + cpu_suffix] = delta_values.system * multiplier; + new_values["OSIdleTime" + cpu_suffix] = delta_values.idle * multiplier; + new_values["OSIOWaitTime" + cpu_suffix] = delta_values.iowait * multiplier; + new_values["OSIrqTime" + cpu_suffix] = delta_values.irq * multiplier; + new_values["OSSoftIrqTime" + cpu_suffix] = delta_values.softirq * multiplier; + new_values["OSStealTime" + cpu_suffix] = delta_values.steal * multiplier; + new_values["OSGuestTime" + cpu_suffix] = delta_values.guest * multiplier; + new_values["OSGuestNiceTime" + cpu_suffix] = delta_values.guest_nice * multiplier; + } + + prev_values = current_values; + } + else if (name == "intr") + { + readText(current_other_values.interrupts, *proc_stat); + skipToNextLineOrEOF(*proc_stat); + } + else if (name == "ctxt") + { + readText(current_other_values.context_switches, *proc_stat); + skipToNextLineOrEOF(*proc_stat); + } + else if (name == "processes") + { + readText(current_other_values.processes_created, *proc_stat); + skipToNextLineOrEOF(*proc_stat); + } + else if (name == "procs_running") + { + UInt64 processes_running = 0; + readText(processes_running, *proc_stat); + skipToNextLineOrEOF(*proc_stat); + new_values["OSProcessesRunning"] = processes_running; + } + else if (name == "procs_blocked") + { + UInt64 processes_blocked = 0; + readText(processes_blocked, *proc_stat); + skipToNextLineOrEOF(*proc_stat); + new_values["OSProcessesBlocked"] = processes_blocked; + } + else + skipToNextLineOrEOF(*proc_stat); + } + + if (!first_run) + { + ProcStatValuesOther delta_values = current_other_values - proc_stat_values_other; + + new_values["OSInterrupts"] = delta_values.interrupts * multiplier; + new_values["OSContextSwitches"] = delta_values.context_switches * multiplier; + new_values["OSProcessesCreated"] = delta_values.processes_created * multiplier; + } + + proc_stat_values_other = current_other_values; + } + if (meminfo) { meminfo->rewind(); + uint64_t free_plus_cached_bytes = 0; + while (!meminfo->eof()) + { + String name; + readStringUntilWhitespace(name, *meminfo); + skipWhitespaceIfAny(*meminfo); + + uint64_t kb = 0; + readText(kb, *meminfo); + if (kb) + { + skipWhitespaceIfAny(*meminfo); + assertString("kB", *meminfo); + + uint64_t bytes = kb * 1024; + + if (name == "MemTotal:") + { + new_values["OSMemoryTotal"] = bytes; + } + else if (name == "MemFree:") + { + free_plus_cached_bytes += bytes; + 
new_values["OSMemoryFreeWithoutCached"] = bytes; + } + else if (name == "MemAvailable:") + { + new_values["OSMemoryAvailable"] = bytes; + } + else if (name == "Buffers:") + { + new_values["OSMemoryBuffers"] = bytes; + } + else if (name == "Cached:") + { + free_plus_cached_bytes += bytes; + new_values["OSMemoryCached"] = bytes; + } + else if (name == "SwapCached:") + { + new_values["OSMemorySwapCached"] = bytes; + } + } + + skipToNextLineOrEOF(*meminfo); + } + + new_values["OSMemoryFreePlusCached"] = free_plus_cached_bytes; } -#endif - /// Process CPU usage according to OS -#if defined(OS_LINUX) + // Try to add processor frequencies, ignoring errors. + if (cpuinfo) { - ProcessorStatisticsOS::Data data = proc_stat.get(); + try + { + cpuinfo->rewind(); - new_values["LoadAvg1"] = data.loadavg.avg1; - new_values["LoadAvg5"] = data.loadavg.avg5; - new_values["LoadAvg15"] = data.loadavg.avg15; + // We need the following lines: + // processor : 4 + // cpu MHz : 4052.941 + // They contain tabs and are interspersed with other info. - new_values["FreqMin"] = data.freq.min; - new_values["FreqMax"] = data.freq.max; - new_values["FreqAvg"] = data.freq.avg; + int core_id = 0; + while (!cpuinfo->eof()) + { + std::string s; + // We don't have any backslash escape sequences in /proc/cpuinfo, so + // this function will read the line until EOL, which is exactly what + // we need. + readEscapedStringUntilEOL(s, *cpuinfo); + // It doesn't read the EOL itself. + ++cpuinfo->position(); - new_values["TimeLoadUser"] = data.stload.user_time; - new_values["TimeLoadNice"] = data.stload.nice_time; - new_values["TimeLoadSystem"] = data.stload.system_time; - new_values["TimeLoadIDLE"] = data.stload.idle_time; - new_values["TimeLoadIowait"] = data.stload.iowait_time; - new_values["TimeLoadSteal"] = data.stload.steal_time; - new_values["TimeLoadGuest"] = data.stload.guest_time; - new_values["TimeLoadGuestNice"] = data.stload.guest_nice_time; + if (s.rfind("processor", 0) == 0) + { + if (auto colon = s.find_first_of(':')) + { + core_id = std::stoi(s.substr(colon + 2)); + } + } + else if (s.rfind("cpu MHz", 0) == 0) + { + if (auto colon = s.find_first_of(':')) + { + auto mhz = std::stod(s.substr(colon + 2)); + new_values[fmt::format("CPUFrequencyMHz_{}", core_id)] = mhz; + } + } + } + } + catch (...) + { + tryLogCurrentException(__PRETTY_FUNCTION__); + } + } - new_values["Processes"] = data.stload.processes; - new_values["ProcessesRunning"] = data.stload.procs_running; - new_values["ProcessesBlocked"] = data.stload.procs_blocked; + if (file_nr) + { + file_nr->rewind(); + + uint64_t open_files = 0; + readText(open_files, *file_nr); + new_values["OSOpenFiles"] = open_files; } #endif + + /// Process disk usage according to OS #if defined(OS_LINUX) { @@ -530,50 +804,6 @@ void AsynchronousMetrics::update() saveAllArenasMetric(new_values, "muzzy_purged"); #endif -#if defined(OS_LINUX) - // Try to add processor frequencies, ignoring errors. - try - { - ReadBufferFromFile buf("/proc/cpuinfo", 32768 /* buf_size */); - - // We need the following lines: - // processor : 4 - // cpu MHz : 4052.941 - // They contain tabs and are interspersed with other info. - int core_id = 0; - while (!buf.eof()) - { - std::string s; - // We don't have any backslash escape sequences in /proc/cpuinfo, so - // this function will read the line until EOL, which is exactly what - // we need. - readEscapedStringUntilEOL(s, buf); - // It doesn't read the EOL itself. 
- ++buf.position(); - - if (s.rfind("processor", 0) == 0) - { - if (auto colon = s.find_first_of(':')) - { - core_id = std::stoi(s.substr(colon + 2)); - } - } - else if (s.rfind("cpu MHz", 0) == 0) - { - if (auto colon = s.find_first_of(':')) - { - auto mhz = std::stod(s.substr(colon + 2)); - new_values[fmt::format("CPUFrequencyMHz_{}", core_id)] = mhz; - } - } - } - } - catch (...) - { - tryLogCurrentException(__PRETTY_FUNCTION__); - } -#endif - /// Add more metrics as you wish. // Log the new metrics. @@ -582,6 +812,8 @@ void AsynchronousMetrics::update() log->addValues(new_values); } + first_run = false; + // Finally, update the current metrics. std::lock_guard lock(mutex); values = new_values; diff --git a/src/Interpreters/AsynchronousMetrics.h b/src/Interpreters/AsynchronousMetrics.h index 7bb281842dd..9f6e63f6ce6 100644 --- a/src/Interpreters/AsynchronousMetrics.h +++ b/src/Interpreters/AsynchronousMetrics.h @@ -12,6 +12,7 @@ #include #include #include +#include #include #include @@ -20,6 +21,7 @@ namespace DB { class ProtocolServerAdapter; +class ReadBuffer; using AsynchronousMetricValue = double; using AsynchronousMetricValues = std::unordered_map; @@ -71,11 +73,14 @@ private: bool quit {false}; AsynchronousMetricValues values; + /// Some values are incremental and we have to calculate the difference. + /// On first run we will only collect the values to subtract later. + bool first_run = true; + #if defined(OS_LINUX) MemoryStatisticsOS memory_stat; std::optional meminfo; - std::optional mounts; std::optional loadavg; std::optional proc_stat; std::optional cpuinfo; @@ -83,6 +88,39 @@ private: std::optional sockstat; std::optional netstat; std::optional file_nr; + std::optional uptime; + std::vector> thermal; + + struct ProcStatValuesCPU + { + uint64_t user; + uint64_t nice; + uint64_t system; + uint64_t idle; + uint64_t iowait; + uint64_t irq; + uint64_t softirq; + uint64_t steal; + uint64_t guest; + uint64_t guest_nice; + + void read(ReadBuffer & in); + ProcStatValuesCPU operator-(const ProcStatValuesCPU & other) const; + }; + + struct ProcStatValuesOther + { + uint64_t interrupts; + uint64_t context_switches; + uint64_t processes_created; + + ProcStatValuesOther operator-(const ProcStatValuesOther & other) const; + }; + + ProcStatValuesCPU proc_stat_values_all_cpus{}; + ProcStatValuesOther proc_stat_values_other{}; + std::vector proc_stat_values_per_cpu; + #endif std::unique_ptr thread; From 08aca329bd111cea866ba8bba26504dc9e7cff34 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Mon, 5 Jul 2021 00:22:58 +0300 Subject: [PATCH 759/931] Development --- src/Common/DiskStatisticsOS.cpp | 52 ------ src/Common/DiskStatisticsOS.h | 35 ---- src/Common/MemoryInfoOS.cpp | 80 ---------- src/Common/MemoryInfoOS.h | 41 ----- src/Common/ProcessorStatisticsOS.cpp | 194 ----------------------- src/Common/ProcessorStatisticsOS.h | 90 ----------- src/Interpreters/AsynchronousMetrics.cpp | 59 +++++-- src/Interpreters/AsynchronousMetrics.h | 5 +- 8 files changed, 48 insertions(+), 508 deletions(-) delete mode 100644 src/Common/DiskStatisticsOS.cpp delete mode 100644 src/Common/DiskStatisticsOS.h delete mode 100644 src/Common/MemoryInfoOS.cpp delete mode 100644 src/Common/MemoryInfoOS.h delete mode 100644 src/Common/ProcessorStatisticsOS.cpp delete mode 100644 src/Common/ProcessorStatisticsOS.h diff --git a/src/Common/DiskStatisticsOS.cpp b/src/Common/DiskStatisticsOS.cpp deleted file mode 100644 index 1b404be07fe..00000000000 --- a/src/Common/DiskStatisticsOS.cpp +++ /dev/null @@ -1,52 +0,0 @@ 
-#if defined(OS_LINUX) - -#include -#include - -#include -#include - - -namespace DB -{ - -namespace ErrorCodes -{ - extern const int CANNOT_STATVFS; -} - - -DiskStatisticsOS::Data DiskStatisticsOS::get() -{ - ReadBufferFromFile mounts_in("/proc/mounts", 4096 /* arbitrary small buffer */); - - Data data{}; - - std::string fs_device; - std::string fs_path; - - while (!mounts_in.eof()) - { - readStringUntilWhitespace(fs_device, mounts_in); - skipWhitespaceIfAny(mounts_in); - readStringUntilWhitespace(fs_path, mounts_in); - skipWhitespaceIfAny(mounts_in); - - /// Only real devices - if (!fs_device.starts_with("/dev/") || fs_device.starts_with("/dev/loop")) - continue; - - struct statvfs stat = getStatVFS(fs_path); - - data.total_bytes += (stat.f_blocks) * stat.f_bsize; - data.used_bytes += (stat.f_blocks - stat.f_bfree) * stat.f_bsize; - data.total_inodes += stat.f_files; - data.used_inodes += stat.f_files - stat.f_ffree; - } - - return data; -} - -} - -#endif diff --git a/src/Common/DiskStatisticsOS.h b/src/Common/DiskStatisticsOS.h deleted file mode 100644 index 390846e4b6c..00000000000 --- a/src/Common/DiskStatisticsOS.h +++ /dev/null @@ -1,35 +0,0 @@ -#pragma once -#if defined (OS_LINUX) - -#include - -#include - - -namespace DB -{ - -class ReadBuffer; - - -/** Opens file /proc/mounts, reads all mounted filesystems and - * calculates disk usage. - */ -class DiskStatisticsOS -{ -public: - // In bytes - struct Data - { - uint64_t total_bytes; - uint64_t used_bytes; - uint64_t total_inodes; - uint64_t used_inodes; - }; - - Data get(); -}; - -} - -#endif diff --git a/src/Common/MemoryInfoOS.cpp b/src/Common/MemoryInfoOS.cpp deleted file mode 100644 index 7b712a0bb06..00000000000 --- a/src/Common/MemoryInfoOS.cpp +++ /dev/null @@ -1,80 +0,0 @@ -#if defined(OS_LINUX) - -#include -#include -#include - -#include "MemoryInfoOS.h" - -#include - -#include -#include - -namespace DB -{ - -namespace -{ - template - void readIntTextAndSkipWhitespaceIfAny(T & x, ReadBuffer & buf) - { - readIntText(x, buf); - skipWhitespaceIfAny(buf); - } - - void readStringUntilWhitespaceAndSkipWhitespaceIfAny(String & s, ReadBuffer & buf) - { - readStringUntilWhitespace(s, buf); - skipWhitespaceIfAny(buf); - } - - std::pair readField(ReadBuffer & meminfo_in) - { - String key; - uint64_t val; - - readStringUntilWhitespaceAndSkipWhitespaceIfAny(key, meminfo_in); - readIntTextAndSkipWhitespaceIfAny(val, meminfo_in); - skipToNextLineOrEOF(meminfo_in); - - // Delete the read ":" from the end - key.pop_back(); - - return std::make_pair(key, val); - } -} - -static constexpr auto meminfo_filename = "/proc/meminfo"; - -static constexpr size_t READ_BUFFER_BUF_SIZE = (64 << 10); - - -MemoryInfoOS::Data MemoryInfoOS::get() -{ - ReadBufferFromFile meminfo_in(meminfo_filename, READ_BUFFER_BUF_SIZE, O_RDONLY | O_CLOEXEC); - - MemoryInfoOS::Data data; - String field_name; - - std::unordered_map meminfo; - - while (!meminfo_in.eof()) - meminfo.insert(readField(meminfo_in)); - - data.total = meminfo["MemTotal"]; - data.free = meminfo["MemFree"]; - data.buffers = meminfo["Buffers"]; - data.cached = meminfo["Cached"]; - data.swap_total = meminfo["SwapTotal"]; - data.swap_cached = meminfo["SwapCached"]; - data.swap_free = meminfo["SwapFree"]; - - data.free_and_cached = data.free + data.cached; - - return data; -} - -} - -#endif diff --git a/src/Common/MemoryInfoOS.h b/src/Common/MemoryInfoOS.h deleted file mode 100644 index 4390c9d5697..00000000000 --- a/src/Common/MemoryInfoOS.h +++ /dev/null @@ -1,41 +0,0 @@ -#pragma once -#if 
defined(OS_LINUX) - -#include -#include -#include - -#include - -#include - -namespace DB -{ - -/** Opens file /proc/meminfo and reads statistics about memory usage. - * This is Linux specific. - * See: man procfs - */ -class MemoryInfoOS -{ -public: - // In kB - struct Data - { - uint64_t total; - uint64_t free; - uint64_t buffers; - uint64_t cached; - uint64_t free_and_cached; - - uint64_t swap_total; - uint64_t swap_free; - uint64_t swap_cached; - }; - - Data get(); -}; - -} - -#endif diff --git a/src/Common/ProcessorStatisticsOS.cpp b/src/Common/ProcessorStatisticsOS.cpp deleted file mode 100644 index 9b43fa428a9..00000000000 --- a/src/Common/ProcessorStatisticsOS.cpp +++ /dev/null @@ -1,194 +0,0 @@ -#if defined(OS_LINUX) - -#include -#include -#include -#include - -#include "ProcessorStatisticsOS.h" - -#include "Poco/String.h" - -#include - -#include - -#include - -#include -#include - -namespace DB -{ - -namespace -{ - template - void readIntTextAndSkipWhitespaceIfAny(T & x, ReadBuffer & buf) - { - readIntText(x, buf); - skipWhitespaceIfAny(buf); - } - - void readStringAndSkipWhitespaceIfAny(String & s, ReadBuffer & buf) - { - readString(s, buf); - skipWhitespaceIfAny(buf); - } - - void readStringUntilWhitespaceAndSkipWhitespaceIfAny(String & s, ReadBuffer & buf) - { - readStringUntilWhitespace(s, buf); - skipWhitespaceIfAny(buf); - } - - void readCharAndSkipWhitespaceIfAny(char & c, ReadBuffer & buf) - { - readChar(c, buf); - skipWhitespaceIfAny(buf); - } - - void readFloatAndSkipWhitespaceIfAny(float & f, ReadBuffer & buf) - { - readFloatText(f, buf); - skipWhitespaceIfAny(buf); - } -} - -static constexpr auto loadavg_filename = "/proc/loadavg"; -static constexpr auto procst_filename = "/proc/stat"; -static constexpr auto cpuinfo_filename = "/proc/cpuinfo"; - -static const uint64_t USER_HZ = static_cast(sysconf(_SC_CLK_TCK)); - -static constexpr size_t READ_BUFFER_BUF_SIZE = (64 << 10); - -ProcessorStatisticsOS::ProcessorStatisticsOS() -{ - ProcStLoad unused; - calcStLoad(unused); -} - -ProcessorStatisticsOS::~ProcessorStatisticsOS() {} - -ProcessorStatisticsOS::Data ProcessorStatisticsOS::ProcessorStatisticsOS::get() -{ - Data data; - readLoadavg(data.loadavg); - calcStLoad(data.stload); - readFreq(data.freq); - return data; -} - -void ProcessorStatisticsOS::readLoadavg(ProcLoadavg& loadavg) -{ - ReadBufferFromFile loadavg_in(loadavg_filename, READ_BUFFER_BUF_SIZE, O_RDONLY | O_CLOEXEC); - - readFloatAndSkipWhitespaceIfAny(loadavg.avg1, loadavg_in); - readFloatAndSkipWhitespaceIfAny(loadavg.avg5, loadavg_in); - readFloatAndSkipWhitespaceIfAny(loadavg.avg15, loadavg_in); -} - -void ProcessorStatisticsOS::calcStLoad(ProcStLoad & stload) -{ - ProcTime cur_proc_time; - readProcTimeAndProcesses(cur_proc_time, stload); - - std::time_t cur_time = std::time(nullptr); - float time_dif = static_cast(cur_time - last_stload_call_time); - - stload.user_time = (cur_proc_time.user - last_proc_time.user) / time_dif; - stload.nice_time = (cur_proc_time.nice - last_proc_time.nice) / time_dif; - stload.system_time = (cur_proc_time.system - last_proc_time.system) / time_dif; - stload.idle_time = (cur_proc_time.idle - last_proc_time.idle) / time_dif; - stload.iowait_time = (cur_proc_time.iowait - last_proc_time.iowait) / time_dif; - stload.steal_time = (cur_proc_time.steal - last_proc_time.steal) / time_dif; - stload.guest_time = (cur_proc_time.guest - last_proc_time.guest) / time_dif; - stload.guest_nice_time = (cur_proc_time.guest_nice - last_proc_time.guest_nice) / time_dif; - - 
last_stload_call_time = cur_time; - last_proc_time = cur_proc_time; -} - -void ProcessorStatisticsOS::readProcTimeAndProcesses(ProcTime & proc_time, ProcStLoad & stload) -{ - ReadBufferFromFile procst_in(procst_filename, READ_BUFFER_BUF_SIZE, O_RDONLY | O_CLOEXEC); - - String field_name, field_val; - uint64_t unused; - - readStringUntilWhitespaceAndSkipWhitespaceIfAny(field_name, procst_in); - - readIntTextAndSkipWhitespaceIfAny(proc_time.user, procst_in); - readIntTextAndSkipWhitespaceIfAny(proc_time.nice, procst_in); - readIntTextAndSkipWhitespaceIfAny(proc_time.system, procst_in); - readIntTextAndSkipWhitespaceIfAny(proc_time.idle, procst_in); - readIntTextAndSkipWhitespaceIfAny(proc_time.iowait, procst_in); - proc_time.user /= USER_HZ; - proc_time.nice /= USER_HZ; - proc_time.system /= USER_HZ; - proc_time.idle /= USER_HZ; - proc_time.iowait /= USER_HZ; - - readIntTextAndSkipWhitespaceIfAny(unused, procst_in); - readIntTextAndSkipWhitespaceIfAny(unused, procst_in); - - readIntTextAndSkipWhitespaceIfAny(proc_time.steal, procst_in); - readIntTextAndSkipWhitespaceIfAny(proc_time.guest, procst_in); - readIntTextAndSkipWhitespaceIfAny(proc_time.guest_nice, procst_in); - proc_time.steal /= USER_HZ; - proc_time.guest /= USER_HZ; - proc_time.guest_nice /= USER_HZ; - - do - { - readStringUntilWhitespaceAndSkipWhitespaceIfAny(field_name, procst_in); - readStringAndSkipWhitespaceIfAny(field_val, procst_in); - } while (field_name != String("processes")); - - stload.processes = static_cast(std::stoul(field_val)); - - readStringUntilWhitespaceAndSkipWhitespaceIfAny(field_name, procst_in); - readIntTextAndSkipWhitespaceIfAny(stload.procs_running, procst_in); - - readStringUntilWhitespaceAndSkipWhitespaceIfAny(field_name, procst_in); - readIntTextAndSkipWhitespaceIfAny(stload.procs_blocked, procst_in); -} - -void ProcessorStatisticsOS::readFreq(ProcFreq & freq) -{ - ReadBufferFromFile cpuinfo_in(cpuinfo_filename, READ_BUFFER_BUF_SIZE, O_RDONLY | O_CLOEXEC); - - String field_name, field_val; - char unused; - int cpu_count = 0; - freq.max = freq.min = freq.avg = 0; - - do - { - do - { - readStringAndSkipWhitespaceIfAny(field_name, cpuinfo_in); - } while (!cpuinfo_in.eof() && field_name != String("cpu MHz")); - - if (cpuinfo_in.eof()) - break; - - readCharAndSkipWhitespaceIfAny(unused, cpuinfo_in); - readStringUntilWhitespaceAndSkipWhitespaceIfAny(field_val, cpuinfo_in); - - cpu_count++; - - float cur_cpu_freq = stof(field_val); - - freq.avg += cur_cpu_freq; - freq.max = (cpu_count == 1 ? cur_cpu_freq : std::max(freq.max, cur_cpu_freq)); - freq.min = (cpu_count == 1 ? cur_cpu_freq : std::min(freq.min, cur_cpu_freq)); - } while (true); - - freq.avg /= static_cast(cpu_count); -} - -} - -#endif diff --git a/src/Common/ProcessorStatisticsOS.h b/src/Common/ProcessorStatisticsOS.h deleted file mode 100644 index 10b6d050b8c..00000000000 --- a/src/Common/ProcessorStatisticsOS.h +++ /dev/null @@ -1,90 +0,0 @@ -#pragma once -#if defined(OS_LINUX) - -#include -#include - -#include - -#include - -namespace DB -{ - -/** Opens files: /proc/loadavg, /proc/stat, /proc/cpuinfo and reads processor statistics in get() method. - * This is Linux specific. 
- * See: man procfs - */ -class ProcessorStatisticsOS -{ -public: - struct ProcLoadavg - { - float avg1; - float avg5; - float avg15; - }; - - struct ProcStLoad - { - float user_time; - float nice_time; - float system_time; - float idle_time; - float iowait_time; - float steal_time; - float guest_time; - float guest_nice_time; - - uint32_t processes; - uint32_t procs_running; - uint32_t procs_blocked; - }; - - struct ProcFreq - { - float max; - float min; - float avg; - }; - - struct Data - { - ProcLoadavg loadavg; - ProcStLoad stload; - ProcFreq freq; - }; - - ProcessorStatisticsOS(); - ~ProcessorStatisticsOS(); - - Data get(); - -private: - struct ProcTime - { - // The amount of time, measured in seconds - uint64_t user; - uint64_t nice; - uint64_t system; - uint64_t idle; - uint64_t iowait; - uint64_t steal; - uint64_t guest; - uint64_t guest_nice; - }; - - void readLoadavg(ProcLoadavg & loadavg); - void calcStLoad(ProcStLoad & stload); - void readFreq(ProcFreq & freq); - - void readProcTimeAndProcesses(ProcTime & proc_time, ProcStLoad & stload); - -private: - std::time_t last_stload_call_time; - ProcTime last_proc_time; -}; - -} - -#endif diff --git a/src/Interpreters/AsynchronousMetrics.cpp b/src/Interpreters/AsynchronousMetrics.cpp index 89196b5e25f..f7e54c661b4 100644 --- a/src/Interpreters/AsynchronousMetrics.cpp +++ b/src/Interpreters/AsynchronousMetrics.cpp @@ -7,6 +7,7 @@ #include #include #include +#include #include #include #include @@ -78,9 +79,6 @@ AsynchronousMetrics::AsynchronousMetrics( openFileIfExists("/proc/loadavg", loadavg); openFileIfExists("/proc/stat", proc_stat); openFileIfExists("/proc/cpuinfo", cpuinfo); - openFileIfExists("/proc/schedstat", schedstat); - openFileIfExists("/proc/sockstat", sockstat); - openFileIfExists("/proc/netstat", netstat); openFileIfExists("/proc/sys/fs/file-nr", file_nr); openFileIfExists("/proc/uptime", uptime); @@ -617,22 +615,57 @@ void AsynchronousMetrics::update() readText(open_files, *file_nr); new_values["OSOpenFiles"] = open_files; } -#endif - - - /// Process disk usage according to OS -#if defined(OS_LINUX) + for (size_t i = 0, size = thermal.size(); i < size; ++i) { - DiskStatisticsOS::Data data = disk_stat.get(); + ReadBufferFromFile & in = *thermal[i]; - new_values["FilesystemsTotalBytes"] = data.total_bytes; - new_values["FilesystemsUsedBytes"] = data.used_bytes; - new_values["FilesystemsTotalINodes"] = data.total_inodes; - new_values["FilesystemsUsedINodes"] = data.used_inodes; + in.rewind(); + uint64_t temperature = 0; + readText(temperature, in); + new_values[fmt::format("Temperature{}", i)] = temperature * 0.001; } #endif + /// Free space in filesystems at data path and logs path. 
+ { + auto stat = getStatVFS(getContext()->getPath()); + + new_values["FilesystemMainPathTotalBytes"] = stat.f_blocks * stat.f_bsize; + new_values["FilesystemMainPathAvailableBytes"] = stat.f_bavail * stat.f_bsize; + new_values["FilesystemMainPathUsedBytes"] = (stat.f_blocks - stat.f_bavail) * stat.f_bsize; + new_values["FilesystemMainPathTotalINodes"] = stat.f_files; + new_values["FilesystemMainPathAvailableINodes"] = stat.f_favail; + new_values["FilesystemMainPathUsedINodes"] = stat.f_files - stat.f_favail; + } + + { + auto stat = getStatVFS("."); + + new_values["FilesystemLogsPathTotalBytes"] = stat.f_blocks * stat.f_bsize; + new_values["FilesystemLogsPathAvailableBytes"] = stat.f_bavail * stat.f_bsize; + new_values["FilesystemLogsPathUsedBytes"] = (stat.f_blocks - stat.f_bavail) * stat.f_bsize; + new_values["FilesystemLogsPathTotalINodes"] = stat.f_files; + new_values["FilesystemLogsPathAvailableINodes"] = stat.f_favail; + new_values["FilesystemLogsPathUsedINodes"] = stat.f_files - stat.f_favail; + } + + /// Free and total space on every configured disk. + { + DisksMap disks_map = getContext()->getDisksMap(); + for (const auto & [name, disk] : disks_map) + { + auto total = disk->getTotalSpace(); + auto available = disk->getAvailableSpace(); + auto unreserved = disk->getUnreservedSpace(); + + new_values[fmt::format("DiskTotal_{}", name)] = total; + new_values[fmt::format("DiskUsed_{}", name)] = total - available; + new_values[fmt::format("DiskAvailable_{}", name)] = available; + new_values[fmt::format("DiskUnreserved_{}", name)] = unreserved; + } + } + { auto databases = DatabaseCatalog::instance().getDatabases(); diff --git a/src/Interpreters/AsynchronousMetrics.h b/src/Interpreters/AsynchronousMetrics.h index 9f6e63f6ce6..247c9858129 100644 --- a/src/Interpreters/AsynchronousMetrics.h +++ b/src/Interpreters/AsynchronousMetrics.h @@ -84,13 +84,12 @@ private: std::optional loadavg; std::optional proc_stat; std::optional cpuinfo; - std::optional schedstat; - std::optional sockstat; - std::optional netstat; std::optional file_nr; std::optional uptime; std::vector> thermal; + /// TODO: IO load, Network rx/tx, sockets, EDAC. + struct ProcStatValuesCPU { uint64_t user; From 3a10d3802b1faf5c60a584ac602e206237093e65 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Mon, 5 Jul 2021 00:33:00 +0300 Subject: [PATCH 760/931] Development --- src/Interpreters/AsynchronousMetrics.cpp | 4 ++++ src/Interpreters/AsynchronousMetrics.h | 3 --- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/src/Interpreters/AsynchronousMetrics.cpp b/src/Interpreters/AsynchronousMetrics.cpp index f7e54c661b4..add448b129b 100644 --- a/src/Interpreters/AsynchronousMetrics.cpp +++ b/src/Interpreters/AsynchronousMetrics.cpp @@ -533,6 +533,10 @@ void AsynchronousMetrics::update() } else if (name == "MemFree:") { + /// We cannot simply name this metric "Free", because it confuses users. + /// See https://www.linuxatemyram.com/ + /// For convenience we also provide OSMemoryFreePlusCached, that should be somewhat similar to OSMemoryAvailable. 
+        free_plus_cached_bytes += bytes;
         new_values["OSMemoryFreeWithoutCached"] = bytes;
     }
diff --git a/src/Interpreters/AsynchronousMetrics.h b/src/Interpreters/AsynchronousMetrics.h
index 247c9858129..2a2d434c007 100644
--- a/src/Interpreters/AsynchronousMetrics.h
+++ b/src/Interpreters/AsynchronousMetrics.h
@@ -2,9 +2,6 @@

 #include 
 #include 
-#include 
-#include 
-#include 
 #include 
 #include 

From b5840210c1c2def061ddec2201ca77d9b0007a8c Mon Sep 17 00:00:00 2001
From: Alexey Milovidov 
Date: Mon, 5 Jul 2021 00:54:46 +0300
Subject: [PATCH 761/931] Adjustments

---
 src/Interpreters/AsynchronousMetrics.cpp | 69 +++++++++++++++++++-----
 1 file changed, 55 insertions(+), 14 deletions(-)

diff --git a/src/Interpreters/AsynchronousMetrics.cpp b/src/Interpreters/AsynchronousMetrics.cpp
index add448b129b..ec814f96da1 100644
--- a/src/Interpreters/AsynchronousMetrics.cpp
+++ b/src/Interpreters/AsynchronousMetrics.cpp
@@ -240,24 +240,39 @@ static void saveAllArenasMetric(AsynchronousMetricValues & values,
 void AsynchronousMetrics::ProcStatValuesCPU::read(ReadBuffer & in)
 {
     readText(user, in);
-    skipWhitespaceIfAny(in);
+    skipWhitespaceIfAny(in, true);
     readText(nice, in);
-    skipWhitespaceIfAny(in);
+    skipWhitespaceIfAny(in, true);
     readText(system, in);
-    skipWhitespaceIfAny(in);
+    skipWhitespaceIfAny(in, true);
     readText(idle, in);
-    skipWhitespaceIfAny(in);
+    skipWhitespaceIfAny(in, true);
     readText(iowait, in);
-    skipWhitespaceIfAny(in);
+    skipWhitespaceIfAny(in, true);
     readText(irq, in);
-    skipWhitespaceIfAny(in);
+    skipWhitespaceIfAny(in, true);
     readText(softirq, in);
-    skipWhitespaceIfAny(in);
-    readText(steal, in);
-    skipWhitespaceIfAny(in);
-    readText(guest, in);
-    skipWhitespaceIfAny(in);
-    readText(guest_nice, in);
+
+    /// Just in case for old Linux kernels, we check if these values are present.
+
+    if (!checkChar('\n', in))
+    {
+        skipWhitespaceIfAny(in, true);
+        readText(steal, in);
+    }
+
+    if (!checkChar('\n', in))
+    {
+        skipWhitespaceIfAny(in, true);
+        readText(guest, in);
+    }
+
+    if (!checkChar('\n', in))
+    {
+        skipWhitespaceIfAny(in, true);
+        readText(guest_nice, in);
+    }
+
     skipToNextLineOrEOF(in);
 }

@@ -410,8 +425,10 @@ void AsynchronousMetrics::update()
         throwFromErrno("Cannot call 'sysconf' to obtain system HZ", ErrorCodes::CANNOT_SYSCONF);

     double multiplier = 1.0 / hz / update_period.count();
+    size_t num_cpus = 0;

     ProcStatValuesOther current_other_values{};
+    ProcStatValuesCPU delta_values_all_cpus{};

     while (!proc_stat->eof())
     {
@@ -445,7 +462,12 @@ void AsynchronousMetrics::update()

             String cpu_suffix;
             if (!cpu_num_str.empty())
+            {
                 cpu_suffix = "CPU" + cpu_num_str;
+                ++num_cpus;
+            }
+            else
+                delta_values_all_cpus = delta_values;

             new_values["OSUserTime" + cpu_suffix] = delta_values.user * multiplier;
             new_values["OSNiceTime" + cpu_suffix] = delta_values.nice * multiplier;
@@ -501,6 +523,20 @@ void AsynchronousMetrics::update()
             new_values["OSInterrupts"] = delta_values.interrupts * multiplier;
             new_values["OSContextSwitches"] = delta_values.context_switches * multiplier;
             new_values["OSProcessesCreated"] = delta_values.processes_created * multiplier;
+
+            /// Also write values normalized to 0..1 by dividing by the number of CPUs.
+            /// These values are suitable for averaging across a cluster of non-uniform servers.
+ + new_values["OSUserTimeNormalized"] = delta_values_all_cpus.user * multiplier / num_cpus; + new_values["OSNiceTimeNormalized"] = delta_values_all_cpus.nice * multiplier / num_cpus; + new_values["OSSystemTimeNormalized"] = delta_values_all_cpus.system * multiplier / num_cpus; + new_values["OSIdleTimeNormalized"] = delta_values_all_cpus.idle * multiplier / num_cpus; + new_values["OSIOWaitTimeNormalized"] = delta_values_all_cpus.iowait * multiplier / num_cpus; + new_values["OSIrqTimeNormalized"] = delta_values_all_cpus.irq * multiplier / num_cpus; + new_values["OSSoftIrqTimeNormalized"] = delta_values_all_cpus.softirq * multiplier / num_cpus; + new_values["OSStealTimeNormalized"] = delta_values_all_cpus.steal * multiplier / num_cpus; + new_values["OSGuestTimeNormalized"] = delta_values_all_cpus.guest * multiplier / num_cpus; + new_values["OSGuestNiceTimeNormalized"] = delta_values_all_cpus.guest_nice * multiplier / num_cpus; } proc_stat_values_other = current_other_values; @@ -516,13 +552,13 @@ void AsynchronousMetrics::update() { String name; readStringUntilWhitespace(name, *meminfo); - skipWhitespaceIfAny(*meminfo); + skipWhitespaceIfAny(*meminfo, true); uint64_t kb = 0; readText(kb, *meminfo); if (kb) { - skipWhitespaceIfAny(*meminfo); + skipWhitespaceIfAny(*meminfo, true); assertString("kB", *meminfo); uint64_t bytes = kb * 1024; @@ -660,6 +696,11 @@ void AsynchronousMetrics::update() for (const auto & [name, disk] : disks_map) { auto total = disk->getTotalSpace(); + + /// Some disks don't support information about the space. + if (!total) + continue; + auto available = disk->getAvailableSpace(); auto unreserved = disk->getUnreservedSpace(); From 0f8ea9b8f6dbf4170352e4ac4feed1afd1ea0488 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Mon, 5 Jul 2021 01:33:32 +0300 Subject: [PATCH 762/931] More metrics --- src/Interpreters/AsynchronousMetricLog.cpp | 2 +- src/Interpreters/AsynchronousMetrics.cpp | 25 ++++++++++++++++------ src/Interpreters/AsynchronousMetrics.h | 3 ++- 3 files changed, 22 insertions(+), 8 deletions(-) diff --git a/src/Interpreters/AsynchronousMetricLog.cpp b/src/Interpreters/AsynchronousMetricLog.cpp index 79e2d513d5c..c7003cff169 100644 --- a/src/Interpreters/AsynchronousMetricLog.cpp +++ b/src/Interpreters/AsynchronousMetricLog.cpp @@ -18,7 +18,7 @@ NamesAndTypesList AsynchronousMetricLogElement::getNamesAndTypes() {"event_date", std::make_shared()}, {"event_time", std::make_shared()}, {"event_time_microseconds", std::make_shared(6)}, - {"name", std::make_shared(std::make_shared())}, + {"metric", std::make_shared(std::make_shared())}, {"value", std::make_shared(),} }; } diff --git a/src/Interpreters/AsynchronousMetrics.cpp b/src/Interpreters/AsynchronousMetrics.cpp index ec814f96da1..26c9a2ad65c 100644 --- a/src/Interpreters/AsynchronousMetrics.cpp +++ b/src/Interpreters/AsynchronousMetrics.cpp @@ -98,7 +98,7 @@ void AsynchronousMetrics::start() { /// Update once right now, to make metrics available just after server start /// (without waiting for asynchronous_metrics_update_period_s). - update(); + update(std::chrono::system_clock::now()); thread = std::make_unique([this] { run(); }); } @@ -158,10 +158,12 @@ void AsynchronousMetrics::run() while (true) { + auto next_update_time = get_next_update_time(update_period); + { // Wait first, so that the first metric collection is also on even time. 
std::unique_lock lock{mutex}; - if (wait_cond.wait_until(lock, get_next_update_time(update_period), + if (wait_cond.wait_until(lock, next_update_time, [this] { return quit; })) { break; @@ -170,7 +172,7 @@ void AsynchronousMetrics::run() try { - update(); + update(next_update_time); } catch (...) { @@ -306,10 +308,19 @@ AsynchronousMetrics::ProcStatValuesOther::operator-(const AsynchronousMetrics::P #endif -void AsynchronousMetrics::update() +void AsynchronousMetrics::update(std::chrono::system_clock::time_point update_time) { + Stopwatch watch; + AsynchronousMetricValues new_values; + auto current_time = std::chrono::system_clock::now(); + auto time_after_previous_update = current_time - previous_update_time; + previous_update_time = update_time; + + /// This is also a good indicator of system responsiveness. + new_values["Jitter"] = std::chrono::duration_cast(current_time - update_time).count() / 1e9; + { if (auto mark_cache = getContext()->getMarkCache()) { @@ -424,7 +435,7 @@ void AsynchronousMetrics::update() if (-1 == hz) throwFromErrno("Cannot call 'sysconf' to obtain system HZ", ErrorCodes::CANNOT_SYSCONF); - double multiplier = 1.0 / hz / update_period.count(); + double multiplier = 1.0 / hz / (std::chrono::duration_cast(time_after_previous_update).count() / 1e9); size_t num_cpus = 0; ProcStatValuesOther current_other_values{}; @@ -884,7 +895,9 @@ void AsynchronousMetrics::update() /// Add more metrics as you wish. - // Log the new metrics. + new_values["AsynchronousMetricsCalculationTimeSpent"] = watch.elapsedSeconds(); + + /// Log the new metrics. if (auto log = getContext()->getAsynchronousMetricLog()) { log->addValues(new_values); diff --git a/src/Interpreters/AsynchronousMetrics.h b/src/Interpreters/AsynchronousMetrics.h index 2a2d434c007..95ba5492d86 100644 --- a/src/Interpreters/AsynchronousMetrics.h +++ b/src/Interpreters/AsynchronousMetrics.h @@ -73,6 +73,7 @@ private: /// Some values are incremental and we have to calculate the difference. /// On first run we will only collect the values to subtract later. bool first_run = true; + std::chrono::system_clock::time_point previous_update_time; #if defined(OS_LINUX) MemoryStatisticsOS memory_stat; @@ -122,7 +123,7 @@ private: std::unique_ptr thread; void run(); - void update(); + void update(std::chrono::system_clock::time_point update_time); }; } From c059d0a0ee1e13c73cdefb821cb40aa01f6981c1 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Mon, 5 Jul 2021 01:41:09 +0300 Subject: [PATCH 763/931] More metrics --- programs/server/Server.cpp | 2 +- programs/server/config.xml | 4 +-- src/Interpreters/AsynchronousMetrics.cpp | 35 +++++++++--------------- 3 files changed, 16 insertions(+), 25 deletions(-) diff --git a/programs/server/Server.cpp b/programs/server/Server.cpp index 88f7564a7f2..28cf085e699 100644 --- a/programs/server/Server.cpp +++ b/programs/server/Server.cpp @@ -1159,7 +1159,7 @@ int Server::main(const std::vector & /*args*/) { /// This object will periodically calculate some metrics. 
AsynchronousMetrics async_metrics( - global_context, config().getUInt("asynchronous_metrics_update_period_s", 60), servers_to_start_before_tables, servers); + global_context, config().getUInt("asynchronous_metrics_update_period_s", 1), servers_to_start_before_tables, servers); attachSystemTablesAsync(*DatabaseCatalog::instance().getSystemDatabase(), async_metrics); for (const auto & listen_host : listen_hosts) diff --git a/programs/server/config.xml b/programs/server/config.xml index dd50a693403..6f0b228dda7 100644 --- a/programs/server/config.xml +++ b/programs/server/config.xml @@ -583,7 +583,7 @@ 9019 --> - + @@ -917,7 +917,7 @@ Asynchronous metrics are updated once a minute, so there is no need to flush more often. --> - 60000 + 7000 0 + 0 diff --git a/src/AggregateFunctions/AggregateFunctionAvg.h b/src/AggregateFunctions/AggregateFunctionAvg.h index 3835fd58c77..ad5c67d88d4 100644 --- a/src/AggregateFunctions/AggregateFunctionAvg.h +++ b/src/AggregateFunctions/AggregateFunctionAvg.h @@ -185,8 +185,8 @@ public: auto * denominator_type = toNativeType(b); static constexpr size_t denominator_offset = offsetof(Fraction, denominator); - auto * denominator_dst_ptr = b.CreatePointerCast(b.CreateConstGEP1_32(nullptr, aggregate_data_dst_ptr, denominator_offset), denominator_type->getPointerTo()); - auto * denominator_src_ptr = b.CreatePointerCast(b.CreateConstGEP1_32(nullptr, aggregate_data_src_ptr, denominator_offset), denominator_type->getPointerTo()); + auto * denominator_dst_ptr = b.CreatePointerCast(b.CreateConstInBoundsGEP1_64(nullptr, aggregate_data_dst_ptr, denominator_offset), denominator_type->getPointerTo()); + auto * denominator_src_ptr = b.CreatePointerCast(b.CreateConstInBoundsGEP1_64(nullptr, aggregate_data_src_ptr, denominator_offset), denominator_type->getPointerTo()); auto * denominator_dst_value = b.CreateLoad(denominator_type, denominator_dst_ptr); auto * denominator_src_value = b.CreateLoad(denominator_type, denominator_src_ptr); diff --git a/src/AggregateFunctions/AggregateFunctionAvgWeighted.h b/src/AggregateFunctions/AggregateFunctionAvgWeighted.h index 80e18f1a141..68d48803718 100644 --- a/src/AggregateFunctions/AggregateFunctionAvgWeighted.h +++ b/src/AggregateFunctions/AggregateFunctionAvgWeighted.h @@ -74,7 +74,7 @@ public: auto * denominator_type = toNativeType(b); static constexpr size_t denominator_offset = offsetof(Fraction, denominator); - auto * denominator_offset_ptr = b.CreateConstGEP1_32(nullptr, aggregate_data_ptr, denominator_offset); + auto * denominator_offset_ptr = b.CreateConstInBoundsGEP1_64(nullptr, aggregate_data_ptr, denominator_offset); auto * denominator_ptr = b.CreatePointerCast(denominator_offset_ptr, denominator_type->getPointerTo()); auto * weight_cast_to_denominator = nativeCast(b, arguments_types[1], argument_values[1], denominator_type); diff --git a/src/AggregateFunctions/AggregateFunctionIf.cpp b/src/AggregateFunctions/AggregateFunctionIf.cpp index e7c48c8988c..c074daf45be 100644 --- a/src/AggregateFunctions/AggregateFunctionIf.cpp +++ b/src/AggregateFunctions/AggregateFunctionIf.cpp @@ -139,7 +139,7 @@ public: if constexpr (result_is_nullable) b.CreateStore(llvm::ConstantInt::get(b.getInt8Ty(), 1), aggregate_data_ptr); - auto * aggregate_data_ptr_with_prefix_size_offset = b.CreateConstGEP1_32(nullptr, aggregate_data_ptr, this->prefix_size); + auto * aggregate_data_ptr_with_prefix_size_offset = b.CreateConstInBoundsGEP1_64(nullptr, aggregate_data_ptr, this->prefix_size); this->nested_function->compileAdd(b, 
aggregate_data_ptr_with_prefix_size_offset, { removeNullable(nullable_type) }, { wrapped_value }); b.CreateBr(join_block); @@ -290,7 +290,7 @@ public: if constexpr (result_is_nullable) b.CreateStore(llvm::ConstantInt::get(b.getInt8Ty(), 1), aggregate_data_ptr); - auto * aggregate_data_ptr_with_prefix_size_offset = b.CreateConstGEP1_32(nullptr, aggregate_data_ptr, this->prefix_size); + auto * aggregate_data_ptr_with_prefix_size_offset = b.CreateConstInBoundsGEP1_64(nullptr, aggregate_data_ptr, this->prefix_size); this->nested_function->compileAdd(b, aggregate_data_ptr_with_prefix_size_offset, non_nullable_types, wrapped_values); b.CreateBr(join_block); diff --git a/src/AggregateFunctions/AggregateFunctionMinMaxAny.h b/src/AggregateFunctions/AggregateFunctionMinMaxAny.h index 23dad0c097c..cc670fdf823 100644 --- a/src/AggregateFunctions/AggregateFunctionMinMaxAny.h +++ b/src/AggregateFunctions/AggregateFunctionMinMaxAny.h @@ -199,7 +199,7 @@ public: static constexpr size_t value_offset_from_structure = offsetof(SingleValueDataFixed, value); auto * type = toNativeType(builder); - auto * value_ptr_with_offset = b.CreateConstGEP1_32(nullptr, aggregate_data_ptr, value_offset_from_structure); + auto * value_ptr_with_offset = b.CreateConstInBoundsGEP1_64(nullptr, aggregate_data_ptr, value_offset_from_structure); auto * value_ptr = b.CreatePointerCast(value_ptr_with_offset, type->getPointerTo()); return value_ptr; diff --git a/src/AggregateFunctions/AggregateFunctionNull.h b/src/AggregateFunctions/AggregateFunctionNull.h index b7a67f2cc1b..7890e96ef66 100644 --- a/src/AggregateFunctions/AggregateFunctionNull.h +++ b/src/AggregateFunctions/AggregateFunctionNull.h @@ -207,7 +207,7 @@ public: if constexpr (result_is_nullable) b.CreateMemSet(aggregate_data_ptr, llvm::ConstantInt::get(b.getInt8Ty(), 0), this->prefix_size, llvm::assumeAligned(this->alignOfData())); - auto * aggregate_data_ptr_with_prefix_size_offset = b.CreateConstGEP1_32(nullptr, aggregate_data_ptr, this->prefix_size); + auto * aggregate_data_ptr_with_prefix_size_offset = b.CreateConstInBoundsGEP1_64(nullptr, aggregate_data_ptr, this->prefix_size); this->nested_function->compileCreate(b, aggregate_data_ptr_with_prefix_size_offset); } @@ -225,8 +225,8 @@ public: b.CreateStore(is_null_result_value, aggregate_data_dst_ptr); } - auto * aggregate_data_dst_ptr_with_prefix_size_offset = b.CreateConstGEP1_32(nullptr, aggregate_data_dst_ptr, this->prefix_size); - auto * aggregate_data_src_ptr_with_prefix_size_offset = b.CreateConstGEP1_32(nullptr, aggregate_data_src_ptr, this->prefix_size); + auto * aggregate_data_dst_ptr_with_prefix_size_offset = b.CreateConstInBoundsGEP1_64(nullptr, aggregate_data_dst_ptr, this->prefix_size); + auto * aggregate_data_src_ptr_with_prefix_size_offset = b.CreateConstInBoundsGEP1_64(nullptr, aggregate_data_src_ptr, this->prefix_size); this->nested_function->compileMerge(b, aggregate_data_dst_ptr_with_prefix_size_offset, aggregate_data_src_ptr_with_prefix_size_offset); } @@ -260,7 +260,7 @@ public: b.CreateBr(join_block); b.SetInsertPoint(if_not_null); - auto * aggregate_data_ptr_with_prefix_size_offset = b.CreateConstGEP1_32(nullptr, aggregate_data_ptr, this->prefix_size); + auto * aggregate_data_ptr_with_prefix_size_offset = b.CreateConstInBoundsGEP1_64(nullptr, aggregate_data_ptr, this->prefix_size); auto * nested_result = this->nested_function->compileGetResult(builder, aggregate_data_ptr_with_prefix_size_offset); b.CreateStore(b.CreateInsertValue(nullable_value, nested_result, {0}), nullable_value_ptr); 
b.CreateBr(join_block); @@ -351,7 +351,7 @@ public: if constexpr (result_is_nullable) b.CreateStore(llvm::ConstantInt::get(b.getInt8Ty(), 1), aggregate_data_ptr); - auto * aggregate_data_ptr_with_prefix_size_offset = b.CreateConstGEP1_32(nullptr, aggregate_data_ptr, this->prefix_size); + auto * aggregate_data_ptr_with_prefix_size_offset = b.CreateConstInBoundsGEP1_64(nullptr, aggregate_data_ptr, this->prefix_size); this->nested_function->compileAdd(b, aggregate_data_ptr_with_prefix_size_offset, { removeNullable(nullable_type) }, { wrapped_value }); b.CreateBr(join_block); @@ -479,7 +479,7 @@ public: if constexpr (result_is_nullable) b.CreateStore(llvm::ConstantInt::get(b.getInt8Ty(), 1), aggregate_data_ptr); - auto * aggregate_data_ptr_with_prefix_size_offset = b.CreateConstGEP1_32(nullptr, aggregate_data_ptr, this->prefix_size); + auto * aggregate_data_ptr_with_prefix_size_offset = b.CreateConstInBoundsGEP1_64(nullptr, aggregate_data_ptr, this->prefix_size); this->nested_function->compileAdd(b, aggregate_data_ptr_with_prefix_size_offset, arguments_types, wrapped_values); b.CreateBr(join_block); @@ -488,7 +488,7 @@ public: else { b.CreateStore(llvm::ConstantInt::get(b.getInt8Ty(), 1), aggregate_data_ptr); - auto * aggregate_data_ptr_with_prefix_size_offset = b.CreateConstGEP1_32(nullptr, aggregate_data_ptr, this->prefix_size); + auto * aggregate_data_ptr_with_prefix_size_offset = b.CreateConstInBoundsGEP1_64(nullptr, aggregate_data_ptr, this->prefix_size); this->nested_function->compileAdd(b, aggregate_data_ptr_with_prefix_size_offset, non_nullable_types, wrapped_values); } } diff --git a/src/Core/Settings.h b/src/Core/Settings.h index 31eaeaadbeb..28e46160a98 100644 --- a/src/Core/Settings.h +++ b/src/Core/Settings.h @@ -108,7 +108,7 @@ class IColumn; M(Bool, compile_expressions, true, "Compile some scalar functions and operators to native code.", 0) \ M(UInt64, min_count_to_compile_expression, 3, "The number of identical expressions before they are JIT-compiled", 0) \ M(Bool, compile_aggregate_expressions, true, "Compile aggregate functions to native code.", 0) \ - M(UInt64, min_count_to_compile_aggregate_expression, 0, "The number of identical aggreagte expressions before they are JIT-compiled", 0) \ + M(UInt64, min_count_to_compile_aggregate_expression, 3, "The number of identical aggregate expressions before they are JIT-compiled", 0) \ M(UInt64, group_by_two_level_threshold, 100000, "From what number of keys, a two-level aggregation starts. 0 - the threshold is not set.", 0) \ M(UInt64, group_by_two_level_threshold_bytes, 50000000, "From what size of the aggregation state in bytes, a two-level aggregation begins to be used. 0 - the threshold is not set. Two-level aggregation is used when at least one of the thresholds is triggered.", 0) \ M(Bool, distributed_aggregation_memory_efficient, true, "Is the memory-saving mode of distributed aggregation enabled.", 0) \ diff --git a/src/Interpreters/JIT/compileFunction.cpp b/src/Interpreters/JIT/compileFunction.cpp index 766c2290e42..18a2400d22f 100644 --- a/src/Interpreters/JIT/compileFunction.cpp +++ b/src/Interpreters/JIT/compileFunction.cpp @@ -168,7 +168,7 @@ static void compileFunction(llvm::Module & module, const IFunctionBase & functio for (size_t i = 0; i <= arg_types.size(); ++i) { const auto & type = i == arg_types.size() ? 
function.getResultType() : arg_types[i]; - auto * data = b.CreateLoad(data_type, b.CreateConstInBoundsGEP1_32(data_type, columns_arg, i)); + auto * data = b.CreateLoad(data_type, b.CreateConstInBoundsGEP1_64(data_type, columns_arg, i)); columns[i].data_init = b.CreatePointerCast(b.CreateExtractValue(data, {0}), toNativeType(b, removeNullable(type))->getPointerTo()); columns[i].null_init = type->isNullable() ? b.CreateExtractValue(data, {1}) : nullptr; } @@ -236,9 +236,9 @@ static void compileFunction(llvm::Module & module, const IFunctionBase & functio auto * cur_block = b.GetInsertBlock(); for (auto & col : columns) { - col.data->addIncoming(b.CreateConstInBoundsGEP1_32(nullptr, col.data, 1), cur_block); + col.data->addIncoming(b.CreateConstInBoundsGEP1_64(nullptr, col.data, 1), cur_block); if (col.null) - col.null->addIncoming(b.CreateConstInBoundsGEP1_32(nullptr, col.null, 1), cur_block); + col.null->addIncoming(b.CreateConstInBoundsGEP1_64(nullptr, col.null, 1), cur_block); } auto * value = b.CreateAdd(counter_phi, llvm::ConstantInt::get(size_type, 1)); @@ -295,7 +295,7 @@ static void compileCreateAggregateStatesFunctions(llvm::Module & module, const s { size_t aggregate_function_offset = function_to_compile.aggregate_data_offset; const auto * aggregate_function = function_to_compile.function; - auto * aggregation_place_with_offset = b.CreateConstInBoundsGEP1_32(nullptr, aggregate_data_place_arg, aggregate_function_offset); + auto * aggregation_place_with_offset = b.CreateConstInBoundsGEP1_64(nullptr, aggregate_data_place_arg, aggregate_function_offset); aggregate_function->compileCreate(b, aggregation_place_with_offset); } @@ -338,7 +338,7 @@ static void compileAddIntoAggregateStatesFunctions(llvm::Module & module, const for (size_t column_argument_index = 0; column_argument_index < function_arguments_size; ++column_argument_index) { const auto & argument_type = argument_types[column_argument_index]; - auto * data = b.CreateLoad(column_data_type, b.CreateConstInBoundsGEP1_32(column_data_type, columns_arg, previous_columns_size + column_argument_index)); + auto * data = b.CreateLoad(column_data_type, b.CreateConstInBoundsGEP1_64(column_data_type, columns_arg, previous_columns_size + column_argument_index)); data_placeholder.data_init = b.CreatePointerCast(b.CreateExtractValue(data, {0}), toNativeType(b, removeNullable(argument_type))->getPointerTo()); data_placeholder.null_init = argument_type->isNullable() ? 
b.CreateExtractValue(data, {1}) : nullptr; columns.emplace_back(data_placeholder); @@ -408,7 +408,7 @@ static void compileAddIntoAggregateStatesFunctions(llvm::Module & module, const arguments_values[column_argument_index] = nullable_value; } - auto * aggregation_place_with_offset = b.CreateConstInBoundsGEP1_32(nullptr, aggregation_place, aggregate_function_offset); + auto * aggregation_place_with_offset = b.CreateConstInBoundsGEP1_64(nullptr, aggregation_place, aggregate_function_offset); aggregate_function_ptr->compileAdd(b, aggregation_place_with_offset, arguments_types, arguments_values); previous_columns_size += function_arguments_size; @@ -419,13 +419,13 @@ static void compileAddIntoAggregateStatesFunctions(llvm::Module & module, const auto * cur_block = b.GetInsertBlock(); for (auto & col : columns) { - col.data->addIncoming(b.CreateConstInBoundsGEP1_32(nullptr, col.data, 1), cur_block); + col.data->addIncoming(b.CreateConstInBoundsGEP1_64(nullptr, col.data, 1), cur_block); if (col.null) - col.null->addIncoming(b.CreateConstInBoundsGEP1_32(nullptr, col.null, 1), cur_block); + col.null->addIncoming(b.CreateConstInBoundsGEP1_64(nullptr, col.null, 1), cur_block); } - places_phi->addIncoming(b.CreateConstInBoundsGEP1_32(nullptr, places_phi, 1), cur_block); + places_phi->addIncoming(b.CreateConstInBoundsGEP1_64(nullptr, places_phi, 1), cur_block); auto * value = b.CreateAdd(counter_phi, llvm::ConstantInt::get(size_type, 1)); counter_phi->addIncoming(value, cur_block); @@ -457,8 +457,8 @@ static void compileMergeAggregatesStates(llvm::Module & module, const std::vecto size_t aggregate_function_offset = function_to_compile.aggregate_data_offset; const auto * aggregate_function_ptr = function_to_compile.function; - auto * aggregate_data_place_merge_dst_with_offset = b.CreateConstInBoundsGEP1_32(nullptr, aggregate_data_place_dst_arg, aggregate_function_offset); - auto * aggregate_data_place_merge_src_with_offset = b.CreateConstInBoundsGEP1_32(nullptr, aggregate_data_place_src_arg, aggregate_function_offset); + auto * aggregate_data_place_merge_dst_with_offset = b.CreateConstInBoundsGEP1_64(nullptr, aggregate_data_place_dst_arg, aggregate_function_offset); + auto * aggregate_data_place_merge_src_with_offset = b.CreateConstInBoundsGEP1_64(nullptr, aggregate_data_place_src_arg, aggregate_function_offset); aggregate_function_ptr->compileMerge(b, aggregate_data_place_merge_dst_with_offset, aggregate_data_place_merge_src_with_offset); } @@ -490,7 +490,7 @@ static void compileInsertAggregatesIntoResultColumns(llvm::Module & module, cons for (size_t i = 0; i < functions.size(); ++i) { auto return_type = functions[i].function->getReturnType(); - auto * data = b.CreateLoad(column_data_type, b.CreateConstInBoundsGEP1_32(column_data_type, columns_arg, i)); + auto * data = b.CreateLoad(column_data_type, b.CreateConstInBoundsGEP1_64(column_data_type, columns_arg, i)); columns[i].data_init = b.CreatePointerCast(b.CreateExtractValue(data, {0}), toNativeType(b, removeNullable(return_type))->getPointerTo()); columns[i].null_init = return_type->isNullable() ? 
b.CreateExtractValue(data, {1}) : nullptr; } @@ -526,7 +526,7 @@ static void compileInsertAggregatesIntoResultColumns(llvm::Module & module, cons const auto * aggregate_function_ptr = functions[i].function; auto * aggregate_data_place = b.CreateLoad(b.getInt8Ty()->getPointerTo(), aggregate_data_place_phi); - auto * aggregation_place_with_offset = b.CreateConstInBoundsGEP1_32(nullptr, aggregate_data_place, aggregate_function_offset); + auto * aggregation_place_with_offset = b.CreateConstInBoundsGEP1_64(nullptr, aggregate_data_place, aggregate_function_offset); auto * final_value = aggregate_function_ptr->compileGetResult(b, aggregation_place_with_offset); @@ -546,16 +546,16 @@ static void compileInsertAggregatesIntoResultColumns(llvm::Module & module, cons auto * cur_block = b.GetInsertBlock(); for (auto & col : columns) { - col.data->addIncoming(b.CreateConstInBoundsGEP1_32(nullptr, col.data, 1), cur_block); + col.data->addIncoming(b.CreateConstInBoundsGEP1_64(nullptr, col.data, 1), cur_block); if (col.null) - col.null->addIncoming(b.CreateConstInBoundsGEP1_32(nullptr, col.null, 1), cur_block); + col.null->addIncoming(b.CreateConstInBoundsGEP1_64(nullptr, col.null, 1), cur_block); } auto * value = b.CreateAdd(counter_phi, llvm::ConstantInt::get(size_type, 1), "", true, true); counter_phi->addIncoming(value, cur_block); - aggregate_data_place_phi->addIncoming(b.CreateConstInBoundsGEP1_32(nullptr, aggregate_data_place_phi, 1), cur_block); + aggregate_data_place_phi->addIncoming(b.CreateConstInBoundsGEP1_64(nullptr, aggregate_data_place_phi, 1), cur_block); b.CreateCondBr(b.CreateICmpEQ(value, rows_count_arg), end, loop); diff --git a/tests/performance/jit_aggregate_functions.xml b/tests/performance/jit_aggregate_functions.xml index 31b621f7258..21683ef2004 100644 --- a/tests/performance/jit_aggregate_functions.xml +++ b/tests/performance/jit_aggregate_functions.xml @@ -3,6 +3,11 @@ hits_100m_single + + 1 + 0 + + CREATE TABLE jit_test_memory ( key UInt64, From cc137878882e5b42ea8e8bbb226416a9169d7563 Mon Sep 17 00:00:00 2001 From: vdimir Date: Mon, 5 Jul 2021 12:50:33 +0300 Subject: [PATCH 780/931] Minor style changes for (un)bin/hex --- src/Functions/FunctionsCoding.h | 95 ++++++++++++--------------------- 1 file changed, 33 insertions(+), 62 deletions(-) diff --git a/src/Functions/FunctionsCoding.h b/src/Functions/FunctionsCoding.h index f2e340aaeef..e9ec013d6eb 100644 --- a/src/Functions/FunctionsCoding.h +++ b/src/Functions/FunctionsCoding.h @@ -65,11 +65,6 @@ namespace ErrorCodes constexpr size_t uuid_bytes_length = 16; constexpr size_t uuid_text_length = 36; -namespace ErrorCodes -{ -extern const int NOT_IMPLEMENTED; -} - class FunctionIPv6NumToString : public IFunction { public: @@ -955,13 +950,15 @@ public: } }; +/// Encode number or string to string with binary or hexadecimal representation template -class Conversion : public IFunction +class EncodeToBinaryRepr : public IFunction { public: static constexpr auto name = Impl::name; static constexpr size_t word_size = Impl::word_size; - static FunctionPtr create(ContextPtr) { return std::make_shared(); } + + static FunctionPtr create(ContextPtr) { return std::make_shared(); } String getName() const override { return name; } @@ -1012,7 +1009,7 @@ public: } template - bool tryExecuteUInt(const IColumn *col, ColumnPtr &col_res) const + bool tryExecuteUInt(const IColumn * col, ColumnPtr & col_res) const { const ColumnVector * col_vec = checkAndGetColumn>(col); @@ -1071,16 +1068,8 @@ public: size_t size = in_offsets.size(); 
out_offsets.resize(size); - if (getName() == "bin") - { - out_vec.resize((in_vec.size() - size) * word_size + size); - } else if (getName() == "hex") - { - out_vec.resize(in_vec.size() * word_size - size); - } else - { - throw Exception("new function is not implemented for " + getName(), ErrorCodes::NOT_IMPLEMENTED); - } + /// reserve `word_size` bytes for each non trailing zero byte from input + `size` bytes for trailing zeros + out_vec.resize((in_vec.size() - size) * word_size + size); char * begin = reinterpret_cast(out_vec.data()); char * pos = begin; @@ -1187,13 +1176,14 @@ public: } }; +/// Decode number or string from string with binary or hexadecimal representation template -class UnConversion : public IFunction +class DecodeFromBinaryRepr : public IFunction { public: static constexpr auto name = Impl::name; static constexpr size_t word_size = Impl::word_size; - static FunctionPtr create(ContextPtr) { return std::make_shared(); } + static FunctionPtr create(ContextPtr) { return std::make_shared(); } String getName() const override { return name; } @@ -1227,18 +1217,7 @@ public: size_t size = in_offsets.size(); out_offsets.resize(size); - if (getName() == "unhex") - { - out_vec.resize(in_vec.size() / 2 + size); - } - else if (getName() == "unbin") - { - out_vec.resize(in_vec.size() / 8 + size); - } - else - { - throw Exception("new function is not implemented for " + getName(), ErrorCodes::NOT_IMPLEMENTED); - } + out_vec.resize(in_vec.size() / word_size + size); char * begin = reinterpret_cast(out_vec.data()); char * pos = begin; @@ -1248,7 +1227,7 @@ public: { size_t new_offset = in_offsets[i]; - Impl::unConversion(reinterpret_cast(&in_vec[prev_offset]), reinterpret_cast(&in_vec[new_offset - 1]), pos); + Impl::decode(reinterpret_cast(&in_vec[prev_offset]), reinterpret_cast(&in_vec[new_offset - 1]), pos); out_offsets[i] = pos - begin; @@ -1270,9 +1249,9 @@ public: struct HexImpl { -public: static constexpr auto name = "hex"; - static const size_t word_size = 2; + static constexpr size_t word_size = 2; + template static void executeOneUInt(T x, char *& out) { @@ -1287,7 +1266,7 @@ public: was_nonzero = true; writeHexByteUppercase(byte, out); - out += 2; + out += word_size; } *out = '\0'; ++out; @@ -1299,7 +1278,7 @@ public: { writeHexByteUppercase(*pos, out); ++pos; - out += 2; + out += word_size; } *out = '\0'; ++out; @@ -1334,15 +1313,10 @@ public: struct UnhexImpl { -public: static constexpr auto name = "unhex"; + static constexpr size_t word_size = 2; - static String getName() - { - return name; - } - - static void unConversion(const char * pos, const char * end, char *& out) + static void decode(const char * pos, const char * end, char *& out) { if ((end - pos) & 1) { @@ -1353,7 +1327,7 @@ public: while (pos < end) { *out = unhex2(pos); - pos += 2; + pos += word_size; ++out; } *out = '\0'; @@ -1363,16 +1337,16 @@ public: struct BinImpl { -public: static constexpr auto name = "bin"; static constexpr size_t word_size = 8; + template static void executeOneUInt(T x, char *& out) { bool was_nonzero = false; T t = 1; - for (int8_t offset = sizeof(x) * 8 - 1; offset >= 0; --offset) + for (Int8 offset = sizeof(x) * 8 - 1; offset >= 0; --offset) { t = t << offset; if ((x & t) == t) @@ -1401,7 +1375,7 @@ public: template static void executeFloatAndDecimal(const T & in_vec, ColumnPtr & col_res, const size_t type_size_in_bytes) { - const size_t hex_length = type_size_in_bytes * 8 + 1; /// Including trailing zero byte. 
+        const size_t hex_length = type_size_in_bytes * word_size + 1; /// Including trailing zero byte.

         auto col_str = ColumnString::create();
         ColumnString::Chars & out_vec = col_str->getChars();
@@ -1412,8 +1386,7 @@ public:
         out_vec.resize(size * hex_length);

         size_t pos = 0;
-        char * begin = reinterpret_cast(out_vec.data());
-        char * out = begin;
+        char * out = reinterpret_cast(out_vec.data());
         for (size_t i = 0; i < size; ++i)
         {
             const UInt8 * in_pos = reinterpret_cast(&in_vec[i]);
@@ -1440,12 +1413,10 @@ public:

 struct UnbinImpl
 {
-public:
     static constexpr auto name = "unbin";
+    static constexpr size_t word_size = 8;

-    static String getName() { return name; }
-
-    static void unConversion(const char * pos, const char * end, char *& out)
+    static void decode(const char * pos, const char * end, char *& out)
     {
         UInt8 left = 0;

@@ -1454,7 +1425,7 @@ public:
         /// e.g. if the length is 9 and the input is "101000001":
        /// left_cnt starts at 1; left is shifted left and the first bit '1' is added, so left = 1 and pos is 1;
        /// then left_cnt is 0 and the remaining input '01000001' is decoded 8 bits at a time.
-        for (uint8_t left_cnt = (end - pos) & 7; left_cnt > 0; --left_cnt)
+        for (UInt8 left_cnt = (end - pos) & 7; left_cnt > 0; --left_cnt)
         {
             left = left << 1;
             if (*pos != '0')
@@ -1469,12 +1440,12 @@ public:
             ++out;
         }

-        /// input character encoding is UTF-8. And
-        /// remain bits mod 8 is zero.
+        assert((end - pos) % 8 == 0);
+
         while (end - pos != 0)
         {
             UInt8 c = 0;
-            for (uint8_t i = 0; i < 8; ++i)
+            for (UInt8 i = 0; i < 8; ++i)
             {
                 c = c << 1;
                 if (*pos != '0')
@@ -1492,10 +1463,10 @@ public:
     }
 };

-using FunctionHex = Conversion;
-using FunctionUnhex = UnConversion;
-using FunctionBin = Conversion;
-using FunctionUnbin = UnConversion;
+using FunctionHex = EncodeToBinaryRepr;
+using FunctionUnhex = DecodeFromBinaryRepr;
+using FunctionBin = EncodeToBinaryRepr;
+using FunctionUnbin = DecodeFromBinaryRepr;

 class FunctionChar : public IFunction
 {
From 237b834629c286a9c576af09bf2909bc495088d3 Mon Sep 17 00:00:00 2001
From: Anton Popov 
Date: Mon, 5 Jul 2021 13:29:36 +0300
Subject: [PATCH 781/931] fix clang-tidy

---
 src/Functions/toJSONString.cpp | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/src/Functions/toJSONString.cpp b/src/Functions/toJSONString.cpp
index eea4fb0bb9f..8e5e67219af 100644
--- a/src/Functions/toJSONString.cpp
+++ b/src/Functions/toJSONString.cpp
@@ -16,7 +16,7 @@ namespace
         static constexpr auto name = "toJSONString";
         static FunctionPtr create(ContextPtr context) { return std::make_shared(context); }

-        FunctionToJSONString(ContextPtr context) : format_settings(getFormatSettings(context)) {}
+        explicit FunctionToJSONString(ContextPtr context) : format_settings(getFormatSettings(context)) {}

         String getName() const override { return name; }

@@ -49,7 +49,7 @@ namespace

     private:
         /// Affects only the subset of settings related to JSON.
- FormatSettings format_settings; + const FormatSettings format_settings; }; } From 231740f2d65e3bdb2ebb383d9061a814860514ee Mon Sep 17 00:00:00 2001 From: vdimir Date: Mon, 5 Jul 2021 14:44:50 +0300 Subject: [PATCH 782/931] Function bin for uint uses writeBinByte, correct for single zero --- src/Common/hex.h | 15 ++++++++ src/Functions/FunctionsCoding.h | 34 ++++++++++--------- .../0_stateless/01926_bin_unbin.reference | 9 +++++ tests/queries/0_stateless/01926_bin_unbin.sql | 10 ++++++ 4 files changed, 52 insertions(+), 16 deletions(-) diff --git a/src/Common/hex.h b/src/Common/hex.h index 82eff776244..69bc6f4f79f 100644 --- a/src/Common/hex.h +++ b/src/Common/hex.h @@ -1,5 +1,6 @@ #pragma once #include +#include /// Maps 0..15 to 0..9A..F or 0..9a..f correspondingly. @@ -46,6 +47,20 @@ inline void writeBinByte(UInt8 byte, void * out) memcpy(out, &bin_byte_to_char_table[static_cast(byte) * 8], 8); } +inline size_t writeBinByteNoLeadZeros(UInt8 byte, char * out) +{ + if (byte == 0) + return 0; + + int clz = std::countl_zero(byte); + for (Int8 offset = sizeof(UInt8) * 8 - clz - 1; offset >= 0; --offset) + { + *out = ((byte >> offset) & 1) ? '1' : '0'; + ++out; + } + return sizeof(UInt8) * 8 - clz; +} + /// Produces hex representation of an unsigned int with leading zeros (for checksums) template inline void writeHexUIntImpl(TUInt uint_, char * out, const char * const table) diff --git a/src/Functions/FunctionsCoding.h b/src/Functions/FunctionsCoding.h index e9ec013d6eb..71cce3193ba 100644 --- a/src/Functions/FunctionsCoding.h +++ b/src/Functions/FunctionsCoding.h @@ -1344,30 +1344,32 @@ struct BinImpl static void executeOneUInt(T x, char *& out) { bool was_nonzero = false; - T t = 1; - - for (Int8 offset = sizeof(x) * 8 - 1; offset >= 0; --offset) + for (int offset = (sizeof(T) - 1) * 8; offset >= 0; offset -= 8) { - t = t << offset; - if ((x & t) == t) + UInt8 byte = x >> offset; + + /// Skip leading zeros + if (byte == 0 && !was_nonzero) + continue; + + /// First non-zero byte without leading zeros + if (was_nonzero) { - x = x - t; - was_nonzero = true; - *out = '1'; - t = 1; + writeBinByte(byte, out); + out += word_size; } else { - t = 1; - if (!was_nonzero) - { - continue; - } - *out = '0'; + size_t written = writeBinByteNoLeadZeros(byte, out); + out += written; } + was_nonzero = true; + } + if (!was_nonzero) + { + *out = '0'; ++out; } - *out = '\0'; ++out; } diff --git a/tests/queries/0_stateless/01926_bin_unbin.reference b/tests/queries/0_stateless/01926_bin_unbin.reference index 54c01c5d145..595b7389a5d 100644 --- a/tests/queries/0_stateless/01926_bin_unbin.reference +++ b/tests/queries/0_stateless/01926_bin_unbin.reference @@ -1,4 +1,5 @@ +0 1 1010 1111111 @@ -13,6 +14,14 @@ 0000000000000000000011000011110101011101010100111010101000000001 0011000100110010001100110011001100110010001101000011001000110100 0011000100110010001100110011001100110010001101000011001000110100 +0011000100110010001100110011001100110010001101000011001000110100 +0011000100110010001100110011001100110010001101000011001000110100 + 0 10 测试 +0 +0 +0 +1 +1 diff --git a/tests/queries/0_stateless/01926_bin_unbin.sql b/tests/queries/0_stateless/01926_bin_unbin.sql index 40635091120..fadf236ce9a 100644 --- a/tests/queries/0_stateless/01926_bin_unbin.sql +++ b/tests/queries/0_stateless/01926_bin_unbin.sql @@ -1,3 +1,4 @@ +select bin(''); select bin(0); select bin(1); select bin(10); @@ -12,8 +13,17 @@ select bin(toFloat64(1.2)); select bin(toDecimal32(1.2, 8)); select bin(toDecimal64(1.2, 17)); select bin('12332424'); 
+select bin(materialize('12332424')); +select bin(toNullable(materialize('12332424'))); select bin(toLowCardinality(materialize('12332424'))); +select unbin(''); select unbin('00110000'); -- 0 select unbin('0011000100110000'); -- 10 select unbin('111001101011010110001011111010001010111110010101'); -- 测试 +select unbin(materialize('00110000')); +select unbin(toNullable(materialize('00110000'))); +select unbin(toLowCardinality(materialize('00110000'))); + +select unbin(bin('')) == ''; +select bin(unbin('')) == ''; From dd06866fa8e7b7788d34def117c893c9a33f9601 Mon Sep 17 00:00:00 2001 From: vdimir Date: Mon, 5 Jul 2021 14:56:39 +0300 Subject: [PATCH 783/931] Fix unbin for corner cases --- src/Functions/FunctionsCoding.h | 14 +++++++++----- .../queries/0_stateless/01926_bin_unbin.reference | 5 +++++ tests/queries/0_stateless/01926_bin_unbin.sql | 7 +++++++ 3 files changed, 21 insertions(+), 5 deletions(-) diff --git a/src/Functions/FunctionsCoding.h b/src/Functions/FunctionsCoding.h index 71cce3193ba..33b26afc8dc 100644 --- a/src/Functions/FunctionsCoding.h +++ b/src/Functions/FunctionsCoding.h @@ -1420,6 +1420,13 @@ struct UnbinImpl static void decode(const char * pos, const char * end, char *& out) { + if (pos == end) + { + *out = '\0'; + ++out; + return; + } + UInt8 left = 0; /// end - pos is the length of input. @@ -1431,12 +1438,11 @@ struct UnbinImpl { left = left << 1; if (*pos != '0') - { left += 1; - } ++pos; } - if (0 != left) + + if (left != 0 || end - pos == 0) { *out = left; ++out; @@ -1451,9 +1457,7 @@ struct UnbinImpl { c = c << 1; if (*pos != '0') - { c += 1; - } ++pos; } *out = c; diff --git a/tests/queries/0_stateless/01926_bin_unbin.reference b/tests/queries/0_stateless/01926_bin_unbin.reference index 595b7389a5d..ace28af5211 100644 --- a/tests/queries/0_stateless/01926_bin_unbin.reference +++ b/tests/queries/0_stateless/01926_bin_unbin.reference @@ -17,6 +17,7 @@ 0011000100110010001100110011001100110010001101000011001000110100 0011000100110010001100110011001100110010001101000011001000110100 +1 0 10 测试 @@ -25,3 +26,7 @@ 0 1 1 +1 +1 +1 +1 diff --git a/tests/queries/0_stateless/01926_bin_unbin.sql b/tests/queries/0_stateless/01926_bin_unbin.sql index fadf236ce9a..3593448d407 100644 --- a/tests/queries/0_stateless/01926_bin_unbin.sql +++ b/tests/queries/0_stateless/01926_bin_unbin.sql @@ -18,6 +18,7 @@ select bin(toNullable(materialize('12332424'))); select bin(toLowCardinality(materialize('12332424'))); select unbin(''); +select unbin('0') == '\0'; select unbin('00110000'); -- 0 select unbin('0011000100110000'); -- 10 select unbin('111001101011010110001011111010001010111110010101'); -- 测试 @@ -27,3 +28,9 @@ select unbin(toLowCardinality(materialize('00110000'))); select unbin(bin('')) == ''; select bin(unbin('')) == ''; +select bin(unbin('0')) == '00000000'; + +-- hex and bin consistent for corner cases +select hex('') == bin(''); +select unhex('') == unbin(''); +select unhex('0') == unbin('0'); From 9071ecd428929ead37a6e218ecf5f1f7d82bf071 Mon Sep 17 00:00:00 2001 From: Anton Popov Date: Mon, 5 Jul 2021 15:44:58 +0300 Subject: [PATCH 784/931] fix alter of settings in MergeTree --- src/Storages/MergeTree/MergeTreeData.cpp | 19 ++++++++----------- src/Storages/MergeTree/MergeTreeData.h | 3 +++ src/Storages/StorageMergeTree.cpp | 5 +++++ src/Storages/StorageMergeTree.h | 2 ++ src/Storages/StorageReplicatedMergeTree.cpp | 5 +++++ src/Storages/StorageReplicatedMergeTree.h | 2 ++ 6 files changed, 25 insertions(+), 11 deletions(-) diff --git a/src/Storages/MergeTree/MergeTreeData.cpp 
b/src/Storages/MergeTree/MergeTreeData.cpp
index 40b37f5afc4..ae3d2220936 100644
--- a/src/Storages/MergeTree/MergeTreeData.cpp
+++ b/src/Storages/MergeTree/MergeTreeData.cpp
@@ -1818,11 +1818,10 @@ void MergeTreeData::checkAlterIsPossible(const AlterCommands & commands, Context
         if (MergeTreeSettings::isPartFormatSetting(setting_name) && !new_value)
         {
             /// Use default settings + new changes and check that they don't affect part format settings
-            MergeTreeSettings copy = *getSettings();
-            copy.resetToDefault();
-            copy.applyChanges(new_changes);
+            auto copy = getDefaultSettings();
+            copy->applyChanges(new_changes);
             String reason;
-            if (!canUsePolymorphicParts(copy, &reason) && !reason.empty())
+            if (!canUsePolymorphicParts(*copy, &reason) && !reason.empty())
                 throw Exception("Can't change settings. Reason: " + reason, ErrorCodes::NOT_IMPLEMENTED);
         }
@@ -1984,14 +1983,12 @@ void MergeTreeData::changeSettings(
             }
         }

-        MergeTreeSettings copy = *getSettings();
-        /// reset to default settings before applying existing
-        copy.resetToDefault();
-        copy.applyChanges(new_changes);
+        /// Reset to default settings before applying the existing changes.
+        auto copy = getDefaultSettings();
+        copy->applyChanges(new_changes);
+        copy->sanityCheck(getContext()->getSettingsRef());

-        copy.sanityCheck(getContext()->getSettingsRef());
-
-        storage_settings.set(std::make_unique<MergeTreeSettings>(copy));
+        storage_settings.set(std::move(copy));
         StorageInMemoryMetadata new_metadata = getInMemoryMetadata();
         new_metadata.setSettingsChanges(new_settings);
         setInMemoryMetadata(new_metadata);
diff --git a/src/Storages/MergeTree/MergeTreeData.h b/src/Storages/MergeTree/MergeTreeData.h
index da8c7fcbb65..a6ece4a7a98 100644
--- a/src/Storages/MergeTree/MergeTreeData.h
+++ b/src/Storages/MergeTree/MergeTreeData.h
@@ -1087,6 +1087,9 @@ private:
     // Get partition matcher for FREEZE / UNFREEZE queries.
     MatcherFn getPartitionMatcher(const ASTPtr & partition, ContextPtr context) const;
+
+    /// Returns default settings for storage with possible changes from global config.
+    virtual std::unique_ptr<MergeTreeSettings> getDefaultSettings() const = 0;
 };

 /// RAII struct to record big parts that are submerging or emerging.
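The change above is subtle: instead of mutating a copy of the *current* settings, both call sites now rebuild the settings object from getDefaultSettings() and re-apply only the changes that should remain. A minimal self-contained sketch of why that matters follows; ToySettings, its applyChanges, and the hard-coded values are illustrative assumptions, not the real MergeTreeSettings API. It demonstrates two things: when a later ALTER drops a setting from the changes list, rebuilding from defaults reverts it; and because the defaults come from the server-wide config rather than compiled-in constants, config-level overrides survive the rebuild (which a plain resetToDefault() would have discarded).

```cpp
#include <iostream>
#include <map>
#include <memory>
#include <string>

struct ToySettings
{
    std::map<std::string, int> values;

    /// Overlay a set of changes on top of whatever is already stored.
    void applyChanges(const std::map<std::string, int> & changes)
    {
        for (const auto & [name, value] : changes)
            values[name] = value;
    }
};

/// Stand-in for MergeTreeData::getDefaultSettings(): the default already
/// carries a (pretend) override from the server config, which is exactly
/// what resetting the current object to compiled-in defaults would lose.
std::unique_ptr<ToySettings> getDefaultSettings()
{
    auto settings = std::make_unique<ToySettings>();
    settings->values["parts_to_throw_insert"] = 300; /// pretend this came from config.xml
    return settings;
}

int main()
{
    /// The first ALTER sets two settings, the second one keeps only one of them.
    const std::map<std::string, int> first_alter = {{"parts_to_throw_insert", 500}, {"max_delay_to_insert", 2}};
    const std::map<std::string, int> second_alter = {{"max_delay_to_insert", 2}};

    auto settings = getDefaultSettings();
    settings->applyChanges(first_alter);
    std::cout << settings->values["parts_to_throw_insert"] << '\n'; /// 500

    /// Rebuild from defaults and re-apply, as MergeTreeData::changeSettings() now does.
    settings = getDefaultSettings();
    settings->applyChanges(second_alter);
    std::cout << settings->values["parts_to_throw_insert"] << '\n'; /// back to 300, not a stale 500
}
```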
diff --git a/src/Storages/StorageMergeTree.cpp b/src/Storages/StorageMergeTree.cpp index 6f8b69ba419..8f387187074 100644 --- a/src/Storages/StorageMergeTree.cpp +++ b/src/Storages/StorageMergeTree.cpp @@ -1577,4 +1577,9 @@ void StorageMergeTree::startBackgroundMovesIfNeeded() background_moves_executor.start(); } +std::unique_ptr StorageMergeTree::getDefaultSettings() const +{ + return std::make_unique(getContext()->getMergeTreeSettings()); +} + } diff --git a/src/Storages/StorageMergeTree.h b/src/Storages/StorageMergeTree.h index 6678ae06b53..a359de07c07 100644 --- a/src/Storages/StorageMergeTree.h +++ b/src/Storages/StorageMergeTree.h @@ -235,6 +235,8 @@ private: void startBackgroundMovesIfNeeded() override; + std::unique_ptr getDefaultSettings() const override; + friend class MergeTreeProjectionBlockOutputStream; friend class MergeTreeBlockOutputStream; friend class MergeTreeData; diff --git a/src/Storages/StorageReplicatedMergeTree.cpp b/src/Storages/StorageReplicatedMergeTree.cpp index b51b39f7d68..b6a08afa2eb 100644 --- a/src/Storages/StorageReplicatedMergeTree.cpp +++ b/src/Storages/StorageReplicatedMergeTree.cpp @@ -7174,6 +7174,11 @@ void StorageReplicatedMergeTree::startBackgroundMovesIfNeeded() background_moves_executor.start(); } +std::unique_ptr StorageReplicatedMergeTree::getDefaultSettings() const +{ + return std::make_unique(getContext()->getReplicatedMergeTreeSettings()); +} + void StorageReplicatedMergeTree::lockSharedData(const IMergeTreeDataPart & part) const { diff --git a/src/Storages/StorageReplicatedMergeTree.h b/src/Storages/StorageReplicatedMergeTree.h index 6f717b7c450..e03255ccd07 100644 --- a/src/Storages/StorageReplicatedMergeTree.h +++ b/src/Storages/StorageReplicatedMergeTree.h @@ -702,6 +702,8 @@ private: void startBackgroundMovesIfNeeded() override; + std::unique_ptr getDefaultSettings() const override; + std::set getPartitionIdsAffectedByCommands(const MutationCommands & commands, ContextPtr query_context) const; PartitionBlockNumbersHolder allocateBlockNumbersInAffectedPartitions( const MutationCommands & commands, ContextPtr query_context, const zkutil::ZooKeeperPtr & zookeeper) const; From 59c93e85e4ce3a69ef0b4c496e39d25c49c1dc7c Mon Sep 17 00:00:00 2001 From: Ivan Blinkov Date: Mon, 5 Jul 2021 19:26:02 +0300 Subject: [PATCH 785/931] Update README.md --- README.md | 3 --- 1 file changed, 3 deletions(-) diff --git a/README.md b/README.md index a915570122d..496a6357f44 100644 --- a/README.md +++ b/README.md @@ -13,6 +13,3 @@ ClickHouse® is an open-source column-oriented database management system that a * [Code Browser](https://clickhouse.tech/codebrowser/html_report/ClickHouse/index.html) with syntax highlight and navigation. * [Contacts](https://clickhouse.tech/#contacts) can help to get your questions answered if there are any. * You can also [fill this form](https://clickhouse.tech/#meet) to meet Yandex ClickHouse team in person. - -## Upcoming Events -* [China ClickHouse Community Meetup (online)](http://hdxu.cn/rhbfZ) on 26 June 2021. 
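Before moving on: the two getDefaultSettings() overrides above (in StorageMergeTree and StorageReplicatedMergeTree) return their defaults from different context accessors, getMergeTreeSettings() and getReplicatedMergeTreeSettings(), which read separate sections of the server configuration, so the choice cannot live in the base class. A rough sketch of the shape of this pattern is below; the class names, the returned strings, and the mapping to the <merge_tree> / <replicated_merge_tree> config sections are illustrative assumptions, not the real API.

```cpp
#include <iostream>
#include <memory>
#include <string>

/// The base class owns the settings-rebuild algorithm but delegates the choice
/// of defaults to a pure virtual hook, like MergeTreeData::getDefaultSettings().
struct ToyStorageBase
{
    virtual ~ToyStorageBase() = default;

    virtual std::unique_ptr<std::string> getDefaultSettings() const = 0;

    void changeSettings() const
    {
        auto defaults = getDefaultSettings();
        std::cout << "rebuilding settings from: " << *defaults << '\n';
    }
};

struct ToyPlainStorage : ToyStorageBase
{
    std::unique_ptr<std::string> getDefaultSettings() const override
    {
        /// Assumption: plain tables start from the <merge_tree> config section.
        return std::make_unique<std::string>("<merge_tree> defaults");
    }
};

struct ToyReplicatedStorage : ToyStorageBase
{
    std::unique_ptr<std::string> getDefaultSettings() const override
    {
        /// Assumption: replicated tables start from <replicated_merge_tree>.
        return std::make_unique<std::string>("<replicated_merge_tree> defaults");
    }
};

int main()
{
    ToyPlainStorage plain;
    ToyReplicatedStorage replicated;
    plain.changeSettings();      /// rebuilding settings from: <merge_tree> defaults
    replicated.changeSettings(); /// rebuilding settings from: <replicated_merge_tree> defaults
}
```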
From 754140c538b39f24c1e045d24df10670201707b3 Mon Sep 17 00:00:00 2001
From: kssenii
Date: Mon, 5 Jul 2021 21:32:00 +0300
Subject: [PATCH 786/931] Update skip list

---
 tests/queries/0_stateless/arcadia_skip_list.txt | 1 +
 1 file changed, 1 insertion(+)

diff --git a/tests/queries/0_stateless/arcadia_skip_list.txt b/tests/queries/0_stateless/arcadia_skip_list.txt
index 8453094cc65..903c72f044a 100644
--- a/tests/queries/0_stateless/arcadia_skip_list.txt
+++ b/tests/queries/0_stateless/arcadia_skip_list.txt
@@ -252,3 +252,4 @@
 01914_exchange_dictionaries
 01923_different_expression_name_alias
 01932_null_valid_identifier
+00918_json_functions

From 7ae15fee31645f760e417e770a3a7c674d31649d Mon Sep 17 00:00:00 2001
From: Alexey Milovidov
Date: Mon, 5 Jul 2021 22:08:29 +0300
Subject: [PATCH 787/931] Change performance test after adjusting the column name

---
 docker/test/performance-comparison/compare.sh | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/docker/test/performance-comparison/compare.sh b/docker/test/performance-comparison/compare.sh
index c3447c17d35..9a8ffff7cd9 100755
--- a/docker/test/performance-comparison/compare.sh
+++ b/docker/test/performance-comparison/compare.sh
@@ -1178,11 +1178,11 @@ create view right_async_metric_log as
 -- Use the right log as time reference because it may have higher precision.
 create table metrics engine File(TSV, 'metrics/metrics.tsv') as
     with (select min(event_time) from right_async_metric_log) as min_time
-    select name metric, r.event_time - min_time event_time, l.value as left, r.value as right
+    select metric, r.event_time - min_time event_time, l.value as left, r.value as right
     from right_async_metric_log r
     asof join file('left-async-metric-log.tsv', TSVWithNamesAndTypes,
         '$(cat left-async-metric-log.tsv.columns)') l
-    on l.name = r.name and r.event_time <= l.event_time
+    on l.metric = r.metric and r.event_time <= l.event_time
     order by metric, event_time
     ;

From 22ba93789b69fbc2fb649ba5b2542ec780473550 Mon Sep 17 00:00:00 2001
From: Alexey Milovidov
Date: Mon, 5 Jul 2021 22:29:36 +0300
Subject: [PATCH 788/931] Fix warning

---
 src/Interpreters/AsynchronousMetrics.cpp | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/Interpreters/AsynchronousMetrics.cpp b/src/Interpreters/AsynchronousMetrics.cpp
index c4a084e2c9b..edf7dd69234 100644
--- a/src/Interpreters/AsynchronousMetrics.cpp
+++ b/src/Interpreters/AsynchronousMetrics.cpp
@@ -481,7 +481,7 @@ void AsynchronousMetrics::update(std::chrono::system_clock::time_point update_ti
     AsynchronousMetricValues new_values;

     auto current_time = std::chrono::system_clock::now();
-    auto time_after_previous_update = current_time - previous_update_time;
+    auto time_after_previous_update [[maybe_unused]] = current_time - previous_update_time;
     previous_update_time = update_time;

     /// This is also a good indicator of system responsiveness.
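The [[maybe_unused]] fix in the last hunk is worth a quick illustration: the variable is still computed, but the attribute tells the compiler that never reading it is intentional, which matters when the only consumer is compiled out, presumably behind a platform-specific #if in AsynchronousMetrics. A small standalone sketch follows; VERBOSE_TIMING is a made-up macro standing in for whatever conditional block actually consumes the value.

```cpp
#include <chrono>
#include <iostream>

int main()
{
    const auto start = std::chrono::steady_clock::now();

    /// ... some work whose duration we may or may not report ...

    /// Without [[maybe_unused]], configurations where the #if below is disabled
    /// would emit -Wunused-variable, which breaks -Werror builds.
    auto elapsed [[maybe_unused]] = std::chrono::steady_clock::now() - start;

#if defined(VERBOSE_TIMING) // made-up macro, an assumption for this sketch
    std::cout << std::chrono::duration_cast<std::chrono::microseconds>(elapsed).count() << " us\n";
#endif
}
```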
From 1fde0e13ccd4d3a38ec889a535071ef955c9f383 Mon Sep 17 00:00:00 2001
From: Alexey Milovidov
Date: Mon, 5 Jul 2021 22:32:33 +0300
Subject: [PATCH 789/931] A check just in case

---
 src/Interpreters/AsynchronousMetrics.cpp | 23 +++++++++++++----------
 1 file changed, 13 insertions(+), 10 deletions(-)

diff --git a/src/Interpreters/AsynchronousMetrics.cpp b/src/Interpreters/AsynchronousMetrics.cpp
index edf7dd69234..7f843852fc6 100644
--- a/src/Interpreters/AsynchronousMetrics.cpp
+++ b/src/Interpreters/AsynchronousMetrics.cpp
@@ -718,16 +718,19 @@ void AsynchronousMetrics::update(std::chrono::system_clock::time_point update_ti
         /// Also write values normalized to 0..1 by dividing by the number of CPUs.
         /// These values are suitable for averaging across a cluster of non-uniform servers.
-        new_values["OSUserTimeNormalized"] = delta_values_all_cpus.user * multiplier / num_cpus;
-        new_values["OSNiceTimeNormalized"] = delta_values_all_cpus.nice * multiplier / num_cpus;
-        new_values["OSSystemTimeNormalized"] = delta_values_all_cpus.system * multiplier / num_cpus;
-        new_values["OSIdleTimeNormalized"] = delta_values_all_cpus.idle * multiplier / num_cpus;
-        new_values["OSIOWaitTimeNormalized"] = delta_values_all_cpus.iowait * multiplier / num_cpus;
-        new_values["OSIrqTimeNormalized"] = delta_values_all_cpus.irq * multiplier / num_cpus;
-        new_values["OSSoftIrqTimeNormalized"] = delta_values_all_cpus.softirq * multiplier / num_cpus;
-        new_values["OSStealTimeNormalized"] = delta_values_all_cpus.steal * multiplier / num_cpus;
-        new_values["OSGuestTimeNormalized"] = delta_values_all_cpus.guest * multiplier / num_cpus;
-        new_values["OSGuestNiceTimeNormalized"] = delta_values_all_cpus.guest_nice * multiplier / num_cpus;
+        if (num_cpus)
+        {
+            new_values["OSUserTimeNormalized"] = delta_values_all_cpus.user * multiplier / num_cpus;
+            new_values["OSNiceTimeNormalized"] = delta_values_all_cpus.nice * multiplier / num_cpus;
+            new_values["OSSystemTimeNormalized"] = delta_values_all_cpus.system * multiplier / num_cpus;
+            new_values["OSIdleTimeNormalized"] = delta_values_all_cpus.idle * multiplier / num_cpus;
+            new_values["OSIOWaitTimeNormalized"] = delta_values_all_cpus.iowait * multiplier / num_cpus;
+            new_values["OSIrqTimeNormalized"] = delta_values_all_cpus.irq * multiplier / num_cpus;
+            new_values["OSSoftIrqTimeNormalized"] = delta_values_all_cpus.softirq * multiplier / num_cpus;
+            new_values["OSStealTimeNormalized"] = delta_values_all_cpus.steal * multiplier / num_cpus;
+            new_values["OSGuestTimeNormalized"] = delta_values_all_cpus.guest * multiplier / num_cpus;
+            new_values["OSGuestNiceTimeNormalized"] = delta_values_all_cpus.guest_nice * multiplier / num_cpus;
+        }
     }

     proc_stat_values_other = current_other_values;

From ac1baaf6d43cac3146f89e0b0a6c7331c8d0d2de Mon Sep 17 00:00:00 2001
From: Alexey Milovidov
Date: Mon, 5 Jul 2021 22:37:12 +0300
Subject: [PATCH 790/931] Comments

---
 src/Interpreters/AsynchronousMetrics.cpp | 1 +
 1 file changed, 1 insertion(+)

diff --git a/src/Interpreters/AsynchronousMetrics.cpp b/src/Interpreters/AsynchronousMetrics.cpp
index 7f843852fc6..db0fd1f7c43 100644
--- a/src/Interpreters/AsynchronousMetrics.cpp
+++ b/src/Interpreters/AsynchronousMetrics.cpp
@@ -1054,6 +1054,7 @@ void AsynchronousMetrics::update(std::chrono::system_clock::time_point update_ti
         for (size_t i = 0, size = edac.size(); i < size; ++i)
         {
             /// NOTE maybe we need to take difference with previous values.
+            /// But these metrics should be exceptionally rare, so it's ok to keep them accumulated.
            try
            {

From 945b54441d0b20efee12b3093f8514491efb7a44 Mon Sep 17 00:00:00 2001
From: Alexey Milovidov
Date: Mon, 5 Jul 2021 22:41:50 +0300
Subject: [PATCH 791/931] Comments

---
 src/Interpreters/AsynchronousMetrics.h | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/src/Interpreters/AsynchronousMetrics.h b/src/Interpreters/AsynchronousMetrics.h
index f012dda267c..606d117e605 100644
--- a/src/Interpreters/AsynchronousMetrics.h
+++ b/src/Interpreters/AsynchronousMetrics.h
@@ -28,6 +28,9 @@ using AsynchronousMetricValues = std::unordered_map<std::string, AsynchronousMetricValue>;
Date: Mon, 5 Jul 2021 22:58:36 +0300
Subject: [PATCH 792/931] Some partially working code

---
 src/Storages/MergeTree/DropPartsRanges.cpp    | 60 +++++++++++++++++++
 src/Storages/MergeTree/DropPartsRanges.h      | 33 ++++++++++
 .../MergeTree/ReplicatedMergeTreeQueue.cpp    | 34 +++++++++++
 .../MergeTree/ReplicatedMergeTreeQueue.h      |  7 +++
 src/Storages/StorageReplicatedMergeTree.cpp   | 12 ++++
 src/Storages/ya.make                          |  1 +
 6 files changed, 147 insertions(+)
 create mode 100644 src/Storages/MergeTree/DropPartsRanges.cpp
 create mode 100644 src/Storages/MergeTree/DropPartsRanges.h

diff --git a/src/Storages/MergeTree/DropPartsRanges.cpp b/src/Storages/MergeTree/DropPartsRanges.cpp
new file mode 100644
index 00000000000..e9cf07fb51f
--- /dev/null
+++ b/src/Storages/MergeTree/DropPartsRanges.cpp
@@ -0,0 +1,60 @@
+#include
+#include
+
+namespace DB
+{
+
+namespace ErrorCodes
+{
+    extern const int LOGICAL_ERROR;
+}
+
+bool DropPartsRanges::isAffectedByDropRange(const ReplicatedMergeTreeLogEntry & entry, std::string & postpone_reason) const
+{
+    if (entry.new_part_name.empty())
+        return false;
+
+    MergeTreePartInfo entry_info = MergeTreePartInfo::fromPartName(entry.new_part_name, format_version);
+    for (const auto & [znode, drop_range] : drop_ranges)
+    {
+        if (!drop_range.isDisjoint(entry_info))
+        {
+            postpone_reason = fmt::format("Has DROP RANGE {} affecting entry. Will postpone its execution.", drop_range.getPartName());
+            return true;
+        }
+    }
+
+    return false;
+}
+
+void DropPartsRanges::addDropRange(const ReplicatedMergeTreeLogEntryPtr & entry, Poco::Logger * /*log*/)
+{
+    if (entry->type != ReplicatedMergeTreeLogEntry::DROP_RANGE)
+        throw Exception(ErrorCodes::LOGICAL_ERROR, "Trying to add entry of type {} to drop ranges, expected DROP_RANGE", entry->typeToString());
+
+    //LOG_DEBUG(log, "ADD DROP RANGE {}", *entry->getDropRange(format_version));
+    MergeTreePartInfo entry_info = MergeTreePartInfo::fromPartName(*entry->getDropRange(format_version), format_version);
+    drop_ranges.emplace(entry->znode_name, entry_info);
+}
+
+void DropPartsRanges::removeDropRange(const ReplicatedMergeTreeLogEntryPtr & entry, Poco::Logger * /*log*/)
+{
+    if (entry->type != ReplicatedMergeTreeLogEntry::DROP_RANGE)
+        throw Exception(ErrorCodes::LOGICAL_ERROR, "Trying to remove entry of type {} from drop ranges, expected DROP_RANGE", entry->typeToString());
+
+    //LOG_DEBUG(log, "REMOVE DROP RANGE {}", *entry->getDropRange(format_version));
+    drop_ranges.erase(entry->znode_name);
+}
+
+bool DropPartsRanges::hasDropRange(const MergeTreePartInfo & new_drop_range_info) const
+{
+    for (const auto & [znode_name, drop_range] : drop_ranges)
+    {
+        if (drop_range.contains(new_drop_range_info))
+            return true;
+    }
+
+    return false;
+}
+
+}

diff --git a/src/Storages/MergeTree/DropPartsRanges.h b/src/Storages/MergeTree/DropPartsRanges.h
new file mode 100644
index 00000000000..23f38b70420
--- /dev/null
+++ b/src/Storages/MergeTree/DropPartsRanges.h
@@ -0,0 +1,33 @@
+#pragma once
+
+#include
+#include
+#include
+#include
+
+namespace DB
+{
+
+class DropPartsRanges
+{
+private:
+    MergeTreeDataFormatVersion format_version;
+
+    std::map<std::string, MergeTreePartInfo> drop_ranges;
+public:
+
+    explicit DropPartsRanges(MergeTreeDataFormatVersion format_version_)
+        : format_version(format_version_)
+    {}
+
+    bool isAffectedByDropRange(const ReplicatedMergeTreeLogEntry & entry, std::string & postpone_reason) const;
+
+    bool hasDropRange(const MergeTreePartInfo & new_drop_range_info) const;
+
+    void addDropRange(const ReplicatedMergeTreeLogEntryPtr & entry, Poco::Logger * log);
+
+    void removeDropRange(const ReplicatedMergeTreeLogEntryPtr & entry, Poco::Logger * log);
+
+};
+
+}

diff --git a/src/Storages/MergeTree/ReplicatedMergeTreeQueue.cpp b/src/Storages/MergeTree/ReplicatedMergeTreeQueue.cpp
index e5e4787da14..8fa69bb2c36 100644
--- a/src/Storages/MergeTree/ReplicatedMergeTreeQueue.cpp
+++ b/src/Storages/MergeTree/ReplicatedMergeTreeQueue.cpp
@@ -26,6 +26,7 @@ ReplicatedMergeTreeQueue::ReplicatedMergeTreeQueue(StorageReplicatedMergeTree &
     , format_version(storage.format_version)
     , current_parts(format_version)
     , virtual_parts(format_version)
+    , drop_ranges(format_version)
 {
     zookeeper_path = storage.zookeeper_path;
     replica_path = storage.replica_path;
@@ -168,6 +169,13 @@ void ReplicatedMergeTreeQueue::insertUnlocked(
     }
     else
     {
+        drop_ranges.addDropRange(entry, log);
+        auto drop_range = *entry->getDropRange(format_version);
+        /// DROP PARTS removes parts from virtual parts
+        MergeTreePartInfo drop_range_info = MergeTreePartInfo::fromPartName(drop_range, format_version);
+        if (!drop_range_info.isFakeDropRangePart() && virtual_parts.getContainingPart(drop_range_info) == drop_range)
+            virtual_parts.removePartAndCoveredParts(drop_range);
+
         queue.push_front(entry);
     }
@@ -261,6 +269,11 @@ void ReplicatedMergeTreeQueue::updateStateOnQueueEntryRemoval(
                 virtual_parts.remove(*drop_range_part_name);
             }

+            if (entry->type ==
LogEntry::DROP_RANGE) + { + drop_ranges.removeDropRange(entry, log); + } + if (entry->type == LogEntry::ALTER_METADATA) { LOG_TRACE(log, "Finishing metadata alter with version {}", entry->alter_version); @@ -269,6 +282,11 @@ void ReplicatedMergeTreeQueue::updateStateOnQueueEntryRemoval( } else { + if (entry->type == LogEntry::DROP_RANGE) + { + drop_ranges.removeDropRange(entry, log); + } + for (const String & virtual_part_name : entry->getVirtualPartNames(format_version)) { /// Because execution of the entry is unsuccessful, @@ -1003,6 +1021,16 @@ bool ReplicatedMergeTreeQueue::shouldExecuteLogEntry( return false; } + if (entry.type != LogEntry::DROP_RANGE && drop_ranges.isAffectedByDropRange(entry, out_postpone_reason)) + { + //LOG_DEBUG(log, "POSTPONE ENTRY {} ({}) PRODUCING PART {} BECAUSE OF DROP RANGE {}", entry.znode_name, entry.typeToString(), entry.new_part_name); + return false; + } + else + { + //LOG_DEBUG(log, "NO DROP RANGE FOUND FOR PART {} OF TYPE {}", entry.new_part_name, entry.typeToString()); + } + /// Check that fetches pool is not overloaded if ((entry.type == LogEntry::GET_PART || entry.type == LogEntry::ATTACH_PART) && !storage.canExecuteFetch(entry, out_postpone_reason)) @@ -2074,6 +2102,12 @@ bool ReplicatedMergeTreeMergePredicate::isMutationFinished(const ReplicatedMerge return true; } +bool ReplicatedMergeTreeMergePredicate::hasDropRange(const MergeTreePartInfo & new_drop_range_info) const +{ + std::lock_guard lock(queue.state_mutex); + return queue.drop_ranges.hasDropRange(new_drop_range_info); +} + ReplicatedMergeTreeQueue::SubscriberHandler ReplicatedMergeTreeQueue::addSubscriber(ReplicatedMergeTreeQueue::SubscriberCallBack && callback) diff --git a/src/Storages/MergeTree/ReplicatedMergeTreeQueue.h b/src/Storages/MergeTree/ReplicatedMergeTreeQueue.h index 820d2794a31..f97ab74bd28 100644 --- a/src/Storages/MergeTree/ReplicatedMergeTreeQueue.h +++ b/src/Storages/MergeTree/ReplicatedMergeTreeQueue.h @@ -11,6 +11,7 @@ #include #include #include +#include #include @@ -100,6 +101,10 @@ private: */ ActiveDataPartSet virtual_parts; + + /// + DropPartsRanges drop_ranges; + /// A set of mutations loaded from ZooKeeper. /// mutations_by_partition is an index partition ID -> block ID -> mutation into this set. /// Note that mutations are updated in such a way that they are always more recent than @@ -475,6 +480,8 @@ public: /// The version of "log" node that is used to check that no new merges have appeared. int32_t getVersion() const { return merges_version; } + bool hasDropRange(const MergeTreePartInfo & new_drop_range_info) const; + private: const ReplicatedMergeTreeQueue & queue; diff --git a/src/Storages/StorageReplicatedMergeTree.cpp b/src/Storages/StorageReplicatedMergeTree.cpp index b51b39f7d68..6945dbb82ae 100644 --- a/src/Storages/StorageReplicatedMergeTree.cpp +++ b/src/Storages/StorageReplicatedMergeTree.cpp @@ -2104,6 +2104,10 @@ bool StorageReplicatedMergeTree::executeFetch(LogEntry & entry) try { String part_name = entry.actual_new_part_name.empty() ? 
entry.new_part_name : entry.actual_new_part_name; + + if (!entry.actual_new_part_name.empty()) + LOG_DEBUG(log, "Will fetch part {} instead of {}", entry.actual_new_part_name, entry.new_part_name); + if (!fetchPart(part_name, metadata_snapshot, fs::path(zookeeper_path) / "replicas" / replica, false, entry.quorum)) return false; } @@ -6986,6 +6990,14 @@ bool StorageReplicatedMergeTree::dropPartImpl( return false; } + if (merge_pred.hasDropRange(part->info)) + { + if (throw_if_noop) + throw Exception("Already has DROP RANGE for part " + part_name + " in queue.", ErrorCodes::PART_IS_TEMPORARILY_LOCKED); + + return false; + } + /// There isn't a lot we can do otherwise. Can't cancel merges because it is possible that a replica already /// finished the merge. if (partIsAssignedToBackgroundOperation(part)) diff --git a/src/Storages/ya.make b/src/Storages/ya.make index 6e412cddba7..495ec9c4fd6 100644 --- a/src/Storages/ya.make +++ b/src/Storages/ya.make @@ -30,6 +30,7 @@ SRCS( MergeTree/BackgroundJobsExecutor.cpp MergeTree/BoolMask.cpp MergeTree/DataPartsExchange.cpp + MergeTree/DropPartsRanges.cpp MergeTree/EphemeralLockInZooKeeper.cpp MergeTree/IMergeTreeDataPart.cpp MergeTree/IMergeTreeDataPartWriter.cpp From 21fb6ddea5877328a8fa471bdf28ff9f65c08e85 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Mon, 5 Jul 2021 23:02:24 +0300 Subject: [PATCH 793/931] Remove AIO for sequential reads --- src/Common/ProfileEvents.cpp | 12 +- .../CompressedReadBufferFromFile.cpp | 4 +- .../CompressedReadBufferFromFile.h | 2 +- src/Dictionaries/SSDCacheDictionaryStorage.h | 16 +- src/Disks/DiskCacheWrapper.cpp | 12 +- src/Disks/DiskCacheWrapper.h | 2 +- src/Disks/DiskDecorator.cpp | 4 +- src/Disks/DiskDecorator.h | 2 +- src/Disks/DiskLocal.cpp | 4 +- src/Disks/DiskLocal.h | 2 +- src/Disks/DiskMemory.h | 2 +- src/Disks/DiskRestartProxy.cpp | 4 +- src/Disks/DiskRestartProxy.h | 2 +- src/Disks/HDFS/DiskHDFS.h | 2 +- src/Disks/IDisk.h | 2 +- src/Disks/S3/DiskS3.h | 2 +- src/IO/HashingReadBuffer.h | 2 +- src/IO/ReadBufferAIO.cpp | 312 -------- src/IO/ReadBufferAIO.h | 111 --- src/IO/createReadBufferFromFileBase.cpp | 36 +- src/IO/createReadBufferFromFileBase.h | 6 +- src/IO/examples/CMakeLists.txt | 5 - src/IO/examples/read_buffer_aio.cpp | 670 ------------------ .../tests/gtest_aio_seek_back_after_eof.cpp | 91 --- .../MergeTree/MergeTreeDataMergerMutator.cpp | 2 +- .../MergeTree/MergeTreeSequentialSource.cpp | 2 +- 26 files changed, 72 insertions(+), 1239 deletions(-) delete mode 100644 src/IO/ReadBufferAIO.cpp delete mode 100644 src/IO/ReadBufferAIO.h delete mode 100644 src/IO/examples/read_buffer_aio.cpp delete mode 100644 src/IO/tests/gtest_aio_seek_back_after_eof.cpp diff --git a/src/Common/ProfileEvents.cpp b/src/Common/ProfileEvents.cpp index 86f06f27455..e71111a2a6b 100644 --- a/src/Common/ProfileEvents.cpp +++ b/src/Common/ProfileEvents.cpp @@ -22,10 +22,6 @@ M(WriteBufferFromFileDescriptorWrite, "Number of writes (write/pwrite) to a file descriptor. Does not include sockets.") \ M(WriteBufferFromFileDescriptorWriteFailed, "Number of times the write (write/pwrite) to a file descriptor have failed.") \ M(WriteBufferFromFileDescriptorWriteBytes, "Number of bytes written to file descriptors. 
If the file is compressed, this will show compressed data size.") \ - M(ReadBufferAIORead, "") \ - M(ReadBufferAIOReadBytes, "") \ - M(WriteBufferAIOWrite, "") \ - M(WriteBufferAIOWriteBytes, "") \ M(ReadCompressedBytes, "Number of bytes (the number of bytes before decompression) read from compressed sources (files, network).") \ M(CompressedReadBufferBlocks, "Number of compressed blocks (the blocks of data that are compressed independent of each other) read from compressed sources (files, network).") \ M(CompressedReadBufferBytes, "Number of uncompressed bytes (the number of bytes after decompression) read from compressed sources (files, network).") \ @@ -34,6 +30,10 @@ M(UncompressedCacheWeightLost, "") \ M(MMappedFileCacheHits, "") \ M(MMappedFileCacheMisses, "") \ + M(AIOWrite, "Number of writes with Linux or FreeBSD AIO interface") \ + M(AIOWriteBytes, "Number of bytes written with Linux or FreeBSD AIO interface") \ + M(AIORead, "Number of reads with Linux or FreeBSD AIO interface") \ + M(AIOReadBytes, "Number of bytes read with Linux or FreeBSD AIO interface") \ M(IOBufferAllocs, "") \ M(IOBufferAllocBytes, "") \ M(ArenaAllocChunks, "") \ @@ -43,8 +43,8 @@ M(MarkCacheHits, "") \ M(MarkCacheMisses, "") \ M(CreatedReadBufferOrdinary, "") \ - M(CreatedReadBufferAIO, "") \ - M(CreatedReadBufferAIOFailed, "") \ + M(CreatedReadBufferDirectIO, "") \ + M(CreatedReadBufferDirectIOFailed, "") \ M(CreatedReadBufferMMap, "") \ M(CreatedReadBufferMMapFailed, "") \ M(DiskReadElapsedMicroseconds, "Total time spent waiting for read syscall. This include reads from page cache.") \ diff --git a/src/Compression/CompressedReadBufferFromFile.cpp b/src/Compression/CompressedReadBufferFromFile.cpp index e14a1784b14..22ffb74f61a 100644 --- a/src/Compression/CompressedReadBufferFromFile.cpp +++ b/src/Compression/CompressedReadBufferFromFile.cpp @@ -47,13 +47,13 @@ CompressedReadBufferFromFile::CompressedReadBufferFromFile(std::unique_ptr(0) - , p_file_in(createReadBufferFromFileBase(path, estimated_size, aio_threshold, mmap_threshold, mmap_cache, buf_size)) + , p_file_in(createReadBufferFromFileBase(path, estimated_size, direct_io_threshold, mmap_threshold, mmap_cache, buf_size)) , file_in(*p_file_in) { compressed_in = &file_in; diff --git a/src/Compression/CompressedReadBufferFromFile.h b/src/Compression/CompressedReadBufferFromFile.h index 2ee7021b35a..fe9add6f015 100644 --- a/src/Compression/CompressedReadBufferFromFile.h +++ b/src/Compression/CompressedReadBufferFromFile.h @@ -33,7 +33,7 @@ public: CompressedReadBufferFromFile(std::unique_ptr buf, bool allow_different_codecs_ = false); CompressedReadBufferFromFile( - const std::string & path, size_t estimated_size, size_t aio_threshold, size_t mmap_threshold, MMappedFileCache * mmap_cache, + const std::string & path, size_t estimated_size, size_t direct_io_threshold, size_t mmap_threshold, MMappedFileCache * mmap_cache, size_t buf_size = DBMS_DEFAULT_BUFFER_SIZE, bool allow_different_codecs_ = false); void seek(size_t offset_in_compressed_file, size_t offset_in_decompressed_block); diff --git a/src/Dictionaries/SSDCacheDictionaryStorage.h b/src/Dictionaries/SSDCacheDictionaryStorage.h index 7232d2d01b7..395328a904d 100644 --- a/src/Dictionaries/SSDCacheDictionaryStorage.h +++ b/src/Dictionaries/SSDCacheDictionaryStorage.h @@ -26,8 +26,10 @@ namespace ProfileEvents { extern const Event FileOpen; - extern const Event WriteBufferAIOWrite; - extern const Event WriteBufferAIOWriteBytes; + extern const Event AIOWrite; + extern const Event AIOWriteBytes; + 
extern const Event AIORead; + extern const Event AIOReadBytes; } namespace DB @@ -531,8 +533,8 @@ public: auto bytes_written = eventResult(event); - ProfileEvents::increment(ProfileEvents::WriteBufferAIOWrite); - ProfileEvents::increment(ProfileEvents::WriteBufferAIOWriteBytes, bytes_written); + ProfileEvents::increment(ProfileEvents::AIOWrite); + ProfileEvents::increment(ProfileEvents::AIOWriteBytes, bytes_written); if (bytes_written != static_cast(block_size * buffer_size_in_blocks)) throw Exception(ErrorCodes::AIO_WRITE_ERROR, @@ -600,6 +602,9 @@ public: buffer_size_in_bytes, read_bytes); + ProfileEvents::increment(ProfileEvents::AIORead); + ProfileEvents::increment(ProfileEvents::AIOReadBytes, read_bytes); + SSDCacheBlock block(block_size); for (size_t i = 0; i < blocks_length; ++i) @@ -687,6 +692,9 @@ public: throw Exception(ErrorCodes::AIO_READ_ERROR, "GC: AIO failed to read file ({}). Expected bytes ({}). Actual bytes ({})", file_path, block_size, read_bytes); + ProfileEvents::increment(ProfileEvents::AIORead); + ProfileEvents::increment(ProfileEvents::AIOReadBytes, read_bytes); + char * request_buffer = getRequestBuffer(request); // Unpoison the memory returned from an uninstrumented system function. diff --git a/src/Disks/DiskCacheWrapper.cpp b/src/Disks/DiskCacheWrapper.cpp index d5b82edb134..f672376841e 100644 --- a/src/Disks/DiskCacheWrapper.cpp +++ b/src/Disks/DiskCacheWrapper.cpp @@ -90,17 +90,17 @@ DiskCacheWrapper::readFile( const String & path, size_t buf_size, size_t estimated_size, - size_t aio_threshold, + size_t direct_io_threshold, size_t mmap_threshold, MMappedFileCache * mmap_cache) const { if (!cache_file_predicate(path)) - return DiskDecorator::readFile(path, buf_size, estimated_size, aio_threshold, mmap_threshold, mmap_cache); + return DiskDecorator::readFile(path, buf_size, estimated_size, direct_io_threshold, mmap_threshold, mmap_cache); LOG_DEBUG(log, "Read file {} from cache", backQuote(path)); if (cache_disk->exists(path)) - return cache_disk->readFile(path, buf_size, estimated_size, aio_threshold, mmap_threshold, mmap_cache); + return cache_disk->readFile(path, buf_size, estimated_size, direct_io_threshold, mmap_threshold, mmap_cache); auto metadata = acquireDownloadMetadata(path); @@ -134,7 +134,7 @@ DiskCacheWrapper::readFile( auto tmp_path = path + ".tmp"; { - auto src_buffer = DiskDecorator::readFile(path, buf_size, estimated_size, aio_threshold, mmap_threshold, mmap_cache); + auto src_buffer = DiskDecorator::readFile(path, buf_size, estimated_size, direct_io_threshold, mmap_threshold, mmap_cache); auto dst_buffer = cache_disk->writeFile(tmp_path, buf_size, WriteMode::Rewrite); copyData(*src_buffer, *dst_buffer); } @@ -158,9 +158,9 @@ DiskCacheWrapper::readFile( } if (metadata->status == DOWNLOADED) - return cache_disk->readFile(path, buf_size, estimated_size, aio_threshold, mmap_threshold, mmap_cache); + return cache_disk->readFile(path, buf_size, estimated_size, direct_io_threshold, mmap_threshold, mmap_cache); - return DiskDecorator::readFile(path, buf_size, estimated_size, aio_threshold, mmap_threshold, mmap_cache); + return DiskDecorator::readFile(path, buf_size, estimated_size, direct_io_threshold, mmap_threshold, mmap_cache); } std::unique_ptr diff --git a/src/Disks/DiskCacheWrapper.h b/src/Disks/DiskCacheWrapper.h index 6d58394640f..7e711dd521c 100644 --- a/src/Disks/DiskCacheWrapper.h +++ b/src/Disks/DiskCacheWrapper.h @@ -38,7 +38,7 @@ public: const String & path, size_t buf_size, size_t estimated_size, - size_t aio_threshold, + size_t 
direct_io_threshold, size_t mmap_threshold, MMappedFileCache * mmap_cache) const override; diff --git a/src/Disks/DiskDecorator.cpp b/src/Disks/DiskDecorator.cpp index d1ff3f9f827..7237a249bcb 100644 --- a/src/Disks/DiskDecorator.cpp +++ b/src/Disks/DiskDecorator.cpp @@ -115,9 +115,9 @@ void DiskDecorator::listFiles(const String & path, std::vector & file_na std::unique_ptr DiskDecorator::readFile( - const String & path, size_t buf_size, size_t estimated_size, size_t aio_threshold, size_t mmap_threshold, MMappedFileCache * mmap_cache) const + const String & path, size_t buf_size, size_t estimated_size, size_t direct_io_threshold, size_t mmap_threshold, MMappedFileCache * mmap_cache) const { - return delegate->readFile(path, buf_size, estimated_size, aio_threshold, mmap_threshold, mmap_cache); + return delegate->readFile(path, buf_size, estimated_size, direct_io_threshold, mmap_threshold, mmap_cache); } std::unique_ptr diff --git a/src/Disks/DiskDecorator.h b/src/Disks/DiskDecorator.h index 401078e6b2e..0910f4c28cd 100644 --- a/src/Disks/DiskDecorator.h +++ b/src/Disks/DiskDecorator.h @@ -39,7 +39,7 @@ public: const String & path, size_t buf_size, size_t estimated_size, - size_t aio_threshold, + size_t direct_io_threshold, size_t mmap_threshold, MMappedFileCache * mmap_cache) const override; diff --git a/src/Disks/DiskLocal.cpp b/src/Disks/DiskLocal.cpp index 89c1514f5c8..4ceb76ab059 100644 --- a/src/Disks/DiskLocal.cpp +++ b/src/Disks/DiskLocal.cpp @@ -211,9 +211,9 @@ void DiskLocal::replaceFile(const String & from_path, const String & to_path) std::unique_ptr DiskLocal::readFile( - const String & path, size_t buf_size, size_t estimated_size, size_t aio_threshold, size_t mmap_threshold, MMappedFileCache * mmap_cache) const + const String & path, size_t buf_size, size_t estimated_size, size_t direct_io_threshold, size_t mmap_threshold, MMappedFileCache * mmap_cache) const { - return createReadBufferFromFileBase(fs::path(disk_path) / path, estimated_size, aio_threshold, mmap_threshold, mmap_cache, buf_size); + return createReadBufferFromFileBase(fs::path(disk_path) / path, estimated_size, direct_io_threshold, mmap_threshold, mmap_cache, buf_size); } std::unique_ptr diff --git a/src/Disks/DiskLocal.h b/src/Disks/DiskLocal.h index 47482ad8d67..63a6fe59bea 100644 --- a/src/Disks/DiskLocal.h +++ b/src/Disks/DiskLocal.h @@ -74,7 +74,7 @@ public: const String & path, size_t buf_size, size_t estimated_size, - size_t aio_threshold, + size_t direct_io_threshold, size_t mmap_threshold, MMappedFileCache * mmap_cache) const override; diff --git a/src/Disks/DiskMemory.h b/src/Disks/DiskMemory.h index d5c57b20a4a..40fd2b2a9f9 100644 --- a/src/Disks/DiskMemory.h +++ b/src/Disks/DiskMemory.h @@ -66,7 +66,7 @@ public: const String & path, size_t buf_size, size_t estimated_size, - size_t aio_threshold, + size_t direct_io_threshold, size_t mmap_threshold, MMappedFileCache * mmap_cache) const override; diff --git a/src/Disks/DiskRestartProxy.cpp b/src/Disks/DiskRestartProxy.cpp index 2600dc5a1e1..1bd5b2acf50 100644 --- a/src/Disks/DiskRestartProxy.cpp +++ b/src/Disks/DiskRestartProxy.cpp @@ -187,11 +187,11 @@ void DiskRestartProxy::listFiles(const String & path, std::vector & file } std::unique_ptr DiskRestartProxy::readFile( - const String & path, size_t buf_size, size_t estimated_size, size_t aio_threshold, size_t mmap_threshold, MMappedFileCache * mmap_cache) + const String & path, size_t buf_size, size_t estimated_size, size_t direct_io_threshold, size_t mmap_threshold, MMappedFileCache * mmap_cache) const 
{ ReadLock lock (mutex); - auto impl = DiskDecorator::readFile(path, buf_size, estimated_size, aio_threshold, mmap_threshold, mmap_cache); + auto impl = DiskDecorator::readFile(path, buf_size, estimated_size, direct_io_threshold, mmap_threshold, mmap_cache); return std::make_unique(*this, std::move(impl)); } diff --git a/src/Disks/DiskRestartProxy.h b/src/Disks/DiskRestartProxy.h index f5502d9d68f..e6c94d9ad7b 100644 --- a/src/Disks/DiskRestartProxy.h +++ b/src/Disks/DiskRestartProxy.h @@ -47,7 +47,7 @@ public: const String & path, size_t buf_size, size_t estimated_size, - size_t aio_threshold, + size_t direct_io_threshold, size_t mmap_threshold, MMappedFileCache * mmap_cache) const override; std::unique_ptr writeFile(const String & path, size_t buf_size, WriteMode mode) override; diff --git a/src/Disks/HDFS/DiskHDFS.h b/src/Disks/HDFS/DiskHDFS.h index 49fdf44728b..1f93192fd57 100644 --- a/src/Disks/HDFS/DiskHDFS.h +++ b/src/Disks/HDFS/DiskHDFS.h @@ -48,7 +48,7 @@ public: const String & path, size_t buf_size, size_t estimated_size, - size_t aio_threshold, + size_t direct_io_threshold, size_t mmap_threshold, MMappedFileCache * mmap_cache) const override; diff --git a/src/Disks/IDisk.h b/src/Disks/IDisk.h index ecaf7d63fdc..f9e7624f4ab 100644 --- a/src/Disks/IDisk.h +++ b/src/Disks/IDisk.h @@ -156,7 +156,7 @@ public: const String & path, size_t buf_size = DBMS_DEFAULT_BUFFER_SIZE, size_t estimated_size = 0, - size_t aio_threshold = 0, + size_t direct_io_threshold = 0, size_t mmap_threshold = 0, MMappedFileCache * mmap_cache = nullptr) const = 0; diff --git a/src/Disks/S3/DiskS3.h b/src/Disks/S3/DiskS3.h index 21bf0d3867b..fc7c832e45d 100644 --- a/src/Disks/S3/DiskS3.h +++ b/src/Disks/S3/DiskS3.h @@ -77,7 +77,7 @@ public: const String & path, size_t buf_size, size_t estimated_size, - size_t aio_threshold, + size_t direct_io_threshold, size_t mmap_threshold, MMappedFileCache * mmap_cache) const override; diff --git a/src/IO/HashingReadBuffer.h b/src/IO/HashingReadBuffer.h index 08b6de69dcb..5d42c64478c 100644 --- a/src/IO/HashingReadBuffer.h +++ b/src/IO/HashingReadBuffer.h @@ -34,7 +34,7 @@ private: working_buffer = in.buffer(); pos = in.position(); - // `pos` may be different from working_buffer.begin() when using AIO. + // `pos` may be different from working_buffer.begin() when using sophisticated ReadBuffers. calculateHash(pos, working_buffer.end() - pos); return res; diff --git a/src/IO/ReadBufferAIO.cpp b/src/IO/ReadBufferAIO.cpp deleted file mode 100644 index c064e0d4ed9..00000000000 --- a/src/IO/ReadBufferAIO.cpp +++ /dev/null @@ -1,312 +0,0 @@ -#if defined(OS_LINUX) || defined(__FreeBSD__) - -#include -#include -#include -#include -#include -#include - -#include -#include -#include - -#include - - -namespace ProfileEvents -{ - extern const Event FileOpen; - extern const Event ReadBufferAIORead; - extern const Event ReadBufferAIOReadBytes; -} - -namespace CurrentMetrics -{ - extern const Metric Read; -} - -namespace DB -{ - -namespace ErrorCodes -{ - extern const int FILE_DOESNT_EXIST; - extern const int CANNOT_OPEN_FILE; - extern const int LOGICAL_ERROR; - extern const int ARGUMENT_OUT_OF_BOUND; - extern const int AIO_READ_ERROR; -} - - -/// Note: an additional page is allocated that will contain the data that -/// does not fit into the main buffer. 
-ReadBufferAIO::ReadBufferAIO(const std::string & filename_, size_t buffer_size_, int flags_, char * existing_memory_) - : ReadBufferFromFileBase(buffer_size_ + DEFAULT_AIO_FILE_BLOCK_SIZE, existing_memory_, DEFAULT_AIO_FILE_BLOCK_SIZE), - fill_buffer(BufferWithOwnMemory(internalBuffer().size(), nullptr, DEFAULT_AIO_FILE_BLOCK_SIZE)), - filename(filename_) -{ - ProfileEvents::increment(ProfileEvents::FileOpen); - - int open_flags = (flags_ == -1) ? O_RDONLY : flags_; - open_flags |= O_DIRECT; - open_flags |= O_CLOEXEC; - - fd = ::open(filename.c_str(), open_flags); - if (fd == -1) - { - auto error_code = (errno == ENOENT) ? ErrorCodes::FILE_DOESNT_EXIST : ErrorCodes::CANNOT_OPEN_FILE; - throwFromErrnoWithPath("Cannot open file " + filename, filename, error_code); - } -} - -ReadBufferAIO::~ReadBufferAIO() -{ - if (!aio_failed) - { - try - { - (void) waitForAIOCompletion(); - } - catch (...) - { - tryLogCurrentException(__PRETTY_FUNCTION__); - } - } - - if (fd != -1) - ::close(fd); -} - -void ReadBufferAIO::setMaxBytes(size_t max_bytes_read_) -{ - if (is_started) - throw Exception("Illegal attempt to set the maximum number of bytes to read from file " + filename, ErrorCodes::LOGICAL_ERROR); - max_bytes_read = max_bytes_read_; -} - -bool ReadBufferAIO::nextImpl() -{ - /// If the end of the file has already been reached by calling this function, - /// then the current call is wrong. - if (is_eof) - return false; - - std::optional watch; - if (profile_callback) - watch.emplace(clock_type); - - if (!is_pending_read) - synchronousRead(); - else - receive(); - - if (profile_callback) - { - ProfileInfo info; - info.bytes_requested = requested_byte_count; - info.bytes_read = bytes_read; - info.nanoseconds = watch->elapsed(); //-V1007 - profile_callback(info); - } - - is_started = true; - - /// If the end of the file is just reached, do nothing else. - if (is_eof) - return bytes_read != 0; - - /// Create an asynchronous request. - prepare(); - -#if defined(__FreeBSD__) - request.aio.aio_lio_opcode = LIO_READ; - request.aio.aio_fildes = fd; - request.aio.aio_buf = reinterpret_cast(buffer_begin); - request.aio.aio_nbytes = region_aligned_size; - request.aio.aio_offset = region_aligned_begin; -#else - request.aio_lio_opcode = IOCB_CMD_PREAD; - request.aio_fildes = fd; - request.aio_buf = reinterpret_cast(buffer_begin); - request.aio_nbytes = region_aligned_size; - request.aio_offset = region_aligned_begin; -#endif - - /// Send the request. - try - { - future_bytes_read = AIOContextPool::instance().post(request); - } - catch (...) 
- { - aio_failed = true; - throw; - } - - is_pending_read = true; - return true; -} - -off_t ReadBufferAIO::seek(off_t off, int whence) -{ - off_t new_pos_in_file; - - if (whence == SEEK_SET) - { - if (off < 0) - throw Exception("SEEK_SET underflow", ErrorCodes::ARGUMENT_OUT_OF_BOUND); - new_pos_in_file = off; - } - else if (whence == SEEK_CUR) - { - if (off >= 0) - { - if (off > (std::numeric_limits::max() - getPosition())) - throw Exception("SEEK_CUR overflow", ErrorCodes::ARGUMENT_OUT_OF_BOUND); - } - else if (off < -getPosition()) - throw Exception("SEEK_CUR underflow", ErrorCodes::ARGUMENT_OUT_OF_BOUND); - new_pos_in_file = getPosition() + off; - } - else - throw Exception("ReadBufferAIO::seek expects SEEK_SET or SEEK_CUR as whence", ErrorCodes::ARGUMENT_OUT_OF_BOUND); - - if (new_pos_in_file != getPosition()) - { - off_t first_read_pos_in_file = first_unread_pos_in_file - static_cast(working_buffer.size()); - if (hasPendingData() && (new_pos_in_file >= first_read_pos_in_file) && (new_pos_in_file <= first_unread_pos_in_file)) - { - /// Moved, but remained within the buffer. - pos = working_buffer.begin() + (new_pos_in_file - first_read_pos_in_file); - } - else - { - /// Moved past the buffer. - pos = working_buffer.end(); - first_unread_pos_in_file = new_pos_in_file; - - /// If we go back, than it's not eof - is_eof = false; - - /// We can not use the result of the current asynchronous request. - skip(); - } - } - - return new_pos_in_file; -} - -void ReadBufferAIO::synchronousRead() -{ - CurrentMetrics::Increment metric_increment_read{CurrentMetrics::Read}; - - prepare(); - bytes_read = ::pread(fd, buffer_begin, region_aligned_size, region_aligned_begin); - - ProfileEvents::increment(ProfileEvents::ReadBufferAIORead); - ProfileEvents::increment(ProfileEvents::ReadBufferAIOReadBytes, bytes_read); - - finalize(); -} - -void ReadBufferAIO::receive() -{ - if (!waitForAIOCompletion()) - { - throw Exception("Trying to receive data from AIO, but nothing was queued. It's a bug", ErrorCodes::LOGICAL_ERROR); - } - finalize(); -} - -void ReadBufferAIO::skip() -{ - if (!waitForAIOCompletion()) - return; - - /// @todo I presume this assignment is redundant since waitForAIOCompletion() performs a similar one -// bytes_read = future_bytes_read.get(); - if ((bytes_read < 0) || (static_cast(bytes_read) < region_left_padding)) - throw Exception("Asynchronous read error on file " + filename, ErrorCodes::AIO_READ_ERROR); -} - -bool ReadBufferAIO::waitForAIOCompletion() -{ - if (is_eof || !is_pending_read) - return false; - - CurrentMetrics::Increment metric_increment_read{CurrentMetrics::Read}; - - bytes_read = future_bytes_read.get(); - is_pending_read = false; - - ProfileEvents::increment(ProfileEvents::ReadBufferAIORead); - ProfileEvents::increment(ProfileEvents::ReadBufferAIOReadBytes, bytes_read); - - return true; -} - -void ReadBufferAIO::prepare() -{ - requested_byte_count = std::min(fill_buffer.internalBuffer().size() - DEFAULT_AIO_FILE_BLOCK_SIZE, max_bytes_read); - - /// Region of the disk from which we want to read data. - const off_t region_begin = first_unread_pos_in_file; - - if ((requested_byte_count > static_cast(std::numeric_limits::max())) || - (first_unread_pos_in_file > (std::numeric_limits::max() - static_cast(requested_byte_count)))) - throw Exception("An overflow occurred during file operation", ErrorCodes::LOGICAL_ERROR); - - const off_t region_end = first_unread_pos_in_file + requested_byte_count; - - /// The aligned region of the disk from which we will read the data. 
- region_left_padding = region_begin % DEFAULT_AIO_FILE_BLOCK_SIZE; - const size_t region_right_padding = (DEFAULT_AIO_FILE_BLOCK_SIZE - (region_end % DEFAULT_AIO_FILE_BLOCK_SIZE)) % DEFAULT_AIO_FILE_BLOCK_SIZE; - - region_aligned_begin = region_begin - region_left_padding; - - if (region_end > (std::numeric_limits::max() - static_cast(region_right_padding))) - throw Exception("An overflow occurred during file operation", ErrorCodes::LOGICAL_ERROR); - - const off_t region_aligned_end = region_end + region_right_padding; - region_aligned_size = region_aligned_end - region_aligned_begin; - - buffer_begin = fill_buffer.internalBuffer().begin(); - - /// Unpoison because msan doesn't instrument linux AIO - __msan_unpoison(buffer_begin, fill_buffer.internalBuffer().size()); -} - -void ReadBufferAIO::finalize() -{ - if ((bytes_read < 0) || (static_cast(bytes_read) < region_left_padding)) - throw Exception("Asynchronous read error on file " + filename, ErrorCodes::AIO_READ_ERROR); - - /// Ignore redundant bytes on the left. - bytes_read -= region_left_padding; - - /// Ignore redundant bytes on the right. - bytes_read = std::min(static_cast(bytes_read), static_cast(requested_byte_count)); - - if (bytes_read > 0) - fill_buffer.buffer().resize(region_left_padding + bytes_read); - if (static_cast(bytes_read) < requested_byte_count) - is_eof = true; - - if (first_unread_pos_in_file > (std::numeric_limits::max() - bytes_read)) - throw Exception("An overflow occurred during file operation", ErrorCodes::LOGICAL_ERROR); - - first_unread_pos_in_file += bytes_read; - total_bytes_read += bytes_read; - nextimpl_working_buffer_offset = region_left_padding; - - if (total_bytes_read == max_bytes_read) - is_eof = true; - - /// Swap the main and duplicate buffers. - swap(fill_buffer); -} - -} - -#endif diff --git a/src/IO/ReadBufferAIO.h b/src/IO/ReadBufferAIO.h deleted file mode 100644 index d476865747d..00000000000 --- a/src/IO/ReadBufferAIO.h +++ /dev/null @@ -1,111 +0,0 @@ -#pragma once - -#if defined(OS_LINUX) || defined(__FreeBSD__) - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - - -namespace CurrentMetrics -{ - extern const Metric OpenFileForRead; -} - -namespace DB -{ - -/** Class for asynchronous data reading. - */ -class ReadBufferAIO final : public ReadBufferFromFileBase -{ -public: - ReadBufferAIO(const std::string & filename_, size_t buffer_size_ = DBMS_DEFAULT_BUFFER_SIZE, int flags_ = -1, - char * existing_memory_ = nullptr); - ~ReadBufferAIO() override; - - ReadBufferAIO(const ReadBufferAIO &) = delete; - ReadBufferAIO & operator=(const ReadBufferAIO &) = delete; - - void setMaxBytes(size_t max_bytes_read_); - off_t getPosition() override { return first_unread_pos_in_file - (working_buffer.end() - pos); } - std::string getFileName() const override { return filename; } - int getFD() const { return fd; } - - off_t seek(off_t off, int whence) override; - -private: - /// - bool nextImpl() override; - /// Synchronously read the data. - void synchronousRead(); - /// Get data from an asynchronous request. - void receive(); - /// Ignore data from an asynchronous request. - void skip(); - /// Wait for the end of the current asynchronous task. - bool waitForAIOCompletion(); - /// Prepare the request. - void prepare(); - /// Prepare for reading a duplicate buffer containing data from - /// of the last request. - void finalize(); - -private: - /// Buffer for asynchronous data read operations. 
- BufferWithOwnMemory fill_buffer; - - /// Description of the asynchronous read request. - iocb request{}; - std::future future_bytes_read; - - const std::string filename; - - /// The maximum number of bytes that can be read. - size_t max_bytes_read = std::numeric_limits::max(); - /// Number of bytes requested. - size_t requested_byte_count = 0; - /// The number of bytes read at the last request. - ssize_t bytes_read = 0; - /// The total number of bytes read. - size_t total_bytes_read = 0; - - /// The position of the first unread byte in the file. - off_t first_unread_pos_in_file = 0; - - /// The starting position of the aligned region of the disk from which the data is read. - off_t region_aligned_begin = 0; - /// Left offset to align the region of the disk. - size_t region_left_padding = 0; - /// The size of the aligned region of the disk. - size_t region_aligned_size = 0; - - /// The file descriptor for read. - int fd = -1; - - /// The buffer to which the received data is written. - Position buffer_begin = nullptr; - - /// The asynchronous read operation is not yet completed. - bool is_pending_read = false; - /// The end of the file is reached. - bool is_eof = false; - /// At least one read request was sent. - bool is_started = false; - /// Did the asynchronous operation fail? - bool aio_failed = false; - - CurrentMetrics::Increment metric_increment{CurrentMetrics::OpenFileForRead}; -}; - -} - -#endif diff --git a/src/IO/createReadBufferFromFileBase.cpp b/src/IO/createReadBufferFromFileBase.cpp index 230f049b2cb..00b7fcfd44b 100644 --- a/src/IO/createReadBufferFromFileBase.cpp +++ b/src/IO/createReadBufferFromFileBase.cpp @@ -1,8 +1,5 @@ #include #include -#if defined(OS_LINUX) || defined(__FreeBSD__) -#include -#endif #include #include @@ -10,8 +7,8 @@ namespace ProfileEvents { extern const Event CreatedReadBufferOrdinary; - extern const Event CreatedReadBufferAIO; - extern const Event CreatedReadBufferAIOFailed; + extern const Event CreatedReadBufferDirectIO; + extern const Event CreatedReadBufferDirectIOFailed; extern const Event CreatedReadBufferMMap; extern const Event CreatedReadBufferMMapFailed; } @@ -21,27 +18,44 @@ namespace DB std::unique_ptr createReadBufferFromFileBase( const std::string & filename_, - size_t estimated_size, size_t aio_threshold, size_t mmap_threshold, MMappedFileCache * mmap_cache, + size_t estimated_size, size_t direct_io_threshold, size_t mmap_threshold, MMappedFileCache * mmap_cache, size_t buffer_size_, int flags_, char * existing_memory_, size_t alignment) { #if defined(OS_LINUX) || defined(__FreeBSD__) - if (aio_threshold && estimated_size >= aio_threshold) + if (direct_io_threshold && estimated_size >= direct_io_threshold) { + /** O_DIRECT + * The O_DIRECT flag may impose alignment restrictions on the length and address of user-space buffers and the file offset of I/Os. + * In Linux alignment restrictions vary by filesystem and kernel version and might be absent entirely. + * However there is currently no filesystem-independent interface for an application to discover these restrictions + * for a given file or filesystem. Some filesystems provide their own interfaces for doing so, for example the + * XFS_IOC_DIOINFO operation in xfsctl(3). + * + * Under Linux 2.4, transfer sizes, and the alignment of the user buffer and the file offset must all be + * multiples of the logical block size of the filesystem. Since Linux 2.6.0, alignment to the logical block size + * of the underlying storage (typically 512 bytes) suffices. 
+             *
+             * - man 2 open
+             */
+            constexpr size_t min_alignment = DEFAULT_AIO_FILE_BLOCK_SIZE;
+            if (alignment % min_alignment)
+                alignment = (alignment + min_alignment - 1) / min_alignment * min_alignment;
+
             /// Attempt to open a file with O_DIRECT
             try
             {
-                auto res = std::make_unique<ReadBufferAIO>(filename_, buffer_size_, flags_, existing_memory_);
-                ProfileEvents::increment(ProfileEvents::CreatedReadBufferAIO);
+                auto res = std::make_unique<ReadBufferFromFile>(filename_, buffer_size_, flags_ | O_DIRECT, existing_memory_, alignment);
+                ProfileEvents::increment(ProfileEvents::CreatedReadBufferDirectIO);
                 return res;
             }
             catch (const ErrnoException &)
             {
                 /// Fallback to cached IO if O_DIRECT is not supported.
-                ProfileEvents::increment(ProfileEvents::CreatedReadBufferDirectIOFailed);
+                ProfileEvents::increment(ProfileEvents::CreatedReadBufferDirectIOFailed);
             }
         }
 #else
-    (void)aio_threshold;
+    (void)direct_io_threshold;
     (void)estimated_size;
 #endif

diff --git a/src/IO/createReadBufferFromFileBase.h b/src/IO/createReadBufferFromFileBase.h
index 46d5b39ea44..8bc1b5bb519 100644
--- a/src/IO/createReadBufferFromFileBase.h
+++ b/src/IO/createReadBufferFromFileBase.h
@@ -13,15 +13,15 @@ class MMappedFileCache;

 /** Create an object to read data from a file.
   * estimated_size - the number of bytes to read
-  * aio_threshold - the minimum number of bytes for asynchronous reads
+  * direct_io_threshold - the minimum number of bytes to read with direct I/O (O_DIRECT)
   *
-  * If aio_threshold = 0 or estimated_size < aio_threshold, read operations are executed synchronously.
-  * Otherwise, the read operations are performed asynchronously.
+  * If direct_io_threshold = 0 or estimated_size < direct_io_threshold, read operations go through the page cache.
+  * Otherwise, the reads are performed with O_DIRECT, with a fallback to cached reads if the filesystem does not support it.
   */
 std::unique_ptr<ReadBufferFromFileBase> createReadBufferFromFileBase(
     const std::string & filename_,
     size_t estimated_size,
-    size_t aio_threshold,
+    size_t direct_io_threshold,
     size_t mmap_threshold,
     MMappedFileCache * mmap_cache,
     size_t buffer_size_ = DBMS_DEFAULT_BUFFER_SIZE,

diff --git a/src/IO/examples/CMakeLists.txt b/src/IO/examples/CMakeLists.txt
index bcd0a8bba24..d5907bf67ad 100644
--- a/src/IO/examples/CMakeLists.txt
+++ b/src/IO/examples/CMakeLists.txt
@@ -49,11 +49,6 @@ target_link_libraries (io_operators PRIVATE clickhouse_common_io)
 add_executable (write_int write_int.cpp)
 target_link_libraries (write_int PRIVATE clickhouse_common_io)

-if (OS_LINUX OR OS_FREEBSD)
-    add_executable(read_buffer_aio read_buffer_aio.cpp)
-    target_link_libraries (read_buffer_aio PRIVATE clickhouse_common_io)
-endif ()
-
 add_executable (zlib_buffers zlib_buffers.cpp)
 target_link_libraries (zlib_buffers PRIVATE clickhouse_common_io)

diff --git a/src/IO/examples/read_buffer_aio.cpp b/src/IO/examples/read_buffer_aio.cpp
deleted file mode 100644
index 01ac9808cbb..00000000000
--- a/src/IO/examples/read_buffer_aio.cpp
+++ /dev/null
@@ -1,670 +0,0 @@
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-
-
-namespace
-{
-
-void run();
-void prepare(std::string & filename, std::string & buf);
-void prepare2(std::string & filename, std::string & buf);
-void prepare3(std::string & filename, std::string & buf);
-void prepare4(std::string & filename, std::string & buf);
-std::string createTmpFile();
-[[noreturn]] void die(const std::string & msg);
-void runTest(unsigned int num, const std::function<bool()> & func);
-
-bool test1(const std::string & filename);
-bool test2(const std::string & filename, const std::string & buf);
-bool test3(const std::string & filename, const std::string & buf);
-bool test4(const std::string & filename,
const std::string & buf);
-bool test5(const std::string & filename, const std::string & buf);
-bool test6(const std::string & filename, const std::string & buf);
-bool test7(const std::string & filename, const std::string & buf);
-bool test8(const std::string & filename, const std::string & buf);
-bool test9(const std::string & filename, const std::string & buf);
-bool test10(const std::string & filename, const std::string & buf);
-bool test11(const std::string & filename);
-bool test12(const std::string & filename, const std::string & buf);
-bool test13(const std::string & filename, const std::string & buf);
-bool test14(const std::string & filename, const std::string & buf);
-bool test15(const std::string & filename, const std::string & buf);
-bool test16(const std::string & filename, const std::string & buf);
-bool test17(const std::string & filename, const std::string & buf);
-bool test18(const std::string & filename, const std::string & buf);
-bool test19(const std::string & filename, const std::string & buf);
-bool test20(const std::string & filename, const std::string & buf);
-
-void run()
-{
-    namespace fs = std::filesystem;
-
-    std::string filename;
-    std::string buf;
-    prepare(filename, buf);
-
-    std::string filename2;
-    std::string buf2;
-    prepare(filename2, buf2);
-
-    std::string filename3;
-    std::string buf3;
-    prepare2(filename3, buf3);
-
-    std::string filename4;
-    std::string buf4;
-    prepare3(filename4, buf4);
-
-    std::string filename5;
-    std::string buf5;
-    prepare4(filename5, buf5);
-
-    const std::vector<std::function<bool()>> tests =
-    {
-        [&]{ return test1(filename); },
-        [&]{ return test2(filename, buf); },
-        [&]{ return test3(filename, buf); },
-        [&]{ return test4(filename, buf); },
-        [&]{ return test5(filename, buf); },
-        [&]{ return test6(filename, buf); },
-        [&]{ return test7(filename, buf); },
-        [&]{ return test8(filename, buf); },
-        [&]{ return test9(filename, buf); },
-        [&]{ return test10(filename, buf); },
-        [&]{ return test11(filename); },
-        [&]{ return test12(filename, buf); },
-        [&]{ return test13(filename2, buf2); },
-        [&]{ return test14(filename, buf); },
-        [&]{ return test15(filename3, buf3); },
-        [&]{ return test16(filename3, buf3); },
-        [&]{ return test17(filename4, buf4); },
-        [&]{ return test18(filename5, buf5); },
-        [&]{ return test19(filename, buf); },
-        [&]{ return test20(filename, buf); }
-    };
-
-    unsigned int num = 0;
-    for (const auto & test : tests)
-    {
-        ++num;
-        runTest(num, test);
-    }
-
-    fs::remove_all(fs::path(filename).parent_path().string());
-    fs::remove_all(fs::path(filename2).parent_path().string());
-    fs::remove_all(fs::path(filename3).parent_path().string());
-    fs::remove_all(fs::path(filename4).parent_path().string());
-    fs::remove_all(fs::path(filename5).parent_path().string());
-}
-
-void prepare(std::string & filename, std::string & buf)
-{
-    static const std::string symbols = "ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789";
-
-    filename = createTmpFile();
-
-    size_t n = 10 * DEFAULT_AIO_FILE_BLOCK_SIZE;
-    buf.reserve(n);
-
-    for (size_t i = 0; i < n; ++i)
-        buf += symbols[i % symbols.length()];
-
-    std::ofstream out(filename.c_str());
-    if (!out.is_open())
-        die("Could not open file");
-
-    out << buf;
-}
-
-void prepare2(std::string & filename, std::string & buf)
-{
-    filename = createTmpFile();
-
-    buf = "122333444455555666666777777788888888999999999";
-
-    std::ofstream out(filename.c_str());
-    if (!out.is_open())
-        die("Could not open file");
-
-    out << buf;
-}
-
-void prepare3(std::string & filename, std::string & buf)
-{
-    filename = createTmpFile();
-
-    buf = "122333444455555666666777777788888888999999999";
-
-    std::ofstream out(filename.c_str());
-    if (!out.is_open())
-        die("Could not open file");
-
-    out.seekp(7, std::ios_base::beg);
-    out << buf;
-}
-
-void prepare4(std::string & filename, std::string & buf)
-{
-    static const std::string symbols = "ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789";
-
-    filename = createTmpFile();
-
-    std::ofstream out(filename.c_str());
-    if (!out.is_open())
-        die("Could not open file");
-
-    for (size_t i = 0; i < 1340; ++i)
-        buf += symbols[i % symbols.length()];
-
-    out.seekp(2984, std::ios_base::beg);
-    out << buf;
-}
-
-std::string createTmpFile()
-{
-    char pattern[] = "/tmp/fileXXXXXX";
-    char * dir = ::mkdtemp(pattern);
-    if (dir == nullptr)
-        die("Could not create directory");
-
-    return std::string(dir) + "/foo";
-}
-
-void die(const std::string & msg)
-{
-    std::cout << msg << "\n";
-    ::exit(EXIT_FAILURE);
-}
-
-void runTest(unsigned int num, const std::function<bool()> & func)
-{
-    bool ok;
-
-    try
-    {
-        ok = func();
-    }
-    catch (const DB::Exception & ex)
-    {
-        ok = false;
-        std::cout << "Caught exception " << ex.displayText() << "\n";
-    }
-    catch (const std::exception & ex)
-    {
-        ok = false;
-        std::cout << "Caught exception " << ex.what() << "\n";
-    }
-
-    if (ok)
-        std::cout << "Test " << num << " passed\n";
-    else
-        std::cout << "Test " << num << " failed\n";
-}
-
-bool test1(const std::string & filename)
-{
-    DB::ReadBufferAIO in(filename, 3 * DEFAULT_AIO_FILE_BLOCK_SIZE);
-    if (in.getFileName() != filename)
-        return false;
-    if (in.getFD() == -1)
-        return false;
-    return true;
-}
-
-bool test2(const std::string & filename, const std::string & buf)
-{
-    std::string newbuf;
-    newbuf.resize(buf.length());
-
-    DB::ReadBufferAIO in(filename, 3 * DEFAULT_AIO_FILE_BLOCK_SIZE);
-    size_t count = in.read(newbuf.data(), newbuf.length());
-    if (count != newbuf.length())
-        return false;
-
-    return (newbuf == buf);
-}
-
-bool test3(const std::string & filename, const std::string & buf)
-{
-    std::string newbuf;
-    newbuf.resize(buf.length());
-
-    size_t requested = 9 * DEFAULT_AIO_FILE_BLOCK_SIZE;
-
-    DB::ReadBufferAIO in(filename, 3 * DEFAULT_AIO_FILE_BLOCK_SIZE);
-    in.setMaxBytes(requested);
-    size_t count = in.read(newbuf.data(), newbuf.length());
-
-    newbuf.resize(count);
-    return (newbuf == buf.substr(0, requested));
-}
-
-bool test4(const std::string & filename, const std::string & buf)
-{
-    std::string newbuf;
-    newbuf.resize(buf.length());
-
-    DB::ReadBufferAIO in(filename, 3 * DEFAULT_AIO_FILE_BLOCK_SIZE);
-    in.setMaxBytes(0);
-    size_t n_read = in.read(newbuf.data(), newbuf.length());
-
-    return n_read == 0;
-}
-
-bool test5(const std::string & filename, const std::string & buf)
-{
-    std::string newbuf;
-    newbuf.resize(1 + (DEFAULT_AIO_FILE_BLOCK_SIZE >> 1));
-
-    DB::ReadBufferAIO in(filename, DEFAULT_AIO_FILE_BLOCK_SIZE);
-    in.setMaxBytes(1 + (DEFAULT_AIO_FILE_BLOCK_SIZE >> 1));
-
-    size_t count = in.read(newbuf.data(), newbuf.length());
-    if (count != newbuf.length())
-        return false;
-
-    if (newbuf != buf.substr(0, newbuf.length()))
-        return false;
-
-    return true;
-}
-
-bool test6(const std::string & filename, const std::string & buf)
-{
-    std::string newbuf;
-    newbuf.resize(buf.length());
-
-    DB::ReadBufferAIO in(filename, 3 * DEFAULT_AIO_FILE_BLOCK_SIZE);
-
-    if (in.getPosition() != 0)
-        return false;
-
-    size_t count = in.read(newbuf.data(), newbuf.length());
-    if (count != newbuf.length())
-        return false;
-
-    if (static_cast<size_t>(in.getPosition()) != buf.length())
-        return false;
-
-    return true;
-}
-
-bool 
test7(const std::string & filename, const std::string & buf) -{ - std::string newbuf; - newbuf.resize(buf.length() - DEFAULT_AIO_FILE_BLOCK_SIZE); - - DB::ReadBufferAIO in(filename, 3 * DEFAULT_AIO_FILE_BLOCK_SIZE); - (void) in.seek(DEFAULT_AIO_FILE_BLOCK_SIZE, SEEK_SET); - size_t count = in.read(newbuf.data(), newbuf.length()); - if (count != (9 * DEFAULT_AIO_FILE_BLOCK_SIZE)) - return false; - - return (newbuf == buf.substr(DEFAULT_AIO_FILE_BLOCK_SIZE)); -} - -bool test8(const std::string & filename, const std::string & buf) -{ - std::string newbuf; - newbuf.resize(DEFAULT_AIO_FILE_BLOCK_SIZE - 1); - - DB::ReadBufferAIO in(filename, 3 * DEFAULT_AIO_FILE_BLOCK_SIZE); - (void) in.seek(DEFAULT_AIO_FILE_BLOCK_SIZE + 1, SEEK_CUR); - size_t count = in.read(newbuf.data(), newbuf.length()); - - if (count != newbuf.length()) - return false; - - if (newbuf != buf.substr(DEFAULT_AIO_FILE_BLOCK_SIZE + 1, newbuf.length())) - return false; - - return true; -} - -bool test9(const std::string & filename, const std::string & buf) -{ - bool ok = false; - - try - { - std::string newbuf; - newbuf.resize(buf.length()); - - DB::ReadBufferAIO in(filename, 3 * DEFAULT_AIO_FILE_BLOCK_SIZE); - size_t count = in.read(newbuf.data(), newbuf.length()); - if (count != newbuf.length()) - return false; - in.setMaxBytes(9 * DEFAULT_AIO_FILE_BLOCK_SIZE); - } - catch (const DB::Exception &) - { - ok = true; - } - - return ok; -} - -bool test10(const std::string & filename, const std::string & buf) -{ - DB::ReadBufferAIO in(filename, 3 * DEFAULT_AIO_FILE_BLOCK_SIZE); - - { - std::string newbuf; - newbuf.resize(4 * DEFAULT_AIO_FILE_BLOCK_SIZE); - - size_t count1 = in.read(newbuf.data(), newbuf.length()); - if (count1 != newbuf.length()) - return false; - - if (newbuf != buf.substr(0, 4 * DEFAULT_AIO_FILE_BLOCK_SIZE)) - return false; - } - - (void) in.seek(2 * DEFAULT_AIO_FILE_BLOCK_SIZE, SEEK_CUR); - - { - std::string newbuf; - newbuf.resize(4 * DEFAULT_AIO_FILE_BLOCK_SIZE); - - size_t count2 = in.read(newbuf.data(), newbuf.length()); - if (count2 != newbuf.length()) - return false; - - if (newbuf != buf.substr(6 * DEFAULT_AIO_FILE_BLOCK_SIZE)) - return false; - } - - return true; -} - -bool test11(const std::string & filename) -{ - bool ok = false; - - try - { - DB::ReadBufferAIO in(filename, 3 * DEFAULT_AIO_FILE_BLOCK_SIZE); - (void) in.seek(-DEFAULT_AIO_FILE_BLOCK_SIZE, SEEK_SET); - } - catch (const DB::Exception &) - { - ok = true; - } - - return ok; -} - -bool test12(const std::string & filename, const std::string &) -{ - bool ok = false; - - try - { - std::string newbuf; - newbuf.resize(4 * DEFAULT_AIO_FILE_BLOCK_SIZE); - - DB::ReadBufferAIO in(filename, 3 * DEFAULT_AIO_FILE_BLOCK_SIZE); - size_t count = in.read(newbuf.data(), newbuf.length()); - if (count != newbuf.length()) - return false; - - (void) in.seek(-(10 * DEFAULT_AIO_FILE_BLOCK_SIZE), SEEK_CUR); - } - catch (const DB::Exception &) - { - ok = true; - } - - return ok; -} - -bool test13(const std::string & filename, const std::string &) -{ - std::string newbuf; - newbuf.resize(2 * DEFAULT_AIO_FILE_BLOCK_SIZE - 3); - - DB::ReadBufferAIO in(filename, DEFAULT_AIO_FILE_BLOCK_SIZE); - size_t count1 = in.read(newbuf.data(), newbuf.length()); - return count1 == newbuf.length(); -} - -bool test14(const std::string & filename, const std::string & buf) -{ - std::string newbuf; - newbuf.resize(1 + (DEFAULT_AIO_FILE_BLOCK_SIZE >> 1)); - - DB::ReadBufferAIO in(filename, DEFAULT_AIO_FILE_BLOCK_SIZE); - (void) in.seek(2, SEEK_SET); - in.setMaxBytes(3 + 
(DEFAULT_AIO_FILE_BLOCK_SIZE >> 1)); - - size_t count = in.read(newbuf.data(), newbuf.length()); - if (count != newbuf.length()) - return false; - - if (newbuf != buf.substr(2, newbuf.length())) - return false; - - return true; -} - -bool test15(const std::string & filename, const std::string &) -{ - std::string newbuf; - newbuf.resize(1000); - - DB::ReadBufferAIO in(filename, DEFAULT_AIO_FILE_BLOCK_SIZE); - - size_t count = in.read(newbuf.data(), 1); - if (count != 1) - return false; - if (newbuf[0] != '1') - return false; - return true; -} - -bool test16(const std::string & filename, const std::string &) -{ - DB::ReadBufferAIO in(filename, DEFAULT_AIO_FILE_BLOCK_SIZE); - size_t count; - - { - std::string newbuf; - newbuf.resize(1); - count = in.read(newbuf.data(), 1); - if (count != 1) - return false; - if (newbuf[0] != '1') - return false; - } - - in.seek(2, SEEK_CUR); - - { - std::string newbuf; - newbuf.resize(3); - count = in.read(newbuf.data(), 3); - if (count != 3) - return false; - if (newbuf != "333") - return false; - } - - in.seek(4, SEEK_CUR); - - { - std::string newbuf; - newbuf.resize(5); - count = in.read(newbuf.data(), 5); - if (count != 5) - return false; - if (newbuf != "55555") - return false; - } - - in.seek(6, SEEK_CUR); - - { - std::string newbuf; - newbuf.resize(7); - count = in.read(newbuf.data(), 7); - if (count != 7) - return false; - if (newbuf != "7777777") - return false; - } - - in.seek(8, SEEK_CUR); - - { - std::string newbuf; - newbuf.resize(9); - count = in.read(newbuf.data(), 9); - if (count != 9) - return false; - if (newbuf != "999999999") - return false; - } - - return true; -} - -bool test17(const std::string & filename, const std::string & buf) -{ - DB::ReadBufferAIO in(filename, DEFAULT_AIO_FILE_BLOCK_SIZE); - size_t count; - - { - std::string newbuf; - newbuf.resize(10); - count = in.read(newbuf.data(), 10); - - if (count != 10) - return false; - if (newbuf.substr(0, 7) != std::string(7, '\0')) - return false; - if (newbuf.substr(7) != "122") - return false; - } - - in.seek(7 + buf.length() - 2, SEEK_SET); - - { - std::string newbuf; - newbuf.resize(160); - count = in.read(newbuf.data(), 160); - - if (count != 2) - return false; - if (newbuf.substr(0, 2) != "99") - return false; - } - - in.seek(7 + buf.length() + DEFAULT_AIO_FILE_BLOCK_SIZE, SEEK_SET); - - { - std::string newbuf; - newbuf.resize(50); - count = in.read(newbuf.data(), 50); - if (count != 0) - return false; - } - - return true; -} - -bool test18(const std::string & filename, const std::string & buf) -{ - DB::ReadBufferAIO in(filename, DEFAULT_AIO_FILE_BLOCK_SIZE); - - std::string newbuf; - newbuf.resize(1340); - - in.seek(2984, SEEK_SET); - size_t count = in.read(newbuf.data(), 1340); - - if (count != 1340) - return false; - if (newbuf != buf) - return false; - - return true; -} - -bool test19(const std::string & filename, const std::string & buf) -{ - DB::ReadBufferAIO in(filename, 3 * DEFAULT_AIO_FILE_BLOCK_SIZE); - - { - std::string newbuf; - newbuf.resize(5 * DEFAULT_AIO_FILE_BLOCK_SIZE); - - size_t count1 = in.read(newbuf.data(), newbuf.length()); - if (count1 != newbuf.length()) - return false; - - if (newbuf != buf.substr(0, 5 * DEFAULT_AIO_FILE_BLOCK_SIZE)) - return false; - } - - { - std::string newbuf; - newbuf.resize(5 * DEFAULT_AIO_FILE_BLOCK_SIZE); - - size_t count2 = in.read(newbuf.data(), newbuf.length()); - if (count2 != newbuf.length()) - return false; - - if (newbuf != buf.substr(5 * DEFAULT_AIO_FILE_BLOCK_SIZE)) - return false; - } - - return true; -} - -bool 
test20(const std::string & filename, const std::string & buf) -{ - DB::ReadBufferAIO in(filename, 3 * DEFAULT_AIO_FILE_BLOCK_SIZE); - - { - std::string newbuf; - newbuf.resize(5 * DEFAULT_AIO_FILE_BLOCK_SIZE); - - size_t count1 = in.read(newbuf.data(), newbuf.length()); - if (count1 != newbuf.length()) - return false; - - if (newbuf != buf.substr(0, 5 * DEFAULT_AIO_FILE_BLOCK_SIZE)) - return false; - } - - (void) in.getPosition(); - - { - std::string newbuf; - newbuf.resize(5 * DEFAULT_AIO_FILE_BLOCK_SIZE); - - size_t count2 = in.read(newbuf.data(), newbuf.length()); - if (count2 != newbuf.length()) - return false; - - if (newbuf != buf.substr(5 * DEFAULT_AIO_FILE_BLOCK_SIZE)) - return false; - } - - return true; -} - -} - -int main() -{ - run(); - return 0; -} diff --git a/src/IO/tests/gtest_aio_seek_back_after_eof.cpp b/src/IO/tests/gtest_aio_seek_back_after_eof.cpp deleted file mode 100644 index 784f5479657..00000000000 --- a/src/IO/tests/gtest_aio_seek_back_after_eof.cpp +++ /dev/null @@ -1,91 +0,0 @@ -#if defined(__linux__) || defined(__FreeBSD__) - -#include - -#include -#include -#include -#include -#include -#include -#include - - -namespace -{ -std::string createTmpFileForEOFtest() -{ - char pattern[] = "./EOFtestFolderXXXXXX"; - if (char * dir = ::mkdtemp(pattern); dir) - { - return std::string(dir) + "/foo"; - } - else - { - /// We have no tmp in docker - /// So we have to use root - std::string almost_rand_dir = std::string{"/"} + std::to_string(randomSeed()) + "foo"; - return almost_rand_dir; - } - -} - -void prepareForEOF(std::string & filename, std::string & buf) -{ - static const std::string symbols = "ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"; - - filename = createTmpFileForEOFtest(); - - size_t n = 10 * DEFAULT_AIO_FILE_BLOCK_SIZE; - buf.reserve(n); - - for (size_t i = 0; i < n; ++i) - buf += symbols[i % symbols.length()]; - - std::ofstream out(filename); - out << buf; -} - - -} -TEST(ReadBufferAIOTest, TestReadAfterAIO) -{ - using namespace DB; - std::string data; - std::string file_path; - prepareForEOF(file_path, data); - ReadBufferAIO testbuf(file_path); - - std::string newdata; - newdata.resize(data.length()); - - size_t total_read = testbuf.read(newdata.data(), newdata.length()); - EXPECT_EQ(total_read, data.length()); - EXPECT_TRUE(testbuf.eof()); - - - testbuf.seek(data.length() - 100, SEEK_SET); - - std::string smalldata; - smalldata.resize(100); - size_t read_after_eof = testbuf.read(smalldata.data(), smalldata.size()); - EXPECT_EQ(read_after_eof, 100); - EXPECT_TRUE(testbuf.eof()); - - - testbuf.seek(0, SEEK_SET); - std::string repeatdata; - repeatdata.resize(data.length()); - size_t read_after_eof_big = testbuf.read(repeatdata.data(), repeatdata.size()); - EXPECT_EQ(read_after_eof_big, data.length()); - EXPECT_TRUE(testbuf.eof()); - - if (file_path[0] != '/') - { - const size_t last_slash = file_path.rfind('/'); - const std::string temp_dir = file_path.substr(0, last_slash); - std::filesystem::remove_all(temp_dir); - } -} - -#endif diff --git a/src/Storages/MergeTree/MergeTreeDataMergerMutator.cpp b/src/Storages/MergeTree/MergeTreeDataMergerMutator.cpp index b6ccfc05dc2..c2561fbd23f 100644 --- a/src/Storages/MergeTree/MergeTreeDataMergerMutator.cpp +++ b/src/Storages/MergeTree/MergeTreeDataMergerMutator.cpp @@ -825,7 +825,7 @@ MergeTreeData::MutableDataPartPtr MergeTreeDataMergerMutator::mergePartsToTempor UInt64 watch_prev_elapsed = 0; /// We count total amount of bytes in parts - /// and use direct_io + aio if there is more than min_merge_bytes_to_use_direct_io 
+    /// and use direct_io if there is more than min_merge_bytes_to_use_direct_io
     bool read_with_direct_io = false;
     if (data_settings->min_merge_bytes_to_use_direct_io != 0)
     {
diff --git a/src/Storages/MergeTree/MergeTreeSequentialSource.cpp b/src/Storages/MergeTree/MergeTreeSequentialSource.cpp
index e82b1966461..2a3c7ed00a1 100644
--- a/src/Storages/MergeTree/MergeTreeSequentialSource.cpp
+++ b/src/Storages/MergeTree/MergeTreeSequentialSource.cpp
@@ -54,7 +54,7 @@ MergeTreeSequentialSource::MergeTreeSequentialSource(
     MergeTreeReaderSettings reader_settings =
     {
-        /// bytes to use AIO (this is hack)
+        /// bytes to use direct IO (this is hack)
        .min_bytes_to_use_direct_io = read_with_direct_io ? 1UL : std::numeric_limits<size_t>::max(),
        .max_read_buffer_size = DBMS_DEFAULT_BUFFER_SIZE,
        .save_marks_in_cache = false

From f29eb11e6c5c59115a21a45c90de8f85eb3e7045 Mon Sep 17 00:00:00 2001
From: Alexey Milovidov
Date: Mon, 5 Jul 2021 23:17:53 +0300
Subject: [PATCH 794/931] Fix error

---
 src/IO/ReadBufferFromFile.cpp           |  2 ++
 src/IO/createReadBufferFromFileBase.cpp | 13 +++++++------
 src/IO/createReadBufferFromFileBase.h   |  6 +++---
 3 files changed, 12 insertions(+), 9 deletions(-)

diff --git a/src/IO/ReadBufferFromFile.cpp b/src/IO/ReadBufferFromFile.cpp
index d0f94441622..cc13eed0cb4 100644
--- a/src/IO/ReadBufferFromFile.cpp
+++ b/src/IO/ReadBufferFromFile.cpp
@@ -38,6 +38,8 @@ ReadBufferFromFile::ReadBufferFromFile(
     if (o_direct)
         flags = flags & ~O_DIRECT;
 #endif
+    std::cerr << flags << ", " << (flags & O_DIRECT) << "\n";
+
     fd = ::open(file_name.c_str(), flags == -1 ? O_RDONLY | O_CLOEXEC : flags | O_CLOEXEC);

     if (-1 == fd)
diff --git a/src/IO/createReadBufferFromFileBase.cpp b/src/IO/createReadBufferFromFileBase.cpp
index 00b7fcfd44b..94872209dc0 100644
--- a/src/IO/createReadBufferFromFileBase.cpp
+++ b/src/IO/createReadBufferFromFileBase.cpp
@@ -17,9 +17,9 @@ namespace DB
 {

 std::unique_ptr<ReadBufferFromFileBase> createReadBufferFromFileBase(
-    const std::string & filename_,
+    const std::string & filename,
     size_t estimated_size, size_t direct_io_threshold, size_t mmap_threshold, MMappedFileCache * mmap_cache,
-    size_t buffer_size_, int flags_, char * existing_memory_, size_t alignment)
+    size_t buffer_size, int flags, char * existing_memory, size_t alignment)
 {
 #if defined(OS_LINUX) || defined(__FreeBSD__)
     if (direct_io_threshold && estimated_size >= direct_io_threshold)
@@ -44,7 +44,8 @@ std::unique_ptr<ReadBufferFromFileBase> createReadBufferFromFileBase(
         /// Attempt to open a file with O_DIRECT
         try
         {
-            auto res = std::make_unique<ReadBufferFromFile>(filename_, buffer_size_, flags_ | O_DIRECT, existing_memory_, alignment);
+            auto res = std::make_unique<ReadBufferFromFile>(
+                filename, buffer_size, flags == -1 ? O_RDONLY | O_CLOEXEC : flags | O_DIRECT, existing_memory, alignment);
             ProfileEvents::increment(ProfileEvents::CreatedReadBufferDirectIO);
             return res;
         }
@@ -59,11 +60,11 @@ std::unique_ptr<ReadBufferFromFileBase> createReadBufferFromFileBase(
     (void)estimated_size;
 #endif

-    if (!existing_memory_ && mmap_threshold && mmap_cache && estimated_size >= mmap_threshold)
+    if (!existing_memory && mmap_threshold && mmap_cache && estimated_size >= mmap_threshold)
     {
         try
         {
-            auto res = std::make_unique<MMapReadBufferFromFileWithCache>(*mmap_cache, filename_, 0);
+            auto res = std::make_unique<MMapReadBufferFromFileWithCache>(*mmap_cache, filename, 0);
             ProfileEvents::increment(ProfileEvents::CreatedReadBufferMMap);
             return res;
         }
@@ -75,7 +76,7 @@ std::unique_ptr<ReadBufferFromFileBase> createReadBufferFromFileBase(
     }

     ProfileEvents::increment(ProfileEvents::CreatedReadBufferOrdinary);
-    return std::make_unique<ReadBufferFromFile>(filename_, buffer_size_, flags_, existing_memory_, alignment);
+    return std::make_unique<ReadBufferFromFile>(filename, buffer_size, flags, existing_memory, alignment);
 }

 }
diff --git a/src/IO/createReadBufferFromFileBase.h b/src/IO/createReadBufferFromFileBase.h
index 8bc1b5bb519..dc2912ea752 100644
--- a/src/IO/createReadBufferFromFileBase.h
+++ b/src/IO/createReadBufferFromFileBase.h
@@ -19,14 +19,14 @@ class MMappedFileCache;
  * Otherwise, the read operations are performed asynchronously.
  */
 std::unique_ptr<ReadBufferFromFileBase> createReadBufferFromFileBase(
-    const std::string & filename_,
+    const std::string & filename,
     size_t estimated_size,
     size_t direct_io_threshold,
     size_t mmap_threshold,
     MMappedFileCache * mmap_cache,
-    size_t buffer_size_ = DBMS_DEFAULT_BUFFER_SIZE,
+    size_t buffer_size = DBMS_DEFAULT_BUFFER_SIZE,
     int flags_ = -1,
-    char * existing_memory_ = nullptr,
+    char * existing_memory = nullptr,
     size_t alignment = 0);

 }

From 8fae774e858dbd7648b363c115956b0dbc8ddad1 Mon Sep 17 00:00:00 2001
From: Alexey Milovidov
Date: Mon, 5 Jul 2021 23:18:13 +0300
Subject: [PATCH 795/931] Fix error

---
 src/IO/ReadBufferFromFile.cpp | 2 --
 1 file changed, 2 deletions(-)

diff --git a/src/IO/ReadBufferFromFile.cpp b/src/IO/ReadBufferFromFile.cpp
index cc13eed0cb4..d0f94441622 100644
--- a/src/IO/ReadBufferFromFile.cpp
+++ b/src/IO/ReadBufferFromFile.cpp
@@ -38,8 +38,6 @@ ReadBufferFromFile::ReadBufferFromFile(
     if (o_direct)
         flags = flags & ~O_DIRECT;
 #endif
-    std::cerr << flags << ", " << (flags & O_DIRECT) << "\n";
-
     fd = ::open(file_name.c_str(), flags == -1 ? O_RDONLY | O_CLOEXEC : flags | O_CLOEXEC);

     if (-1 == fd)

From 6d3d724cd477c9743b84f99bfebe1831117c5d3c Mon Sep 17 00:00:00 2001
From: Alexey Milovidov
Date: Mon, 5 Jul 2021 23:20:25 +0300
Subject: [PATCH 796/931] Fix error

---
 src/IO/createReadBufferFromFileBase.cpp | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/IO/createReadBufferFromFileBase.cpp b/src/IO/createReadBufferFromFileBase.cpp
index 94872209dc0..4d903366e99 100644
--- a/src/IO/createReadBufferFromFileBase.cpp
+++ b/src/IO/createReadBufferFromFileBase.cpp
@@ -45,7 +45,7 @@ std::unique_ptr<ReadBufferFromFileBase> createReadBufferFromFileBase(
         try
         {
             auto res = std::make_unique<ReadBufferFromFile>(
-                filename, buffer_size, flags == -1 ? O_RDONLY | O_CLOEXEC : flags | O_DIRECT, existing_memory, alignment);
+                filename, buffer_size, (flags == -1 ? O_RDONLY | O_CLOEXEC : flags) | O_DIRECT, existing_memory, alignment);
             ProfileEvents::increment(ProfileEvents::CreatedReadBufferDirectIO);
             return res;
         }
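
The one-line change above closes out this trio of "Fix error" commits: the expression introduced in patch 794, `flags == -1 ? O_RDONLY | O_CLOEXEC : flags | O_DIRECT`, is an operator-precedence bug. `|` binds tighter than `?:`, so `O_DIRECT` was applied only when the caller passed explicit flags; on the default path (`flags == -1`) direct I/O was silently dropped, which is what the temporary `std::cerr` probe added in 794 and removed in 795 was chasing. A rough illustration of the pitfall (Python conditional expressions group the same way; the fallback flag value in the sketch is hypothetical):

    import os

    # O_DIRECT is Linux/FreeBSD-only; fall back to an arbitrary spare bit.
    O_DIRECT = getattr(os, "O_DIRECT", 0o40000)

    def flags_buggy(flags: int) -> int:
        # Mirrors the pre-fix expression: `|` binds tighter than the
        # conditional, so O_DIRECT lands only on the `flags != -1` branch.
        return os.O_RDONLY | os.O_CLOEXEC if flags == -1 else flags | O_DIRECT

    def flags_fixed(flags: int) -> int:
        # Mirrors the fixed expression: resolve the default flags first,
        # then always add O_DIRECT.
        return (os.O_RDONLY | os.O_CLOEXEC if flags == -1 else flags) | O_DIRECT

    assert flags_buggy(-1) & O_DIRECT == 0   # default path lost O_DIRECT
    assert flags_fixed(-1) & O_DIRECT != 0   # fixed path keeps it

From 87f59ba67096dc9ee4a4805434f59f8e25414454 Mon Sep 17 00:00:00 2001
From: Vitaly Baranov
Date: Mon, 5 Jul 2021 20:42:37 +0300
Subject: [PATCH 797/931] Fix parallel execution of integration tests.

The value of docker's `--filter name=` option is an unanchored regular expression, so a plain `name=<project>` also matches every container whose name merely contains the project name, and concurrent test runs could kill each other's containers. Anchoring the pattern as `^/<project>$` (container names are stored with a leading slash) restricts the match to the exact name. A minimal standalone sketch of the anchored cleanup (the project name is hypothetical, and `run_and_check` is replaced by plain `subprocess` calls):

    import subprocess

    def kill_project_containers(project_name: str) -> None:
        # `^/` and `$` make the name filter an exact match instead of a
        # substring match; see the stackoverflow link in the diff below.
        list_cmd = (f'docker container list --all --quiet '
                    f'--filter name=^/{project_name}$')
        ids = subprocess.check_output(list_cmd, shell=True).decode().split()
        for container_id in ids:
            subprocess.run(['docker', 'kill', container_id], check=False)
            subprocess.run(['docker', 'rm', container_id], check=False)

    kill_project_containers('roottestexample')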
---
 tests/integration/helpers/cluster.py | 10 ++++++----
 1 file changed, 6 insertions(+), 4 deletions(-)

diff --git a/tests/integration/helpers/cluster.py b/tests/integration/helpers/cluster.py
index 1db9e07a69e..07d503dfe1a 100644
--- a/tests/integration/helpers/cluster.py
+++ b/tests/integration/helpers/cluster.py
@@ -394,11 +394,13 @@ class ClickHouseCluster:
     def cleanup(self):
         # Just in case kill unstopped containers from previous launch
         try:
-            result = run_and_check(f'docker container list --all --filter name={self.project_name} | wc -l', shell=True)
+            # We need to have "^/" and "$" in the "--filter name" option below to filter by exact name of the container, see
+            # https://stackoverflow.com/questions/48767760/how-to-make-docker-container-ls-f-name-filter-by-exact-name
+            result = run_and_check(f'docker container list --all --filter name=^/{self.project_name}$ | wc -l', shell=True)
             if int(result) > 1:
-                logging.debug(f"Trying to kill unstopped containers for project{self.project_name}...")
-                run_and_check(f'docker kill $(docker container list --all --quiet --filter name={self.project_name})', shell=True)
-                run_and_check(f'docker rm $(docker container list --all --quiet --filter name={self.project_name})', shell=True)
+                logging.debug(f"Trying to kill unstopped containers for project {self.project_name}...")
+                run_and_check(f'docker kill $(docker container list --all --quiet --filter name=^/{self.project_name}$)', shell=True)
+                run_and_check(f'docker rm $(docker container list --all --quiet --filter name=^/{self.project_name}$)', shell=True)
                 logging.debug("Unstopped containers killed")
                 run_and_check(['docker-compose', 'ps', '--services', '--all'])
             else:

From 0241253a8e93bb5a4cd5041e11011d9a72e96e74 Mon Sep 17 00:00:00 2001
From: Denis Glazachev
Date: Tue, 6 Jul 2021 01:05:12 +0400
Subject: [PATCH 798/931] Guidelines for adding new third-party libraries

---
 docs/en/development/contrib.md               | 12 ++++++++++++
 docs/en/development/developer-instruction.md |  2 ++
 docs/en/development/style.md                 |  2 +-
 3 files changed, 15 insertions(+), 1 deletion(-)

diff --git a/docs/en/development/contrib.md b/docs/en/development/contrib.md
index 64ca2387029..d370a9ea78a 100644
--- a/docs/en/development/contrib.md
+++ b/docs/en/development/contrib.md
@@ -89,3 +89,15 @@ SELECT library_name, license_type, license_path FROM system.licenses ORDER BY li
 | xz | Public Domain | /contrib/xz/COPYING |
 | zlib-ng | zLib | /contrib/zlib-ng/LICENSE.md |
 | zstd | BSD | /contrib/zstd/LICENSE |
+
+## Guidelines for adding new third-party libraries and maintaining custom changes in them {#adding-third-party-libraries}
+
+1. All external third-party code should reside in dedicated directories under the `contrib` directory of the ClickHouse repo. Prefer Git submodules when available.
+2. Fork/mirror the official repo in [Clickhouse-extras](https://github.com/ClickHouse-Extras). Prefer official GitHub repos when available.
+3. Branch from the branch you want to integrate, e.g., `master` -> `clickhouse/master`, or `release/vX.Y.Z` -> `clickhouse/release/vX.Y.Z`.
+4. All forks in [Clickhouse-extras](https://github.com/ClickHouse-Extras) should be automatically synchronized with upstreams. The `clickhouse/...` branches will remain unaffected, since virtually nobody is going to use that naming pattern in their upstream repos.
+5. Add submodules under `contrib` of the ClickHouse repo that refer to the above forks/mirrors. Set the submodules to track the corresponding `clickhouse/...` branches.
+6. Every time custom changes have to be made in the library code, a dedicated branch should be created, like `clickhouse/my-fix`. This branch should then be merged into the branch that is tracked by the submodule, e.g., `clickhouse/master` or `clickhouse/release/vX.Y.Z`.
+7. No code should be pushed to any branch of the forks in [Clickhouse-extras](https://github.com/ClickHouse-Extras) whose name does not follow the `clickhouse/...` pattern.
+8. Always write the custom changes with the official repo in mind. Once the PR is merged from (a feature/fix branch in) your personal fork into the fork in [Clickhouse-extras](https://github.com/ClickHouse-Extras), and the submodule is bumped in the ClickHouse repo, consider opening another PR from (a feature/fix branch in) the fork in [Clickhouse-extras](https://github.com/ClickHouse-Extras) to the official repo of the library. This will make sure that 1) the contribution has more than a single use case and importance, 2) others will also benefit from it, and 3) the change will not remain a maintenance burden solely on ClickHouse developers.
+9. When a submodule needs to start using newer code from the original branch (e.g., `master`), bear in mind that the custom changes might already be merged into the branch it is tracking (e.g., `clickhouse/master`), which may therefore have diverged from its original counterpart (i.e., `master`). A careful merge should be carried out first, i.e., `master` -> `clickhouse/master`, and only then can the submodule be bumped in ClickHouse.
diff --git a/docs/en/development/developer-instruction.md b/docs/en/development/developer-instruction.md
index ac6d4a2b563..90f406f3ba8 100644
--- a/docs/en/development/developer-instruction.md
+++ b/docs/en/development/developer-instruction.md
@@ -237,6 +237,8 @@ The description of ClickHouse architecture can be found here: https://clickhouse

 The Code Style Guide: https://clickhouse.tech/docs/en/development/style/

+Adding third-party libraries: https://clickhouse.tech/docs/en/development/contrib/#adding-third-party-libraries
+
 Writing tests: https://clickhouse.tech/docs/en/development/tests/

 List of tasks: https://github.com/ClickHouse/ClickHouse/issues?q=is%3Aopen+is%3Aissue+label%3A%22easy+task%22
diff --git a/docs/en/development/style.md b/docs/en/development/style.md
index 2151735c2f4..bee567be468 100644
--- a/docs/en/development/style.md
+++ b/docs/en/development/style.md
@@ -757,7 +757,7 @@ If there is a good solution already available, then use it, even if it means you

 **3.** You can install a library that isn’t in the packages, if the packages do not have what you need or have an outdated version or the wrong type of compilation.

-**4.** If the library is small and does not have its own complex build system, put the source files in the `contrib` folder.
+**4.** If the library is small and does not have its own complex build system, put the source files in the `contrib` folder. See [Guidelines for adding new third-party libraries](https://clickhouse.tech/docs/en/development/contrib/#adding-third-party-libraries) for details.

 **5.** Preference is always given to libraries that are already in use.
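
The guidelines added above translate into a short git routine. A rough sketch of steps 5 and 6 (the library name `example-lib` is hypothetical, and the guidelines themselves, not this sketch, are the authoritative description), scripted via Python's subprocess for concreteness:

    import subprocess

    def run(*cmd: str) -> None:
        subprocess.run(cmd, check=True)

    # Step 5: add a submodule that points at the fork in ClickHouse-Extras
    # and tracks its `clickhouse/master` branch.
    run("git", "submodule", "add", "-b", "clickhouse/master",
        "https://github.com/ClickHouse-Extras/example-lib.git",
        "contrib/example-lib")

    # Step 6: develop a custom change on a dedicated branch of the fork;
    # it is then merged (via a PR) into the tracked `clickhouse/master`.
    run("git", "-C", "contrib/example-lib", "checkout", "-b", "clickhouse/my-fix")

    # After the merge, bump the submodule in ClickHouse to the new commit
    # of the tracked branch and stage the update.
    run("git", "submodule", "update", "--remote", "contrib/example-lib")
    run("git", "add", "contrib/example-lib")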
From f4fc1d0807be569b022b063d6289445c5f81d742 Mon Sep 17 00:00:00 2001
From: Denis Glazachev
Date: Tue, 6 Jul 2021 01:05:30 +0400
Subject: [PATCH 799/931] Minor fixes

---
 docs/en/development/contrib.md | 4 ++--
 docs/en/development/style.md   | 2 +-
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/docs/en/development/contrib.md b/docs/en/development/contrib.md
index d370a9ea78a..f372da8859f 100644
--- a/docs/en/development/contrib.md
+++ b/docs/en/development/contrib.md
@@ -7,13 +7,13 @@ toc_title: Third-Party Libraries Used

 The list of third-party libraries can be obtained by the following query:

-```
+``` sql
 SELECT library_name, license_type, license_path FROM system.licenses ORDER BY library_name COLLATE 'en'
 ```

 [Example](https://gh-api.clickhouse.tech/play?user=play#U0VMRUNUIGxpYnJhcnlfbmFtZSwgbGljZW5zZV90eXBlLCBsaWNlbnNlX3BhdGggRlJPTSBzeXN0ZW0ubGljZW5zZXMgT1JERVIgQlkgbGlicmFyeV9uYW1lIENPTExBVEUgJ2VuJw==)

-| library_name | license_type | license_path |
+| library_name | license_type | license_path |
 |:-|:-|:-|
 | abseil-cpp | Apache | /contrib/abseil-cpp/LICENSE |
 | AMQP-CPP | Apache | /contrib/AMQP-CPP/LICENSE |
diff --git a/docs/en/development/style.md b/docs/en/development/style.md
index bee567be468..78e3c8fc966 100644
--- a/docs/en/development/style.md
+++ b/docs/en/development/style.md
@@ -628,7 +628,7 @@ If the class is not intended for polymorphic use, you do not need to make functi

 **18.** Encodings.

-Use UTF-8 everywhere. Use `std::string`and`char *`. Do not use `std::wstring`and`wchar_t`.
+Use UTF-8 everywhere. Use `std::string` and `char *`. Do not use `std::wstring` and `wchar_t`.

 **19.** Logging.

From 0811b74ad66d737f64534cd708d7e8a55c821c16 Mon Sep 17 00:00:00 2001
From: Olga Revyakina
Date: Tue, 6 Jul 2021 00:19:29 +0300
Subject: [PATCH 800/931] 1 try

---
 docs/en/images/play.png    | Bin 0 -> 37317 bytes
 docs/en/interfaces/http.md |   7 +++----
 docs/ru/images/play.png    | Bin 0 -> 37317 bytes
 docs/ru/interfaces/http.md |   6 ++----
 4 files changed, 5 insertions(+), 8 deletions(-)
 create mode 100644 docs/en/images/play.png
 create mode 100644 docs/ru/images/play.png

diff --git a/docs/en/images/play.png b/docs/en/images/play.png
new file mode 100644
index 0000000000000000000000000000000000000000..583bd8a7ad9cccacefe859b0144911438b771946
GIT binary patch
literal 37317
[base85-encoded PNG data omitted]
z=#xON-B=A&dU_uF=vkg8rR18_x0SL4&)r~1(`#!HbH`+C&IfjnUwfjC)%a7R|IvKl zSfIr0>=*%rc_sjdAS=~*%llT=)5{C2G7Jyk3<3nWT~P!pnFsm3r($G`EVZ)z%fc9B}Nf%$|`e}N-Iz}WE37Hbj+EZX_!t5`UoA(PRjTb72L~Ikx z1my=_TJn8z6T;^VZCbTzsW&QWPnX|L6B=A$3E{3<3FHY$hu6uwV;xCNDoRO2pVT7h zRTCC{x)ZgVlauKy@@u;@eUWulL`}Vr_sKt^v=(nH1j$#{*5T?rU-EuA!@BWWSlJfd zSK}v>WKh}iJ^kQDrTEAQ2dlHhI_qP@ySQyL*S+{86-OFt%6iDlV}aYat0Y0Sz@y}L z7=z8*i6iRAXeJ8(?L6FP#FpE-k6mjNZ-IN%6GJT9XvJvP{k2UpK+iWj-kBy*BK zw{-4XVO2$-SvueQ`p)o1BKPLoLX4hHevw2L~NftHSkH41^J-2wr z(-DMRLKt{%)Sq@d|l?63fmW6XEj4yP6zNnl6;G3 z$2l)R3+QG_&mOdshZ-a&^0UKa% z*9s&kZWnO!SB5}d25^?*l9Cz?J_Sc$GCohVC8edE0IYz&1U(Nv5JWsKrrnr*fDKt~ zIY*I1uPr7a5&9s0^>Ca0foCr-$GxyM$8Xk&gNy6&@%BJTPr;P9P^QkfDn%Z#!ubX~ zIi^X_fYCLajZT+urqkghP(Ur_)N)`10A3U*scbgexl2?P!JFRSeSejenZH750nD+1 zfr0*kfsMjApO$gX>YC4HsLP)_1H`73-mGZ1+mLE$7eAcb1i^5liRm~D3bGH1{V^B#)@8Ib6p1Fq0r0e+00`!)o?ig-e&#Jk-$5epl~c+|Vlj*@Cdrp4}n!#~GR+g5hNFF|TisS{Y~|L+q;%Kqdwk z1bJx3nX5SXO9u44iO*giK!NF93eg{gY&JjU2NV?ub>&(OR*LcftvY*dW#unDqtSpi z54zVTH1hu?7PD{!d5v^r)zsMR1Xoux?d8P(E zi2qTD*T+P>R6iGSQdfTh*o|5Q9(%~GZEX%bn7&bNA$>h9PKWM$vEuH({h=^k9|Jzq zZ83($#C!(W(RNqGU*3)@lLSqy|05%Je%ojr0LbT*4XO}=&~yd{U32>S5pCc zv3HN$<6rqVHzxy;1hDS3x&o?~`Sj7Yx3`UK9sVO6OKQ?#*r~NaTR``Y7wICo07C$H zlDRo0(wA2zp3HBPWrhDg$~uX%aPuKRBmSEQM2eA)CD9uLn^E$Hz=gomz3t4?;E$=I8GmEE8o<eaF1%>bOmnfn=MR9FH@$vm2C(Qr-6C#bK?N|C z>9h~zYB2>74XHOy<3k|Pm#>vSF&{na0fua%m5r3 zki)P>L!-@=9(dwvjak)Z^QX$_otNDf`TwTEh^MsQZUYV1KoD(K_bvp*C6FtQpAvY9 z1h{59_FVQYI)Fjpe+SSy!XH9{02lzAcrZL)ZGh9W5Y&KIhF}vh5dJrmhE1nG!jv7L zgBR%fBi+G7-l}0a!TFM(_s4xB*e~Mc)P7n=N-{m`@g09 zyLX0-6p-via2D?GYU60_DVA%G!4SSQ|5)JxSAUo}I3VW=G@(2`cNr5M@O=&-I?1Y@ zFAApshqaeMUr%wR8dd2UvXfc$8x08W4*56p6+5S{xE*jz<8yWwwp~Be!pNQNZ@+8N z?wS+jOyx(<&o1&BQmhN4U0%i5Ni4ed#-q7Zon2rMY-VY0#;xc7H@OR{1t{Ovd1sR_ zbYA>%-Fd{d+6qx1TUBRz<(Xx>XfHfhQT+{EQk7`)>## zdkWXbB3zt{wUO7}!|>)}W9}N|b2w#kP?Xo$Z0o}jkGyAoL`jPc5{i=cNB+FhNC;e(| z2z|ikQy{Ldlg-!PGEu|B!=m&}|80b~E(B|A*X35W&pkHL>QV1&SSzJ>YGh=j40kM5 zH%p36tY{iQ1OK1UbWx?jz7ZU))yse$T)3c~<{w^~qH;c-Ik-tt{3nyXaE~(vnCt}= z^ey>G)>R>!Y^DxHp2L6ZR$X6oDiRKikOGX*X#;9%D}xix;`_6t`@es%PnK=E$vHBC zjgXw@-`3;g;NUP_4Bk>eQ`Cg`r;gj3#DJJe$S(5D$+G3yW1wfu!sJPomr3_1?10^0WGDQxBxrlw8{{LC*DT;W*@NN58k?4; zeniv%r*@Ah(DR6d=`~RV-z$RQsncdJxY2|-l#~;M>XmTp>}aShGM0a5WMi+@_3Abm zxX6FT@NXi3EQyE*!-X4GOBQ!le5rq_$1ELbTgx^IWKUS6qAsQS%0Ixc*vdj<1EG>2 z;?-oTqKWOqb;R>XMSCG7SWL0cPQr`FwMlQXfRYUHy@!V7&J^pw0ovM&9D;g=iX28# zDLzJ5YS-X*t6};0*Ep7v8rgPnv+9}zn+_amGH}UK%KQSz{7b_~kQG^*ioO2?Bh?(ljV&65kdh@g9uM{5Tn+p*UW zWIm8h3f2tL4YPlkSc9Q!4MZw+Owy4 zpIQ~JA9P~*PJ{PnN_Uu@n@Oz~FugWUvIIOYur`G?bbXmw6s3~XmL}6)`kaL`#03N` z{fVnxcZgdtwWn8l2Apa!pHYUkeC#70vS4p7c6sl>gt0xT14omjZtgD1PEUeBf5Cqe zScNYX2?>f(FwE#&4u&1o%A+nD7Umvc2xK>%R}d1NQF+2sjz?fmFnKh=-%Fff$?dYS zr6y%0#wkP#D2Sv0cB8A<O@hc+x(@B0Z>w!V9+h&)ejZRki~qRqUibdUrDn_TTaYe@bKrgg#O%%ai%2A|}Xsc@~Sq!dXdQLKD8 zOi^)jzuT1OlL0G_-F$9NHHV6V((70DmZ5#gFPc(pi(%qNH8C{%n zzdUV78k$Hnc0?mwW`$h4LrM?RanlkajZ*5eUNr^(DQHx`MvT$1C=>MrW>uk~1e+){ z)8y$)g&#c>q}tk4xxwvcuzXu!j&9i49xf7vQCPN!cV~=qbM@y zUKqx3ES3ayTuDpL%X4Lz7ja@yb&YRiVr)MRYxU=mgB`mNJ;5C@9Kl$ZTXoRl2G;B? 
z8t$J{Zmr2#JrbjVld@1$H>e-cXrm3SEX#XInqp|gkgAntqEMN!O^mHLPmpr)we1Rm zB{fEb`e?Lyx$5W<$9_HU)O5QvuF1F9_o z%De*X-5wVPVE#BvI@zqxCM#*i1!%=7m?tY0Bs1ec`P^UaB}iV-zA1eNXPx30shjG5 z+2~@C8e-*0@C+Xfcb{Js+U-Y@^Wi>!dt*yFDyFMj*OT*bgWcc9RxWD8E*mY0V94jQ z`{f;7x`I3blv4Q3^cwuQK<-zP{C1<)2I-s1tb1RwB#1az+i3}>5j2BfFg#tJQY*Ir_ z(5x6c`94HE3!fHjLQs$hin<7d;e-CgDwc~sr0jdo`mReh6v95|x3c<@;7>o)^9NNXORF}9oi;k=#>RseOmBa<& zL=~oeXjE%_Q*;hk8mSZ>K9&Z-eB?aTD|&`!@I35(E-Em!NHOdNz00MMq`MWgJ#zP^ zkwsUUaHUW+T1A>fD&^6qZq8MLa`YK8kuB=TL+O!Qh6x=#qTGaVKHPl=t32f!D(bS_ z0j>u|ddUj$Ds_28I?Yq6;mYr|40O@VgJRF6(w4ggsfeYXQklR7HAuo+zTC-)GuwwNFHa?CTR27Tx5kw5FywFBJTqUD-l%V4n_YfHY10#s7`1D&qz=!zW`{|- zc4NH0_$Ac(;1|*m$=tZ3=-Tcab6FzoagM}zof$?=Ft-SXdv3b2rz$wo)}dlT1=hJq zyI6TR({SqHJm;qIC699VY0JT0`?#G&j+1~$wA86^e@rsvG5ptvaEtO=nU%>=lx zda*oHTLy;f{H+A=E&;-aPzUXIe*oabU;XS3Hi zkVLo$@S9o2xd*D7*Eb_9qeo~rQZyx&%(HKB?B|ItZU@^$kBOQlWytw6I64(Ehe}>H z^~q*2>X@>LxtZaSFm}!BLa|yTYjzL`*ST!9&6EF3DOshSfL+a7Ja}yITNj6kb3ohq z;TNO+;02y^j2Lv`ro@mNsi8oe?+c+lCukWbOsI0V@qv_O~nuO;`DxQN%?wo zy&fJZ>qx2^=A(x~r^8p4+9sPiW+#sxt@tP{eW84Fi|{i&N9ozqH_)$fN-%-v_0HNg zE9LMw2r>jUM)@^V{s=II@oF+N=H~f?<+iK2!jV4lw(DVAfCgKeSf7-vu=1_ns=>&DF28k?(MuZzxZaLZ#}VJc4~gFAOZU`YFeT^%2f0k{iVHe!sh~g&HcTB@T+OK7xgR( z7_XjJ{9&Dm)eQ>OTOXG3B}R_U0mIm6#!`!KTjB;%vmN7@J3_?NDe@8f%_d@AyEUBt z?gS{UDYX=tC3YN7I?esbj7CpuNs;UxiM7&FcQ=|6&H_v=8d9S{#xy}8} zwL0?ij^exe4n@ut1g|-=`KLl2PDBT*s)UsC(DS?$aBw=2S0)05PsDT{kYvsu(@17w ztj|4{VOv$evCwhWDZZ%(j|0K+2i7gZF|lm%^bcB6Qq0HA$wMx$CztoLB`Iop+14MN zw+&6=SlHCt+fyIvowI1gE>84`iHU4%mlU(Fww({fm+20-ymp)I9)DrhrZt!JlZf|* zwU@^S{TEyuqqP!R1XRmq!4Y!->ArQY>uZ;QKi=Aj_2EN6qY&`K^U7_j zabaj=B(QFos`7DaC-x!&1YN&k>=vg*+8xiza-v-Ov;v9#c(i!m9tc4*ca5Wwn_`RK zKAYW&neN;#0^W-O!NNg7L5ypY1q+vp>a4PME{+>L@X9#pgm*vMgfFkI=x33(cXpuR zDE^e?^t%`hu%HP^)1?wRv~uCe#nBvmO8x1S@^okR)Wb-7wK%;(r#>E=Vul4>H+@}* z%q`9N+n!?@<-SnwoKc_N&d2$01y*{73r=w>MOGLWf;(h8E2id1#_4O|DdG#g$=37? 
z3=HI}*jJ+M!%`t#<8SPq?P%oU@>`{#&u7{0`HXcW8A+huT21dDZp~?5dL7KIBYyYe zL5R~-UX$~pavy|6|68wQ#RkLC!Lw<@TTSN`W&`h*B}F_7MLl=Vlr=cx?twdglC3GW zkG*Jr)9w!6kO}D~e^snhnCrDxagVoQ2^vVIeM9)GoJU$(dT1PwvCs3J)GFVe{kcck zAoF*GF0>t~88&Mr<_{yyF70hME_@0YJe>OOzsd#F5HLn(t&D$~$sdf+LbD%TcF~6& zIOBSbV_iXn*8GHyr;WndIx4JNqPBi<$W83emSWo-G-duTC~`vg(+{*u-}He4g8$d@cto%AcN|W@d>!J?94Ihj8tfe|5sf-w63R;?uCal{JLQ zILvxEy$AfG(Q7Bcn()n}IDvu-f(Ns$8LUq?szuFhnW4xw#rn6Zm=4UI(^D;;hP ze1EW!V(h5ZmilH_N@VcPR;8{AQ>$I4#?JamEKL^<<`Y*U`{1>In$iaGaOV?=CAOmC5_*-Pn>N3w zKRA}j?_2HGP!q4e5|+=?vQXWe(s0h(@*qukaFidEVY5(kq@$${?as!|!Lx+do_7ia z9*-xDo{uM3g1WhLyg1gTBfA-WY3?{AL^+k#LX!I5{ehvvi_=v=yw~0o2qSi@g2w4b4F-IGz&eH%X(%*+Dm2=^nz;dkyTiqq>$P zYz}L7seC-s`b+ZW%*w*E$_f^9^;an31WUa31w|!MnI7pLWf8NBZaS&%O2~dM%G;t| zILugsPA-_(p%v-DpvX@OOQBaA)R1ZCHd9bRu+SD&j`pD^2hmkGwInZMtXjv_Q1`9v z1zm&fnk4t@Z`PKp4M7&S%5HA2pknF~ak++Zscz2+Ufkt<_57+E3Z`eK-ll*up-mZ0SIF4qY>`tZNfk{g%{t88oh}GQoPpaBfE+}ZX&|(2<~Hax zzoMoqX zfhg7U?bUlMVmBA8rS>2qGwB!nSotuAXCujHZ>(TJ50RjGe^SU|V{4mdttOU0F}aw( zPf1Gk8mAoFtEH7vM?uMnUsYXbR-PJNvLs^Q;NaB1LjU+U0&}rF;^uC@M%ET>BL$at zRe~8|QdaGw2yOu{wxTx6?1H?r1X`@@4+nllCFeI`cQ&mipvn5uASIp;R2%FY4&PX8 z!}9aU@gipm>v+Q?^CspbY>r6>j8AOV)MQ-Bm^WH%pSYX-k&Jk4Z7CPQSh=w}#Jr7F z#+vli`70oo>+k$mjZcSKKNFLaWmbDss`(}*RL1Xg(CTbT^IDsO9hBgdMwb>AR<{VL zUZ5o1QNJ4@R;kXd-=nrYaW@!qrF?Wo>S%t|m8-PxgOybe$mMpT-kL9Gbjh;hB)_~p z?UcN@BtzR9O*RHkIyrxvU8LM*wJs_%_*0A5!~W4xUQvDKdj73ad8I8%nvP(}!m6^$ z+P<3`@2K0`m1B!VMQb0F)|M}z>+8w6=BV2OTe{_{jIm6}UHdfCE?6vl_cfmBCH>pakPU9~fx&7y{k zggV*@6dV8GWuKUn^L7taVi}!@!lPSVky3du+jQK_ zsmA9LTHHUj%PX;`FE@VH&aKbeIRmFGA9B-l>6u0^GFb(^-$NV0#j4R%l2WKjvea-&hacim7Eh+@j$LYP zbfsk+X#v&13f{#{G@FVb*4=i_Ln=N z8a$BtzF4wQEG|@+`?_e`@GU_t%5G7SOq}68jZ5rl#Uzo*rbw&B!h+HdQ7<~=X(g=WZaU67M1|9V9^$# z)`N%{_UYxn4oO`85~c1ZM>72n?`;;VBk*-B!-Elbf)M9yX=m#q^mWZRwoRJCGFo*@ zPHYP4(q)*{%M$PDRq6hOZnH__5c;6v?l8GVAeYJYl!Sj?{uWX-zN6zuAM zvre7=*qT^g|BetKk=GDJmK8QNi%UydZse@XvgBr4#JD`Lo60h0Mx2wBMAj)|a*O|o zC0(Vi(4S?gQpuTQr6$nURo^BoI82hSnz}8qVU|TwNVj;UE}mRkSN64a!-U%OaG`9d zsteZ%&ywZRD*NIvbzPWkc_*CEVW?#846pm-OiP3<#^Az&hJwPQg+N22E&XCciUotD z0fVBgGl6S)s|68;&wI4G)74*v^Sp;Wp4&kr^NZ8sN2g?S2+C2swYBxp6}yYomUemE z^-}Yb4S{uzpZ|J{axHIn{`z{H9FU_SM5wQqK^SA?2*!iD^uVGKh8J~P&0*DR>MAL9 zrlT7e8XphI596$`KUaNum*4ueJNRV~8P-N1c|lA@b0V96osw=^MfsA-Dd~R_{f`L# zM+@TquN1*-{%7cv?Myl0Wgd!w5Z%H#5VXA4EXJ7vQV)5>Q zC7j5ux&L3b-g4w~mA1CI++9z;$Up|#k7yn7BWufDK)Lu#PAXT_f z+Nt=W+}<}LHnzH3thR0zW=Kewog0>&lauoz_+l`2H+0e#QP*YRm;^F;WL-_(9#8}RwMo#~RmiQN`oGGRZfD{32)TY{nvqVIva0euc z7<`|Z^9g~|%*ZxP|Jqlhq!GtBPr-IYLmwXy!{L7sv_&b|-~d84{--(njt&H8FKwo`-6wy zb;=^~B}Vb^Q@WPkGiQqq^V3;9sW{iY_lCs za?;54hx6r~i}P=~;~#{=+|8x^8qkvQiJRG1r3-6{#u4CBbjwT*Yrg!G`BYTvB$1*z z0tOi{opF6=MxQoWe|))VVU_KloHI%e*Zpn$`#rj153c39rO{uLEy({^wW1c7|8>JZ z?fkE_tKW5F8B=()&yEcgP)lc{{oN#}ROKk7y3STTwH^vHNGoNA6sQ^F*z= z`D;brV5WZu#~r)5X{T8IoOQvmHaRee^UJbbP`su8SA3NCWjv!q6r+ygvJzbQSm8%0 zr>@u%p>Vj67qfD4M=?2agDUoWCCBq z)>rZ5^lLF(=TL*5TA?3HpG)(cTd#~K1OpKPqH@`70Z+Nj+Z=sE;sA+ubGfoR6;%8u z>h2yDC&!aF=ACI;Lcp7)wEHDM=C3C&2ih30D+v(nf>wKUEZRr1w$L3DgKqA_13cIg zzKQ+#n^t+84;(j#Jdm}Q&DS~Z3k46uH zk)y*B1CP9%ofS3*1s!WBo{aH36`k4dV{$?l`21Gb|5}rKBPMlVzxD zd6A&qhtfHBcRFH;UVQi&Ug+hVUd!f} z2q619+#;i`i|+flbnm&O*?if@<@5QXUf%I(-`x_jinUu&Z0_req^SZ8GXag$)9YVk z>GSy&{MOr>v#r535C4w)`Z7-wfgFEomorcQ8$XZzFjw5Q9`y@l9tV)r4FC;G+Y?ZK zq`)AXzT4N~`!vm`%vE)7Ih!<2vp%dJ-;>vOWMCeSr+g=;%a;!gF42fe8`^D)(Qd z#tBB}+S@+DMGY~sWXYku8hT>km3`;Jk`q7;!st{!Cm*T6X3Nb77jJybRB zk!k@qg+Z>scHr%9ZE%2|>U+;?<_CJ@IE^c$7l27mnvld6soa_mC4b>eiBO`6EZ27 zNmE?94)MVbbmfXi!bz&HlwUHu_e}+Ef!jtu17JoN*ofEf&s-d`&Cy&Z4Nv87GF0m> 
zjfERJXc+f(rCg*KPaya3(d|wFx572YI_Dq-i_P$s5@YM-Z}>Rwz2IqG+rQW%ur-Cz zvxA})LXv9Tnfh;$s!Royw!ZbJ z1NYdw4L9&{2!Z*B;w_&m6}QV2g4E{BRs7mGkH9>(vx2=DOV(=)c10|yh-CFEaq7u@ z!^&}2pUh-*mM!oA1-%eZy38F0`K?x+AN84G;>|M>&fn7*vI5#!F=qmq@XfK73$1@z84>=l`D2U^v>wu0LW|HL`( zY>7VftaYmk;@pUMH?_9#{Y?VTf;~jv>j*@1PxrQ>v@4A&v+IET0#jyf{cdoy-IwPh zMpgtb4D2Je>@qunIpVLci?!MVdY&Ko9hsC}OcD7y+GW_2q6p%%#>|+qE>tr)WQ)qrklhIT0IA2zI|SgzYL#oWB5Le zmN9d+;Twhe=aIGDT>5!ELO-dXdgu!N#gfR$z`&Y9s)zegO$W6-5KhnkxLf**nAp?P zm0me8eE&ZAK-4Q$AxwT1@+(GNA9rv0h^+>=R&U)>_8@8nAUykoyVutUN!1Jn>`v0p zM!L!gc3VR(tVtD^_rSob$!Z7dP`*rvwcm25zYK3>QRvKx0Sh8Fa|MOkEdxL=Wfk7a zD_(4@`s-F<-8=;%a1veVL#QOh>ptU&6?h?c@uS;~Pmx_dfh@dV<;r;n3`Yo^K<<;^ zKblBl7pV1&1ZL@Ai44+t;J4a#0y&z`%0@^V{Uh299nn88Cu2nXtN3kcHuJOoWVjmK6c_3&V5j(L| ziX_kece3g94QHkXj{`9Gof)$t#tvc-&?VC;y4s1~i26UeD`1NsdSGt>(Fc>POlIVfD|&@*FAOpD^WhdZUFo&a(tAx1eSQ^fKfMa!Cjsd4 zd^!*s+^`2mWf^F7T@J+5n272LR)vfausPA5SbSkSUY7}BYKEu-@|dipo=AQ7LX7-O zb>R6Wo9YmiGYWvAWq9}Cc_{|1_x$BR*~us5U59TSSK=I~w^`<2si>~btTir4EpgbY1dFrbS?MtqdDBfKFY>!zwKBNqw-r%<_q_{t8Zy9DS_quhFpG`efalk$yY$IwXFZ z63_oDHaTmnk3vt=)M}l3?=i>0@gSc=l~Z8uUSDfPc4b%oy+ki2Y3|uhy0&2~q?O^;d*@!LwBiFd*_g_o}Z?0n>e6A;mhRFk_ez zv3dKJ)rbiqh8y%KKhY%>0_3{vcx%_%y~+uo z+-#@k_q=;-b72~z&e8&IJ+@Oypklmx_qk=H`UmZG6tmn}f~IO5kN?m+2P`Vj87rw(s=B!MKro6S!ho`;xWL0NlmRJd#J#_*vI=nB5?Z_oCi zO--Qe=N$x?%Qbzu?x0##EOHWaYT0*E@b{XP*dFd1`+~RISc_L!^4R5|%4Z67$jriE zb+0x8N``!H2;5+beig?ZmmlapOG!shXH9Xdg*h=h_u~w2g}hExXm~%Z-u?Kv)Aqiu zM!8Z&HbHg%h0)J}j&XuYpC8xNIBg(#`f8pSY$Vi)xux;+!;lhvU+7AqluLiTQd@I! zMK2P*jZZ3C>0m>9Rrpj7f9sOLuEQDAY*Md-3jInorUJQl+~VQnq1i%@{N)?n1#fP3 zMXnY1?bn*v-vOw~#P%l2Mu@>OhU0B1lU<>oLj?{h-$?v63Vjn1wxDdc!-Tu0h5y}_m-0n5fsxvKIFq-F0iC4ejh?oQbArB&PJu?Ynuq3Gu)O1 z5?PC0a9GI?;zQ}Q7IzA5Gxwb4nqu-&pIzFGpH0i|`v&7@OR1dNS&V}NCHX|}O3ljg3@jki{S_#Zxyn%`>Pn2DKdxA%u7-%OBb^*qk0OyV(W z-^0!Pscgazph3Tk_ccc<_Y24P6Z34*+7v#O%ir2L{cAjCt|muNa&nXQx587T7AihV zus@#)r)o#Ag@ahT$Q~4G`GIlH&+mqlqi5CMsAO!&Uog#zR$1k;YBqQfeXJE7`{1w3 zS#Mpan$wEcaQw{yy0a0e^r3i(x8>NratmYA<`x)O(>S}oJ7H9uF8g^9zw?sCKp;K4 z3``_i$0|^+C*`sNhe|3U{e=INF6Qt=ksO|U)vfk50@QX+IyjbGeHm7yl_pGlI?SUX zraHbLv3KTC8sP71XncF5VDXd@YzZ5Vxl!;|OG9u+-hH*_QR~wKd~};rz@gB*RUTxt zTzL9Mpaole!~Qav1P!`l>_tD)YL1lSs-Tv%!{&i;rt^IVZ93d=?=0RCyVlp(LEsE{ z+);;*1phlj><^m;)Q9)-EGv1GszB}jvoX2wF`#|VKmT)HcAP9tvl$@U#uCr@CS&&) zJ`_qOWo6qO_9s|S=NET%#VbHjPz(45&Ko3TE;dnewTcLChPp_f&bh`q@9`7XdJEKx z2p;W3%xqYC1E+ki1*1=PE>?W)E8XdJWc!{4!8x{@Jws zr1ukZf>>W%g5bNaHHSmVv`yyWW!+hB7+O8p=X=4)rHRd|_KPKz>T~b#K z7)o?A1)HqynV9y@c81E3N%m|8=R9{TUO{aj{g4PeFIXvP>z1}^oDN|9^6PL$aCEuA zx){;TxnYl4L$mwIGy(sV422_{-~9e&2BlABZboA=&;zC{YsfMA`wrL$ghFv)Ropw?UF zr!>@#f@47d)D_s@H7&IfP@Vcpr+-SO@e#v7nIhCTN-bb`1smBZ?V za=PO$B?q{k;Apwc2pW;*3sc|Er{hml@nJg9}N?*qW_t%~I^(@r#iMF95p7U!G-rQc@@uv`U}X^Jx9+X1RWZi~In$?Au8Y-P zLrYWO#n#h6`1F(dp5NFt21k((o;3-8P7p#xf8&z{_ePnE9rugDHG=4mSw3428~LL6 zzq2}K>Y9qoom!D*K8rTUQl*I6@ULV;+S#RXhpK>~7U|i_O5*47y?wFs(K8okJbqB( zxFh0M)Banh4;= z`E(pjd*HX`8aH}^06@-WA4S6NvH^DNaq5l@Z#_MM_&Vy}x$v}IHUsl8p{ngJN%o8k zRBcUGpP)N)7$SoTYeL;^uGc`Gz#zA6e2~k}HBZr6o;ELE% zU}A6%aNvnu=gj}cqKUAjs>*vmPxtx<4(dCqvBb$>qB)vPJ<6{1_5B`!Zp?c<=;-z9 zDC)E^+u@iP1tA4Gv=fS~=HoS|!vv_4?ZM9aZOiStxlPsX2}IKb|6(iqJozwC&1i~) ziV8zuuz63$X6gaN(nRo|dWC@f-O#Ijk2^2`WNlJtnzB**Dz7peD zvu4Lw%q^N~#|7izR6s+?Z?wW$y$~FvU?k2WcH4e&tQ?BGa$gu&JV|_fho>2&O$pjQ zWq5{12ipE_ruP}0*?iT(0)r~f^=+9o)gvZ$*VNoL%e~{b?Za>12sX)o@9DSJx|&cd zVo-jIO@l_7%H1(AN@Pr(c2^&gvk)0d8Z@d@qOaS7Ww9vQVzaVm)&x0@ixI-0OIdD?fNy#?m_b7p5#{76X1*qTT2 z*1GJHCI&G1_gT~yU8V5yIz&#lOGyq$i;(lX#wp;|`t<)czALqFxZ3h(N)kKg=Sk1o z+d((DAH($BF&7|ci{SzI!LS}1Lq^!D%vIoHqGPIr9lnp*3#*jfwak^GdX 
z0vYl868fFDPcvH*Tc>_f6)#3KTjj&9@#Te)ig?SYNV+YFO4Zn!(kD&=ygLE%5nELy z$N((!nxB$^r&iFZ`)<#g>SZs`B>!Bqsl@@wlOX!L#}=n2ZU6Roh61@nq#n7+-6qAo z!Y8iAZ6*oxH5!j&M&!3T?&n6%UA-fXSwWx;RWVS4o3N*DLPAATpZ~cWD&=t8XcF|HSwaq+O5F~yx5~2*LEh}> z?pyMfbyRzPLk@U{EUWmMd(MqZT$qE{EeiQVbse^#U-yDKEeQSNWoFPThNk4pF265D z%3OP+M!V$|@(;%n-!%DZj9jTv+oje@psU9&RgGxjVhj97IS6zakTfcF25jVaa!AbW zfChWx-24;$xyG1Ty3pCKo3Y8g`cVf6?qerj@YwVciquX)$@9X)8kO=ZseZ1|*??P2 zWQ4CUIh)ww;R=X&w1z{yxE0>%7-a^MA10yf4eow4FKph)i5qTO3xPEAVYKe1rwQ_R5?(Nkhm(|ZC&+qj_e||Q+#K=-i zd%WfvIIHC;j;5#nRbFETs zWLuaCSF6zxIThe-6bJg1wub}%bsVBQO_KXoJd5LJN@R5qW z7Sb`YCd2f%z`UWeTkFaRi=YWM8f()P0GV;%MVQCc^@ouOj;8O!@o{&$`W@gdjpozgoaDXS{nGMYF}pca;t6g-!_jrk&xckBI#dL9sIwfzf0`H^+Ae}z>X4P!F=Cx@L? zyvX!>jL4^3KH2TgqC2KvxF8Ed+?-qENr~W$}HV5yTdF|E|T?E4HOkSKv`c7_T+XYYxDBZYz4T$y>9$96v4*s0?4?DQULJ*(4b5r8kQ zF5z$oYlE2t%Ix<2$iwmxNF3s8IU;@7QX0`{g4%5ju{&=vck1nh71n&~CFLcyG0@mO zHeTDOGi`{({J|pEwO{ZzUx1uca;UnCDacFckr8I#6_iyQxiBsIKOf&q)Aw474F?{X z8AhZ+r3vAiK9%v#Tnq|N6V6XnTQriV)H*(~^ON}i zty-{9inN1(_1wqv&C>Bn^0GaVKeVV>VzRfHb`_F`o+_9890kx7?d6ebbtL3Lh!VS? zv3%|p4K6c%9LW1rii>xbv%@}Yx4S{j51W8#pttG!3*3~BrFssrIM!tiVz zUDk&8od@}$o|CtinIW_1i??)%k-vS-r(WY%H5?d!6BOj1x?n*^ga6U!fOZRP;&9c` zmG0~;a`Qy`FnQ-Fit7_Qck7!BKzm>1Lo@=gHnHWd7jm}P+2Wpfr@3b!xU`9jn}oG+ z&S5pjCjSi9Z;3YahHViPT!8ox^!k74`9oq^Y&e(@^(zoBfdG)R_lH1$m%R%w05|Cq zmhZ9SEHpw^H3EBl)#Lo7YY0_P0Cf@>#)xloW!ycPLP+%X2W z44(oXf-H3J=S$lzGc?r=nJf1;R1%bYPq~mW{niy^6nmU?azV^jNa8v?bT%Uq|3;s+|w}`T5;Z1%_RW?@|&U#+x0Jwt|XE zd!v#-pN4RmTpGG6PrtFbb|*2+aKG#2y$h+PQ@N-X%iTbHopO&`l``9^&%zr(xy!za z!E`_XzVv2f^GS>kLFcMEy-_UiYTl#RgS)NqyQq(G0Z}a~9Pblpd93v6k9TRoi^cxH z1?dqfLL^8jG%^JjPsQy+pZF9QboF>1Ep_c{Qf{KsS1~oQ+37uwmK4e=R{L6SA<5Rx zLvN1m4aM3rT-`rUsL92-G$J{go{UYhLbq~WX=3ypRNk>%-h#aYo&=Lm4f|Wck8;6p zv(`_%A$azNdj`V*`<=8(``uDtO;n}mTz7RCfGx1i`-BeM53t$d0+hB_g#XDMgvN<2 z(r1$dg^d@Y_P?QJ{t4laIGp|z<(H&rtM(pc?!#@EO@_%SWeu1wtSqa76zZwn7?JBe zp3juR6I5@;nrp>2czjsY$gfSsD<}@fixC#FOk;#C^*F!PH1){&HUtNtz3g%kLQ-w3 ziG8EVtE`*eymNH7j#GQ=f~(gT9tB8yAcP4(37rKdSUi}g8F~MS0u%`cL**@c6yr5i1^HSwcEWO z3#p^mx>#5>UxwV0WX#7wewl~umBkLzCU%#riH%L zMSp|7GSp59U;KPZZAs`av6 zGWioWGAfQ3Ff#rTV~V6~FE)`2&_$ni-h#k%3rwCfDE=a`Mce~V5O(63*}I2gxA_sj z=L#yb&-hYIyufJNx@V`a}p`^3cS@}-;{Gne4Bj`@9x zoK`ad=V2B?V`}HpD43nnQ`0es`!_8}rCR#sTEny~A*)9IJm7dYh-kCcEeeAevgA!Rb=@%5)y+!CwZpBt=0DhS8qzixGeJk@K zgfj$#)C7{#u1Ca5!4G_8`Um4`!q?hPSu+K?Q6R$6XU*23_z3jL+z@9pEd{K@9SPoF zxRCXjm1V_~t?h2-?CsZUkhpO_ajW>~Hbm7q>~ zgJ^nVAGKZ5z%p#CYl_bIyGylPP>PEomAV_(_XM0v^mbQM6BCnf7bJ2qYwBJ?(jiT) z&@)h73Hr`cfL@{&@LFe{``-;q;881?B5&UUf$sfekQce{7qrG4->-%7{EB-(zswPg zXKQjj&vDSv3(2fP7o1Pq=n@&Vv`rUT9b3{bv+G>*HTCh#)WMM6-2VDCArXAB#S?d@ zX|dxRA%9geAF{Q!b@2-+tZ`X#>-j?dQRyJbsnv|Ox&JZCvI|FNL3U+HrfCX;sBmgR zNIu;^V8>@Y_VM%%mF+L@2c-PneZTxIH4Nj*2Wf&Trl*RdrNcWy0`~{7AnCEU_h!4g zs{|1+^Pn1<7)ntVxtxBx$5#*_+7Tt>3zP`?F48vy()*he&59q6v5n%-;!gFqr*l}J z37uY8OfKfzz1ysKXP|JehFRh>&=u9+e5O|DOg|;q7|XTAD`t-ITnWD9a4@Nv6alB+x(?D5mIQK4|%Xc(W62sXp!R$V0Nr6!G<6!&rl2INp`2jeX}zFMT_ zIhPHMdMHOn)RHalyI#qIsc4>O;e30Daxm||`mvk_10LbN>u6^&Ef0Ol$*LF`I6;Ih z6iRua#~HWa6W1;jy=30`yYT1PT7}@1WJHwW2Ez9m8$Bl!^r7W&>IHo)7!xQ}uI1hP z6Ssd&Ii?LEZ>$j|8^X0PS@d)|in4(pl{zR_tAQ6j&{sW<`Q*Za2BOFcH}{+h|WEbOl0*Uf9|UX zV4*bp!fKZ7e8d8xVyFCR%7I+Ui4DD*+zTPEU0vyx8rXmUcW{>Vldu08(ZnibaHkwz z>j_A|w8t*_AlL!gjFB|FzX=6DhI0H7Y*stS8g;2hAb#nRQRN0lK`wY=K4Tv=SrTEF zpYPts161UV?V1Q1f1$n)4~w_Te$nlzgqlC;T=1k^NPHFaf9ZNIEfoy$I%bNKBD5h0BLA65f45k^1UTgK{rJjJ+a^aVpcfuFGp{2k^+iVP6 z+lc8?sQ=*b>-=o-*-ZCW-B~)3$A8S!s@S?m8gp1FqTZ;qNh@mW2q>8ndepX-A9{Z} zw3G9#q2$q8behy-FJ0U1m`(vt3Ad$x%v3&guT}Qx9M;14td8z{k>!ctmyVA63ca>U 
zu_rPGf&9K<_FvLQiH1)k3z_46`4tStx}59(WzY|ewv|xMYey3(dl3HR$A47Vg0u{= zZ518P1~`eoXJnzC7nkBTDp5^v8n4GcvaedjuSV1Vh{ZT|1E5Nrz>sJ@B%;{{(Snc=wRLxC`C0aeWh1(yc__I!-#u5=0tDVP0Pp`2+q^Cy z@x1IVaM-2yxo{U7O*Y&TFQ%vhxZBN~{o27FR!g2oYVi`jZwhdjV~zuNS^&x<0te`W)J-9jPXz6C-oXX*Tg z3?b0lnf!QpOG4v7kVf}gcDrhBUWg~ToFH-_^z?G3+R@|yOF-$F^Kz1)1g0@pJb*!*zmA-@v&wuQcnm*LUehe0EAMbpun*mFMxv zhU?4@<_C{$mzN(qfcS)~n{KGR?uI*>Sry$=5pCYjwg%oOXsf_8;=Sh4$I6Z|dW=T^ z1+-j5dcgH-D1PU;_!F>ZX?LQIIhi3f;cyZ6i^G^#UV@7>&v;@~s_W`t%COeRUzs=f zg^w|JzZXZW!maLO?1}36{^LhX1#pPC$^ndRE-euZ(+%I@i6GkU8hDfir3?&02o&P- z=_CFgQc=er?s(+z*esi&db( zj*Zivu-(NyOV?+pDJog6eKr;*m)JB?hBDGc?CjdFsn|L$u&fobulE%i(#-Lv41s8dY9L)^|5&GjZh4*RoMi>-gkk;d4 zM70e?oer4T#N5No*_a)?uf!8PT51)?48e3LU5o5y2UDidP}IM7M$D002)K@C3T^jMOAfE(;fTnKWxhsFD)${T_(+H7X{LT*4ShA0z+Oe#1jPy19g{23Mw7XX+q}<;{wS|jgmCX4fMgsYHv+sgS#I%6HS7@ zv=26XkU?__`>N-Bc@Qei`i|Z9kJK^^fTrorzES&e{Y-l~RWqsO=+jHdUNmOdHj$}P zmtrSzI(D_3`;*M?%BL2wNKq}fYoDs{)QesBZ(F&;IimJz6NJ*bCB!RMjZJrqSD#K{ z_3mWARW&`s14X){cG0q>Ken<2u@XN)@71f8DUHj&QnSmgBtp5I@#ecd{SoLr;4!^Bi<2$w0lpk&KX zKA#$6#it79u<_my_x;(-EJn8DSvyIU9FDkOn|{xG(rft7c~uD|dQC$44#u&~S0>7X zd$D0qnlPC8oOuMmH*|)|9`JoTRst=1F8cyW&$CF`vJ^HSLxyGc2b1KB$0iq33Kg1M zoHC0;>zr}bHFM$8X*KU8lcKpCM|Nkm{lG3g5yH0*-1=jfgMHs5C)=4~o0jr@(@r*| zF|nD;uwv+_lSEfu-CFu4S<5J^{!>23)OS@_ac@lou8k7a%9SeDil!N=myMRlN%F%r zD*Cu1;14rI12?`Jw91RdQ>mXS65!=?=Bum}u2^tH8cVk$^zUFDpsn9cYJC(dpUZXZ!)#DzasIf~7@+1QFFpyn)PFCI>uxE<7`)_JLu zk-mLV3NsVB!FK6>gi)X}m)6)-luMG@+kU5Z@j6TuW|=Ss?pAImj{5hh=E_AwNvW@c z=rVNNTEe5M#LL0dxo-Z`hG=rhE!);;R*7#VGXQbFXL*0hs3nke(g>A(M~CY%iS}|z zu{y=Ql@}7!m>PFDT^1P25E@hfly!%Q?QIS`bM@~w(DE$EX8;NiX0jl>1hv}8JH#j5 zaLA zjb*Hzw@U^{QoW(^5kNs9zf)X?V6#Kn&iRX7r9KI&+L8-`D|S(O zoHU`F8m)BnF9V85g&RCcnBzs#MiWn z8aeoSb=bpXxIE;Qkjt$t!}l3^nz( zGsK~@p3eE*`;0UkgLJ!KcFHR&rzw>SlQvA{{++lc>++!CxWgyAw@_oe5QJt-{f0&r z?_#9roSichTRiA5@lL>l19s&ZBWl~jP4D`jh7A?kK&*nRY7>wTj$T<9_e@?jtX8Q| z%Sb8$aP|AZFTb4*MRu{a4T$Go`|L{{xxo)4Hi4lkX-+0m6{T{~xDblQE7qmL*&FxXVe~KZ<(ZMS$=$zXrENO{RFD#9nm^ zqlls%%ylyi(`)W#evdi1Bm1h)qXRRul2fWAB#yNwj}XpV9ex)Z7^R50i7pVs^u=pi z^K2C;B6EBvt}C>{SjT!kjlmcpe{HLS_mF5V4Y+j|uScEBUO)%J9c=M)I&ptWKBVB# zsq^%({d#UXBpF?wkJO5Z-zog0RJS65+Z+7b9`(nnbxBXF676wCHc=hBjvyhP7BCdb z(B4|~NqgB?+-uewRu)@vsKO+dW6_Stq64-kB6-oLI;QOy37PJlDUAo}u%bg=%%Iyd zi*P-Sh1Pl@+y)lmgOYX=@(c$ufh|1NGHAx}qwyBGr8rT|+UQ}F`bYlrmw&me%?lP&#t*X$5a{JqVg zRA$@p*8cQJO{+u=7$~RxPswqp;{6KJ@GGldg;tln9hTLv~&PqK$%5n*=AuyQYTBg%jt=wZ>;_cU|t>mzKc7il4!%D8?&tWR43MP1%L!F*v3*E~$l#Di< zWTUY@)fF`00A=T3C}Rb9#>;tMjIWQ2w1yYialH3oQ(RnR$GL@2p9Iv%b(V1Lmzu(pH{CL1QPN4I$K+?)U&bJ4e}WiwJLQoKYX3 z^UFlB(Sr4zjLouLG#$8U{8m5aE#iKGzHILrP@vh9)W`jm>6WC0%^2q6g8Jfw1c7Nz zuZdLrqu+A20{y1Vzt_Ag+B9JJJ9yKjfgJn4O9SikuXErB@^k5k_i~c!pc@YJj|(B~ EKY! Date: Tue, 6 Jul 2021 00:29:26 +0300 Subject: [PATCH 801/931] 2 try --- docs/en/interfaces/http.md | 2 +- docs/ru/interfaces/http.md | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/en/interfaces/http.md b/docs/en/interfaces/http.md index 87ac4dc47ee..c5bfa7e54d2 100644 --- a/docs/en/interfaces/http.md +++ b/docs/en/interfaces/http.md @@ -18,7 +18,7 @@ Ok. Web UI can be accessed here: http://localhost:8123/play. -![Web UI](../images/play.png#) +![Web UI](../images/play.png) In health-check scripts use `GET /ping` request. This handler always returns “Ok.” (with a line feed at the end). Available from version 18.12.13. diff --git a/docs/ru/interfaces/http.md b/docs/ru/interfaces/http.md index 934e0399b6b..b9c7edce7bf 100644 --- a/docs/ru/interfaces/http.md +++ b/docs/ru/interfaces/http.md @@ -17,7 +17,7 @@ Ok. 
Веб-интерфейс доступен по адресу: http://localhost:8123/play. -![Веб-интерфейс](../images/play.png#) +![Веб-интерфейс](../images/play.png) В скриптах проверки доступности вы можете использовать `GET /ping` без параметров. Если сервер доступен всегда возвращается «Ok.» (с переводом строки на конце). From 1bbfbff00358e77a7934cb3d23cbe0f02b7592ad Mon Sep 17 00:00:00 2001 From: l1tsolaiki Date: Tue, 6 Jul 2021 01:04:09 +0300 Subject: [PATCH 802/931] Regenerate ya.make --- src/Functions/ya.make | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/Functions/ya.make b/src/Functions/ya.make index 039c166c1a5..d4636c6c1e2 100644 --- a/src/Functions/ya.make +++ b/src/Functions/ya.make @@ -79,6 +79,8 @@ SRCS( JSONPath/Parsers/ParserJSONPathMemberAccess.cpp JSONPath/Parsers/ParserJSONPathQuery.cpp JSONPath/Parsers/ParserJSONPathRange.cpp + JSONPath/Parsers/ParserJSONPathRoot.cpp + JSONPath/Parsers/ParserJSONPathStar.cpp TargetSpecific.cpp URL/URLHierarchy.cpp URL/URLPathHierarchy.cpp From 0bbcf6879a271564c8d7abc6c7f5773e377c21d8 Mon Sep 17 00:00:00 2001 From: Olga Revyakina Date: Tue, 6 Jul 2021 01:08:26 +0300 Subject: [PATCH 803/931] Links --- docs/en/interfaces/http.md | 2 +- docs/ru/interfaces/http.md | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/en/interfaces/http.md b/docs/en/interfaces/http.md index c5bfa7e54d2..0f497f9af80 100644 --- a/docs/en/interfaces/http.md +++ b/docs/en/interfaces/http.md @@ -16,7 +16,7 @@ $ curl 'http://localhost:8123/' Ok. ``` -Web UI can be accessed here: http://localhost:8123/play. +Web UI can be accessed here: `http://localhost:8123/play`. ![Web UI](../images/play.png) diff --git a/docs/ru/interfaces/http.md b/docs/ru/interfaces/http.md index b9c7edce7bf..fcd9b949ad8 100644 --- a/docs/ru/interfaces/http.md +++ b/docs/ru/interfaces/http.md @@ -15,7 +15,7 @@ $ curl 'http://localhost:8123/' Ok. ``` -Веб-интерфейс доступен по адресу: http://localhost:8123/play. +Веб-интерфейс доступен по адресу: `http://localhost:8123/play`. ![Веб-интерфейс](../images/play.png) From 9875adc00bd72936c716d60f78b97606b5b13986 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Tue, 6 Jul 2021 01:09:39 +0300 Subject: [PATCH 804/931] Make it even worse --- src/Common/ProfileEvents.cpp | 2 - src/IO/createReadBufferFromFileBase.cpp | 61 +++++++++---------------- 2 files changed, 21 insertions(+), 42 deletions(-) diff --git a/src/Common/ProfileEvents.cpp b/src/Common/ProfileEvents.cpp index e71111a2a6b..f55c116fa8e 100644 --- a/src/Common/ProfileEvents.cpp +++ b/src/Common/ProfileEvents.cpp @@ -43,8 +43,6 @@ M(MarkCacheHits, "") \ M(MarkCacheMisses, "") \ M(CreatedReadBufferOrdinary, "") \ - M(CreatedReadBufferDirectIO, "") \ - M(CreatedReadBufferDirectIOFailed, "") \ M(CreatedReadBufferMMap, "") \ M(CreatedReadBufferMMapFailed, "") \ M(DiskReadElapsedMicroseconds, "Total time spent waiting for read syscall. 
This include reads from page cache.") \ diff --git a/src/IO/createReadBufferFromFileBase.cpp b/src/IO/createReadBufferFromFileBase.cpp index 4d903366e99..e008f06409d 100644 --- a/src/IO/createReadBufferFromFileBase.cpp +++ b/src/IO/createReadBufferFromFileBase.cpp @@ -3,6 +3,8 @@ #include #include +#include + namespace ProfileEvents { @@ -21,45 +23,6 @@ std::unique_ptr createReadBufferFromFileBase( size_t estimated_size, size_t direct_io_threshold, size_t mmap_threshold, MMappedFileCache * mmap_cache, size_t buffer_size, int flags, char * existing_memory, size_t alignment) { -#if defined(OS_LINUX) || defined(__FreeBSD__) - if (direct_io_threshold && estimated_size >= direct_io_threshold) - { - /** O_DIRECT - * The O_DIRECT flag may impose alignment restrictions on the length and address of user-space buffers and the file offset of I/Os. - * In Linux alignment restrictions vary by filesystem and kernel version and might be absent entirely. - * However there is currently no filesystem-independent interface for an application to discover these restrictions - * for a given file or filesystem. Some filesystems provide their own interfaces for doing so, for example the - * XFS_IOC_DIOINFO operation in xfsctl(3). - * - * Under Linux 2.4, transfer sizes, and the alignment of the user buffer and the file offset must all be - * multiples of the logical block size of the filesystem. Since Linux 2.6.0, alignment to the logical block size - * of the underlying storage (typically 512 bytes) suffices. - * - * - man 2 open - */ - constexpr size_t min_alignment = DEFAULT_AIO_FILE_BLOCK_SIZE; - if (alignment % min_alignment) - alignment = (alignment + min_alignment - 1) / min_alignment * min_alignment; - - /// Attempt to open a file with O_DIRECT - try - { - auto res = std::make_unique( - filename, buffer_size, (flags == -1 ? O_RDONLY | O_CLOEXEC : flags) | O_DIRECT, existing_memory, alignment); - ProfileEvents::increment(ProfileEvents::CreatedReadBufferDirectIO); - return res; - } - catch (const ErrnoException &) - { - /// Fallback to cached IO if O_DIRECT is not supported. - ProfileEvents::increment(ProfileEvents::CreatedReadBufferDirectIOFailed); - } - } -#else - (void)direct_io_threshold; - (void)estimated_size; -#endif - if (!existing_memory && mmap_threshold && mmap_cache && estimated_size >= mmap_threshold) { try @@ -76,7 +39,25 @@ std::unique_ptr createReadBufferFromFileBase( } ProfileEvents::increment(ProfileEvents::CreatedReadBufferOrdinary); - return std::make_unique(filename, buffer_size, flags, existing_memory, alignment); + auto res = std::make_unique(filename, buffer_size, flags, existing_memory, alignment); + + if (direct_io_threshold && estimated_size >= direct_io_threshold) + { + /** We don't use O_DIRECT because it is tricky and previous implementation has a bug. + * Instead, we advise the OS that the data should not be cached. + * This is not exactly the same for two reasons: + * - extra copying from page cache to userspace is not eliminated; + * - if data is already in cache, it is purged. + * + * NOTE: Better to rewrite it with userspace page cache. 
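+       * With offset = 0 and len = 0, the advice covers the whole file. Also note
+       * that POSIX_FADV_DONTNEED only drops pages that are cached at the moment
+       * of the call; pages brought in by subsequent reads get cached again
+       * unless the advice is repeated.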
+ */ + + if (0 != posix_fadvise(res->getFD(), 0, 0, POSIX_FADV_DONTNEED)) + LOG_WARNING(&Poco::Logger::get("createReadBufferFromFileBase"), + "Cannot request 'posix_fadvise' with POSIX_FADV_DONTNEED for file {}", filename); + } + + return res; } } From 94a024c7c845cabba924e668c93fef2ea3900db2 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Tue, 6 Jul 2021 01:12:49 +0300 Subject: [PATCH 805/931] Fix warning --- src/Interpreters/AsynchronousMetrics.cpp | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/src/Interpreters/AsynchronousMetrics.cpp b/src/Interpreters/AsynchronousMetrics.cpp index db0fd1f7c43..11ecf547714 100644 --- a/src/Interpreters/AsynchronousMetrics.cpp +++ b/src/Interpreters/AsynchronousMetrics.cpp @@ -43,6 +43,9 @@ namespace ErrorCodes extern const int CANNOT_SYSCONF; } + +#if defined(OS_LINUX) + static constexpr size_t small_buffer_size = 4096; static void openFileIfExists(const char * filename, std::optional & out) @@ -62,6 +65,8 @@ static std::unique_ptr openFileIfExists(const std::string & return {}; } +#endif + AsynchronousMetrics::AsynchronousMetrics( ContextPtr global_context_, From 575dfa18e129cc849e2538c61097e0a5f9587c43 Mon Sep 17 00:00:00 2001 From: alexey-milovidov Date: Tue, 6 Jul 2021 01:26:11 +0300 Subject: [PATCH 806/931] Update contrib.md --- docs/en/development/contrib.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/en/development/contrib.md b/docs/en/development/contrib.md index f372da8859f..ac39c496c72 100644 --- a/docs/en/development/contrib.md +++ b/docs/en/development/contrib.md @@ -95,7 +95,7 @@ SELECT library_name, license_type, license_path FROM system.licenses ORDER BY li 1. All external third-party code should reside in the dedicated directories under `contrib` directory of ClickHouse repo. Prefer Git submodules, when available. 2. Fork/mirror the official repo in [Clickhouse-extras](https://github.com/ClickHouse-Extras). Prefer official GitHub repos, when available. 3. Branch from the branch you want to integrate, e.g., `master` -> `clickhouse/master`, or `release/vX.Y.Z` -> `clickhouse/release/vX.Y.Z`. -4. All forks in [Clickhouse-extras](https://github.com/ClickHouse-Extras) should be automatically synchronized with upstreams. `clickhouse/...` branches will remain unaffected, since virtually nobody is going to use that naming pattern in their upstream repos. +4. All forks in [Clickhouse-extras](https://github.com/ClickHouse-Extras) can be automatically synchronized with upstreams. `clickhouse/...` branches will remain unaffected, since virtually nobody is going to use that naming pattern in their upstream repos. 5. Add submodules under `contrib` of ClickHouse repo that refer the above forks/mirrors. Set the submodules to track the corresponding `clickhouse/...` branches. 6. Every time the custom changes have to be made in the library code, a dedicated branch should be created, like `clickhouse/my-fix`. Then this branch should be merged into the branch, that is tracked by the submodule, e.g., `clickhouse/master` or `clickhouse/release/vX.Y.Z`. 7. No code should be pushed in any branch of the forks in [Clickhouse-extras](https://github.com/ClickHouse-Extras), whose names do not follow `clickhouse/...` pattern. 
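The `posix_fadvise` change in [PATCH 804/931] above is easy to try out in isolation. Below is a minimal sketch of the same idea in Python, which exposes the syscall as `os.posix_fadvise` on POSIX systems; the function name and chunk size are illustrative and not part of the patch:

```python
import os

def read_file_dropping_cache(path, chunk_size=1 << 20):
    """Read a file fully, first advising the kernel not to keep its pages cached."""
    fd = os.open(path, os.O_RDONLY)
    total = 0
    try:
        # Same call shape as the patch: offset = 0, len = 0 covers the whole file.
        os.posix_fadvise(fd, 0, 0, os.POSIX_FADV_DONTNEED)
        while True:
            chunk = os.read(fd, chunk_size)
            if not chunk:
                break
            total += len(chunk)
    finally:
        os.close(fd)
    return total
```

As the comment in the patch itself concedes, this is a weaker guarantee than O_DIRECT: the read still copies through the page cache, and whatever was already cached for the file is evicted.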
From 5a810fc061127afa761066679bedbda307ae0a26 Mon Sep 17 00:00:00 2001 From: Olga Revyakina Date: Tue, 6 Jul 2021 01:39:40 +0300 Subject: [PATCH 807/931] 3 try --- docs/en/interfaces/http.md | 2 +- docs/ru/interfaces/http.md | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/en/interfaces/http.md b/docs/en/interfaces/http.md index 0f497f9af80..f4237cc2eae 100644 --- a/docs/en/interfaces/http.md +++ b/docs/en/interfaces/http.md @@ -18,7 +18,7 @@ Ok. Web UI can be accessed here: `http://localhost:8123/play`. -![Web UI](../images/play.png) +![Web UI](../images/play.png#) In health-check scripts use `GET /ping` request. This handler always returns “Ok.” (with a line feed at the end). Available from version 18.12.13. diff --git a/docs/ru/interfaces/http.md b/docs/ru/interfaces/http.md index fcd9b949ad8..83a6a30a071 100644 --- a/docs/ru/interfaces/http.md +++ b/docs/ru/interfaces/http.md @@ -17,7 +17,7 @@ Ok. Веб-интерфейс доступен по адресу: `http://localhost:8123/play`. -![Веб-интерфейс](../images/play.png) +![Веб-интерфейс](../images/play.png#) В скриптах проверки доступности вы можете использовать `GET /ping` без параметров. Если сервер доступен всегда возвращается «Ok.» (с переводом строки на конце). From 999ce1c867271693f711fd180f282e2ac7d46a5b Mon Sep 17 00:00:00 2001 From: Vitaliy Zakaznikov Date: Mon, 5 Jul 2021 19:18:57 -0400 Subject: [PATCH 808/931] Enabling all TestFlows modules and fixing some tests. --- .../snapshots/common.py.tests.snapshot | 48 ++++ .../tests/array_tuple_map.py | 213 +++++++++--------- .../rbac/tests/views/materialized_view.py | 8 +- tests/testflows/regression.py | 14 +- .../testflows/window_functions/regression.py | 2 + 5 files changed, 165 insertions(+), 120 deletions(-) diff --git a/tests/testflows/extended_precision_data_types/snapshots/common.py.tests.snapshot b/tests/testflows/extended_precision_data_types/snapshots/common.py.tests.snapshot index c8b57ffdd1c..18b58b0cfdc 100644 --- a/tests/testflows/extended_precision_data_types/snapshots/common.py.tests.snapshot +++ b/tests/testflows/extended_precision_data_types/snapshots/common.py.tests.snapshot @@ -1035,10 +1035,12 @@ a mapAdd_with_Int128_on_a_table = r""" a +([1,2],[2,4]) """ mapSubtract_with_Int128_on_a_table = r""" a +([1,2],[0,0]) """ mapPopulateSeries_with_Int128_on_a_table = r""" @@ -1563,10 +1565,12 @@ a mapAdd_with_Int256_on_a_table = r""" a +([1,2],[2,4]) """ mapSubtract_with_Int256_on_a_table = r""" a +([1,2],[0,0]) """ mapPopulateSeries_with_Int256_on_a_table = r""" @@ -2091,10 +2095,12 @@ a mapAdd_with_UInt128_on_a_table = r""" a +([1,2],[2,4]) """ mapSubtract_with_UInt128_on_a_table = r""" a +([1,2],[0,0]) """ mapPopulateSeries_with_UInt128_on_a_table = r""" @@ -2619,10 +2625,12 @@ a mapAdd_with_UInt256_on_a_table = r""" a +([1,2],[2,4]) """ mapSubtract_with_UInt256_on_a_table = r""" a +([1,2],[0,0]) """ mapPopulateSeries_with_UInt256_on_a_table = r""" @@ -6280,3 +6288,43 @@ a \N """ +mapAdd_with_Int128 = r""" +mapAdd(tuple(array(toInt128(\'1\'), toInt128(\'2\')), array(toInt128(\'1\'), toInt128(\'2\'))), tuple(array(toInt128(\'1\'), toInt128(\'2\')), array(toInt128(\'1\'), toInt128(\'2\')))) +([1,2],[2,4]) +""" + +mapSubtract_with_Int128 = r""" +mapSubtract(tuple(array(toInt128(\'1\'), toInt128(\'2\')), array(toInt128(\'1\'), toInt128(\'2\'))), tuple(array(toInt128(\'1\'), toInt128(\'2\')), array(toInt128(\'1\'), toInt128(\'2\')))) +([1,2],[0,0]) +""" + +mapAdd_with_Int256 = r""" +mapAdd(tuple(array(toInt256(\'1\'), toInt256(\'2\')), array(toInt256(\'1\'), 
toInt256(\'2\'))), tuple(array(toInt256(\'1\'), toInt256(\'2\')), array(toInt256(\'1\'), toInt256(\'2\')))) +([1,2],[2,4]) +""" + +mapSubtract_with_Int256 = r""" +mapSubtract(tuple(array(toInt256(\'1\'), toInt256(\'2\')), array(toInt256(\'1\'), toInt256(\'2\'))), tuple(array(toInt256(\'1\'), toInt256(\'2\')), array(toInt256(\'1\'), toInt256(\'2\')))) +([1,2],[0,0]) +""" + +mapAdd_with_UInt128 = r""" +mapAdd(tuple(array(toUInt128(\'1\'), toUInt128(\'2\')), array(toUInt128(\'1\'), toUInt128(\'2\'))), tuple(array(toUInt128(\'1\'), toUInt128(\'2\')), array(toUInt128(\'1\'), toUInt128(\'2\')))) +([1,2],[2,4]) +""" + +mapSubtract_with_UInt128 = r""" +mapSubtract(tuple(array(toUInt128(\'1\'), toUInt128(\'2\')), array(toUInt128(\'1\'), toUInt128(\'2\'))), tuple(array(toUInt128(\'1\'), toUInt128(\'2\')), array(toUInt128(\'1\'), toUInt128(\'2\')))) +([1,2],[0,0]) +""" + +mapAdd_with_UInt256 = r""" +mapAdd(tuple(array(toUInt256(\'1\'), toUInt256(\'2\')), array(toUInt256(\'1\'), toUInt256(\'2\'))), tuple(array(toUInt256(\'1\'), toUInt256(\'2\')), array(toUInt256(\'1\'), toUInt256(\'2\')))) +([1,2],[2,4]) +""" + +mapSubtract_with_UInt256 = r""" +mapSubtract(tuple(array(toUInt256(\'1\'), toUInt256(\'2\')), array(toUInt256(\'1\'), toUInt256(\'2\'))), tuple(array(toUInt256(\'1\'), toUInt256(\'2\')), array(toUInt256(\'1\'), toUInt256(\'2\')))) +([1,2],[0,0]) +""" + diff --git a/tests/testflows/extended_precision_data_types/tests/array_tuple_map.py b/tests/testflows/extended_precision_data_types/tests/array_tuple_map.py index d1a5171d00a..550122c5b86 100644 --- a/tests/testflows/extended_precision_data_types/tests/array_tuple_map.py +++ b/tests/testflows/extended_precision_data_types/tests/array_tuple_map.py @@ -50,18 +50,16 @@ def array_func(self, data_type, node=None): table(name = table_name, data_type = f'Array({data_type})') with When("I insert the output into the table"): - node.query(f"INSERT INTO {table_name} SELECT {func}array({to_data_type(data_type,3)}, {to_data_type(data_type,2)}, {to_data_type(data_type,1)}))") + node.query(f"INSERT INTO {table_name} SELECT {func}array({to_data_type(data_type,3)}," + f"{to_data_type(data_type,2)}, {to_data_type(data_type,1)}))") - execute_query(f""" - SELECT * FROM {table_name} ORDER BY a ASC - """) + execute_query(f"SELECT * FROM {table_name} ORDER BY a ASC") for func in ['arraySplit((x, y) -> x=y, [0, 0, 0],']: with Scenario(f"Inline - {data_type} - {func})"): - execute_query(f""" - SELECT {func}array({to_data_type(data_type,3)}, {to_data_type(data_type,2)}, {to_data_type(data_type,1)})) - """) + execute_query(f"SELECT {func}array({to_data_type(data_type,3)}, {to_data_type(data_type,2)}," + f"{to_data_type(data_type,1)}))") with Scenario(f"Table - {data_type} - {func})"): table_name = get_table_name() @@ -69,18 +67,15 @@ def array_func(self, data_type, node=None): table(name = table_name, data_type = f'Array(Array({data_type}))') with When("I insert the output into the table"): - node.query(f"INSERT INTO {table_name} SELECT {func}array({to_data_type(data_type,3)}, {to_data_type(data_type,2)}, {to_data_type(data_type,1)}))") + node.query(f"INSERT INTO {table_name} SELECT {func}array({to_data_type(data_type,3)}," + f"{to_data_type(data_type,2)}, {to_data_type(data_type,1)}))") - execute_query(f""" - SELECT * FROM {table_name} ORDER BY a ASC - """) + execute_query(f"SELECT * FROM {table_name} ORDER BY a ASC") for func in [f'arrayZip([{to_data_type(data_type,1)}],']: with Scenario(f"Inline - {data_type} - {func})"): - execute_query(f""" - SELECT 
{func}array({to_data_type(data_type,3)})) - """) + execute_query(f"SELECT {func}array({to_data_type(data_type,3)}))") with Scenario(f"Table - {data_type} - {func})"): table_name = get_table_name() @@ -90,9 +85,7 @@ def array_func(self, data_type, node=None): with When("I insert the output into the table"): node.query(f"INSERT INTO {table_name} SELECT {func}array({to_data_type(data_type,1)}))") - execute_query(f""" - SELECT * FROM {table_name} ORDER BY a ASC - """) + execute_query(f"SELECT * FROM {table_name} ORDER BY a ASC") for func in ['empty(', 'notEmpty(', @@ -125,20 +118,17 @@ def array_func(self, data_type, node=None): table(name = table_name, data_type = data_type) with When("I insert the output into the table"): - node.query(f"INSERT INTO {table_name} SELECT {func}array({to_data_type(data_type,3)}, {to_data_type(data_type,2)}, {to_data_type(data_type,1)}))", + node.query(f"INSERT INTO {table_name} SELECT {func}array({to_data_type(data_type,3)}," + f"{to_data_type(data_type,2)}, {to_data_type(data_type,1)}))", exitcode = 44, message = 'Exception:') - execute_query(f""" - SELECT * FROM {table_name} ORDER BY a ASC - """) + execute_query(f"SELECT * FROM {table_name} ORDER BY a ASC") else: with Scenario(f"Inline - {data_type} - {func})"): - execute_query(f""" - SELECT {func}array({to_data_type(data_type,3)}, {to_data_type(data_type,2)}, {to_data_type(data_type,1)})) - """) + execute_query(f"SELECT {func}array({to_data_type(data_type,3)}, {to_data_type(data_type,2)}, {to_data_type(data_type,1)}))") with Scenario(f"Table - {data_type} - {func})"): table_name = get_table_name() @@ -146,11 +136,10 @@ def array_func(self, data_type, node=None): table(name = table_name, data_type = data_type) with When("I insert the output into the table"): - node.query(f"INSERT INTO {table_name} SELECT {func}array({to_data_type(data_type,3)}, {to_data_type(data_type,2)}, {to_data_type(data_type,1)}))") + node.query(f"INSERT INTO {table_name} SELECT {func}array({to_data_type(data_type,3)}," + f"{to_data_type(data_type,2)}, {to_data_type(data_type,1)}))") - execute_query(f""" - SELECT * FROM {table_name} ORDER BY a ASC - """) + execute_query(f"SELECT * FROM {table_name} ORDER BY a ASC") for func in ['arrayDifference(', 'arrayCumSum(', @@ -171,12 +160,11 @@ def array_func(self, data_type, node=None): table(name = table_name, data_type = data_type) with When("I insert the output into the table"): - node.query(f"INSERT INTO {table_name} SELECT {func}array({to_data_type(data_type,3)}, {to_data_type(data_type,2)}, {to_data_type(data_type,1)}))", + node.query(f"INSERT INTO {table_name} SELECT {func}array({to_data_type(data_type,3)}," + f"{to_data_type(data_type,2)}, {to_data_type(data_type,1)}))", exitcode = exitcode, message = 'Exception:') - execute_query(f""" - SELECT * FROM {table_name} ORDER BY a ASC - """) + execute_query(f"SELECT * FROM {table_name} ORDER BY a ASC") for func in ['arrayElement']: @@ -192,20 +180,18 @@ def array_func(self, data_type, node=None): table(name = table_name, data_type = data_type) with When("I insert the output into the table"): - node.query(f"INSERT INTO {table_name} SELECT {func}(array({to_data_type(data_type,3)}, {to_data_type(data_type,2)}, {to_data_type(data_type,1)}), 1)") + node.query(f"INSERT INTO {table_name} SELECT {func}(array({to_data_type(data_type,3)}," + f"{to_data_type(data_type,2)}, {to_data_type(data_type,1)}), 1)") - execute_query(f""" - SELECT * FROM {table_name} ORDER BY a ASC - """) + execute_query(f"SELECT * FROM {table_name} ORDER BY a ASC") for func in 
['arrayPushBack', 'arrayPushFront']: with Scenario(f"Inline - {data_type} - {func}"): - execute_query(f""" - SELECT {func}(array({to_data_type(data_type,3)}, {to_data_type(data_type,2)}, {to_data_type(data_type,1)}), {to_data_type(data_type,1)}) - """) + execute_query(f"SELECT {func}(array({to_data_type(data_type,3)}, {to_data_type(data_type,2)}," + f"{to_data_type(data_type,1)}), {to_data_type(data_type,1)})") with Scenario(f"Table - {data_type} - {func}"): table_name = get_table_name() @@ -213,20 +199,18 @@ def array_func(self, data_type, node=None): table(name = table_name, data_type = f'Array({data_type})') with When("I insert the output into the table"): - node.query(f"INSERT INTO {table_name} SELECT {func}(array({to_data_type(data_type,3)}, {to_data_type(data_type,2)}, {to_data_type(data_type,1)}), {to_data_type(data_type,1)})") + node.query(f"INSERT INTO {table_name} SELECT {func}(array({to_data_type(data_type,3)}," + f"{to_data_type(data_type,2)}, {to_data_type(data_type,1)}), {to_data_type(data_type,1)})") - execute_query(f""" - SELECT * FROM {table_name} ORDER BY a ASC - """) + execute_query(f"SELECT * FROM {table_name} ORDER BY a ASC") for func in ['arrayResize', 'arraySlice']: with Scenario(f"Inline - {data_type} - {func}"): - execute_query(f""" - SELECT {func}(array({to_data_type(data_type,3)}, {to_data_type(data_type,2)}, {to_data_type(data_type,1)}), 1) - """) + execute_query(f"SELECT {func}(array({to_data_type(data_type,3)}," + f"{to_data_type(data_type,2)}, {to_data_type(data_type,1)}), 1)") with Scenario(f"Table - {data_type} - {func}"): table_name = get_table_name() @@ -234,20 +218,18 @@ def array_func(self, data_type, node=None): table(name = table_name, data_type = f'Array({data_type})') with When("I insert the output into the table"): - node.query(f"INSERT INTO {table_name} SELECT {func}(array({to_data_type(data_type,3)}, {to_data_type(data_type,2)}, {to_data_type(data_type,1)}), 1)") + node.query(f"INSERT INTO {table_name} SELECT {func}(array({to_data_type(data_type,3)}," + f"{to_data_type(data_type,2)}, {to_data_type(data_type,1)}), 1)") - execute_query(f""" - SELECT * FROM {table_name} ORDER BY a ASC - """) + execute_query(f"SELECT * FROM {table_name} ORDER BY a ASC") for func in ['has', 'indexOf', 'countEqual']: with Scenario(f"Inline - {data_type} - {func}"): - execute_query(f""" - SELECT {func}(array({to_data_type(data_type,3)}, {to_data_type(data_type,2)}, {to_data_type(data_type,1)}), NULL) - """) + execute_query(f"SELECT {func}(array({to_data_type(data_type,3)}," + f"{to_data_type(data_type,2)}, {to_data_type(data_type,1)}), NULL)") with Scenario(f"Table - {data_type} - {func}"): table_name = get_table_name() @@ -255,11 +237,10 @@ def array_func(self, data_type, node=None): table(name = table_name, data_type = data_type) with When("I insert the output into the table"): - node.query(f"INSERT INTO {table_name} SELECT {func}(array({to_data_type(data_type,3)}, {to_data_type(data_type,2)}, {to_data_type(data_type,1)}), NULL)") + node.query(f"INSERT INTO {table_name} SELECT {func}(array({to_data_type(data_type,3)}," + f"{to_data_type(data_type,2)}, {to_data_type(data_type,1)}), NULL)") - execute_query(f""" - SELECT * FROM {table_name} ORDER BY a ASC - """) + execute_query(f"SELECT * FROM {table_name} ORDER BY a ASC") @TestOutline(Suite) @Requirements( @@ -281,11 +262,10 @@ def tuple_func(self, data_type, node=None): table(name = table_name, data_type = f'Tuple({data_type}, {data_type}, {data_type})') with When("I insert the output into a table"): - 
node.query(f"INSERT INTO {table_name} SELECT tuple({to_data_type(data_type,1)}, {to_data_type(data_type,1)}, {to_data_type(data_type,1)})") + node.query(f"INSERT INTO {table_name} SELECT tuple({to_data_type(data_type,1)}," + f"{to_data_type(data_type,1)}, {to_data_type(data_type,1)})") - execute_query(f""" - SELECT * FROM {table_name} ORDER BY a ASC - """) + execute_query(f"SELECT * FROM {table_name} ORDER BY a ASC") with Scenario(f"tupleElement with {data_type}"): node.query(f"SELECT tupleElement(({to_data_type(data_type,1)}, {to_data_type(data_type,1)}), 1)") @@ -298,9 +278,7 @@ def tuple_func(self, data_type, node=None): with When("I insert the output into a table"): node.query(f"INSERT INTO {table_name} SELECT tupleElement(({to_data_type(data_type,1)}, {to_data_type(data_type,1)}), 1)") - execute_query(f""" - SELECT * FROM {table_name} ORDER BY a ASC - """) + execute_query(f"SELECT * FROM {table_name} ORDER BY a ASC") with Scenario(f"untuple with {data_type}"): node.query(f"SELECT untuple(({to_data_type(data_type,1)},))") @@ -313,12 +291,11 @@ def tuple_func(self, data_type, node=None): with When("I insert the output into a table"): node.query(f"INSERT INTO {table_name} SELECT untuple(({to_data_type(data_type,1)},))") - execute_query(f""" - SELECT * FROM {table_name} ORDER BY a ASC - """) + execute_query(f"SELECT * FROM {table_name} ORDER BY a ASC") with Scenario(f"tupleHammingDistance with {data_type}"): - node.query(f"SELECT tupleHammingDistance(({to_data_type(data_type,1)}, {to_data_type(data_type,1)}), ({to_data_type(data_type,2)}, {to_data_type(data_type,2)}))") + node.query(f"SELECT tupleHammingDistance(({to_data_type(data_type,1)}, {to_data_type(data_type,1)})," + f"({to_data_type(data_type,2)}, {to_data_type(data_type,2)}))") with Scenario(f"tupleHammingDistance with {data_type} on a table"): table_name = get_table_name() @@ -326,11 +303,10 @@ def tuple_func(self, data_type, node=None): table(name = table_name, data_type = data_type) with When("I insert the output into a table"): - node.query(f"INSERT INTO {table_name} SELECT tupleHammingDistance(({to_data_type(data_type,1)}, {to_data_type(data_type,1)}), ({to_data_type(data_type,2)}, {to_data_type(data_type,2)}))") + node.query(f"INSERT INTO {table_name} SELECT tupleHammingDistance(({to_data_type(data_type,1)}," + f"{to_data_type(data_type,1)}), ({to_data_type(data_type,2)}, {to_data_type(data_type,2)}))") - execute_query(f""" - SELECT * FROM {table_name} ORDER BY a ASC - """) + execute_query(f"SELECT * FROM {table_name} ORDER BY a ASC") @TestOutline(Suite) @Requirements( @@ -355,13 +331,17 @@ def map_func(self, data_type, node=None): with When("I insert the output into a table"): node.query(f"INSERT INTO {table_name} SELECT map('key1', {to_data_type(data_type,1)}, 'key2', {to_data_type(data_type,2)})") - execute_query(f""" - SELECT * FROM {table_name} - """) + execute_query(f"SELECT * FROM {table_name}") with Scenario(f"mapAdd with {data_type}"): - node.query(f"SELECT mapAdd(([{to_data_type(data_type,1)}, {to_data_type(data_type,2)}], [{to_data_type(data_type,1)}, {to_data_type(data_type,2)}]), ([{to_data_type(data_type,1)}, {to_data_type(data_type,2)}], [{to_data_type(data_type,1)}, {to_data_type(data_type,2)}]))", - exitcode = 44, message='Exception:') + sql = f"SELECT mapAdd(([{to_data_type(data_type,1)}, {to_data_type(data_type,2)}]," + f"[{to_data_type(data_type,1)}, {to_data_type(data_type,2)}])," + f"([{to_data_type(data_type,1)}, {to_data_type(data_type,2)}]," + f"[{to_data_type(data_type,1)}, 
{to_data_type(data_type,2)}]))" + if data_type.startswith("Decimal"): + node.query(sql, exitcode=43, message="Exception:") + else: + execute_query(sql) with Scenario(f"mapAdd with {data_type} on a table"): table_name = get_table_name() @@ -369,16 +349,27 @@ def map_func(self, data_type, node=None): table(name = table_name, data_type = f'Tuple(Array({data_type}), Array({data_type}))') with When("I insert the output into a table"): - node.query(f"INSERT INTO {table_name} SELECT mapAdd(([{to_data_type(data_type,1)}, {to_data_type(data_type,2)}], [{to_data_type(data_type,1)}, {to_data_type(data_type,2)}]), ([{to_data_type(data_type,1)}, {to_data_type(data_type,2)}], [{to_data_type(data_type,1)}, {to_data_type(data_type,2)}]))", - exitcode = 44, message='Exception:') + sql = (f"INSERT INTO {table_name} SELECT mapAdd(([{to_data_type(data_type,1)},{to_data_type(data_type,2)}]," + f"[{to_data_type(data_type,1)}, {to_data_type(data_type,2)}]), ([{to_data_type(data_type,1)}, {to_data_type(data_type,2)}]," + f"[{to_data_type(data_type,1)}, {to_data_type(data_type,2)}]))") + exitcode, message = 0, None - execute_query(f""" - SELECT * FROM {table_name} ORDER BY a ASC - """) + if data_type.startswith("Decimal"): + exitcode, message = 43, "Exception:" + node.query(sql, exitcode=exitcode, message=message) + + execute_query(f"""SELECT * FROM {table_name} ORDER BY a ASC""") with Scenario(f"mapSubtract with {data_type}"): - node.query(f"SELECT mapSubtract(([{to_data_type(data_type,1)}, {to_data_type(data_type,2)}], [{to_data_type(data_type,1)}, {to_data_type(data_type,2)}]), ([{to_data_type(data_type,1)}, {to_data_type(data_type,2)}], [{to_data_type(data_type,1)}, {to_data_type(data_type,2)}]))", - exitcode = 44, message='Exception:') + sql = (f"SELECT mapSubtract(([{to_data_type(data_type,1)}, {to_data_type(data_type,2)}]," + f"[{to_data_type(data_type,1)}, {to_data_type(data_type,2)}])," + f"([{to_data_type(data_type,1)}, {to_data_type(data_type,2)}]," + f"[{to_data_type(data_type,1)}, {to_data_type(data_type,2)}]))") + + if data_type.startswith("Decimal"): + node.query(sql, exitcode=43, message="Exception:") + else: + execute_query(sql) with Scenario(f"mapSubtract with {data_type} on a table"): table_name = get_table_name() @@ -386,15 +377,21 @@ def map_func(self, data_type, node=None): table(name = table_name, data_type = f'Tuple(Array({data_type}), Array({data_type}))') with When("I insert the output into a table"): - node.query(f"INSERT INTO {table_name} SELECT mapSubtract(([{to_data_type(data_type,1)}, {to_data_type(data_type,2)}], [{to_data_type(data_type,1)}, {to_data_type(data_type,2)}]), ([{to_data_type(data_type,1)}, {to_data_type(data_type,2)}], [{to_data_type(data_type,1)}, {to_data_type(data_type,2)}]))", - exitcode = 44, message='Exception:') + sql = (f"INSERT INTO {table_name} SELECT mapSubtract(([{to_data_type(data_type,1)}," + f"{to_data_type(data_type,2)}], [{to_data_type(data_type,1)}," + f"{to_data_type(data_type,2)}]), ([{to_data_type(data_type,1)}," + f"{to_data_type(data_type,2)}], [{to_data_type(data_type,1)}, {to_data_type(data_type,2)}]))") + exitcode, message = 0, None - execute_query(f""" - SELECT * FROM {table_name} ORDER BY a ASC - """) + if data_type.startswith("Decimal"): + exitcode, message = 43, "Exception:" + node.query(sql, exitcode=exitcode, message=message) + + execute_query(f"SELECT * FROM {table_name} ORDER BY a ASC") with Scenario(f"mapPopulateSeries with {data_type}"): - node.query(f"SELECT mapPopulateSeries([1,2,3], [{to_data_type(data_type,1)}, 
{to_data_type(data_type,2)}, {to_data_type(data_type,3)}], 5)", + node.query(f"SELECT mapPopulateSeries([1,2,3], [{to_data_type(data_type,1)}," + f"{to_data_type(data_type,2)}, {to_data_type(data_type,3)}], 5)", exitcode = 44, message='Exception:') with Scenario(f"mapPopulateSeries with {data_type} on a table"): @@ -403,15 +400,15 @@ def map_func(self, data_type, node=None): table(name = table_name, data_type = f'Tuple(Array({data_type}), Array({data_type}))') with When("I insert the output into a table"): - node.query(f"INSERT INTO {table_name} SELECT mapPopulateSeries([1,2,3], [{to_data_type(data_type,1)}, {to_data_type(data_type,2)}, {to_data_type(data_type,3)}], 5)", + node.query(f"INSERT INTO {table_name} SELECT mapPopulateSeries([1,2,3]," + f"[{to_data_type(data_type,1)}, {to_data_type(data_type,2)}, {to_data_type(data_type,3)}], 5)", exitcode = 44, message='Exception:') - execute_query(f""" - SELECT * FROM {table_name} ORDER BY a ASC - """) + execute_query(f"SELECT * FROM {table_name} ORDER BY a ASC") with Scenario(f"mapContains with {data_type}"): - node.query(f"SELECT mapContains( map('key1', {to_data_type(data_type,1)}, 'key2', {to_data_type(data_type,2)}), 'key1')") + node.query(f"SELECT mapContains( map('key1', {to_data_type(data_type,1)}," + f"'key2', {to_data_type(data_type,2)}), 'key1')") with Scenario(f"mapContains with {data_type} on a table"): table_name = get_table_name() @@ -419,11 +416,10 @@ def map_func(self, data_type, node=None): table(name = table_name, data_type = data_type) with When("I insert the output into a table"): - node.query(f"INSERT INTO {table_name} SELECT mapContains( map('key1', {to_data_type(data_type,1)}, 'key2', {to_data_type(data_type,2)}), 'key1')") + node.query(f"INSERT INTO {table_name} SELECT mapContains( map('key1', {to_data_type(data_type,1)}," + f"'key2', {to_data_type(data_type,2)}), 'key1')") - execute_query(f""" - SELECT * FROM {table_name} ORDER BY a ASC - """) + execute_query(f"SELECT * FROM {table_name} ORDER BY a ASC") with Scenario(f"mapKeys with {data_type}"): node.query(f"SELECT mapKeys( map('key1', {to_data_type(data_type,1)}, 'key2', {to_data_type(data_type,2)}))") @@ -434,11 +430,10 @@ def map_func(self, data_type, node=None): table(name = table_name, data_type = 'Array(String)') with When("I insert the output into a table"): - node.query(f"INSERT INTO {table_name} SELECT mapKeys( map('key1', {to_data_type(data_type,1)}, 'key2', {to_data_type(data_type,2)}))") + node.query(f"INSERT INTO {table_name} SELECT mapKeys( map('key1', {to_data_type(data_type,1)}," + f"'key2', {to_data_type(data_type,2)}))") - execute_query(f""" - SELECT * FROM {table_name} ORDER BY a ASC - """) + execute_query(f"SELECT * FROM {table_name} ORDER BY a ASC") with Scenario(f"mapValues with {data_type}"): node.query(f"SELECT mapValues( map('key1', {to_data_type(data_type,1)}, 'key2', {to_data_type(data_type,2)}))") @@ -449,11 +444,10 @@ def map_func(self, data_type, node=None): table(name = table_name, data_type = f'Array({data_type})') with When("I insert the output into a table"): - node.query(f"INSERT INTO {table_name} SELECT mapValues( map('key1', {to_data_type(data_type,1)}, 'key2', {to_data_type(data_type,2)}))") + node.query(f"INSERT INTO {table_name} SELECT mapValues( map('key1', {to_data_type(data_type,1)}," + f"'key2', {to_data_type(data_type,2)}))") - execute_query(f""" - SELECT * FROM {table_name} ORDER BY a ASC - """) + execute_query(f"SELECT * FROM {table_name} ORDER BY a ASC") @TestFeature @Name("array, tuple, map") @@ -465,7 +459,8 @@ def 
map_func(self, data_type, node=None): ('Decimal256(0)',), ]) def feature(self, node="clickhouse1", stress=None, parallel=None): - """Check that array, tuple, and map functions work with extended precision data types. + """Check that array, tuple, and map functions work with + extended precision data types. """ self.context.node = self.context.cluster.node(node) @@ -481,4 +476,4 @@ def feature(self, node="clickhouse1", stress=None, parallel=None): with Given("I allow experimental map type"): allow_experimental_map_type() - Suite(test=map_func)(data_type=data_type) + Suite(test=map_func)(data_type=data_type) diff --git a/tests/testflows/rbac/tests/views/materialized_view.py b/tests/testflows/rbac/tests/views/materialized_view.py index 26fefb23bf9..d2192e81cf7 100755 --- a/tests/testflows/rbac/tests/views/materialized_view.py +++ b/tests/testflows/rbac/tests/views/materialized_view.py @@ -2103,7 +2103,7 @@ def insert_on_source_table(self, grant_target_name, user_name, node=None): with When("I grant INSERT on the source table"): node.query(f"GRANT INSERT ON {table1_name} TO {grant_target_name}") with Then("I attempt to insert into the source table"): - node.query(f"INSERT INTO {table1_name}(d) VALUES ('01-01-2020')", settings = [("user",f"{user_name}")]) + node.query(f"INSERT INTO {table1_name}(d) VALUES ('2020-01-01')", settings = [("user",f"{user_name}")]) finally: with Finally("I drop the view"): @@ -2152,7 +2152,7 @@ def insert_with_insert_privilege(self, grant_target_name, user_name, node=None): with When("I grant INSERT on the view"): node.query(f"GRANT INSERT ON {view_name} TO {grant_target_name}") with Then("I attempt to insert into the view"): - node.query(f"INSERT INTO {view_name}(d) VALUES ('01-01-2020')", + node.query(f"INSERT INTO {view_name}(d) VALUES ('2020-01-01')", settings = [("user",f"{user_name}")]) finally: @@ -2201,7 +2201,7 @@ def insert_on_target_table(self, grant_target_name, user_name, node=None): with When("I grant INSERT on the target table"): node.query(f"GRANT INSERT ON {table0_name} TO {grant_target_name}") with Then("I attempt to insert into the target table"): - node.query(f"INSERT INTO {table0_name}(d) VALUES ('01-01-2020')", settings = [("user",f"{user_name}")]) + node.query(f"INSERT INTO {table0_name}(d) VALUES ('2020-01-01')", settings = [("user",f"{user_name}")]) finally: with Finally("I drop the view"): @@ -2248,7 +2248,7 @@ def insert_on_target_table(self, grant_target_name, user_name, node=None): with When("I grant INSERT on the target table"): node.query(f"GRANT INSERT ON {implicit_table_name} TO {grant_target_name}") with Then("I attempt to insert into the target table"): - node.query(f"INSERT INTO {implicit_table_name}(d) VALUES ('01-01-2020')", settings = [("user",f"{user_name}")]) + node.query(f"INSERT INTO {implicit_table_name}(d) VALUES ('2020-01-01')", settings = [("user",f"{user_name}")]) finally: with Finally("I drop the view"): diff --git a/tests/testflows/regression.py b/tests/testflows/regression.py index eef6dadb4bb..c2e143a4b1c 100755 --- a/tests/testflows/regression.py +++ b/tests/testflows/regression.py @@ -23,14 +23,14 @@ def regression(self, local, clickhouse_binary_path, stress=None, parallel=None): with Pool(8) as pool: try: run_scenario(pool, tasks, Feature(test=load("example.regression", "regression")), args) - #run_scenario(pool, tasks, Feature(test=load("ldap.regression", "regression")), args) - #run_scenario(pool, tasks, Feature(test=load("rbac.regression", "regression")), args) - #run_scenario(pool, tasks, 
Feature(test=load("aes_encryption.regression", "regression")), args) - #run_scenario(pool, tasks, Feature(test=load("map_type.regression", "regression")), args) - #run_scenario(pool, tasks, Feature(test=load("window_functions.regression", "regression")), args) - #run_scenario(pool, tasks, Feature(test=load("datetime64_extended_range.regression", "regression")), args) + run_scenario(pool, tasks, Feature(test=load("ldap.regression", "regression")), args) + run_scenario(pool, tasks, Feature(test=load("rbac.regression", "regression")), args) + run_scenario(pool, tasks, Feature(test=load("aes_encryption.regression", "regression")), args) + run_scenario(pool, tasks, Feature(test=load("map_type.regression", "regression")), args) + run_scenario(pool, tasks, Feature(test=load("window_functions.regression", "regression")), args) + run_scenario(pool, tasks, Feature(test=load("datetime64_extended_range.regression", "regression")), args) #run_scenario(pool, tasks, Feature(test=load("kerberos.regression", "regression")), args) - #run_scenario(pool, tasks, Feature(test=load("extended_precision_data_types.regression", "regression")), args) + run_scenario(pool, tasks, Feature(test=load("extended_precision_data_types.regression", "regression")), args) finally: join(tasks) diff --git a/tests/testflows/window_functions/regression.py b/tests/testflows/window_functions/regression.py index eb4ab07edbb..778a829082f 100755 --- a/tests/testflows/window_functions/regression.py +++ b/tests/testflows/window_functions/regression.py @@ -41,6 +41,8 @@ xfails = { [(Fail, "not supported, https://github.com/ClickHouse/ClickHouse/issues/19857")], "tests/:/misc/window functions in subquery": [(Fail, "not supported, https://github.com/ClickHouse/ClickHouse/issues/19857")], + "tests/:/misc/in view": + [(Fail, "bug, https://github.com/ClickHouse/ClickHouse/issues/26001")], "tests/:/frame clause/range frame/order by decimal": [(Fail, "Exception: The RANGE OFFSET frame for 'DB::ColumnDecimal >' ORDER BY column is not implemented")], "tests/:/frame clause/range frame/with nulls": From 7b94573506de67be804d767cf731265d6e010029 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Tue, 6 Jul 2021 03:31:39 +0300 Subject: [PATCH 809/931] Remove obsolete trash about libraries --- docs/en/development/style.md | 12 ++---------- docs/ja/development/style.md | 14 +++----------- docs/ru/development/style.md | 12 ++---------- docs/zh/development/style.md | 14 +++----------- 4 files changed, 10 insertions(+), 42 deletions(-) diff --git a/docs/en/development/style.md b/docs/en/development/style.md index 78e3c8fc966..c495e3f0417 100644 --- a/docs/en/development/style.md +++ b/docs/en/development/style.md @@ -749,17 +749,9 @@ If your code in the `master` branch is not buildable yet, exclude it from the bu **1.** The C++20 standard library is used (experimental extensions are allowed), as well as `boost` and `Poco` frameworks. -**2.** If necessary, you can use any well-known libraries available in the OS package. +**2.** It is not allowed to use libraries from OS packages. It is also not allowed to use pre-installed libraries. All libraries should be placed in form of source code in `contrib` directory and built with ClickHouse. -If there is a good solution already available, then use it, even if it means you have to install another library. - -(But be prepared to remove bad libraries from code.) 
-

-**3.** You can install a library that isn’t in the packages, if the packages do not have what you need or have an outdated version or the wrong type of compilation.

-**4.** If the library is small and does not have its own complex build system, put the source files in the `contrib` folder. See [Guidelines for adding new third-party libraries](https://clickhouse.tech/docs/en/development/contrib/#adding-third-party-libraries) for details.

-**5.** Preference is always given to libraries that are already in use.
+**3.** Preference is always given to libraries that are already in use.

 ## General Recommendations {#general-recommendations-1}

diff --git a/docs/ja/development/style.md b/docs/ja/development/style.md
index f4b3f9c77dd..596e29f4414 100644
--- a/docs/ja/development/style.md
+++ b/docs/ja/development/style.md
@@ -749,19 +749,11 @@ CPU命令セットは、サーバー間でサポートされる最小のセッ

 ## 図書館 {#libraries}

-**1.** C++20標準ライブラリが使用されています(実験的な拡張が許可されています)。 `boost` と `Poco` フレームワーク

-**2.** 必要に応じて、OSパッケージで利用可能な既知のライブラリを使用できます。
+**1.** The C++20 standard library is used (experimental extensions are allowed), as well as `boost` and `Poco` frameworks.

-すでに利用可能な良い解決策がある場合は、別のライブラリをインストールする必要がある場合でも、それを使用してください。
+**2.** It is not allowed to use libraries from OS packages. It is also not allowed to use pre-installed libraries. All libraries should be placed in the form of source code in the `contrib` directory and built with ClickHouse.

-(が準備をしておいてくださ去の悪い図書館からのコードです。)

-**3.** パッケージに必要なものがない場合や、古いバージョンや間違った種類のコンパイルがある場合は、パッケージにないライブラリをインストールできます。

-**4.** ライブラリが小さく、独自の複雑なビルドシステムがない場合は、ソースファイルを `contrib` フォルダ。

-**5.** すでに使用されているライブラリが優先されます。
+**3.** Preference is always given to libraries that are already in use.

 ## 一般的な推奨事項 {#general-recommendations-1}

diff --git a/docs/ru/development/style.md b/docs/ru/development/style.md
index de29e629ceb..6e1230b4831 100644
--- a/docs/ru/development/style.md
+++ b/docs/ru/development/style.md
@@ -824,17 +824,9 @@ The dictionary is configured incorrectly.

 **1.** Используются стандартная библиотека C++20 (допустимо использовать экспериментальные расширения) а также фреймворки `boost`, `Poco`.

-**2.** При необходимости, можно использовать любые известные библиотеки, доступные в ОС из пакетов.
+**2.** Библиотеки должны быть расположены в виде исходников в директории `contrib` и собираться вместе с ClickHouse. Не разрешено использовать библиотеки, доступные в пакетах ОС, или устанавливать библиотеки в систему любым другим способом.

-Если есть хорошее готовое решение, то оно используется, даже если для этого придётся установить ещё одну библиотеку.

-(Но будьте готовы к тому, что иногда вам придётся выкидывать плохие библиотеки из кода.)

-**3.** Если в пакетах нет нужной библиотеки, или её версия достаточно старая, или если она собрана не так, как нужно, то можно использовать библиотеку, устанавливаемую не из пакетов.

-**4.** Если библиотека достаточно маленькая и у неё нет своей системы сборки, то следует включить её файлы в проект, в директорию `contrib`.

-**5.** Предпочтение всегда отдаётся уже использующимся библиотекам.
+**3.** Предпочтение отдаётся уже использующимся библиотекам. 
## Общее {#obshchee-1}

diff --git a/docs/zh/development/style.md b/docs/zh/development/style.md
index bb9bfde7b9b..dcbfbc79e33 100644
--- a/docs/zh/development/style.md
+++ b/docs/zh/development/style.md
@@ -742,19 +742,11 @@ CPU指令集是我们服务器中支持的最小集合。 目前,它是SSE 4.2

 ## 库 {#ku}

-**1.** 使用C++20标准库(允许实验性功能),以及 `boost` 和 `Poco` 框架。
+**1.** The C++20 standard library is used (experimental extensions are allowed), as well as `boost` and `Poco` frameworks.

-**2.** 如有必要,您可以使用 OS 包中提供的任何已知库。
+**2.** It is not allowed to use libraries from OS packages. It is also not allowed to use pre-installed libraries. All libraries should be placed in the form of source code in the `contrib` directory and built with ClickHouse.

 如果有一个好的解决方案已经可用,那就使用它,即使这意味着你必须安装另一个库。

 (但要准备从代码中删除不好的库)

 **3.** 如果软件包没有您需要的软件包或者有过时的版本或错误的编译类型,则可以安装不在软件包中的库。

 **4.** 如果库很小并且没有自己的复杂构建系统,请将源文件放在 `contrib` 文件夹中。

 **5.** 始终优先考虑已经使用的库。
+**3.** Preference is always given to libraries that are already in use.

 ## 一般建议 {#yi-ban-jian-yi-1}

From 2e41419e3bdf26f09373cfc84eb69c48e16b927b Mon Sep 17 00:00:00 2001
From: Alexey Milovidov
Date: Tue, 6 Jul 2021 03:34:27 +0300
Subject: [PATCH 810/931] Fix build

---
 src/IO/createReadBufferFromFileBase.cpp | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/src/IO/createReadBufferFromFileBase.cpp b/src/IO/createReadBufferFromFileBase.cpp
index e008f06409d..c6c284f888a 100644
--- a/src/IO/createReadBufferFromFileBase.cpp
+++ b/src/IO/createReadBufferFromFileBase.cpp
@@ -43,6 +43,7 @@ std::unique_ptr createReadBufferFromFileBase(

     if (direct_io_threshold && estimated_size >= direct_io_threshold)
     {
+#if defined(OS_LINUX)
         /** We don't use O_DIRECT because it is tricky and previous implementation has a bug.
           * Instead, we advise the OS that the data should not be cached.
           * This is not exactly the same for two reasons:
@@ -56,6 +57,9 @@ std::unique_ptr createReadBufferFromFileBase(
             LOG_WARNING(&Poco::Logger::get("createReadBufferFromFileBase"),
                 "Cannot request 'posix_fadvise' with POSIX_FADV_DONTNEED for file {}", filename);
         }
+#else
+        (void)direct_io_threshold;
+#endif

         return res;
     }

From 504d2c0c56492480eda7b6dd16dacefcb3e12712 Mon Sep 17 00:00:00 2001
From: Alexey Milovidov
Date: Tue, 6 Jul 2021 05:31:09 +0300
Subject: [PATCH 811/931] Remove old code

---
 src/Interpreters/Context_fwd.h | 3 ---
 1 file changed, 3 deletions(-)

diff --git a/src/Interpreters/Context_fwd.h b/src/Interpreters/Context_fwd.h
index 99c7d29f084..2564912a297 100644
--- a/src/Interpreters/Context_fwd.h
+++ b/src/Interpreters/Context_fwd.h
@@ -21,12 +21,9 @@ using Scalars = std::map;

 class Context;

 /// Most used types have shorter names
-/// TODO: in the first part of refactoring all the context pointers are non-const.
 using ContextPtr = std::shared_ptr;
-using ContextConstPtr = ContextPtr; /// For compatibility. Use ContextPtr.
 using ContextMutablePtr = std::shared_ptr;
 using ContextWeakPtr = std::weak_ptr;
-using ContextWeakConstPtr = ContextWeakPtr; /// For compatibility. Use ContextWeakPtr. 
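/// [Editorial aside, not part of patch 811 — an illustrative sketch of how the
/// surviving aliases are meant to be used once the compatibility names above
/// are gone. Assuming the usual split where ContextPtr is a
/// std::shared_ptr<const Context> and ContextMutablePtr is a
/// std::shared_ptr<Context>, call sites pick the flavour explicitly:
///
///     void analyzeQuery(ContextPtr context);            // read-only shared access
///     void applySettings(ContextMutablePtr context);    // may mutate the Context
///
/// A mutable pointer converts implicitly to the read-only one, so mutable
/// holders can call read-only APIs, but not the other way around.]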
using ContextWeakMutablePtr = std::weak_ptr; template From e245f90a1ffa127e5ee75f6853da292bc72f5016 Mon Sep 17 00:00:00 2001 From: alexey-milovidov Date: Tue, 6 Jul 2021 08:32:21 +0300 Subject: [PATCH 812/931] Update createReadBufferFromFileBase.cpp --- src/IO/createReadBufferFromFileBase.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/IO/createReadBufferFromFileBase.cpp b/src/IO/createReadBufferFromFileBase.cpp index c6c284f888a..cc0f5dd9e0d 100644 --- a/src/IO/createReadBufferFromFileBase.cpp +++ b/src/IO/createReadBufferFromFileBase.cpp @@ -41,9 +41,9 @@ std::unique_ptr createReadBufferFromFileBase( ProfileEvents::increment(ProfileEvents::CreatedReadBufferOrdinary); auto res = std::make_unique(filename, buffer_size, flags, existing_memory, alignment); +#if defined(OS_LINUX) if (direct_io_threshold && estimated_size >= direct_io_threshold) { -#if defined(OS_LINUX) /** We don't use O_DIRECT because it is tricky and previous implementation has a bug. * Instead, we advise the OS that the data should not be cached. * This is not exactly the same for two reasons: From 0269e34f189975508e44378d1d5e391d9c42cc63 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Tue, 6 Jul 2021 08:45:54 +0300 Subject: [PATCH 813/931] tests/queries/0_stateless$ wc -l *.sql | grep -P '^\s+0' | awk '{ print $2 }' | xargs sed -i 's/$/\n/' --- tests/queries/0_stateless/00292_parser_tuple_element.sql | 2 +- .../queries/0_stateless/00842_array_with_constant_overflow.sql | 2 +- tests/queries/0_stateless/01271_show_privileges.sql | 2 +- tests/queries/0_stateless/01668_test_toMonth_mysql_dialect.sql | 2 +- tests/queries/0_stateless/01669_test_toYear_mysql_dialect.sql | 2 +- tests/queries/0_stateless/01670_test_repeat_mysql_dialect.sql | 2 +- .../queries/0_stateless/01671_test_toQuarter_mysql_dialect.sql | 2 +- tests/queries/0_stateless/01672_test_toSecond_mysql_dialect.sql | 2 +- tests/queries/0_stateless/01673_test_toMinute_mysql_dialect.sql | 2 +- tests/queries/0_stateless/01787_map_remote.sql | 2 +- 10 files changed, 10 insertions(+), 10 deletions(-) diff --git a/tests/queries/0_stateless/00292_parser_tuple_element.sql b/tests/queries/0_stateless/00292_parser_tuple_element.sql index bb28b771dce..6d43ac9c738 100644 --- a/tests/queries/0_stateless/00292_parser_tuple_element.sql +++ b/tests/queries/0_stateless/00292_parser_tuple_element.sql @@ -1 +1 @@ -SELECT ('a', 'b').2 \ No newline at end of file +SELECT ('a', 'b').2 diff --git a/tests/queries/0_stateless/00842_array_with_constant_overflow.sql b/tests/queries/0_stateless/00842_array_with_constant_overflow.sql index b31efb89686..ffd5fecde10 100644 --- a/tests/queries/0_stateless/00842_array_with_constant_overflow.sql +++ b/tests/queries/0_stateless/00842_array_with_constant_overflow.sql @@ -1 +1 @@ -SELECT arrayWithConstant(-231.37104, -138); -- { serverError 128 } \ No newline at end of file +SELECT arrayWithConstant(-231.37104, -138); -- { serverError 128 } diff --git a/tests/queries/0_stateless/01271_show_privileges.sql b/tests/queries/0_stateless/01271_show_privileges.sql index efd6ddb200c..e3210a7ae00 100644 --- a/tests/queries/0_stateless/01271_show_privileges.sql +++ b/tests/queries/0_stateless/01271_show_privileges.sql @@ -1 +1 @@ -SHOW PRIVILEGES; \ No newline at end of file +SHOW PRIVILEGES; diff --git a/tests/queries/0_stateless/01668_test_toMonth_mysql_dialect.sql b/tests/queries/0_stateless/01668_test_toMonth_mysql_dialect.sql index fa2e1e41555..bdde3e7b825 100644 --- 
a/tests/queries/0_stateless/01668_test_toMonth_mysql_dialect.sql +++ b/tests/queries/0_stateless/01668_test_toMonth_mysql_dialect.sql @@ -1 +1 @@ -SELECT MONTH(toDateTime('2016-06-15 23:00:00')); \ No newline at end of file +SELECT MONTH(toDateTime('2016-06-15 23:00:00')); diff --git a/tests/queries/0_stateless/01669_test_toYear_mysql_dialect.sql b/tests/queries/0_stateless/01669_test_toYear_mysql_dialect.sql index f7cd84314e2..afd79e2c1ba 100644 --- a/tests/queries/0_stateless/01669_test_toYear_mysql_dialect.sql +++ b/tests/queries/0_stateless/01669_test_toYear_mysql_dialect.sql @@ -1 +1 @@ -SELECT YEAR(toDateTime('2016-06-15 23:00:00')); \ No newline at end of file +SELECT YEAR(toDateTime('2016-06-15 23:00:00')); diff --git a/tests/queries/0_stateless/01670_test_repeat_mysql_dialect.sql b/tests/queries/0_stateless/01670_test_repeat_mysql_dialect.sql index 29fe81012ec..ae2bdb46412 100644 --- a/tests/queries/0_stateless/01670_test_repeat_mysql_dialect.sql +++ b/tests/queries/0_stateless/01670_test_repeat_mysql_dialect.sql @@ -1 +1 @@ -SELECT REPEAT('Test', 3); \ No newline at end of file +SELECT REPEAT('Test', 3); diff --git a/tests/queries/0_stateless/01671_test_toQuarter_mysql_dialect.sql b/tests/queries/0_stateless/01671_test_toQuarter_mysql_dialect.sql index b6fa41f8b49..369f2b47723 100644 --- a/tests/queries/0_stateless/01671_test_toQuarter_mysql_dialect.sql +++ b/tests/queries/0_stateless/01671_test_toQuarter_mysql_dialect.sql @@ -1 +1 @@ -SELECT QUARTER(toDateTime('2016-06-15 23:00:00')); \ No newline at end of file +SELECT QUARTER(toDateTime('2016-06-15 23:00:00')); diff --git a/tests/queries/0_stateless/01672_test_toSecond_mysql_dialect.sql b/tests/queries/0_stateless/01672_test_toSecond_mysql_dialect.sql index adb72b9843c..0306fde14cd 100644 --- a/tests/queries/0_stateless/01672_test_toSecond_mysql_dialect.sql +++ b/tests/queries/0_stateless/01672_test_toSecond_mysql_dialect.sql @@ -1 +1 @@ -SELECT SECOND(toDateTime('2016-06-15 23:00:00')); \ No newline at end of file +SELECT SECOND(toDateTime('2016-06-15 23:00:00')); diff --git a/tests/queries/0_stateless/01673_test_toMinute_mysql_dialect.sql b/tests/queries/0_stateless/01673_test_toMinute_mysql_dialect.sql index 4ac7106158a..5d188b5b95b 100644 --- a/tests/queries/0_stateless/01673_test_toMinute_mysql_dialect.sql +++ b/tests/queries/0_stateless/01673_test_toMinute_mysql_dialect.sql @@ -1 +1 @@ -SELECT MINUTE(toDateTime('2016-06-15 23:00:00')); \ No newline at end of file +SELECT MINUTE(toDateTime('2016-06-15 23:00:00')); diff --git a/tests/queries/0_stateless/01787_map_remote.sql b/tests/queries/0_stateless/01787_map_remote.sql index 854eafa0a50..748316c8044 100644 --- a/tests/queries/0_stateless/01787_map_remote.sql +++ b/tests/queries/0_stateless/01787_map_remote.sql @@ -1 +1 @@ -SELECT map('a', 1, 'b', 2) FROM remote('127.0.0.{1,2}', system, one); \ No newline at end of file +SELECT map('a', 1, 'b', 2) FROM remote('127.0.0.{1,2}', system, one); From 26009227868d056fd27d250e95337e9e44924283 Mon Sep 17 00:00:00 2001 From: alesapin Date: Tue, 6 Jul 2021 11:36:39 +0300 Subject: [PATCH 814/931] Followup fix --- src/Storages/MergeTree/DropPartsRanges.cpp | 12 +++++++++--- src/Storages/MergeTree/DropPartsRanges.h | 1 + src/Storages/MergeTree/ReplicatedMergeTreeQueue.cpp | 3 +++ 3 files changed, 13 insertions(+), 3 deletions(-) diff --git a/src/Storages/MergeTree/DropPartsRanges.cpp b/src/Storages/MergeTree/DropPartsRanges.cpp index e9cf07fb51f..0dfcc18ea3e 100644 --- a/src/Storages/MergeTree/DropPartsRanges.cpp +++ 
b/src/Storages/MergeTree/DropPartsRanges.cpp @@ -9,12 +9,13 @@ namespace ErrorCodes extern const int LOGICAL_ERROR; } -bool DropPartsRanges::isAffectedByDropRange(const ReplicatedMergeTreeLogEntry & entry, std::string & postpone_reason) const + +bool DropPartsRanges::isAffectedByDropRange(const std::string & new_part_name, std::string & postpone_reason) const { - if (entry.new_part_name.empty()) + if (new_part_name.empty()) return false; - MergeTreePartInfo entry_info = MergeTreePartInfo::fromPartName(entry.new_part_name, format_version); + MergeTreePartInfo entry_info = MergeTreePartInfo::fromPartName(new_part_name, format_version); for (const auto & [znode, drop_range] : drop_ranges) { if (!drop_range.isDisjoint(entry_info)) @@ -27,6 +28,11 @@ bool DropPartsRanges::isAffectedByDropRange(const ReplicatedMergeTreeLogEntry & return false; } +bool DropPartsRanges::isAffectedByDropRange(const ReplicatedMergeTreeLogEntry & entry, std::string & postpone_reason) const +{ + return isAffectedByDropRange(entry.new_part_name, postpone_reason); +} + void DropPartsRanges::addDropRange(const ReplicatedMergeTreeLogEntryPtr & entry, Poco::Logger * /*log*/) { if (entry->type != ReplicatedMergeTreeLogEntry::DROP_RANGE) diff --git a/src/Storages/MergeTree/DropPartsRanges.h b/src/Storages/MergeTree/DropPartsRanges.h index 23f38b70420..fea6ce3be4e 100644 --- a/src/Storages/MergeTree/DropPartsRanges.h +++ b/src/Storages/MergeTree/DropPartsRanges.h @@ -21,6 +21,7 @@ public: {} bool isAffectedByDropRange(const ReplicatedMergeTreeLogEntry & entry, std::string & postpone_reason) const; + bool isAffectedByDropRange(const std::string & new_part_name, std::string & postpone_reason) const; bool hasDropRange(const MergeTreePartInfo & new_drop_range_info) const; diff --git a/src/Storages/MergeTree/ReplicatedMergeTreeQueue.cpp b/src/Storages/MergeTree/ReplicatedMergeTreeQueue.cpp index 8fa69bb2c36..f607a559564 100644 --- a/src/Storages/MergeTree/ReplicatedMergeTreeQueue.cpp +++ b/src/Storages/MergeTree/ReplicatedMergeTreeQueue.cpp @@ -996,6 +996,9 @@ bool ReplicatedMergeTreeQueue::addFuturePartIfNotCoveredByThem(const String & pa { std::lock_guard lock(state_mutex); + if (drop_ranges.isAffectedByDropRange(part_name, reject_reason)) + return false; + if (isNotCoveredByFuturePartsImpl(entry.znode_name, part_name, reject_reason, lock)) { CurrentlyExecuting::setActualPartName(entry, part_name, *this); From 9665d4109267cd62da0dec9f3c6d842ee90833ba Mon Sep 17 00:00:00 2001 From: vdimir Date: Tue, 6 Jul 2021 12:17:26 +0300 Subject: [PATCH 815/931] Force parentheses for DISTINCT ON --- src/Parsers/ParserSelectQuery.cpp | 39 ++++++++++++++----- .../0_stateless/01917_distinct_on.reference | 5 +++ .../queries/0_stateless/01917_distinct_on.sql | 14 ++++++- 3 files changed, 48 insertions(+), 10 deletions(-) diff --git a/src/Parsers/ParserSelectQuery.cpp b/src/Parsers/ParserSelectQuery.cpp index 3f6607be0bc..8272a130422 100644 --- a/src/Parsers/ParserSelectQuery.cpp +++ b/src/Parsers/ParserSelectQuery.cpp @@ -80,12 +80,13 @@ bool ParserSelectQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expected) ASTPtr limit_by_length; ASTPtr limit_by_offset; ASTPtr limit_by_expression_list; + ASTPtr distinct_on_expression_list; ASTPtr limit_offset; ASTPtr limit_length; ASTPtr top_length; ASTPtr settings; - /// WITH expr list + /// WITH expr_list { if (s_with.ignore(pos, expected)) { @@ -97,7 +98,7 @@ bool ParserSelectQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expected) } } - /// SELECT [DISTINCT ON expr] [ALL/DISTINCT] [TOP N 
[WITH TIES]] expr list
+    /// SELECT [ALL/DISTINCT [ON (expr_list)]] [TOP N [WITH TIES]] expr_list
     {
         bool has_all = false;
         if (!s_select.ignore(pos, expected))
@@ -108,18 +109,25 @@ bool ParserSelectQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expected)

         if (s_distinct_on.ignore(pos, expected))
         {
-            if (!exp_list.parse(pos, limit_by_expression_list, expected))
+            if (open_bracket.ignore(pos, expected))
+            {
+                if (!exp_list.parse(pos, distinct_on_expression_list, expected))
+                    return false;
+                if (!close_bracket.ignore(pos, expected))
+                    return false;
+            }
+            else
                 return false;
-            limit_by_length = std::make_shared(Field{UInt8(1)});
         }
-
-        if (s_distinct.ignore(pos, expected))
+        else if (s_distinct.ignore(pos, expected))
+        {
             select_query->distinct = true;
+        }

         if (!has_all && s_all.ignore(pos, expected))
             has_all = true;

-        if (has_all && select_query->distinct)
+        if (has_all && (select_query->distinct || distinct_on_expression_list))
             return false;

         if (s_top.ignore(pos, expected))
@@ -266,15 +274,18 @@ bool ParserSelectQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expected)
             select_query->limit_with_ties = true;
         }

+        if (limit_with_ties_occured && distinct_on_expression_list)
+            throw Exception("Can not use WITH TIES alongside LIMIT BY/DISTINCT ON", ErrorCodes::LIMIT_BY_WITH_TIES_IS_NOT_SUPPORTED);
+
         if (s_by.ignore(pos, expected))
         {
             /// WITH TIES was used alongside LIMIT BY
             /// But there are other kinds of queries, like LIMIT n BY smth LIMIT m WITH TIES, which are allowed.
             /// So we have to ignore WITH TIES exactly in LIMIT BY state.
             if (limit_with_ties_occured)
-                throw Exception("Can not use WITH TIES alongside LIMIT BY", ErrorCodes::LIMIT_BY_WITH_TIES_IS_NOT_SUPPORTED);
+                throw Exception("Can not use WITH TIES alongside LIMIT BY/DISTINCT ON", ErrorCodes::LIMIT_BY_WITH_TIES_IS_NOT_SUPPORTED);

-            if (limit_by_length)
+            if (distinct_on_expression_list)
                 throw Exception("Can not use DISTINCT ON alongside LIMIT BY", ErrorCodes::DISTINCT_ON_AND_LIMIT_BY_TOGETHER);

             limit_by_length = limit_length;
@@ -348,6 +359,16 @@ bool ParserSelectQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expected)
         }
     }

+    if (distinct_on_expression_list)
+    {
+        /// DISTINCT ON and LIMIT BY are mutually exclusive, checked before
+        assert (limit_by_expression_list == nullptr);
+
+        /// Transform `DISTINCT ON expr` to `LIMIT 1 BY expr`
+        limit_by_expression_list = distinct_on_expression_list;
+        limit_by_length = std::make_shared(Field{UInt8(1)});
+    }
+
     /// Because TOP n totally equals LIMIT n
     if (top_length)
         limit_length = top_length;
diff --git a/tests/queries/0_stateless/01917_distinct_on.reference b/tests/queries/0_stateless/01917_distinct_on.reference
index 09e5879c7f6..b5b231e5786 100644
--- a/tests/queries/0_stateless/01917_distinct_on.reference
+++ b/tests/queries/0_stateless/01917_distinct_on.reference
@@ -1,3 +1,8 @@
 1 1 1
 2 2 2
 1 2 2
+1 1 1
+2 2 2
+1 2 2
+1 1 1
+2 2 2
diff --git a/tests/queries/0_stateless/01917_distinct_on.sql b/tests/queries/0_stateless/01917_distinct_on.sql
index f9b12ca6f05..b7875719c92 100644
--- a/tests/queries/0_stateless/01917_distinct_on.sql
+++ b/tests/queries/0_stateless/01917_distinct_on.sql
@@ -4,8 +4,20 @@ CREATE TABLE t1 (`a` UInt32, `b` UInt32, `c` UInt32 ) ENGINE = Memory;
 INSERT INTO t1 VALUES (1, 1, 1), (1, 1, 2), (2, 2, 2), (1, 2, 2);

 SELECT DISTINCT ON (a, b) a, b, c FROM t1;
+SELECT DISTINCT ON (a, b) * FROM t1;
+SELECT DISTINCT ON (a) * FROM t1;

-SELECT DISTINCT ON (a, b) a, b, c FROM t1 LIMIT 1 BY a, b; -- { serverError 590 }
+SELECT DISTINCT ON (a, b) a, b, c FROM t1 
LIMIT 1 BY a, b; -- { clientError 590 } + +SELECT DISTINCT ON a, b a, b FROM t1; -- { clientError 62 } +SELECT DISTINCT ON a a, b FROM t1; -- { clientError 62 } + +-- "Code: 47. DB::Exception: Missing columns: 'DISTINCT'" - error can be better +SELECT DISTINCT ON (a, b) DISTINCT a, b FROM t1; -- { serverError 47 } +SELECT DISTINCT DISTINCT ON (a, b) a, b FROM t1; -- { clientError 62 } + +SELECT ALL DISTINCT ON (a, b) a, b FROM t1; -- { clientError 62 } +SELECT DISTINCT ON (a, b) ALL a, b FROM t1; -- { clientError 62 } DROP TABLE IF EXISTS t1; From 3cef256f7905db0c797c5b93faa3806ef3f2ec94 Mon Sep 17 00:00:00 2001 From: vdimir Date: Tue, 6 Jul 2021 12:22:08 +0300 Subject: [PATCH 816/931] Errorcode DISTINCT_ON_AND_LIMIT_BY_TOGETHER -> UNSUPPORTED_METHOD --- src/Common/ErrorCodes.cpp | 1 - src/Parsers/ParserSelectQuery.cpp | 10 +++++----- 2 files changed, 5 insertions(+), 6 deletions(-) diff --git a/src/Common/ErrorCodes.cpp b/src/Common/ErrorCodes.cpp index e165e184cd3..f4ceef2896a 100644 --- a/src/Common/ErrorCodes.cpp +++ b/src/Common/ErrorCodes.cpp @@ -557,7 +557,6 @@ M(587, CONCURRENT_ACCESS_NOT_SUPPORTED) \ M(588, DISTRIBUTED_BROKEN_BATCH_INFO) \ M(589, DISTRIBUTED_BROKEN_BATCH_FILES) \ - M(590, DISTINCT_ON_AND_LIMIT_BY_TOGETHER) \ \ M(998, POSTGRESQL_CONNECTION_FAILURE) \ M(999, KEEPER_EXCEPTION) \ diff --git a/src/Parsers/ParserSelectQuery.cpp b/src/Parsers/ParserSelectQuery.cpp index 8272a130422..2b7f6bcaaf9 100644 --- a/src/Parsers/ParserSelectQuery.cpp +++ b/src/Parsers/ParserSelectQuery.cpp @@ -17,12 +17,12 @@ namespace DB namespace ErrorCodes { - extern const int TOP_AND_LIMIT_TOGETHER; - extern const int WITH_TIES_WITHOUT_ORDER_BY; + extern const int FIRST_AND_NEXT_TOGETHER; extern const int LIMIT_BY_WITH_TIES_IS_NOT_SUPPORTED; extern const int ROW_AND_ROWS_TOGETHER; - extern const int FIRST_AND_NEXT_TOGETHER; - extern const int DISTINCT_ON_AND_LIMIT_BY_TOGETHER; + extern const int TOP_AND_LIMIT_TOGETHER; + extern const int UNSUPPORTED_METHOD; + extern const int WITH_TIES_WITHOUT_ORDER_BY; } @@ -286,7 +286,7 @@ bool ParserSelectQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expected) throw Exception("Can not use WITH TIES alongside LIMIT BY/DISTINCT ON", ErrorCodes::LIMIT_BY_WITH_TIES_IS_NOT_SUPPORTED); if (distinct_on_expression_list) - throw Exception("Can not use DISTINCT ON alongside LIMIT BY", ErrorCodes::DISTINCT_ON_AND_LIMIT_BY_TOGETHER); + throw Exception("Can not use DISTINCT ON alongside LIMIT BY", ErrorCodes::UNSUPPORTED_METHOD); limit_by_length = limit_length; limit_by_offset = limit_offset; From 481211692955d7c654fb6b6b61b8807d08a9feee Mon Sep 17 00:00:00 2001 From: Maksim Kita Date: Tue, 6 Jul 2021 13:08:09 +0300 Subject: [PATCH 817/931] FunctionSQLJSON ContextPtr build fix --- src/Functions/FunctionSQLJSON.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/Functions/FunctionSQLJSON.h b/src/Functions/FunctionSQLJSON.h index 9e469c4ebac..497909b5242 100644 --- a/src/Functions/FunctionSQLJSON.h +++ b/src/Functions/FunctionSQLJSON.h @@ -144,8 +144,8 @@ template typename Impl> class FunctionSQLJSON : public IFunction, WithConstContext { public: - static FunctionPtr create(ContextConstPtr context_) { return std::make_shared(context_); } - FunctionSQLJSON(ContextConstPtr context_) : WithConstContext(context_) { } + static FunctionPtr create(ContextPtr context_) { return std::make_shared(context_); } + explicit FunctionSQLJSON(ContextPtr context_) : WithConstContext(context_) { } static constexpr auto name = Name::name; String getName() 
const override { return Name::name; } From cd89138d3eb82458a63b0452991022b2599e70ef Mon Sep 17 00:00:00 2001 From: Maksim Kita Date: Tue, 6 Jul 2021 12:36:44 +0300 Subject: [PATCH 818/931] FunctionsLogical const result for non const arguments fix --- src/Functions/FunctionsLogical.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/Functions/FunctionsLogical.cpp b/src/Functions/FunctionsLogical.cpp index 3806ee7511c..f427c9a9440 100644 --- a/src/Functions/FunctionsLogical.cpp +++ b/src/Functions/FunctionsLogical.cpp @@ -575,12 +575,12 @@ ColumnPtr FunctionAnyArityLogical::getConstantResultForNonConstArgum if constexpr (std::is_same_v) { if (has_false_constant) - result_type->createColumnConst(0, static_cast(false)); + result_column = result_type->createColumnConst(0, static_cast(false)); } else if constexpr (std::is_same_v) { if (has_true_constant) - result_type->createColumnConst(0, static_cast(true)); + result_column = result_type->createColumnConst(0, static_cast(true)); } return result_column; From d32d8ceff65980d9d73fe93fb211a6a40eb61e73 Mon Sep 17 00:00:00 2001 From: Vitaliy Zakaznikov Date: Tue, 6 Jul 2021 06:58:16 -0400 Subject: [PATCH 819/931] Changing TestFlows output to use classic mode instead of just new fails. --- docker/test/testflows/runner/Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docker/test/testflows/runner/Dockerfile b/docker/test/testflows/runner/Dockerfile index 9fa028fedca..c20e742fea1 100644 --- a/docker/test/testflows/runner/Dockerfile +++ b/docker/test/testflows/runner/Dockerfile @@ -73,4 +73,4 @@ RUN set -x \ VOLUME /var/lib/docker EXPOSE 2375 ENTRYPOINT ["dockerd-entrypoint.sh"] -CMD ["sh", "-c", "python3 regression.py --no-color -o new-fails --local --clickhouse-binary-path ${CLICKHOUSE_TESTS_SERVER_BIN_PATH} --log test.log ${TESTFLOWS_OPTS}; cat test.log | tfs report results --format json > results.json; /usr/local/bin/process_testflows_result.py || echo -e 'failure\tCannot parse results' > check_status.tsv; find * -type f | grep _instances | grep clickhouse-server | xargs -n1 tar -rvf clickhouse_logs.tar; gzip -9 clickhouse_logs.tar"] +CMD ["sh", "-c", "python3 regression.py --no-color -o classic --local --clickhouse-binary-path ${CLICKHOUSE_TESTS_SERVER_BIN_PATH} --log test.log ${TESTFLOWS_OPTS}; cat test.log | tfs report results --format json > results.json; /usr/local/bin/process_testflows_result.py || echo -e 'failure\tCannot parse results' > check_status.tsv; find * -type f | grep _instances | grep clickhouse-server | xargs -n1 tar -rvf clickhouse_logs.tar; gzip -9 clickhouse_logs.tar"] From 53b23775a9867793a3ea9864a82a9d1b9b32327a Mon Sep 17 00:00:00 2001 From: alesapin Date: Tue, 6 Jul 2021 13:58:53 +0300 Subject: [PATCH 820/931] Fix drop part --- src/Storages/MergeTree/DropPartsRanges.cpp | 9 +++------ src/Storages/MergeTree/DropPartsRanges.h | 17 +++++++++++++---- .../MergeTree/ReplicatedMergeTreeQueue.cpp | 11 +++++++---- 3 files changed, 23 insertions(+), 14 deletions(-) diff --git a/src/Storages/MergeTree/DropPartsRanges.cpp b/src/Storages/MergeTree/DropPartsRanges.cpp index 0dfcc18ea3e..583f91b1e9d 100644 --- a/src/Storages/MergeTree/DropPartsRanges.cpp +++ b/src/Storages/MergeTree/DropPartsRanges.cpp @@ -1,5 +1,4 @@ #include -#include namespace DB { @@ -20,7 +19,7 @@ bool DropPartsRanges::isAffectedByDropRange(const std::string & new_part_name, s { if (!drop_range.isDisjoint(entry_info)) { - postpone_reason = fmt::format("Has DROP RANGE with entry. 
Will postpone it's execution.", drop_range.getPartName()); + postpone_reason = fmt::format("Has DROP RANGE affecting entry {} producing part {}. Will postpone it's execution.", drop_range.getPartName(), new_part_name); return true; } } @@ -33,22 +32,20 @@ bool DropPartsRanges::isAffectedByDropRange(const ReplicatedMergeTreeLogEntry & return isAffectedByDropRange(entry.new_part_name, postpone_reason); } -void DropPartsRanges::addDropRange(const ReplicatedMergeTreeLogEntryPtr & entry, Poco::Logger * /*log*/) +void DropPartsRanges::addDropRange(const ReplicatedMergeTreeLogEntryPtr & entry) { if (entry->type != ReplicatedMergeTreeLogEntry::DROP_RANGE) throw Exception(ErrorCodes::LOGICAL_ERROR, "Trying to add entry of type {} to drop ranges, expected DROP_RANGE", entry->typeToString()); - //LOG_DEBUG(log, "ADD DROP RANGE {}", *entry->getDropRange(format_version)); MergeTreePartInfo entry_info = MergeTreePartInfo::fromPartName(*entry->getDropRange(format_version), format_version); drop_ranges.emplace(entry->znode_name, entry_info); } -void DropPartsRanges::removeDropRange(const ReplicatedMergeTreeLogEntryPtr & entry, Poco::Logger * /*log*/) +void DropPartsRanges::removeDropRange(const ReplicatedMergeTreeLogEntryPtr & entry) { if (entry->type != ReplicatedMergeTreeLogEntry::DROP_RANGE) throw Exception(ErrorCodes::LOGICAL_ERROR, "Trying to remove entry of type {} from drop ranges, expected DROP_RANGE", entry->typeToString()); - //LOG_DEBUG(log, "REMOVE DROP RANGE {}", *entry->getDropRange(format_version)); drop_ranges.erase(entry->znode_name); } diff --git a/src/Storages/MergeTree/DropPartsRanges.h b/src/Storages/MergeTree/DropPartsRanges.h index fea6ce3be4e..4d512263058 100644 --- a/src/Storages/MergeTree/DropPartsRanges.h +++ b/src/Storages/MergeTree/DropPartsRanges.h @@ -1,6 +1,6 @@ #pragma once -#include +#include #include #include #include @@ -8,26 +8,35 @@ namespace DB { +/// All drop ranges in ReplicatedQueue. +/// Used to postpone execution of entries affected by DROP RANGE class DropPartsRanges { private: MergeTreeDataFormatVersion format_version; - std::map drop_ranges; + /// znode_name -> drop_range + std::unordered_map drop_ranges; public: explicit DropPartsRanges(MergeTreeDataFormatVersion format_version_) : format_version(format_version_) {} + /// Entry is affected by DROP_RANGE and must be postponed bool isAffectedByDropRange(const ReplicatedMergeTreeLogEntry & entry, std::string & postpone_reason) const; + + /// Part is affected by DROP_RANGE and must be postponed bool isAffectedByDropRange(const std::string & new_part_name, std::string & postpone_reason) const; + /// Already has equal DROP_RANGE. 
Don't need to assign new one bool hasDropRange(const MergeTreePartInfo & new_drop_range_info) const; - void addDropRange(const ReplicatedMergeTreeLogEntryPtr & entry, Poco::Logger * log); + /// Add DROP_RANGE to map + void addDropRange(const ReplicatedMergeTreeLogEntryPtr & entry); - void removeDropRange(const ReplicatedMergeTreeLogEntryPtr & entry, Poco::Logger * log); + /// Remove DROP_RANGE from map + void removeDropRange(const ReplicatedMergeTreeLogEntryPtr & entry); }; diff --git a/src/Storages/MergeTree/ReplicatedMergeTreeQueue.cpp b/src/Storages/MergeTree/ReplicatedMergeTreeQueue.cpp index f607a559564..b8d36488335 100644 --- a/src/Storages/MergeTree/ReplicatedMergeTreeQueue.cpp +++ b/src/Storages/MergeTree/ReplicatedMergeTreeQueue.cpp @@ -169,9 +169,10 @@ void ReplicatedMergeTreeQueue::insertUnlocked( } else { - drop_ranges.addDropRange(entry, log); + drop_ranges.addDropRange(entry); auto drop_range = *entry->getDropRange(format_version); - /// DROP PARTS removes parts from virtual parts + + /// DROP PARTS (not DROP PARTITIONS) removes parts from virtual parts. MergeTreePartInfo drop_range_info = MergeTreePartInfo::fromPartName(drop_range, format_version); if (!drop_range_info.isFakeDropRangePart() && virtual_parts.getContainingPart(drop_range_info) == drop_range) virtual_parts.removePartAndCoveredParts(drop_range); @@ -271,7 +272,7 @@ void ReplicatedMergeTreeQueue::updateStateOnQueueEntryRemoval( if (entry->type == LogEntry::DROP_RANGE) { - drop_ranges.removeDropRange(entry, log); + drop_ranges.removeDropRange(entry); } if (entry->type == LogEntry::ALTER_METADATA) @@ -284,7 +285,7 @@ void ReplicatedMergeTreeQueue::updateStateOnQueueEntryRemoval( { if (entry->type == LogEntry::DROP_RANGE) { - drop_ranges.removeDropRange(entry, log); + drop_ranges.removeDropRange(entry); } for (const String & virtual_part_name : entry->getVirtualPartNames(format_version)) @@ -996,6 +997,8 @@ bool ReplicatedMergeTreeQueue::addFuturePartIfNotCoveredByThem(const String & pa { std::lock_guard lock(state_mutex); + /// FIXME get rid of actual_part_name. + /// If new covering part jumps over DROP_RANGE we should execute drop range first if (drop_ranges.isAffectedByDropRange(part_name, reject_reason)) return false; From 8ab722a6aff23b84e4ebf78bb55cb4f2b957eab4 Mon Sep 17 00:00:00 2001 From: Vitaliy Zakaznikov Date: Tue, 6 Jul 2021 07:06:22 -0400 Subject: [PATCH 821/931] Small test fixes. 
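[Editorial note on the preceding "Fix drop part" patch, before this commit's diff: the postpone logic above boils down to an interval-disjointness test between the part an entry would produce and every drop range currently queued. A minimal self-contained C++ sketch of that idea, using a hypothetical simplified part descriptor (the real MergeTreePartInfo also carries the partition id, block levels and mutation numbers):

#include <cstdint>
#include <string>
#include <unordered_map>

// Hypothetical stand-in for MergeTreePartInfo: a closed block-number range.
struct PartRange
{
    int64_t min_block = 0;
    int64_t max_block = 0;
};

static bool isDisjoint(const PartRange & a, const PartRange & b)
{
    return a.max_block < b.min_block || b.max_block < a.min_block;
}

// Mirrors the shape of DropPartsRanges::isAffectedByDropRange: an entry is
// postponed iff the part it would produce intersects any queued drop range.
bool isAffectedByDropRange(const std::unordered_map<std::string, PartRange> & drop_ranges,
                           const PartRange & new_part)
{
    for (const auto & [znode_name, drop_range] : drop_ranges)
        if (!isDisjoint(drop_range, new_part))
            return true;
    return false;
}
]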
--- .../window_functions/tests/common.py | 4 ++ .../testflows/window_functions/tests/misc.py | 42 +++++++++---------- 2 files changed, 25 insertions(+), 21 deletions(-) diff --git a/tests/testflows/window_functions/tests/common.py b/tests/testflows/window_functions/tests/common.py index ef694b19a0e..3a6ac95bd9b 100644 --- a/tests/testflows/window_functions/tests/common.py +++ b/tests/testflows/window_functions/tests/common.py @@ -374,6 +374,10 @@ def create_table(self, name, statement, on_cluster=False): node = current().context.node try: with Given(f"I have a {name} table"): + if on_cluster: + node.query(f"DROP TABLE IF EXISTS {name} ON CLUSTER {on_cluster}") + else: + node.query(f"DROP TABLE IF EXISTS {name}") node.query(statement.format(name=name)) yield name finally: diff --git a/tests/testflows/window_functions/tests/misc.py b/tests/testflows/window_functions/tests/misc.py index 8251e751ed9..aca24edfe9c 100644 --- a/tests/testflows/window_functions/tests/misc.py +++ b/tests/testflows/window_functions/tests/misc.py @@ -10,16 +10,16 @@ def subquery_expr_preceding(self): expected = convert_output(""" sum | unique1 -----+--------- - 0 | 0 - 1 | 1 - 3 | 2 - 5 | 3 - 7 | 4 - 9 | 5 - 11 | 6 - 13 | 7 - 15 | 8 - 17 | 9 + 0 | 0 + 1 | 1 + 3 | 2 + 5 | 3 + 7 | 4 + 9 | 5 + 11 | 6 + 13 | 7 + 15 | 8 + 17 | 9 """) execute_query( @@ -272,7 +272,7 @@ def windows_with_same_partitioning_but_different_ordering(self): but different ordering. """ expected = convert_output(""" - first | last + first | last ------+----- 7 | 7 7 | 9 @@ -367,16 +367,16 @@ def in_view(self): expected = convert_output(""" number | sum_rows ---------+---------- - 1 | 3 - 2 | 6 - 3 | 9 - 4 | 12 - 5 | 15 - 6 | 18 - 7 | 21 - 8 | 24 - 9 | 27 - 10 | 19 + 1 | 3 + 2 | 6 + 3 | 9 + 4 | 12 + 5 | 15 + 6 | 18 + 7 | 21 + 8 | 24 + 9 | 27 + 10 | 19 """) execute_query( From e9540f06214cec7dbc037e62db4862cc5885bb54 Mon Sep 17 00:00:00 2001 From: alesapin Date: Tue, 6 Jul 2021 14:13:13 +0300 Subject: [PATCH 822/931] Remove debug logs --- src/Storages/MergeTree/ReplicatedMergeTreeQueue.cpp | 7 ------- 1 file changed, 7 deletions(-) diff --git a/src/Storages/MergeTree/ReplicatedMergeTreeQueue.cpp b/src/Storages/MergeTree/ReplicatedMergeTreeQueue.cpp index b8d36488335..856b5f1bf0c 100644 --- a/src/Storages/MergeTree/ReplicatedMergeTreeQueue.cpp +++ b/src/Storages/MergeTree/ReplicatedMergeTreeQueue.cpp @@ -1028,14 +1028,7 @@ bool ReplicatedMergeTreeQueue::shouldExecuteLogEntry( } if (entry.type != LogEntry::DROP_RANGE && drop_ranges.isAffectedByDropRange(entry, out_postpone_reason)) - { - //LOG_DEBUG(log, "POSTPONE ENTRY {} ({}) PRODUCING PART {} BECAUSE OF DROP RANGE {}", entry.znode_name, entry.typeToString(), entry.new_part_name); return false; - } - else - { - //LOG_DEBUG(log, "NO DROP RANGE FOUND FOR PART {} OF TYPE {}", entry.new_part_name, entry.typeToString()); - } /// Check that fetches pool is not overloaded if ((entry.type == LogEntry::GET_PART || entry.type == LogEntry::ATTACH_PART) From 869a41ffb9542ad994f658847883ed4589d931d0 Mon Sep 17 00:00:00 2001 From: dankondr Date: Mon, 25 Jan 2021 17:09:29 +0300 Subject: [PATCH 823/931] Add leftPadString() function --- src/Functions/leftPadString.cpp | 194 ++++++++++++++++++++++ src/Functions/registerFunctionsString.cpp | 4 + src/Functions/ya.make | 1 + 3 files changed, 199 insertions(+) create mode 100644 src/Functions/leftPadString.cpp diff --git a/src/Functions/leftPadString.cpp b/src/Functions/leftPadString.cpp new file mode 100644 index 00000000000..cdcfb46eb73 --- /dev/null +++ 
b/src/Functions/leftPadString.cpp @@ -0,0 +1,194 @@ +#include +#include +#include + +#include +#include +#include + +#include + +namespace DB +{ +namespace ErrorCodes +{ + extern const int ILLEGAL_COLUMN; + extern const int ILLEGAL_TYPE_OF_ARGUMENT; + extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH; + extern const int BAD_ARGUMENTS; +} + +namespace +{ + struct LeftPadStringImpl + { + static void vector( + const ColumnString::Chars & data, + const ColumnString::Offsets & offsets, + const size_t length, + const String & padstr, + ColumnString::Chars & res_data, + ColumnString::Offsets & res_offsets) + { + size_t size = offsets.size(); + res_data.resize((length + 1 /* zero terminator */) * size); + res_offsets.resize(size); + + const size_t padstr_size = padstr.size(); + + ColumnString::Offset prev_offset = 0; + ColumnString::Offset res_prev_offset = 0; + for (size_t i = 0; i < size; ++i) + { + size_t data_length = offsets[i] - prev_offset - 1 /* zero terminator */; + if (data_length < length) + { + for (size_t j = 0; j < length - data_length; ++j) + res_data[res_prev_offset + j] = padstr[j % padstr_size]; + memcpy(&res_data[res_prev_offset + length - data_length], &data[prev_offset], data_length); + } + else + { + memcpy(&res_data[res_prev_offset], &data[prev_offset], length); + } + res_data[res_prev_offset + length] = 0; + res_prev_offset += length + 1; + res_offsets[i] = res_prev_offset; + } + } + + static void vectorFixed( + const ColumnFixedString::Chars & data, + const size_t n, + const size_t length, + const String & padstr, + ColumnFixedString::Chars & res_data) + { + const size_t padstr_size = padstr.size(); + const size_t size = data.size() / n; + res_data.resize(length * size); + for (size_t i = 0; i < size; ++i) + { + if (length < n) + { + memcpy(&res_data[i * length], &data[i * n], length); + } + else + { + for (size_t j = 0; j < length - n; ++j) + res_data[i * length + j] = padstr[j % padstr_size]; + memcpy(&res_data[i * length + length - n], &data[i * n], n); + } + } + } + }; + + class FunctionLeftPadString : public IFunction + { + public: + static constexpr auto name = "leftPadString"; + static FunctionPtr create(const ContextPtr) { return std::make_shared(); } + + String getName() const override { return name; } + + bool isVariadic() const override { return true; } + size_t getNumberOfArguments() const override { return 0; } + + bool useDefaultImplementationForConstants() const override { return true; } + + DataTypePtr getReturnTypeImpl(const DataTypes & arguments) const override + { + size_t number_of_arguments = arguments.size(); + + if (number_of_arguments != 2 && number_of_arguments != 3) + throw Exception( + ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH, + "Number of arguments for function {} doesn't match: passed {}, should be 2 or 3", + getName(), + toString(number_of_arguments)); + + if (!isStringOrFixedString(arguments[0])) + throw Exception( + ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Illegal type {} of argument of function {}", arguments[0]->getName(), getName()); + + if (!isNativeNumber(arguments[1])) + throw Exception( + ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, + "Illegal type {} of second argument of function {}", + arguments[1]->getName(), + getName()); + + if (number_of_arguments == 3 && !isStringOrFixedString(arguments[2])) + throw Exception( + ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, + "Illegal type {} of third argument of function {}", + arguments[2]->getName(), + getName()); + + return arguments[0]; + } + + ColumnPtr executeImpl(const ColumnsWithTypeAndName & 
arguments, const DataTypePtr &, size_t /*input_rows_count*/) const override + { + const ColumnPtr str_column = arguments[0].column; + String padstr = " "; + if (arguments.size() == 3) + { + const ColumnConst * pad_column = checkAndGetColumnConst(arguments[2].column.get()); + if (!pad_column) + throw Exception( + ErrorCodes::ILLEGAL_COLUMN, + "Illegal column {} of third ('pad') argument of function {}. Must be constant string.", + arguments[2].column->getName(), + getName()); + + padstr = pad_column->getValue(); + } + + const ColumnConst * len_column = checkAndGetColumnConst(arguments[1].column.get()); + if (!len_column) + throw Exception( + ErrorCodes::ILLEGAL_COLUMN, + "Illegal column {} of second ('len') argument of function {}. Must be a positive integer.", + arguments[1].column->getName(), + getName()); + Int64 len = len_column->getInt(0); + if (len <= 0) + throw Exception( + ErrorCodes::BAD_ARGUMENTS, + "Illegal value {} of second ('len') argument of function {}. Must be a positive integer.", + arguments[1].column->getName(), + getName()); + + if (const ColumnString * strings = checkAndGetColumn(str_column.get())) + { + auto col_res = ColumnString::create(); + LeftPadStringImpl::vector( + strings->getChars(), strings->getOffsets(), len, padstr, col_res->getChars(), col_res->getOffsets()); + return col_res; + } + else if (const ColumnFixedString * strings_fixed = checkAndGetColumn(str_column.get())) + { + auto col_res = ColumnFixedString::create(len); + LeftPadStringImpl::vectorFixed(strings_fixed->getChars(), strings_fixed->getN(), len, padstr, col_res->getChars()); + return col_res; + } + else + { + throw Exception( + ErrorCodes::ILLEGAL_COLUMN, + "Illegal column {} of first ('str') argument of function {}. Must be a string or fixed string.", + arguments[0].column->getName(), + getName()); + } + } + }; +} + +void registerFunctionLeftPadString(FunctionFactory & factory) +{ + factory.registerFunction(FunctionFactory::CaseInsensitive); + factory.registerAlias("lpad", "leftPadString", FunctionFactory::CaseInsensitive); +} + +} diff --git a/src/Functions/registerFunctionsString.cpp b/src/Functions/registerFunctionsString.cpp index f6f95489f82..1c487981844 100644 --- a/src/Functions/registerFunctionsString.cpp +++ b/src/Functions/registerFunctionsString.cpp @@ -33,6 +33,9 @@ void registerFunctionRegexpQuoteMeta(FunctionFactory &); void registerFunctionNormalizeQuery(FunctionFactory &); void registerFunctionNormalizedQueryHash(FunctionFactory &); void registerFunctionCountMatches(FunctionFactory &); +void registerFunctionEncodeXMLComponent(FunctionFactory & factory); +void registerFunctionDecodeXMLComponent(FunctionFactory & factory); +void registerFunctionLeftPadString(FunctionFactory & factory); void registerFunctionEncodeXMLComponent(FunctionFactory &); void registerFunctionDecodeXMLComponent(FunctionFactory &); void registerFunctionExtractTextFromHTML(FunctionFactory &); @@ -74,6 +77,7 @@ void registerFunctionsString(FunctionFactory & factory) registerFunctionCountMatches(factory); registerFunctionEncodeXMLComponent(factory); registerFunctionDecodeXMLComponent(factory); + registerFunctionLeftPadString(factory); registerFunctionExtractTextFromHTML(factory); #if USE_BASE64 registerFunctionBase64Encode(factory); diff --git a/src/Functions/ya.make b/src/Functions/ya.make index d6da7eadd35..ba14e9a3e02 100644 --- a/src/Functions/ya.make +++ b/src/Functions/ya.make @@ -332,6 +332,7 @@ SRCS( jumpConsistentHash.cpp lcm.cpp least.cpp + leftPadString.cpp lengthUTF8.cpp less.cpp lessOrEquals.cpp 
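[Editorial note on the leftPadString patch above — a hedged, self-contained restatement of the padding rule it implements; the names here are illustrative, not ClickHouse APIs. The pad string is cycled on the left until the requested length is reached, and inputs already at or over that length are truncated to it:

#include <iostream>
#include <string>

// Simplified model of LeftPadStringImpl: pad `s` on the left with `pad`
// (repeated cyclically) up to `length`; longer inputs are cut to `length`.
// Assumes a non-empty pad string, as the SQL-level function defaults to " ".
std::string leftPad(const std::string & s, size_t length, const std::string & pad = " ")
{
    if (s.size() >= length)
        return s.substr(0, length);           // truncation branch
    std::string res;
    res.reserve(length);
    for (size_t j = 0; j < length - s.size(); ++j)
        res += pad[j % pad.size()];           // cycle through the pad string
    return res + s;
}

int main()
{
    std::cout << leftPad("abc", 7, "*") << '\n';  // ****abc
    std::cout << leftPad("abcdef", 4) << '\n';    // abcd (truncated, as in the patch)
}
]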
From 75e26b93d066b424eababe08d29eda9f0e95edec Mon Sep 17 00:00:00 2001 From: alesapin Date: Tue, 6 Jul 2021 15:05:18 +0300 Subject: [PATCH 824/931] Review bug fixes --- src/DataStreams/TTLAggregationAlgorithm.cpp | 2 +- src/DataStreams/TTLColumnAlgorithm.cpp | 3 ++- src/DataStreams/TTLDeleteAlgorithm.cpp | 3 ++- src/Storages/MergeTree/TTLMergeSelector.cpp | 3 ++- 4 files changed, 7 insertions(+), 4 deletions(-) diff --git a/src/DataStreams/TTLAggregationAlgorithm.cpp b/src/DataStreams/TTLAggregationAlgorithm.cpp index 12d28ff4aea..287ecb7dd6e 100644 --- a/src/DataStreams/TTLAggregationAlgorithm.cpp +++ b/src/DataStreams/TTLAggregationAlgorithm.cpp @@ -37,7 +37,7 @@ TTLAggregationAlgorithm::TTLAggregationAlgorithm( aggregator = std::make_unique(params); - if (isMinTTLExpired()) + if (isMaxTTLExpired()) new_ttl_info.finished = true; } diff --git a/src/DataStreams/TTLColumnAlgorithm.cpp b/src/DataStreams/TTLColumnAlgorithm.cpp index 5c0a5e1ae83..1318ea382db 100644 --- a/src/DataStreams/TTLColumnAlgorithm.cpp +++ b/src/DataStreams/TTLColumnAlgorithm.cpp @@ -21,7 +21,8 @@ TTLColumnAlgorithm::TTLColumnAlgorithm( new_ttl_info = old_ttl_info; is_fully_empty = false; } - else + + if (isMaxTTLExpired()) new_ttl_info.finished = true; } diff --git a/src/DataStreams/TTLDeleteAlgorithm.cpp b/src/DataStreams/TTLDeleteAlgorithm.cpp index f1bbe6d4b7d..ea7a0b235ec 100644 --- a/src/DataStreams/TTLDeleteAlgorithm.cpp +++ b/src/DataStreams/TTLDeleteAlgorithm.cpp @@ -9,7 +9,8 @@ TTLDeleteAlgorithm::TTLDeleteAlgorithm( { if (!isMinTTLExpired()) new_ttl_info = old_ttl_info; - else + + if (isMaxTTLExpired()) new_ttl_info.finished = true; } diff --git a/src/Storages/MergeTree/TTLMergeSelector.cpp b/src/Storages/MergeTree/TTLMergeSelector.cpp index ab686c9952d..6a42ce039ac 100644 --- a/src/Storages/MergeTree/TTLMergeSelector.cpp +++ b/src/Storages/MergeTree/TTLMergeSelector.cpp @@ -111,8 +111,9 @@ bool TTLDeleteMergeSelector::isTTLAlreadySatisfied(const IMergeSelector::Part & if (only_drop_parts) return false; + /// All TTL satisfied if (!part.ttl_infos->hasAnyNonFinishedTTLs()) - return false; + return true; return !part.shall_participate_in_merges; } From c6e13e6e2e52e643ad7791088542259a9e15e0c7 Mon Sep 17 00:00:00 2001 From: vdimir Date: Tue, 6 Jul 2021 15:18:47 +0300 Subject: [PATCH 825/931] Add leading zeros in function bin --- src/Common/hex.h | 15 ------------- src/Functions/FunctionsCoding.h | 22 ++++--------------- .../0_stateless/01926_bin_unbin.reference | 8 +++---- 3 files changed, 8 insertions(+), 37 deletions(-) diff --git a/src/Common/hex.h b/src/Common/hex.h index 69bc6f4f79f..82eff776244 100644 --- a/src/Common/hex.h +++ b/src/Common/hex.h @@ -1,6 +1,5 @@ #pragma once #include -#include /// Maps 0..15 to 0..9A..F or 0..9a..f correspondingly. @@ -47,20 +46,6 @@ inline void writeBinByte(UInt8 byte, void * out) memcpy(out, &bin_byte_to_char_table[static_cast(byte) * 8], 8); } -inline size_t writeBinByteNoLeadZeros(UInt8 byte, char * out) -{ - if (byte == 0) - return 0; - - int clz = std::countl_zero(byte); - for (Int8 offset = sizeof(UInt8) * 8 - clz - 1; offset >= 0; --offset) - { - *out = ((byte >> offset) & 1) ? 
'1' : '0'; - ++out; - } - return sizeof(UInt8) * 8 - clz; -} - /// Produces hex representation of an unsigned int with leading zeros (for checksums) template inline void writeHexUIntImpl(TUInt uint_, char * out, const char * const table) diff --git a/src/Functions/FunctionsCoding.h b/src/Functions/FunctionsCoding.h index 33b26afc8dc..72f2aa1be1c 100644 --- a/src/Functions/FunctionsCoding.h +++ b/src/Functions/FunctionsCoding.h @@ -1260,7 +1260,7 @@ struct HexImpl { UInt8 byte = x >> offset; - /// Leading zeros. + /// Skip leading zeros if (byte == 0 && !was_nonzero && offset) continue; @@ -1349,26 +1349,12 @@ struct BinImpl UInt8 byte = x >> offset; /// Skip leading zeros - if (byte == 0 && !was_nonzero) + if (byte == 0 && !was_nonzero && offset) continue; - /// First non-zero byte without leading zeros - if (was_nonzero) - { - writeBinByte(byte, out); - out += word_size; - } - else - { - size_t written = writeBinByteNoLeadZeros(byte, out); - out += written; - } was_nonzero = true; - } - if (!was_nonzero) - { - *out = '0'; - ++out; + writeBinByte(byte, out); + out += word_size; } *out = '\0'; ++out; diff --git a/tests/queries/0_stateless/01926_bin_unbin.reference b/tests/queries/0_stateless/01926_bin_unbin.reference index ace28af5211..96104d0e86f 100644 --- a/tests/queries/0_stateless/01926_bin_unbin.reference +++ b/tests/queries/0_stateless/01926_bin_unbin.reference @@ -1,8 +1,8 @@ -0 -1 -1010 -1111111 +00000000 +00000001 +00001010 +01111111 11111111 00110000 0011000100110000 From a50a98c595e978c6e3c15e64609501f7ef030cf8 Mon Sep 17 00:00:00 2001 From: vdimir Date: Tue, 6 Jul 2021 15:24:30 +0300 Subject: [PATCH 826/931] Add copuple cases to test bin_unbin --- tests/queries/0_stateless/01926_bin_unbin.reference | 3 +++ tests/queries/0_stateless/01926_bin_unbin.sql | 3 +++ 2 files changed, 6 insertions(+) diff --git a/tests/queries/0_stateless/01926_bin_unbin.reference b/tests/queries/0_stateless/01926_bin_unbin.reference index 96104d0e86f..f84a858e449 100644 --- a/tests/queries/0_stateless/01926_bin_unbin.reference +++ b/tests/queries/0_stateless/01926_bin_unbin.reference @@ -4,6 +4,9 @@ 00001010 01111111 11111111 +0000000100000000 +0000000111111111 +0000001000000000 00110000 0011000100110000 111001101011010110001011111010001010111110010101 diff --git a/tests/queries/0_stateless/01926_bin_unbin.sql b/tests/queries/0_stateless/01926_bin_unbin.sql index 3593448d407..555770d09c6 100644 --- a/tests/queries/0_stateless/01926_bin_unbin.sql +++ b/tests/queries/0_stateless/01926_bin_unbin.sql @@ -4,6 +4,9 @@ select bin(1); select bin(10); select bin(127); select bin(255); +select bin(256); +select bin(511); +select bin(512); select bin('0'); select bin('10'); select bin('测试'); From 96536a9cbebe06522e2bd332003f4a59009d7aca Mon Sep 17 00:00:00 2001 From: Vladimir Date: Tue, 6 Jul 2021 15:32:28 +0300 Subject: [PATCH 827/931] Update tests/queries/0_stateless/01917_distinct_on.sql --- tests/queries/0_stateless/01917_distinct_on.sql | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/tests/queries/0_stateless/01917_distinct_on.sql b/tests/queries/0_stateless/01917_distinct_on.sql index b7875719c92..e394f219b62 100644 --- a/tests/queries/0_stateless/01917_distinct_on.sql +++ b/tests/queries/0_stateless/01917_distinct_on.sql @@ -7,7 +7,7 @@ SELECT DISTINCT ON (a, b) a, b, c FROM t1; SELECT DISTINCT ON (a, b) * FROM t1; SELECT DISTINCT ON (a) * FROM t1; -SELECT DISTINCT ON (a, b) a, b, c FROM t1 LIMIT 1 BY a, b; -- { clientError 590 } +SELECT DISTINCT ON (a, b) a, b, c FROM t1 LIMIT 1 BY a, 
b; -- { clientError 1 }

 SELECT DISTINCT ON a, b a, b FROM t1; -- { clientError 62 }
 SELECT DISTINCT ON a a, b FROM t1; -- { clientError 62 }
@@ -20,4 +20,3 @@ SELECT ALL DISTINCT ON (a, b) a, b FROM t1; -- { clientError 62 }
 SELECT DISTINCT ON (a, b) ALL a, b FROM t1; -- { clientError 62 }

 DROP TABLE IF EXISTS t1;
-

From 5b0bc8a7fbf846cb45012e8009d58341656bf12c Mon Sep 17 00:00:00 2001
From: Anton Popov
Date: Tue, 6 Jul 2021 16:16:20 +0300
Subject: [PATCH 828/931] Update arcadia_skip_list.txt

---
 tests/queries/0_stateless/arcadia_skip_list.txt | 1 +
 1 file changed, 1 insertion(+)

diff --git a/tests/queries/0_stateless/arcadia_skip_list.txt b/tests/queries/0_stateless/arcadia_skip_list.txt
index 903c72f044a..838a2da9aff 100644
--- a/tests/queries/0_stateless/arcadia_skip_list.txt
+++ b/tests/queries/0_stateless/arcadia_skip_list.txt
@@ -253,3 +253,4 @@
 01923_different_expression_name_alias
 01932_null_valid_identifier
 00918_json_functions
+01889_sql_json_functions

From 3a69d06fc9444701cc7b50eff12efe0043b6804b Mon Sep 17 00:00:00 2001
From: Anton Popov
Date: Tue, 6 Jul 2021 16:36:18 +0300
Subject: [PATCH 829/931] try fix flaky tests

---
 tests/clickhouse-test | 1 +
 1 file changed, 1 insertion(+)

diff --git a/tests/clickhouse-test b/tests/clickhouse-test
index 63624246190..4de3dc7c2bf 100755
--- a/tests/clickhouse-test
+++ b/tests/clickhouse-test
@@ -41,6 +41,7 @@ MESSAGES_TO_RETRY = [
     "Operation timed out",
     "ConnectionPoolWithFailover: Connection failed at try",
     "DB::Exception: New table appeared in database being dropped or detached. Try again",
+    "is already started to be removing by another replica right now",
     DISTRIBUTED_DDL_TIMEOUT_MSG # FIXME
 ]

From 1f53404e668e46b50874236f76b426e42d38b872 Mon Sep 17 00:00:00 2001
From: Anton Popov
Date: Tue, 6 Jul 2021 16:58:12 +0300
Subject: [PATCH 830/931] better retries

---
 tests/clickhouse-test | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/tests/clickhouse-test b/tests/clickhouse-test
index 4de3dc7c2bf..73bd19f8174 100755
--- a/tests/clickhouse-test
+++ b/tests/clickhouse-test
@@ -29,7 +29,7 @@ import string
 import multiprocessing
 from contextlib import closing

-DISTRIBUTED_DDL_TIMEOUT_MSG = "is executing longer than distributed_ddl_task_timeout (=120)"
+DISTRIBUTED_DDL_TIMEOUT_MSG = "is executing longer than distributed_ddl_task_timeout"

 MESSAGES_TO_RETRY = [
     "DB::Exception: ZooKeeper session has been expired",

From 9b3ceda57c74f34316673dcbb37abd8b608d1302 Mon Sep 17 00:00:00 2001
From: Nicolae Vartolomei
Date: Tue, 6 Jul 2021 17:10:29 +0100
Subject: [PATCH 831/931] Increment ZooKeeperWatch metric only if the callback is registered

---
 src/Common/ZooKeeper/ZooKeeperImpl.cpp | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/src/Common/ZooKeeper/ZooKeeperImpl.cpp b/src/Common/ZooKeeper/ZooKeeperImpl.cpp
index a717052a1ba..a7e3a73ddd0 100644
--- a/src/Common/ZooKeeper/ZooKeeperImpl.cpp
+++ b/src/Common/ZooKeeper/ZooKeeperImpl.cpp
@@ -566,7 +566,6 @@ void ZooKeeper::sendThread()
                 if (info.watch)
                 {
                     info.request->has_watch = true;
-                    CurrentMetrics::add(CurrentMetrics::ZooKeeperWatch);
                 }

                 if (expired)
@@ -773,6 +772,8 @@ void ZooKeeper::receiveEvent()

         if (add_watch)
         {
+            CurrentMetrics::add(CurrentMetrics::ZooKeeperWatch);
+
             /// The key of watches should exclude the root_path
             String req_path = request_info.request->getPath();
             removeRootPath(req_path, root_path);

From 24f5ad8920104a7ca93f3f85e80704b26aaf8688 Mon Sep 17 00:00:00 2001
From: Nicolae Vartolomei
Date: Tue, 6 Jul 2021 17:13:13 +0100
Subject: [PATCH 832/931] 
Subtract number of watch callbacks as this is what we actually count --- src/Common/ZooKeeper/ZooKeeperImpl.cpp | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/Common/ZooKeeper/ZooKeeperImpl.cpp b/src/Common/ZooKeeper/ZooKeeperImpl.cpp index a7e3a73ddd0..37cc1dddce2 100644 --- a/src/Common/ZooKeeper/ZooKeeperImpl.cpp +++ b/src/Common/ZooKeeper/ZooKeeperImpl.cpp @@ -906,6 +906,7 @@ void ZooKeeper::finalize(bool error_send, bool error_receive) { std::lock_guard lock(watches_mutex); + Int64 watch_callback_count = 0; for (auto & path_watches : watches) { WatchResponse response; @@ -915,6 +916,7 @@ void ZooKeeper::finalize(bool error_send, bool error_receive) for (auto & callback : path_watches.second) { + watch_callback_count += 1; if (callback) { try @@ -929,7 +931,7 @@ void ZooKeeper::finalize(bool error_send, bool error_receive) } } - CurrentMetrics::sub(CurrentMetrics::ZooKeeperWatch, watches.size()); + CurrentMetrics::sub(CurrentMetrics::ZooKeeperWatch, watch_callback_count); watches.clear(); } From b44bd174cc40ba1fdd8e4e185a5e383621529687 Mon Sep 17 00:00:00 2001 From: vdimir Date: Tue, 6 Jul 2021 19:14:22 +0300 Subject: [PATCH 833/931] Change error code for DISTINCT ON and LIMIT BY, finally --- src/Parsers/ParserSelectQuery.cpp | 4 ++-- tests/queries/0_stateless/01917_distinct_on.sql | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/src/Parsers/ParserSelectQuery.cpp b/src/Parsers/ParserSelectQuery.cpp index 2b7f6bcaaf9..255595caa0e 100644 --- a/src/Parsers/ParserSelectQuery.cpp +++ b/src/Parsers/ParserSelectQuery.cpp @@ -20,8 +20,8 @@ namespace ErrorCodes extern const int FIRST_AND_NEXT_TOGETHER; extern const int LIMIT_BY_WITH_TIES_IS_NOT_SUPPORTED; extern const int ROW_AND_ROWS_TOGETHER; + extern const int SYNTAX_ERROR; extern const int TOP_AND_LIMIT_TOGETHER; - extern const int UNSUPPORTED_METHOD; extern const int WITH_TIES_WITHOUT_ORDER_BY; } @@ -286,7 +286,7 @@ bool ParserSelectQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expected) throw Exception("Can not use WITH TIES alongside LIMIT BY/DISTINCT ON", ErrorCodes::LIMIT_BY_WITH_TIES_IS_NOT_SUPPORTED); if (distinct_on_expression_list) - throw Exception("Can not use DISTINCT ON alongside LIMIT BY", ErrorCodes::UNSUPPORTED_METHOD); + throw Exception("Can not use DISTINCT ON alongside LIMIT BY", ErrorCodes::SYNTAX_ERROR); limit_by_length = limit_length; limit_by_offset = limit_offset; diff --git a/tests/queries/0_stateless/01917_distinct_on.sql b/tests/queries/0_stateless/01917_distinct_on.sql index e394f219b62..75dd8c0b7b8 100644 --- a/tests/queries/0_stateless/01917_distinct_on.sql +++ b/tests/queries/0_stateless/01917_distinct_on.sql @@ -7,7 +7,7 @@ SELECT DISTINCT ON (a, b) a, b, c FROM t1; SELECT DISTINCT ON (a, b) * FROM t1; SELECT DISTINCT ON (a) * FROM t1; -SELECT DISTINCT ON (a, b) a, b, c FROM t1 LIMIT 1 BY a, b; -- { clientError 1 } +SELECT DISTINCT ON (a, b) a, b, c FROM t1 LIMIT 1 BY a, b; -- { clientError 62 } SELECT DISTINCT ON a, b a, b FROM t1; -- { clientError 62 } SELECT DISTINCT ON a a, b FROM t1; -- { clientError 62 } From a8fdc41193e07e1f17bdf5172bcbf93165aac81a Mon Sep 17 00:00:00 2001 From: alesapin Date: Tue, 6 Jul 2021 19:51:23 +0300 Subject: [PATCH 834/931] Fix bug and add more trash to test --- src/Storages/MergeTree/DropPartsRanges.cpp | 4 +++- .../MergeTree/ReplicatedMergeTreeLogEntry.cpp | 10 ++++++++++ .../MergeTree/ReplicatedMergeTreeLogEntry.h | 4 ++++ .../MergeTree/ReplicatedMergeTreeQueue.cpp | 19 +++++++++++++------ 
 .../0_stateless/01154_move_partition_long.sh  | 13 +++++++++++++
 5 files changed, 43 insertions(+), 7 deletions(-)

diff --git a/src/Storages/MergeTree/DropPartsRanges.cpp b/src/Storages/MergeTree/DropPartsRanges.cpp
index 583f91b1e9d..ab808f59970 100644
--- a/src/Storages/MergeTree/DropPartsRanges.cpp
+++ b/src/Storages/MergeTree/DropPartsRanges.cpp
@@ -46,7 +46,9 @@ void DropPartsRanges::removeDropRange(const ReplicatedMergeTreeLogEntryPtr & ent
     if (entry->type != ReplicatedMergeTreeLogEntry::DROP_RANGE)
         throw Exception(ErrorCodes::LOGICAL_ERROR, "Trying to remove entry of type {} from drop ranges, expected DROP_RANGE", entry->typeToString());

-    drop_ranges.erase(entry->znode_name);
+    auto it = drop_ranges.find(entry->znode_name);
+    assert(it != drop_ranges.end());
+    drop_ranges.erase(it);
 }

 bool DropPartsRanges::hasDropRange(const MergeTreePartInfo & new_drop_range_info) const
diff --git a/src/Storages/MergeTree/ReplicatedMergeTreeLogEntry.cpp b/src/Storages/MergeTree/ReplicatedMergeTreeLogEntry.cpp
index d326ad10370..18e90952721 100644
--- a/src/Storages/MergeTree/ReplicatedMergeTreeLogEntry.cpp
+++ b/src/Storages/MergeTree/ReplicatedMergeTreeLogEntry.cpp
@@ -431,6 +431,16 @@ std::optional<String> ReplicatedMergeTreeLogEntryData::getDropRange(MergeTreeDat
     return {};
 }

+bool ReplicatedMergeTreeLogEntryData::isDropPart(MergeTreeDataFormatVersion format_version) const
+{
+    if (type == DROP_RANGE)
+    {
+        auto drop_range_info = MergeTreePartInfo::fromPartName(new_part_name, format_version);
+        return !drop_range_info.isFakeDropRangePart();
+    }
+    return false;
+}
+
 Strings ReplicatedMergeTreeLogEntryData::getVirtualPartNames(MergeTreeDataFormatVersion format_version) const
 {
     /// Doesn't produce any part
diff --git a/src/Storages/MergeTree/ReplicatedMergeTreeLogEntry.h b/src/Storages/MergeTree/ReplicatedMergeTreeLogEntry.h
index eb82572b107..3752c9deb8f 100644
--- a/src/Storages/MergeTree/ReplicatedMergeTreeLogEntry.h
+++ b/src/Storages/MergeTree/ReplicatedMergeTreeLogEntry.h
@@ -143,6 +143,10 @@ struct ReplicatedMergeTreeLogEntryData
     /// Returns fake part for drop range (for DROP_RANGE and REPLACE_RANGE)
     std::optional<String> getDropRange(MergeTreeDataFormatVersion format_version) const;

+    /// This entry is DROP PART, not DROP PARTITION. They both have the same
+    /// DROP_RANGE entry type, but differ in the information about the drop range.
+    bool isDropPart(MergeTreeDataFormatVersion format_version) const;
+
     /// Access under queue_mutex, see ReplicatedMergeTreeQueue.
     bool currently_executing = false;    /// Whether the action is executing now.
     bool removed_by_other_entry = false;
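The new isDropPart() helper feeds the queue bookkeeping in the next file, which maintains the invariant virtual_parts = current_parts + parts produced by queued entries. Since a DROP PART entry produces no part, the dropped part has to leave virtual_parts as soon as the entry is queued. A toy model of that bookkeeping (simplified stand-ins, not the real MergeTree classes, and without the covered-parts handling the real code performs):

#include <cassert>
#include <set>
#include <string>

struct ToyQueue
{
    std::set<std::string> current_parts;
    std::set<std::string> virtual_parts;

    void insertDropPart(const std::string & part)
    {
        /// Mirrors insertUnlocked(): a queued DROP PART produces nothing,
        /// so the part leaves virtual_parts immediately.
        virtual_parts.erase(part);
    }

    void executeDropPart(const std::string & part)
    {
        /// Mirrors updateStateOnQueueEntryRemoval(): once the entry is
        /// done, the part disappears from current_parts as well.
        current_parts.erase(part);
    }
};

int main()
{
    ToyQueue q;
    q.current_parts = {"all_1_1_0", "all_2_2_0"};
    q.virtual_parts = q.current_parts;

    q.insertDropPart("all_2_2_0");
    assert(q.virtual_parts.count("all_2_2_0") == 0);

    q.executeDropPart("all_2_2_0");
    assert(q.current_parts.count("all_2_2_0") == 0);
}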
diff --git a/src/Storages/MergeTree/ReplicatedMergeTreeQueue.cpp b/src/Storages/MergeTree/ReplicatedMergeTreeQueue.cpp
index 856b5f1bf0c..aaa76009d74 100644
--- a/src/Storages/MergeTree/ReplicatedMergeTreeQueue.cpp
+++ b/src/Storages/MergeTree/ReplicatedMergeTreeQueue.cpp
@@ -170,12 +170,11 @@ void ReplicatedMergeTreeQueue::insertUnlocked(
     else
     {
         drop_ranges.addDropRange(entry);
-        auto drop_range = *entry->getDropRange(format_version);

-        /// DROP PARTS (not DROP PARTITIONS) removes parts from virtual parts.
-        MergeTreePartInfo drop_range_info = MergeTreePartInfo::fromPartName(drop_range, format_version);
-        if (!drop_range_info.isFakeDropRangePart() && virtual_parts.getContainingPart(drop_range_info) == drop_range)
-            virtual_parts.removePartAndCoveredParts(drop_range);
+        /// DROP PART removes parts, so we remove them from virtual parts to
+        /// preserve the invariant virtual_parts = current_parts + queue
+        if (entry->isDropPart(format_version))
+            virtual_parts.removePartAndCoveredParts(*entry->getDropRange(format_version));

         queue.push_front(entry);
     }
@@ -266,7 +265,15 @@ void ReplicatedMergeTreeQueue::updateStateOnQueueEntryRemoval(

         if (auto drop_range_part_name = entry->getDropRange(format_version))
         {
-            current_parts.remove(*drop_range_part_name);
+            MergeTreePartInfo drop_range_info = MergeTreePartInfo::fromPartName(*drop_range_part_name, format_version);
+
+            /// DROP PART doesn't produce virtual parts, so remove all covered
+            /// parts from current_parts.
+            if (entry->isDropPart(format_version))
+                current_parts.removePartAndCoveredParts(*drop_range_part_name);
+            else
+                current_parts.remove(*drop_range_part_name);
+
             virtual_parts.remove(*drop_range_part_name);
         }

diff --git a/tests/queries/0_stateless/01154_move_partition_long.sh b/tests/queries/0_stateless/01154_move_partition_long.sh
index 1ce40770e46..b57f94b66eb 100755
--- a/tests/queries/0_stateless/01154_move_partition_long.sh
+++ b/tests/queries/0_stateless/01154_move_partition_long.sh
@@ -72,6 +72,7 @@ function drop_partition_thread()
     done
 }

+
 function optimize_thread()
 {
     while true; do
@@ -85,12 +86,23 @@ function optimize_thread()
     done
 }

+function drop_part_thread()
+{
+    while true; do
+        REPLICA=$(($RANDOM % 16))
+        part=$($CLICKHOUSE_CLIENT -q "SELECT name FROM system.parts WHERE active AND database='$CLICKHOUSE_DATABASE' and table='dst_$REPLICA' ORDER BY rand() LIMIT 1")
+        $CLICKHOUSE_CLIENT -q "ALTER TABLE dst_$REPLICA DROP PART '$part'" 2>/dev/null
+        sleep 0.$RANDOM;
+    done
+}
+
 #export -f create_drop_thread;
 export -f insert_thread;
 export -f move_partition_src_dst_thread;
 export -f replace_partition_src_src_thread;
 export -f drop_partition_thread;
 export -f optimize_thread;
+export -f drop_part_thread;

 TIMEOUT=60

@@ -102,6 +114,7 @@ timeout $TIMEOUT bash -c move_partition_src_dst_thread &
 timeout $TIMEOUT bash -c replace_partition_src_src_thread &
 timeout $TIMEOUT bash -c drop_partition_thread &
 timeout $TIMEOUT bash -c optimize_thread &
+timeout $TIMEOUT bash -c drop_part_thread &

 wait

 for ((i=0; i<16; i++)) do

From 1c39df068eb114b50be6b9608a3c6fca8ce1fbb7 Mon Sep 17 00:00:00 2001
From: alesapin
Date: Tue, 6 Jul 2021 19:52:54 +0300
Subject: [PATCH 835/931] Remove accidental change

---
 tests/queries/0_stateless/01154_move_partition_long.sh | 1 -
 1 file changed, 1 deletion(-)

diff --git a/tests/queries/0_stateless/01154_move_partition_long.sh b/tests/queries/0_stateless/01154_move_partition_long.sh
index b57f94b66eb..dd16b2dc63d 100755
--- a/tests/queries/0_stateless/01154_move_partition_long.sh
+++ b/tests/queries/0_stateless/01154_move_partition_long.sh
@@ -72,7 +72,6 @@ function drop_partition_thread()
     done
 }

-
 function optimize_thread()
 {
     while true; do

From 3dee74df54ad3c1827510601c159abc650fc97c2 Mon Sep 17 00:00:00 2001
From: alesapin
Date: Tue, 6 Jul 2021 19:53:54 +0300
Subject: [PATCH 836/931] Comment

---
 src/Storages/MergeTree/ReplicatedMergeTreeQueue.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/Storages/MergeTree/ReplicatedMergeTreeQueue.h b/src/Storages/MergeTree/ReplicatedMergeTreeQueue.h
index
f97ab74bd28..e49d80fc832 100644 --- a/src/Storages/MergeTree/ReplicatedMergeTreeQueue.h +++ b/src/Storages/MergeTree/ReplicatedMergeTreeQueue.h @@ -102,7 +102,7 @@ private: ActiveDataPartSet virtual_parts; - /// + /// Dropped ranges inserted into queue DropPartsRanges drop_ranges; /// A set of mutations loaded from ZooKeeper. From 717775d8c020a88b1cf0dfeda5de7bf245f2051d Mon Sep 17 00:00:00 2001 From: Olga Revyakina Date: Tue, 6 Jul 2021 21:06:34 +0300 Subject: [PATCH 837/931] Links again --- docs/en/interfaces/http.md | 2 +- docs/ru/interfaces/http.md | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/en/interfaces/http.md b/docs/en/interfaces/http.md index f4237cc2eae..0f497f9af80 100644 --- a/docs/en/interfaces/http.md +++ b/docs/en/interfaces/http.md @@ -18,7 +18,7 @@ Ok. Web UI can be accessed here: `http://localhost:8123/play`. -![Web UI](../images/play.png#) +![Web UI](../images/play.png) In health-check scripts use `GET /ping` request. This handler always returns “Ok.” (with a line feed at the end). Available from version 18.12.13. diff --git a/docs/ru/interfaces/http.md b/docs/ru/interfaces/http.md index 83a6a30a071..fcd9b949ad8 100644 --- a/docs/ru/interfaces/http.md +++ b/docs/ru/interfaces/http.md @@ -17,7 +17,7 @@ Ok. Веб-интерфейс доступен по адресу: `http://localhost:8123/play`. -![Веб-интерфейс](../images/play.png#) +![Веб-интерфейс](../images/play.png) В скриптах проверки доступности вы можете использовать `GET /ping` без параметров. Если сервер доступен всегда возвращается «Ok.» (с переводом строки на конце). From c0798df656ee5ad919bb7a2ba7f35a0ce66717d4 Mon Sep 17 00:00:00 2001 From: olgarev <56617294+olgarev@users.noreply.github.com> Date: Tue, 6 Jul 2021 21:08:11 +0300 Subject: [PATCH 838/931] Apply suggestions from code review Co-authored-by: Anna <42538400+adevyatova@users.noreply.github.com> --- docs/ru/interfaces/http.md | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/docs/ru/interfaces/http.md b/docs/ru/interfaces/http.md index fcd9b949ad8..895172ab319 100644 --- a/docs/ru/interfaces/http.md +++ b/docs/ru/interfaces/http.md @@ -19,7 +19,7 @@ Ok. ![Веб-интерфейс](../images/play.png) -В скриптах проверки доступности вы можете использовать `GET /ping` без параметров. Если сервер доступен всегда возвращается «Ok.» (с переводом строки на конце). +В скриптах проверки доступности вы можете использовать `GET /ping` без параметров. Если сервер доступен, всегда возвращается «Ok.» (с переводом строки на конце). ``` bash $ curl 'http://localhost:8123/ping' @@ -29,8 +29,7 @@ Ok. Запрос отправляется в виде URL параметра с именем `query`. Или как тело запроса при использовании метода POST. Или начало запроса в URL параметре query, а продолжение POST-ом (зачем это нужно, будет объяснено ниже). Размер URL ограничен 16KB, это следует учитывать при отправке больших запросов. -В случае успеха вам вернётся код ответа 200 и результат обработки запроса в теле ответа. -В случае ошибки вам вернётся код ответа 500 и текст с описанием ошибки в теле ответа. +В случае успеха возвращается код ответа 200 и результат обработки запроса в теле ответа, в случае ошибки — код ответа 500 и текст с описанием ошибки в теле ответа. При использовании метода GET выставляется настройка readonly. То есть, для запросов, модифицирующих данные, можно использовать только метод POST. Сам запрос при этом можно отправлять как в теле POST запроса, так и в параметре URL. 
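For reference alongside the HTTP interface documentation edited here, a minimal client sketch. It assumes libcurl and a default server on localhost:8123, and the table in the INSERT is hypothetical; none of this is part of the patch itself. A read-only query goes into the `query` URL parameter of a GET request, while a data-modifying query must be sent as a POST body:

#include <curl/curl.h>

int main()
{
    curl_global_init(CURL_GLOBAL_DEFAULT);

    /// GET: read-only, query passed URL-encoded in the `query` parameter.
    CURL * get = curl_easy_init();
    curl_easy_setopt(get, CURLOPT_URL, "http://localhost:8123/?query=SELECT%201");
    curl_easy_perform(get); /// Default write callback prints "1" to stdout.
    curl_easy_cleanup(get);

    /// POST: required for queries that modify data (GET implies readonly).
    /// Assumes a table `t` with a single numeric column already exists.
    CURL * post = curl_easy_init();
    curl_easy_setopt(post, CURLOPT_URL, "http://localhost:8123/");
    curl_easy_setopt(post, CURLOPT_POSTFIELDS, "INSERT INTO t VALUES (1)");
    curl_easy_perform(post);
    curl_easy_cleanup(post);

    curl_global_cleanup();
    return 0;
}

Build with -lcurl. A non-2xx status (code 500 on query errors, per the text above) can be checked with curl_easy_getinfo and CURLINFO_RESPONSE_CODE.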
@@ -81,7 +80,7 @@ ECT 1 По умолчанию данные возвращаются в формате [TabSeparated](formats.md#tabseparated). -Можно попросить любой другой формат с помощью секции FORMAT запроса. +Можно указать любой другой формат с помощью секции FORMAT запроса. Кроме того, вы можете использовать параметр URL-адреса `default_format` или заголовок `X-ClickHouse-Format`, чтобы указать формат по умолчанию, отличный от `TabSeparated`. From e7a938f860bcecc9f2062f007636b1a27334f7ca Mon Sep 17 00:00:00 2001 From: Vitaliy Zakaznikov Date: Tue, 6 Jul 2021 17:31:20 -0400 Subject: [PATCH 839/931] Updating map_type tests due to JSON changes. Crossing out tests with (U)Int64 keys due to double quoting bug. --- tests/testflows/map_type/regression.py | 7 ++++ tests/testflows/map_type/tests/common.py | 20 ----------- tests/testflows/map_type/tests/feature.py | 41 +++++++++++------------ 3 files changed, 26 insertions(+), 42 deletions(-) diff --git a/tests/testflows/map_type/regression.py b/tests/testflows/map_type/regression.py index b239d91ccf9..9f9c2b2b261 100755 --- a/tests/testflows/map_type/regression.py +++ b/tests/testflows/map_type/regression.py @@ -89,6 +89,13 @@ xfails = { [(Fail, "LowCardinality(FixedString) as key not supported")], "tests/table map with value string/LowCardinality(String) for key and value": [(Fail, "LowCardinality(String) as key not supported")], + # JSON related + "tests/table map with duplicated keys/Map(Int64, String))": + [(Fail, "new bug due to JSON changes")], + "tests/table map with key integer/UInt64": + [(Fail, "new bug due to JSON changes")], + "tests/table map with value integer/UInt64": + [(Fail, "new bug due to JSON changes")] } xflags = { diff --git a/tests/testflows/map_type/tests/common.py b/tests/testflows/map_type/tests/common.py index a3a0d0ef0b1..6ce1b6ab8a6 100644 --- a/tests/testflows/map_type/tests/common.py +++ b/tests/testflows/map_type/tests/common.py @@ -12,26 +12,6 @@ def getuid(): testname = f"{basename(current().name).replace(' ', '_').replace(',','')}" return testname + "_" + str(uuid.uuid1()).replace('-', '_') -@TestStep(Given) -def allow_experimental_map_type(self): - """Set allow_experimental_map_type = 1 - """ - setting = ("allow_experimental_map_type", 1) - default_query_settings = None - - try: - with By("adding allow_experimental_map_type to the default query settings"): - default_query_settings = getsattr(current().context, "default_query_settings", []) - default_query_settings.append(setting) - yield - finally: - with Finally("I remove allow_experimental_map_type from the default query settings"): - if default_query_settings: - try: - default_query_settings.pop(default_query_settings.index(setting)) - except ValueError: - pass - @TestStep(Given) def create_table(self, name, statement, on_cluster=False): """Create table. 
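The expectation changes in feature.py below all follow from one rule: JSON object keys must be strings, so integer map keys now serialize as "1":127 rather than 1:127. A minimal sketch of that quoting rule, independent of the ClickHouse serializers:

#include <cstdio>
#include <map>
#include <string>

static std::string toJSON(const std::map<int, int> & m)
{
    std::string out = "{";
    bool first = true;
    for (const auto & [key, value] : m)
    {
        if (!first)
            out += ',';
        first = false;
        /// Keys are always rendered as quoted strings; values stay numeric.
        out += '"' + std::to_string(key) + "\":" + std::to_string(value);
    }
    out += '}';
    return out;
}

int main()
{
    /// Prints {"1":127,"2":0}, matching the updated test expectations.
    std::printf("%s\n", toJSON({{1, 127}, {2, 0}}).c_str());
}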
diff --git a/tests/testflows/map_type/tests/feature.py b/tests/testflows/map_type/tests/feature.py index 5fd48844825..5d7c900d591 100755 --- a/tests/testflows/map_type/tests/feature.py +++ b/tests/testflows/map_type/tests/feature.py @@ -254,19 +254,19 @@ def table_map_select_key_with_value_string(self, type, data, output): RQ_SRS_018_ClickHouse_Map_DataType_Value_Integer("1.0") ) @Examples("type data output", [ - ("Map(Int8, Int8)", "('2020-01-01', map(1,127,2,0,3,-128))", '{"d":"2020-01-01","m":{1:127,2:0,3:-128}}', Name("Int8")), - ("Map(Int8, UInt8)", "('2020-01-01', map(1,0,2,255))", '{"d":"2020-01-01","m":{1:0,2:255}}', Name("UInt8")), - ("Map(Int8, Int16)", "('2020-01-01', map(1,127,2,0,3,-128))", '{"d":"2020-01-01","m":{1:32767,2:0,3:-32768}}', Name("Int16")), - ("Map(Int8, UInt16)", "('2020-01-01', map(1,0,2,65535))", '{"d":"2020-01-01","m":{1:0,2:65535}}', Name("UInt16")), - ("Map(Int8, Int32)", "('2020-01-01', map(1,127,2,0,3,-128))", '{"d":"2020-01-01","m":{1:2147483647,2:0,3:-2147483648}}', Name("Int32")), - ("Map(Int8, UInt32)", "('2020-01-01', map(1,0,2,4294967295))", '{"d":"2020-01-01","m":{1:0,2:4294967295}}', Name("UInt32")), + ("Map(Int8, Int8)", "('2020-01-01', map(1,127,2,0,3,-128))", '{"d":"2020-01-01","m":{"1":127,"2":0,"3":-128}}', Name("Int8")), + ("Map(Int8, UInt8)", "('2020-01-01', map(1,0,2,255))", '{"d":"2020-01-01","m":{"1":0,"2":255}}', Name("UInt8")), + ("Map(Int8, Int16)", "('2020-01-01', map(1,127,2,0,3,-128))", '{"d":"2020-01-01","m":{"1":32767,"2":0,"3":-32768}}', Name("Int16")), + ("Map(Int8, UInt16)", "('2020-01-01', map(1,0,2,65535))", '{"d":"2020-01-01","m":{"1":0,"2":65535}}', Name("UInt16")), + ("Map(Int8, Int32)", "('2020-01-01', map(1,127,2,0,3,-128))", '{"d":"2020-01-01","m":{"1":2147483647,"2":0,"3":-2147483648}}', Name("Int32")), + ("Map(Int8, UInt32)", "('2020-01-01', map(1,0,2,4294967295))", '{"d":"2020-01-01","m":{"1":0,"2":4294967295}}', Name("UInt32")), ("Map(Int8, Int64)", "('2020-01-01', map(1,9223372036854775807,2,0,3,-9223372036854775808))", '{"d":"2020-01-01","m":{1:"9223372036854775807",2:"0",3:"-9223372036854775808"}}', Name("Int64")), ("Map(Int8, UInt64)", "('2020-01-01', map(1,0,2,18446744073709551615))", '{"d":"2020-01-01","m":{1:"0",2:"18446744073709551615"}}', Name("UInt64")), ("Map(Int8, Int128)", "('2020-01-01', map(1,170141183460469231731687303715884105727,2,0,3,-170141183460469231731687303715884105728))", '{"d":"2020-01-01","m":{1:"170141183460469231731687303715884105727",2:"0",3:"-170141183460469231731687303715884105728"}}', Name("Int128")), ("Map(Int8, Int256)", "('2020-01-01', map(1,57896044618658097711785492504343953926634992332820282019728792003956564819967,2,0,3,-57896044618658097711785492504343953926634992332820282019728792003956564819968))", '{"d":"2020-01-01","m":{1:"57896044618658097711785492504343953926634992332820282019728792003956564819967",2:"0",3:"-57896044618658097711785492504343953926634992332820282019728792003956564819968"}}', Name("Int256")), ("Map(Int8, UInt256)", "('2020-01-01', map(1,0,2,115792089237316195423570985008687907853269984665640564039457584007913129639935))", '{"d":"2020-01-01","m":{1:"0",2:"115792089237316195423570985008687907853269984665640564039457584007913129639935"}}', Name("UInt256")), - ("Map(Int8, Nullable(Int8))", "('2020-01-01', map(1,toNullable(1)))", '{"d":"2020-01-01","m":{1:1}}', Name("toNullable")), - ("Map(Int8, Nullable(Int8))", "('2020-01-01', map(1,toNullable(NULL)))", '{"d":"2020-01-01","m":{1:null}}', Name("toNullable(NULL)")), + ("Map(Int8, Nullable(Int8))", 
"('2020-01-01', map(1,toNullable(1)))", '{"d":"2020-01-01","m":{"1":1}}', Name("toNullable")), + ("Map(Int8, Nullable(Int8))", "('2020-01-01', map(1,toNullable(NULL)))", '{"d":"2020-01-01","m":{"1":null}}', Name("toNullable(NULL)")), ]) def table_map_with_value_integer(self, type, data, output): """Check what values we can insert into map type column with value integer. @@ -281,8 +281,8 @@ def table_map_with_value_integer(self, type, data, output): ("Map(String, Array(Int8))", "('2020-01-01', map('key',[]))", '{"d":"2020-01-01","m":{"key":[]}}', Name("empty array")), ("Map(String, Array(Int8))", "('2020-01-01', map('key',[1,2,3]))", '{"d":"2020-01-01","m":{"key":[1,2,3]}}', Name("non-empty array of ints")), ("Map(String, Array(String))", "('2020-01-01', map('key',['1','2','3']))", '{"d":"2020-01-01","m":{"key":["1","2","3"]}}', Name("non-empty array of strings")), - ("Map(String, Array(Map(Int8, Int8)))", "('2020-01-01', map('key',[map(1,2),map(2,3)]))", '{"d":"2020-01-01","m":{"key":[{1:2},{2:3}]}}', Name("non-empty array of maps")), - ("Map(String, Array(Map(Int8, Array(Map(Int8, Array(Int8))))))", "('2020-01-01', map('key',[map(1,[map(1,[1])]),map(2,[map(2,[3])])]))", '{"d":"2020-01-01","m":{"key":[{1:[{1:[1]}]},{2:[{2:[3]}]}]}}', Name("non-empty array of maps of array of maps")), + ("Map(String, Array(Map(Int8, Int8)))", "('2020-01-01', map('key',[map(1,2),map(2,3)]))", '{"d":"2020-01-01","m":{"key":[{"1":2},{"2":3}]}}', Name("non-empty array of maps")), + ("Map(String, Array(Map(Int8, Array(Map(Int8, Array(Int8))))))", "('2020-01-01', map('key',[map(1,[map(1,[1])]),map(2,[map(2,[3])])]))", '{"d":"2020-01-01","m":{"key":[{"1":[{"1":[1]}]},{"2":[{"2":[3]}]}]}}', Name("non-empty array of maps of array of maps")), ]) def table_map_with_value_array(self, type, data, output): """Check what values we can insert into map type column with value Array. 
@@ -294,12 +294,12 @@ def table_map_with_value_array(self, type, data, output): RQ_SRS_018_ClickHouse_Map_DataType_Key_Integer("1.0") ) @Examples("type data output", [ - ("Map(Int8, Int8)", "('2020-01-01', map(127,1,0,1,-128,1))", '{"d":"2020-01-01","m":{127:1,0:1,-128:1}}', Name("Int8")), - ("Map(UInt8, Int8)", "('2020-01-01', map(0,1,255,1))", '{"d":"2020-01-01","m":{0:1,255:1}}', Name("UInt8")), - ("Map(Int16, Int8)", "('2020-01-01', map(127,1,0,1,-128,1))", '{"d":"2020-01-01","m":{32767:1,0:1,-32768:1}}', Name("Int16")), - ("Map(UInt16, Int8)", "('2020-01-01', map(0,1,65535,1))", '{"d":"2020-01-01","m":{0:1,65535:1}}', Name("UInt16")), - ("Map(Int32, Int8)", "('2020-01-01', map(2147483647,1,0,1,-2147483648,1))", '{"d":"2020-01-01","m":{2147483647:1,0:1,-2147483648:1}}', Name("Int32")), - ("Map(UInt32, Int8)", "('2020-01-01', map(0,1,4294967295,1))", '{"d":"2020-01-01","m":{0:1,4294967295:1}}', Name("UInt32")), + ("Map(Int8, Int8)", "('2020-01-01', map(127,1,0,1,-128,1))", '{"d":"2020-01-01","m":{"127":1,"0":1,"-128":1}}', Name("Int8")), + ("Map(UInt8, Int8)", "('2020-01-01', map(0,1,255,1))", '{"d":"2020-01-01","m":{"0":1,"255":1}}', Name("UInt8")), + ("Map(Int16, Int8)", "('2020-01-01', map(127,1,0,1,-128,1))", '{"d":"2020-01-01","m":{"32767":1,"0":1,"-32768":1}}', Name("Int16")), + ("Map(UInt16, Int8)", "('2020-01-01', map(0,1,65535,1))", '{"d":"2020-01-01","m":{"0":1,"65535":1}}', Name("UInt16")), + ("Map(Int32, Int8)", "('2020-01-01', map(2147483647,1,0,1,-2147483648,1))", '{"d":"2020-01-01","m":{"2147483647":1,"0":1,"-2147483648":1}}', Name("Int32")), + ("Map(UInt32, Int8)", "('2020-01-01', map(0,1,4294967295,1))", '{"d":"2020-01-01","m":{"0":1,"4294967295":1}}', Name("UInt32")), ("Map(Int64, Int8)", "('2020-01-01', map(9223372036854775807,1,0,1,-9223372036854775808,1))", '{"d":"2020-01-01","m":{"9223372036854775807":1,"0":1,"-9223372036854775808":1}}', Name("Int64")), ("Map(UInt64, Int8)", "('2020-01-01', map(0,1,18446744073709551615,1))", '{"d":"2020-01-01","m":{"0":1,"18446744073709551615":1}}', Name("UInt64")), ("Map(Int128, Int8)", "('2020-01-01', map(170141183460469231731687303715884105727,1,0,1,-170141183460469231731687303715884105728,1))", '{"d":"2020-01-01","m":{170141183460469231731687303715884105727:1,0:1,"-170141183460469231731687303715884105728":1}}', Name("Int128")), @@ -716,7 +716,7 @@ def cast_tuple_of_two_arrays_to_map(self, tuple, type, exitcode, message): ) @Examples("tuple type exitcode message check_insert", [ ("(([1, 2, 3], ['Ready', 'Steady', 'Go']))", "Map(UInt8, String)", - 0, '{"m":{1:"Ready",2:"Steady",3:"Go"}}', False, Name("int -> int")), + 0, '{"m":{"1":"Ready","2":"Steady","3":"Go"}}', False, Name("int -> int")), ("(([1, 2, 3], ['Ready', 'Steady', 'Go']))", "Map(String, String)", 0, '{"m":{"1":"Ready","2":"Steady","3":"Go"}}', False, Name("int -> string")), ("((['1', '2', '3'], ['Ready', 'Steady', 'Go']))", "Map(UInt8, String)", @@ -728,7 +728,7 @@ def cast_tuple_of_two_arrays_to_map(self, tuple, type, exitcode, message): ("(([[1]],['hello']))", "Map(String, String)", 53, 'DB::Exception: Type mismatch in IN or VALUES section', True, Name("array -> string")), ("(([(1,2),(3,4)]))", "Map(UInt8, UInt8)", - 0, '{"m":{1:2,3:4}}', False, Name("array of two tuples")), + 0, '{"m":{"1":2,"3":4}}', False, Name("array of two tuples")), ("(([1, 2], ['Ready', 'Steady', 'Go']))", "Map(UInt8, String)", 53, "DB::Exception: CAST AS Map can only be performed from tuple of arrays with equal sizes", True, Name("unequal array sizes")), @@ -767,7 +767,7 @@ def 
cast_array_of_two_tuples_to_map(self, tuple, type, exitcode, message): RQ_SRS_018_ClickHouse_Map_DataType_Conversion_From_ArrayOfTuplesToMap_Invalid("1.0") ) @Examples("tuple type exitcode message check_insert", [ - ("(([(1,2),(3,4)]))", "Map(UInt8, UInt8)", 0, '{"m":{1:2,3:4}}', False, + ("(([(1,2),(3,4)]))", "Map(UInt8, UInt8)", 0, '{"m":{"1":2,"3":4}}', False, Name("array of two tuples")), ("(([(1,2),(3)]))", "Map(UInt8, UInt8)", 130, "DB::Exception: There is no supertype for types Tuple(UInt8, UInt8), UInt8 because some of them are Tuple and some of them are not", True, @@ -1188,8 +1188,5 @@ def performance(self, len=10, rows=6000000): def feature(self, node="clickhouse1"): self.context.node = self.context.cluster.node(node) - with Given("I allow experimental map type"): - allow_experimental_map_type() - for scenario in loads(current_module(), Scenario): scenario() From ad4c069b4ebae636e777ccd39190add441203cb1 Mon Sep 17 00:00:00 2001 From: Vitaliy Zakaznikov Date: Tue, 6 Jul 2021 17:50:12 -0400 Subject: [PATCH 840/931] Fixing syntax error. --- .../tests/array_tuple_map.py | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/tests/testflows/extended_precision_data_types/tests/array_tuple_map.py b/tests/testflows/extended_precision_data_types/tests/array_tuple_map.py index 550122c5b86..938beabfff4 100644 --- a/tests/testflows/extended_precision_data_types/tests/array_tuple_map.py +++ b/tests/testflows/extended_precision_data_types/tests/array_tuple_map.py @@ -334,10 +334,10 @@ def map_func(self, data_type, node=None): execute_query(f"SELECT * FROM {table_name}") with Scenario(f"mapAdd with {data_type}"): - sql = f"SELECT mapAdd(([{to_data_type(data_type,1)}, {to_data_type(data_type,2)}]," + sql = (f"SELECT mapAdd(([{to_data_type(data_type,1)}, {to_data_type(data_type,2)}]," f"[{to_data_type(data_type,1)}, {to_data_type(data_type,2)}])," f"([{to_data_type(data_type,1)}, {to_data_type(data_type,2)}]," - f"[{to_data_type(data_type,1)}, {to_data_type(data_type,2)}]))" + f"[{to_data_type(data_type,1)}, {to_data_type(data_type,2)}]))") if data_type.startswith("Decimal"): node.query(sql, exitcode=43, message="Exception:") else: @@ -349,8 +349,10 @@ def map_func(self, data_type, node=None): table(name = table_name, data_type = f'Tuple(Array({data_type}), Array({data_type}))') with When("I insert the output into a table"): - sql = (f"INSERT INTO {table_name} SELECT mapAdd(([{to_data_type(data_type,1)},{to_data_type(data_type,2)}]," - f"[{to_data_type(data_type,1)}, {to_data_type(data_type,2)}]), ([{to_data_type(data_type,1)}, {to_data_type(data_type,2)}]," + sql = (f"INSERT INTO {table_name} SELECT mapAdd((" + f"[{to_data_type(data_type,1)}, {to_data_type(data_type,2)}]," + f"[{to_data_type(data_type,1)}, {to_data_type(data_type,2)}])," + f"([{to_data_type(data_type,1)}, {to_data_type(data_type,2)}]," f"[{to_data_type(data_type,1)}, {to_data_type(data_type,2)}]))") exitcode, message = 0, None @@ -361,7 +363,8 @@ def map_func(self, data_type, node=None): execute_query(f"""SELECT * FROM {table_name} ORDER BY a ASC""") with Scenario(f"mapSubtract with {data_type}"): - sql = (f"SELECT mapSubtract(([{to_data_type(data_type,1)}, {to_data_type(data_type,2)}]," + sql = (f"SELECT mapSubtract((" + f"[{to_data_type(data_type,1)}, {to_data_type(data_type,2)}]," f"[{to_data_type(data_type,1)}, {to_data_type(data_type,2)}])," f"([{to_data_type(data_type,1)}, {to_data_type(data_type,2)}]," f"[{to_data_type(data_type,1)}, {to_data_type(data_type,2)}]))") From 
42a844546229e56c51a3ec986467ced52e4ed972 Mon Sep 17 00:00:00 2001
From: Azat Khuzhin
Date: Wed, 7 Jul 2021 01:12:56 +0300
Subject: [PATCH 841/931] Fix constness of custom TLDs

Before this patch the functions below return an incorrect type for consts,
and hence optimize_skip_unused_shards does not work:

- cutToFirstSignificantSubdomainCustom()
- cutToFirstSignificantSubdomainCustomWithWWW()
- firstSignificantSubdomainCustom()
---
 .../URL/FirstSignificantSubdomainCustomImpl.h | 28 ++++++++++++++++---
 .../0_stateless/01601_custom_tld.reference    |  6 ++++
 .../queries/0_stateless/01601_custom_tld.sql  |  8 ++++++
 .../01940_custom_tld_sharding_key.reference   |  1 +
 .../01940_custom_tld_sharding_key.sql         |  2 ++
 5 files changed, 41 insertions(+), 4 deletions(-)
 create mode 100644 tests/queries/0_stateless/01940_custom_tld_sharding_key.reference
 create mode 100644 tests/queries/0_stateless/01940_custom_tld_sharding_key.sql

diff --git a/src/Functions/URL/FirstSignificantSubdomainCustomImpl.h b/src/Functions/URL/FirstSignificantSubdomainCustomImpl.h
index 4670d610725..70cf30a7384 100644
--- a/src/Functions/URL/FirstSignificantSubdomainCustomImpl.h
+++ b/src/Functions/URL/FirstSignificantSubdomainCustomImpl.h
@@ -60,14 +60,25 @@ public:
         return arguments[0].type;
     }

-    ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr & /*result_type*/, size_t /*input_rows_count*/) const override
+    ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr & /*result_type*/, size_t input_rows_count) const override
     {
         const ColumnConst * column_tld_list_name = checkAndGetColumnConstStringOrFixedString(arguments[1].column.get());
         FirstSignificantSubdomainCustomLookup tld_lookup(column_tld_list_name->getValue<String>());

-        /// FIXME: convertToFullColumnIfConst() is suboptimal
-        auto column = arguments[0].column->convertToFullColumnIfConst();
-        if (const ColumnString * col = checkAndGetColumn<ColumnString>(*column))
+        auto column = arguments[0].column;
+
+        if (const ColumnConst * const_col = checkAndGetColumnConst<ColumnString>(column.get()))
+        {
+            const String & data = const_col->getValue<String>();
+            const String & res = scalar(tld_lookup, data);
+
+            auto col_res = ColumnString::create();
+            col_res->insert(res);
+
+            auto col_const_res = ColumnConst::create(std::move(col_res), input_rows_count);
+            return col_const_res;
+        }
+        else if (const ColumnString * col = checkAndGetColumn<ColumnString>(*column))
         {
             auto col_res = ColumnString::create();
             vector(tld_lookup, col->getChars(), col->getOffsets(), col_res->getChars(), col_res->getOffsets());
@@ -107,6 +118,15 @@ public:
             prev_offset = offsets[i];
         }
     }
+
+    static String scalar(FirstSignificantSubdomainCustomLookup & tld_lookup, const String & data)
+    {
+        Pos start;
+        size_t length;
+        Extractor::execute(tld_lookup, &data[0], data.size(), start, length);
+        String output(start, length);
+        return output;
+    }
 };

 }
diff --git a/tests/queries/0_stateless/01601_custom_tld.reference b/tests/queries/0_stateless/01601_custom_tld.reference
index e056505f273..04204ebf02a 100644
--- a/tests/queries/0_stateless/01601_custom_tld.reference
+++ b/tests/queries/0_stateless/01601_custom_tld.reference
@@ -22,3 +22,9 @@ foobar.com
 foobar.com
 foobar.com
 xx.blogspot.co.at
+-- www
+www.foo
+foo
+-- vector
+xx.blogspot.co.at
+
diff --git a/tests/queries/0_stateless/01601_custom_tld.sql b/tests/queries/0_stateless/01601_custom_tld.sql
index 688dd419858..ceb00d5ff19 100644
--- a/tests/queries/0_stateless/01601_custom_tld.sql
+++ b/tests/queries/0_stateless/01601_custom_tld.sql
@@ -29,3 +29,11 @@ select
cutToFirstSignificantSubdomainCustom('http://foobar.com', 'public_suffix_ select cutToFirstSignificantSubdomainCustom('http://foobar.com/foo', 'public_suffix_list'); select cutToFirstSignificantSubdomainCustom('http://bar.foobar.com/foo', 'public_suffix_list'); select cutToFirstSignificantSubdomainCustom('http://xx.blogspot.co.at', 'public_suffix_list'); + +select '-- www'; +select cutToFirstSignificantSubdomainCustomWithWWW('http://www.foo', 'public_suffix_list'); +select cutToFirstSignificantSubdomainCustom('http://www.foo', 'public_suffix_list'); + +select '-- vector'; +select cutToFirstSignificantSubdomainCustom('http://xx.blogspot.co.at/' || toString(number), 'public_suffix_list') from numbers(1); +select cutToFirstSignificantSubdomainCustom('there-is-no-such-domain' || toString(number), 'public_suffix_list') from numbers(1); diff --git a/tests/queries/0_stateless/01940_custom_tld_sharding_key.reference b/tests/queries/0_stateless/01940_custom_tld_sharding_key.reference new file mode 100644 index 00000000000..0989a305613 --- /dev/null +++ b/tests/queries/0_stateless/01940_custom_tld_sharding_key.reference @@ -0,0 +1 @@ +foo.com diff --git a/tests/queries/0_stateless/01940_custom_tld_sharding_key.sql b/tests/queries/0_stateless/01940_custom_tld_sharding_key.sql new file mode 100644 index 00000000000..5d38cfb18dc --- /dev/null +++ b/tests/queries/0_stateless/01940_custom_tld_sharding_key.sql @@ -0,0 +1,2 @@ +select * from remote('127.{1,2}', view(select 'foo.com' key), cityHash64(key)) where key = cutToFirstSignificantSubdomainCustom('foo.com', 'public_suffix_list') settings optimize_skip_unused_shards=1, force_optimize_skip_unused_shards=1; +select * from remote('127.{1,2}', view(select 'foo.com' key), cityHash64(key)) where key = cutToFirstSignificantSubdomainCustom('bar.com', 'public_suffix_list') settings optimize_skip_unused_shards=1, force_optimize_skip_unused_shards=1; From b0a3a7180f19dda2e29cf09a1e2ff84ed5d530c5 Mon Sep 17 00:00:00 2001 From: Vitaly Baranov Date: Tue, 6 Jul 2021 13:48:13 +0300 Subject: [PATCH 842/931] Replace print() with logging.debug() in integration tests --- .../test_replicated_mutations/test.py | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/tests/integration/test_replicated_mutations/test.py b/tests/integration/test_replicated_mutations/test.py index 40a2b15ffaf..12a49ec22d8 100644 --- a/tests/integration/test_replicated_mutations/test.py +++ b/tests/integration/test_replicated_mutations/test.py @@ -1,3 +1,4 @@ +import logging import random import threading import time @@ -90,7 +91,7 @@ class Runner: i += 1 try: - print('thread {}: insert for {}: {}'.format(thread_num, date_str, ','.join(str(x) for x in xs))) + logging.debug(f"thread {thread_num}: insert for {date_str}: {xs}") random.choice(self.nodes).query("INSERT INTO test_mutations FORMAT TSV", payload) with self.mtx: @@ -100,7 +101,7 @@ class Runner: self.total_inserted_rows += len(xs) except Exception as e: - print('Exception while inserting,', e) + logging.debug(f"Exception while inserting: {e}") self.exceptions.append(e) finally: with self.mtx: @@ -128,7 +129,7 @@ class Runner: continue try: - print('thread {}: delete {} * {}'.format(thread_num, to_delete_count, x)) + logging.debug(f"thread {thread_num}: delete {to_delete_count} * {x}") random.choice(self.nodes).query("ALTER TABLE test_mutations DELETE WHERE x = {}".format(x)) with self.mtx: @@ -138,7 +139,7 @@ class Runner: self.total_deleted_rows += to_delete_count except Exception as e: - print('Exception 
while deleting,', e) + logging.debug(f"Exception while deleting: {e}") finally: with self.mtx: self.currently_deleting_xs.remove(x) @@ -185,10 +186,9 @@ def test_mutations(started_cluster): assert runner.total_mutations > 0 all_done = wait_for_mutations(nodes, runner.total_mutations) - - print("Total mutations: ", runner.total_mutations) + logging.debug(f"Total mutations: {runner.total_mutations}") for node in nodes: - print(node.query( + logging.debug(node.query( "SELECT mutation_id, command, parts_to_do, is_done FROM system.mutations WHERE table = 'test_mutations' FORMAT TSVWithNames")) assert all_done @@ -233,9 +233,9 @@ def test_mutations_dont_prevent_merges(started_cluster, nodes): t.join() for node in nodes: - print(node.query( + logging.debug(node.query( "SELECT mutation_id, command, parts_to_do, is_done FROM system.mutations WHERE table = 'test_mutations' FORMAT TSVWithNames")) - print(node.query( + logging.debug(node.query( "SELECT partition, count(name), sum(active), sum(active*rows) FROM system.parts WHERE table ='test_mutations' GROUP BY partition FORMAT TSVWithNames")) assert all_done From 5e120b8d37a17c09232b490e3c438085e7c6ac8d Mon Sep 17 00:00:00 2001 From: Azat Khuzhin Date: Wed, 7 Jul 2021 09:42:13 +0300 Subject: [PATCH 843/931] Fix stack-buffer-overflow in custom TLDs due to StringHashTable copy 8 bytes at a time ASan reports [1]: ==164==ERROR: AddressSanitizer: stack-buffer-overflow on address 0x7f0209dd4abf at pc 0x00000b75b7c5 bp 0x7f0209dd4760 sp 0x7f0209dd4758 READ of size 8 at 0x7f0209dd4abf thread T4 (TCPHandler) 0 0xb75b7c4 in auto StringHashTable > >::dispatch > > const, StringRef const&, StringHashTable > >::FindCallable>(StringHashTable > > const&, StringRef const&, StringHashTable > >::FindCallable&&) obj-x86_64-linux-gnu/../src/Common/HashTable/StringHashTable.h:283:21 1 0xb75b7c4 in StringHashTable > >::has(StringRef const&, unsigned long) const obj-x86_64-linux-gnu/../src/Common/HashTable/StringHashTable.h:365:16 2 0xb75b7c4 in DB::TLDList::has(StringRef const&) const obj-x86_64-linux-gnu/../src/Common/TLDListsHolder.cpp:31:26 3 0x1c4a6046 in void DB::ExtractFirstSignificantSubdomain::executeCustom(DB::FirstSignificantSubdomainCustomLookup const&, char const*, unsigned long, char const*&, unsigned long&, char const**) (/usr/bin/clickhouse+0x1c4a6046) 4 0x1c4a3586 in DB::FunctionCutToFirstSignificantSubdomainCustomImpl, DB::NameCutToFirstSignificantSubdomainCustom>::executeImpl(std::__1::vector > const&, std::__1::shared_ptr const&, unsigned long) const (/usr/bin/clickhouse+0x1c4a3586) 5 0x10d96e34 in DB::IFunction::executeImplDryRun(std::__1::vector > const&, std::__1::shared_ptr const&, unsigned long) const (/usr/bin/clickhouse+0x10d96e34) 6 0x10d9648b in DB::FunctionToExecutableFunctionAdaptor::executeDryRunImpl(std::__1::vector > const&, std::__1::shared_ptr const&, unsigned long) const (/usr/bin/clickhouse+0x10d9648b) 7 0x200ed79b in DB::IExecutableFunction::executeWithoutLowCardinalityColumns(std::__1::vector > const&, std::__1::shared_ptr const&, unsigned long, bool) const obj-x86_64-linux-gnu/../src/Functions/IFunction.cpp:212:15 8 0x200ee436 in DB::IExecutableFunction::execute(std::__1::vector > const&, std::__1::shared_ptr const&, unsigned long, bool) const obj-x86_64-linux-gnu/../src/Functions/IFunction.cpp:257:22 9 0x20cd6f6f in DB::ActionsDAG::addFunction(std::__1::shared_ptr const&, std::__1::vector >, std::__1::basic_string, std::__1::allocator >) obj-x86_64-linux-gnu/../src/Interpreters/ActionsDAG.cpp:214:37 10 0x2124c8a7 in 
DB::ScopeStack::addFunction(std::__1::shared_ptr const&, std::__1::vector, std::__1::allocator >, std::__1::allocator, std::__1::allocator > > > const&, std::__1::basic_string, std::__1::allocator >) obj-x86_64-linux-gnu/../src/Interpreters/ActionsVisitor.cpp:570:51 11 0x2125c80d in DB::ActionsMatcher::Data::addFunction(std::__1::shared_ptr const&, std::__1::vector, std::__1::allocator >, std::__1::allocator, std::__1::allocator > > > const&, std::__1::basic_string, std::__1::allocator >) obj-x86_64-linux-gnu/../src/Interpreters/ActionsVisitor.h:169:27 12 0x2125c80d in DB::ActionsMatcher::visit(DB::ASTFunction const&, std::__1::shared_ptr const&, DB::ActionsMatcher::Data&) obj-x86_64-linux-gnu/../src/Interpreters/ActionsVisitor.cpp:1061:14 13 0x212522fb in DB::ActionsMatcher::visit(DB::ASTFunction const&, std::__1::shared_ptr const&, DB::ActionsMatcher::Data&) obj-x86_64-linux-gnu/../src/Interpreters/ActionsVisitor.cpp:971:17 14 0x2121354e in DB::InDepthNodeVisitor const>::visit(std::__1::shared_ptr const&) obj-x86_64-linux-gnu/../src/Interpreters/InDepthNodeVisitor.h:34:13 15 0x211e17c7 in DB::ExpressionAnalyzer::getRootActions(std::__1::shared_ptr const&, bool, std::__1::shared_ptr&, bool) obj-x86_64-linux-gnu/../src/Interpreters/ExpressionAnalyzer.cpp:421:48 16 0x21204024 in DB::ExpressionAnalyzer::getConstActions(std::__1::vector > const&) obj-x86_64-linux-gnu/../src/Interpreters/ExpressionAnalyzer.cpp:1423:5 17 0x230f7216 in DB::KeyCondition::getBlockWithConstants(std::__1::shared_ptr const&, std::__1::shared_ptr const&, std::__1::shared_ptr) obj-x86_64-linux-gnu/../src/Storages/MergeTree/KeyCondition.cpp:385:103 18 0x22877f9e in DB::(anonymous namespace)::replaceConstantExpressions(std::__1::shared_ptr&, std::__1::shared_ptr, DB::NamesAndTypesList const&, std::__1::shared_ptr, std::__1::shared_ptr const&) obj-x86_64-linux-gnu/../src/Storages/StorageDistributed.cpp:280:34 19 0x22877f9e in DB::StorageDistributed::skipUnusedShards(std::__1::shared_ptr, std::__1::shared_ptr const&, std::__1::shared_ptr const&, std::__1::shared_ptr) const obj-x86_64-linux-gnu/../src/Storages/StorageDistributed.cpp:1091:5 20 0x2285d215 in DB::StorageDistributed::getOptimizedCluster(std::__1::shared_ptr, std::__1::shared_ptr const&, std::__1::shared_ptr const&) const obj-x86_64-linux-gnu/../src/Storages/StorageDistributed.cpp:1015:32 21 0x2285a9c4 in DB::StorageDistributed::getQueryProcessingStage(std::__1::shared_ptr, DB::QueryProcessingStage::Enum, std::__1::shared_ptr const&, DB::SelectQueryInfo&) const obj-x86_64-linux-gnu/../src/Storages/StorageDistributed.cpp:500:40 22 0x2183a4b2 in DB::InterpreterSelectQuery::getSampleBlockImpl() obj-x86_64-linux-gnu/../src/Interpreters/InterpreterSelectQuery.cpp:616:31 23 0x21828db4 in DB::InterpreterSelectQuery::InterpreterSelectQuery(std::__1::shared_ptr const&, std::__1::shared_ptr, std::__1::shared_ptr const&, std::__1::optional, std::__1::shared_ptr const&, DB::SelectQueryOptions const&, std::__1::vector, std::__1::allocator >, std::__1::allocator, std::__1::allocator > > > const&, std::__1::shared_ptr const&)::$_1::operator()(bool) const obj-x86_64-linux-gnu/../src/Interpreters/InterpreterSelectQuery.cpp:506:25 24 0x2181b652 in DB::InterpreterSelectQuery::InterpreterSelectQuery(std::__1::shared_ptr const&, std::__1::shared_ptr, std::__1::shared_ptr const&, std::__1::optional, std::__1::shared_ptr const&, DB::SelectQueryOptions const&, std::__1::vector, std::__1::allocator >, std::__1::allocator, std::__1::allocator > > > const&, std::__1::shared_ptr const&) 
obj-x86_64-linux-gnu/../src/Interpreters/InterpreterSelectQuery.cpp:509:5 25 0x21817cbe in DB::InterpreterSelectQuery::InterpreterSelectQuery(std::__1::shared_ptr const&, std::__1::shared_ptr, DB::SelectQueryOptions const&, std::__1::vector, std::__1::allocator >, std::__1::allocator, std::__1::allocator > > > const&) obj-x86_64-linux-gnu/../src/Interpreters/InterpreterSelectQuery.cpp:161:7 26 0x21dd0eb5 in std::__1::__unique_if::__unique_single std::__1::make_unique const&, std::__1::shared_ptr&, DB::SelectQueryOptions&, std::__1::vector, std::__1::allocator >, std::__1::allocator, std::__1::allocator > > > const&>(std::__1::shared_ptr const&, std::__1::shared_ptr&, DB::SelectQueryOptions&, std::__1::vector, std::__1::allocator >, std::__1::allocator, std::__1::allocator > > > const&) obj-x86_64-linux-gnu/../contrib/libcxx/include/memory:2068:32 27 0x21dd0eb5 in DB::InterpreterSelectWithUnionQuery::buildCurrentChildInterpreter(std::__1::shared_ptr const&, std::__1::vector, std::__1::allocator >, std::__1::allocator, std::__1::allocator > > > const&) obj-x86_64-linux-gnu/../src/Interpreters/InterpreterSelectWithUnionQuery.cpp:212:16 28 0x21dcd0e7 in DB::InterpreterSelectWithUnionQuery::InterpreterSelectWithUnionQuery(std::__1::shared_ptr const&, std::__1::shared_ptr, DB::SelectQueryOptions const&, std::__1::vector, std::__1::allocator >, std::__1::allocator, std::__1::allocator > > > const&) obj-x86_64-linux-gnu/../src/Interpreters/InterpreterSelectWithUnionQuery.cpp:134:13 29 0x211afe79 in std::__1::__unique_if::__unique_single std::__1::make_unique&, std::__1::shared_ptr&, DB::SelectQueryOptions const&>(std::__1::shared_ptr&, std::__1::shared_ptr&, DB::SelectQueryOptions const&) obj-x86_64-linux-gnu/../contrib/libcxx/include/memory:2068:32 30 0x211afe79 in DB::InterpreterFactory::get(std::__1::shared_ptr&, std::__1::shared_ptr, DB::SelectQueryOptions const&) obj-x86_64-linux-gnu/../src/Interpreters/InterpreterFactory.cpp:110:16 31 0x22273f97 in DB::executeQueryImpl(char const*, char const*, std::__1::shared_ptr, bool, DB::QueryProcessingStage::Enum, bool, DB::ReadBuffer*) obj-x86_64-linux-gnu/../src/Interpreters/executeQuery.cpp:524:28 32 0x22270ce2 in DB::executeQuery(std::__1::basic_string, std::__1::allocator > const&, std::__1::shared_ptr, bool, DB::QueryProcessingStage::Enum, bool) obj-x86_64-linux-gnu/../src/Interpreters/executeQuery.cpp:913:30 33 0x23905879 in DB::TCPHandler::runImpl() obj-x86_64-linux-gnu/../src/Server/TCPHandler.cpp:312:24 34 0x2392b81c in DB::TCPHandler::run() obj-x86_64-linux-gnu/../src/Server/TCPHandler.cpp:1622:9 35 0x2ab1fd8e in Poco::Net::TCPServerConnection::start() obj-x86_64-linux-gnu/../contrib/poco/Net/src/TCPServerConnection.cpp:43:3 36 0x2ab20952 in Poco::Net::TCPServerDispatcher::run() obj-x86_64-linux-gnu/../contrib/poco/Net/src/TCPServerDispatcher.cpp:115:20 37 0x2adfa3f4 in Poco::PooledThread::run() obj-x86_64-linux-gnu/../contrib/poco/Foundation/src/ThreadPool.cpp:199:14 38 0x2adf4716 in Poco::ThreadImpl::runnableEntry(void*) obj-x86_64-linux-gnu/../contrib/poco/Foundation/src/Thread_POSIX.cpp:345:27 39 0x7f02e66f2608 in start_thread (/lib/x86_64-linux-gnu/libpthread.so.0+0x9608) 40 0x7f02e6619292 in clone (/lib/x86_64-linux-gnu/libc.so.6+0x122292) Address 0x7f0209dd4abf is located in stack of thread T4 (TCPHandler) at offset 447 in frame 0 0x1c4a2c6f in DB::FunctionCutToFirstSignificantSubdomainCustomImpl, DB::NameCutToFirstSignificantSubdomainCustom>::executeImpl(std::__1::vector > const&, std::__1::shared_ptr const&, unsigned long) const 
(/usr/bin/clickhouse+0x1c4a2c6f) This frame has 16 object(s): [32, 40) 'ref.tmp.i168' [64, 72) 'tmp_data.i.i' [96, 104) 'tmp_length.i.i' [128, 136) 'domain_end.i.i' [160, 216) 'ref.tmp.i132' [256, 312) 'ref.tmp.i' [352, 360) 'tld_lookup' [384, 408) 'ref.tmp' [448, 472) 'ref.tmp11' <== Memory access at offset 447 partially underflows this variable [512, 536) 'ref.tmp14' [576, 632) 'ref.tmp20' [672, 696) 'ref.tmp65' [736, 760) 'ref.tmp66' [800, 824) 'ref.tmp67' [864, 888) 'ref.tmp68' [928, 952) 'ref.tmp78' HINT: this may be a false positive if your program uses some custom stack unwind mechanism, swapcontext or vfork (longjmp and C++ exceptions *are* supported) Thread T4 (TCPHandler) created by T0 here: 0 0xb51940a in pthread_create (/usr/bin/clickhouse+0xb51940a) 1 0x2adf3a9f in Poco::ThreadImpl::startImpl(Poco::SharedPtr >) obj-x86_64-linux-gnu/../contrib/poco/Foundation/src/Thread_POSIX.cpp:202:6 2 0x2adf699a in Poco::Thread::start(Poco::Runnable&) obj-x86_64-linux-gnu/../contrib/poco/Foundation/src/Thread.cpp:128:2 3 0x2adfa998 in Poco::PooledThread::start() obj-x86_64-linux-gnu/../contrib/poco/Foundation/src/ThreadPool.cpp:85:10 4 0x2adfa998 in Poco::ThreadPool::ThreadPool(int, int, int, int) obj-x86_64-linux-gnu/../contrib/poco/Foundation/src/ThreadPool.cpp:252:12 5 0xb582c25 in DB::Server::main(std::__1::vector, std::__1::allocator >, std::__1::allocator, std::__1::allocator > > > const&) obj-x86_64-linux-gnu/../programs/server/Server.cpp:915:22 6 0x2ab511a5 in Poco::Util::Application::run() obj-x86_64-linux-gnu/../contrib/poco/Util/src/Application.cpp:334:8 7 0xb56a89c in DB::Server::run() obj-x86_64-linux-gnu/../programs/server/Server.cpp:392:25 8 0x2ab956f7 in Poco::Util::ServerApplication::run(int, char**) obj-x86_64-linux-gnu/../contrib/poco/Util/src/ServerApplication.cpp:611:9 9 0xb566519 in mainEntryClickHouseServer(int, char**) obj-x86_64-linux-gnu/../programs/server/Server.cpp:171:20 10 0xb56224a in main obj-x86_64-linux-gnu/../programs/main.cpp:366:12 11 0x7f02e651e0b2 in __libc_start_main (/lib/x86_64-linux-gnu/libc.so.6+0x270b2) SUMMARY: AddressSanitizer: stack-buffer-overflow obj-x86_64-linux-gnu/../src/Common/HashTable/StringHashTable.h:283:21 in auto StringHashTable > >::dispatch > > const, StringRef const&, StringHashTable > >::FindCallable>(StringHashTable > > const&, StringRef const&, StringHashTable > >::FindCallable&&) Shadow bytes around the buggy address: 0x0fe0c13b2900: 00 00 f3 f3 00 00 00 00 00 00 00 00 00 00 00 00 0x0fe0c13b2910: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 0x0fe0c13b2920: f1 f1 f1 f1 f8 f2 f2 f2 00 f2 f2 f2 00 f2 f2 f2 0x0fe0c13b2930: 00 f2 f2 f2 f8 f8 f8 f8 f8 f8 f8 f2 f2 f2 f2 f2 0x0fe0c13b2940: f8 f8 f8 f8 f8 f8 f8 f2 f2 f2 f2 f2 00 f2 f2 f2 =>0x0fe0c13b2950: f8 f8 f8 f2 f2 f2 f2[f2]00 00 00 f2 f2 f2 f2 f2 0x0fe0c13b2960: 00 00 00 f2 f2 f2 f2 f2 f8 f8 f8 f8 f8 f8 f8 f2 0x0fe0c13b2970: f2 f2 f2 f2 f8 f8 f8 f2 f2 f2 f2 f2 f8 f8 f8 f2 0x0fe0c13b2980: f2 f2 f2 f2 f8 f8 f8 f2 f2 f2 f2 f2 f8 f8 f8 f2 0x0fe0c13b2990: f2 f2 f2 f2 f8 f8 f8 f3 f3 f3 f3 f3 00 00 00 00 0x0fe0c13b29a0: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 Shadow byte legend (one shadow byte represents 8 application bytes): Addressable: 00 Partially addressable: 01 02 03 04 05 06 07 Heap left redzone: fa Freed heap region: fd Stack left redzone: f1 Stack mid redzone: f2 Stack right redzone: f3 Stack after return: f5 Stack use after scope: f8 Global redzone: f9 Global init order: f6 Poisoned by user: f7 Container overflow: fc Array cookie: ac Intra object redzone: bb ASan 
internal: fe Left alloca redzone: ca Right alloca redzone: cb Shadow gap: cc ==164==ABORTING [1]: https://clickhouse-test-reports.s3.yandex.net/26041/42a844546229e56c51a3ec986467ced52e4ed972/functional_stateless_tests_flaky_check_(address)/stderr.log v2: Replace String with string_view in custom TLD for scalar v3: use ColumnString::getDataAt() --- .../URL/FirstSignificantSubdomainCustomImpl.h | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/src/Functions/URL/FirstSignificantSubdomainCustomImpl.h b/src/Functions/URL/FirstSignificantSubdomainCustomImpl.h index 70cf30a7384..3aee0141073 100644 --- a/src/Functions/URL/FirstSignificantSubdomainCustomImpl.h +++ b/src/Functions/URL/FirstSignificantSubdomainCustomImpl.h @@ -69,11 +69,11 @@ public: if (const ColumnConst * const_col = checkAndGetColumnConst(column.get())) { - const String & data = const_col->getValue(); - const String & res = scalar(tld_lookup, data); + const ColumnString * col_str = checkAndGetColumn(const_col->getDataColumn()); + const std::string_view & sv = scalar(tld_lookup, col_str->getDataAt(0)); auto col_res = ColumnString::create(); - col_res->insert(res); + col_res->insert(sv); auto col_const_res = ColumnConst::create(std::move(col_res), input_rows_count); return col_const_res; @@ -119,13 +119,12 @@ public: } } - static String scalar(FirstSignificantSubdomainCustomLookup & tld_lookup, const String & data) + static std::string_view scalar(FirstSignificantSubdomainCustomLookup & tld_lookup, const StringRef & data) { Pos start; size_t length; - Extractor::execute(tld_lookup, &data[0], data.size(), start, length); - String output(start, length); - return output; + Extractor::execute(tld_lookup, &data.data[0], data.size, start, length); + return {start, length}; } }; From 5bc05337128f0295a7d2e7ce20bc17bb517a053f Mon Sep 17 00:00:00 2001 From: Azat Khuzhin Date: Wed, 7 Jul 2021 10:42:33 +0300 Subject: [PATCH 844/931] Add a note for padded to 8 bytes keys in StringHashTable --- src/Common/HashTable/StringHashTable.h | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/src/Common/HashTable/StringHashTable.h b/src/Common/HashTable/StringHashTable.h index b05d119e0e9..d30271d65db 100644 --- a/src/Common/HashTable/StringHashTable.h +++ b/src/Common/HashTable/StringHashTable.h @@ -237,7 +237,12 @@ public: // 1. Always memcpy 8 times bytes // 2. Use switch case extension to generate fast dispatching table // 3. Funcs are named callables that can be force_inlined + // // NOTE: It relies on Little Endianness + // + // NOTE: It requires padded to 8 bytes keys (IOW you cannot pass + // std::string here, but you can pass i.e. ColumnString::getDataAt()), + // since it copies 8 bytes at a time. 
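+    //
+    // For example, looking up a 3-byte key still copies a full 8-byte word
+    // from the key's buffer; with an unpadded buffer such as std::string's,
+    // the trailing bytes of that word are out of bounds, which is exactly
+    // what the ASan report in the previous commit caught.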
 template <typename Self, typename KeyHolder, typename Func>
 static auto ALWAYS_INLINE dispatch(Self & self, KeyHolder && key_holder, Func && func)
 {

From d87607b160e6757c57ff0f14a9c66a565ea55262 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Ra=C3=BAl=20Mar=C3=ADn?=
Date: Wed, 7 Jul 2021 11:09:38 +0200
Subject: [PATCH 845/931] AsynchronousMetrics: Don't assume temperature is
 always positive

---
 src/Interpreters/AsynchronousMetrics.cpp | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/src/Interpreters/AsynchronousMetrics.cpp b/src/Interpreters/AsynchronousMetrics.cpp
index 11ecf547714..5c49adf6fe7 100644
--- a/src/Interpreters/AsynchronousMetrics.cpp
+++ b/src/Interpreters/AsynchronousMetrics.cpp
@@ -1024,7 +1024,7 @@ void AsynchronousMetrics::update(std::chrono::system_clock::time_point update_ti
             ReadBufferFromFile & in = *thermal[i];

             in.rewind();
-            uint64_t temperature = 0;
+            Int64 temperature = 0;
             readText(temperature, in);
             new_values[fmt::format("Temperature{}", i)] = temperature * 0.001;
         }
@@ -1041,7 +1041,7 @@ void AsynchronousMetrics::update(std::chrono::system_clock::time_point update_ti
             for (const auto & [sensor_name, sensor_file] : sensors)
             {
                 sensor_file->rewind();
-                uint64_t temperature = 0;
+                Int64 temperature = 0;
                 readText(temperature, *sensor_file);

                 if (sensor_name.empty())

From 35ca8b97ac6a4a1897968b48cd86fc1170221981 Mon Sep 17 00:00:00 2001
From: Vladimir
Date: Wed, 7 Jul 2021 12:48:39 +0300
Subject: [PATCH 846/931] Set distinct_on_expression_list to null while
 transforming to limit_by

---
 src/Parsers/ParserSelectQuery.cpp | 1 +
 1 file changed, 1 insertion(+)

diff --git a/src/Parsers/ParserSelectQuery.cpp b/src/Parsers/ParserSelectQuery.cpp
index 255595caa0e..b1f7570878f 100644
--- a/src/Parsers/ParserSelectQuery.cpp
+++ b/src/Parsers/ParserSelectQuery.cpp
@@ -367,6 +367,7 @@ bool ParserSelectQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expected)
         /// Transform `DISTINCT ON expr` to `LIMIT 1 BY expr`
         limit_by_expression_list = distinct_on_expression_list;
         limit_by_length = std::make_shared<ASTLiteral>(Field{UInt8(1)});
+        distinct_on_expression_list = nullptr;
     }

     /// Because TOP n in totally equals LIMIT n

From a47a9ef39a9ed6c8e5e0c26f56cd3210ca901864 Mon Sep 17 00:00:00 2001
From: Victor
Date: Wed, 7 Jul 2021 13:38:56 +0300
Subject: [PATCH 847/931] Update distinct.md

---
 docs/ru/sql-reference/statements/select/distinct.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/docs/ru/sql-reference/statements/select/distinct.md b/docs/ru/sql-reference/statements/select/distinct.md
index 6616f421486..f57c2a42593 100644
--- a/docs/ru/sql-reference/statements/select/distinct.md
+++ b/docs/ru/sql-reference/statements/select/distinct.md
@@ -6,7 +6,7 @@ toc_title: DISTINCT

 Если указан `SELECT DISTINCT`, то в результате запроса останутся только уникальные строки. Таким образом, из всех наборов полностью совпадающих строк в результате останется только одна строка.

-## Обработк NULL {#null-processing}
+## Обработка NULL {#null-processing}

 `DISTINCT` работает с [NULL](../../syntax.md#null-literal) как-будто `NULL` — обычное значение и `NULL==NULL`. Другими словами, в результате `DISTINCT`, различные комбинации с `NULL` встретятся только один раз. Это отличается от обработки `NULL` в большинстве других контекстов.
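A quick illustration of why the AsynchronousMetrics fix two commits above switches to a signed type (a stand-alone sketch, not the ClickHouse readText API): thermal sensors report millidegrees and can legitimately go below zero, which an unsigned parse cannot represent.

#include <cstdint>
#include <cstdio>
#include <sstream>
#include <string>

int main()
{
    /// A thermal zone reporting -5 degrees Celsius, in millidegrees.
    std::string sensor_value = "-5000";

    int64_t as_signed = 0;
    std::istringstream(sensor_value) >> as_signed;
    std::printf("signed:   %.3f\n", as_signed * 0.001); /// -5.000

    uint64_t as_unsigned = 0;
    std::istringstream(sensor_value) >> as_unsigned;
    /// Extracting a negative number into an unsigned integer either fails
    /// or wraps around, depending on the library; never what we want here.
    std::printf("unsigned: %.3f\n", as_unsigned * 0.001);
}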
From a24686d300b67f2ec34060ba38a222aa959e509e Mon Sep 17 00:00:00 2001 From: Anton Popov Date: Wed, 7 Jul 2021 15:04:07 +0300 Subject: [PATCH 848/931] fix serialization of type Map to JSON --- .../Serializations/SerializationMap.cpp | 45 ++++++++++--------- .../Serializations/SerializationMap.h | 4 +- .../0_stateless/01939_type_map_json.reference | 10 ++++- .../0_stateless/01939_type_map_json.sql | 17 +++++-- 4 files changed, 46 insertions(+), 30 deletions(-) diff --git a/src/DataTypes/Serializations/SerializationMap.cpp b/src/DataTypes/Serializations/SerializationMap.cpp index fa882bef7ca..ff8bc518dc0 100644 --- a/src/DataTypes/Serializations/SerializationMap.cpp +++ b/src/DataTypes/Serializations/SerializationMap.cpp @@ -80,8 +80,13 @@ void SerializationMap::deserializeBinary(IColumn & column, ReadBuffer & istr) co } -template -void SerializationMap::serializeTextImpl(const IColumn & column, size_t row_num, bool quote_key, WriteBuffer & ostr, Writer && writer) const +template +void SerializationMap::serializeTextImpl( + const IColumn & column, + size_t row_num, + WriteBuffer & ostr, + KeyWriter && key_writer, + ValueWriter && value_writer) const { const auto & column_map = assert_cast(column); @@ -98,17 +103,9 @@ void SerializationMap::serializeTextImpl(const IColumn & column, size_t row_num, if (i != offset) writeChar(',', ostr); - if (quote_key) - { - writeChar('"', ostr); - writer(key, nested_tuple.getColumn(0), i); - writeChar('"', ostr); - } - else - writer(key, nested_tuple.getColumn(0), i); - + key_writer(key, nested_tuple.getColumn(0), i); writeChar(':', ostr); - writer(value, nested_tuple.getColumn(1), i); + value_writer(value, nested_tuple.getColumn(1), i); } writeChar('}', ostr); } @@ -170,11 +167,12 @@ void SerializationMap::deserializeTextImpl(IColumn & column, ReadBuffer & istr, void SerializationMap::serializeText(const IColumn & column, size_t row_num, WriteBuffer & ostr, const FormatSettings & settings) const { - serializeTextImpl(column, row_num, /*quote_key=*/ false, ostr, - [&](const SerializationPtr & subcolumn_serialization, const IColumn & subcolumn, size_t pos) - { - subcolumn_serialization->serializeTextQuoted(subcolumn, pos, ostr, settings); - }); + auto writer = [&](const SerializationPtr & subcolumn_serialization, const IColumn & subcolumn, size_t pos) + { + subcolumn_serialization->serializeTextQuoted(subcolumn, pos, ostr, settings); + }; + + serializeTextImpl(column, row_num, ostr, writer, writer); } void SerializationMap::deserializeText(IColumn & column, ReadBuffer & istr, const FormatSettings & settings) const @@ -188,11 +186,14 @@ void SerializationMap::deserializeText(IColumn & column, ReadBuffer & istr, cons void SerializationMap::serializeTextJSON(const IColumn & column, size_t row_num, WriteBuffer & ostr, const FormatSettings & settings) const { - /// We need to double-quote integer keys to produce valid JSON. - const auto & column_key = assert_cast(column).getNestedData().getColumn(0); - bool quote_key = !WhichDataType(column_key.getDataType()).isStringOrFixedString(); - - serializeTextImpl(column, row_num, quote_key, ostr, + serializeTextImpl(column, row_num, ostr, + [&](const SerializationPtr & subcolumn_serialization, const IColumn & subcolumn, size_t pos) + { + /// We need to double-quote all keys (including integers) to produce valid JSON. 
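+            /// E.g. map(11, 'v1') has to serialize as {"11":"v1"} rather than
+            /// {11:"v1"}: JSON object keys are strings regardless of the
+            /// ClickHouse key type (see the updated 01939_type_map_json
+            /// reference below).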
+            WriteBufferFromOwnString str_buf;
+            subcolumn_serialization->serializeText(subcolumn, pos, str_buf, settings);
+            writeJSONString(str_buf.str(), ostr, settings);
+        },
         [&](const SerializationPtr & subcolumn_serialization, const IColumn & subcolumn, size_t pos)
         {
             subcolumn_serialization->serializeTextJSON(subcolumn, pos, ostr, settings);

diff --git a/src/DataTypes/Serializations/SerializationMap.h b/src/DataTypes/Serializations/SerializationMap.h
index bf68689f1e4..6523d5388d0 100644
--- a/src/DataTypes/Serializations/SerializationMap.h
+++ b/src/DataTypes/Serializations/SerializationMap.h
@@ -60,8 +60,8 @@ public:
         SubstreamsCache * cache) const override;

 private:
-    template <typename Writer>
-    void serializeTextImpl(const IColumn & column, size_t row_num, bool quote_key, WriteBuffer & ostr, Writer && writer) const;
+    template <typename KeyWriter, typename ValueWriter>
+    void serializeTextImpl(const IColumn & column, size_t row_num, WriteBuffer & ostr, KeyWriter && key_writer, ValueWriter && value_writer) const;

     template <typename Reader>
     void deserializeTextImpl(IColumn & column, ReadBuffer & istr, Reader && reader) const;

diff --git a/tests/queries/0_stateless/01939_type_map_json.reference b/tests/queries/0_stateless/01939_type_map_json.reference
index 9b831c29608..4c19bc3c0dc 100644
--- a/tests/queries/0_stateless/01939_type_map_json.reference
+++ b/tests/queries/0_stateless/01939_type_map_json.reference
@@ -4,5 +4,11 @@
 {'key1':1,'key2':2} {"key1":"1","key2":"2"} 1
 {"m":{"key1":1,"key2":2}}
 {'key1':1,'key2':2} {"key1":1,"key2":2} 1
-{"m1":{"k1":"1","k2":"2"},"m2":{"1":2,"2":3}}
-{"m1":{"k1":1,"k2":2},"m2":{"1":2,"2":3}}
+{"m":{"2020-10-10":"v1","2020-10-11":"v2"}}
+{'2020-10-10':'v1','2020-10-11':'v2'} {"2020-10-10":"v1","2020-10-11":"v2"} 1
+{"m":{"11":"v1","22":"v2"}}
+{11:'v1',22:'v2'} {"11":"v1","22":"v2"} 1
+{"m":{"11":"v1","22":"v2"}}
+{11:'v1',22:'v2'} {"11":"v1","22":"v2"} 1
+{"m1":{"k1":"1","k2":"2"},"m2":{"1":2,"2":3},"m3":{"2020-10-10":"foo"}}
+{"m1":{"k1":1,"k2":2},"m2":{"1":2,"2":3},"m3":{"2020-10-10":"foo"}}

diff --git a/tests/queries/0_stateless/01939_type_map_json.sql b/tests/queries/0_stateless/01939_type_map_json.sql
index 4ad25f3c073..df782334c90 100644
--- a/tests/queries/0_stateless/01939_type_map_json.sql
+++ b/tests/queries/0_stateless/01939_type_map_json.sql
@@ -11,9 +11,18 @@ SELECT map('key1', number, 'key2', number * 2) AS m FROM numbers(1, 1)
 SELECT map('key1', number, 'key2', number * 2) AS m, toJSONString(m) AS s, isValidJSON(s) FROM numbers(1, 1)
     SETTINGS output_format_json_quote_64bit_integers = 0;

-CREATE TEMPORARY TABLE map_json (m1 Map(String, UInt64), m2 Map(UInt32, UInt32));
+SELECT map('2020-10-10'::Date, 'v1', '2020-10-11'::Date, 'v2') AS m FORMAT JSONEachRow;
+SELECT map('2020-10-10'::Date, 'v1', '2020-10-11'::Date, 'v2') AS m, toJSONString(m) AS s, isValidJSON(s);

-INSERT INTO map_json FORMAT JSONEachRow {"m1" : {"k1" : 1, "k2" : 2}, "m2" : {"1" : 2, "2" : 3}};
+SELECT map(11::UInt64, 'v1', 22::UInt64, 'v2') AS m FORMAT JSONEachRow;
+SELECT map(11::UInt64, 'v1', 22::UInt64, 'v2') AS m, toJSONString(m) AS s, isValidJSON(s);

-SELECT m1, m2 FROM map_json FORMAT JSONEachRow;
-SELECT m1, m2 FROM map_json FORMAT JSONEachRow SETTINGS output_format_json_quote_64bit_integers = 0;
+SELECT map(11::Int128, 'v1', 22::Int128, 'v2') AS m FORMAT JSONEachRow;
+SELECT map(11::Int128, 'v1', 22::Int128, 'v2') AS m, toJSONString(m) AS s, isValidJSON(s);
+
+CREATE TEMPORARY TABLE map_json (m1 Map(String, UInt64), m2 Map(UInt32, UInt32), m3 Map(Date, String));
+
+INSERT INTO map_json FORMAT JSONEachRow {"m1" : {"k1" : 1, "k2" : 2}, "m2" : {"1" :
2, "2" : 3}, "m3" : {"2020-10-10" : "foo"}};
+
+SELECT m1, m2, m3 FROM map_json FORMAT JSONEachRow;
+SELECT m1, m2, m3 FROM map_json FORMAT JSONEachRow SETTINGS output_format_json_quote_64bit_integers = 0;

From e361f3120f306335126a3753ea377d73c12d76fb Mon Sep 17 00:00:00 2001
From: Vitaliy Zakaznikov
Date: Wed, 7 Jul 2021 08:34:46 -0400
Subject: [PATCH 849/931] * Fixing race condition between starting `tail -f`
 command and the file removal that follows.

---
 tests/testflows/ldap/authentication/tests/common.py | 10 ++++++++--
 1 file changed, 8 insertions(+), 2 deletions(-)

diff --git a/tests/testflows/ldap/authentication/tests/common.py b/tests/testflows/ldap/authentication/tests/common.py
index ec6a66c0257..e1615f3ee97 100644
--- a/tests/testflows/ldap/authentication/tests/common.py
+++ b/tests/testflows/ldap/authentication/tests/common.py
@@ -153,7 +153,10 @@ def add_config(config, timeout=300, restart=False, modify=False):
         with node.cluster.shell(node.name) as bash:
             bash.expect(bash.prompt)
-            bash.send("tail -n 0 -f /var/log/clickhouse-server/clickhouse-server.log")
+            bash.send("tail -v -n 0 -f /var/log/clickhouse-server/clickhouse-server.log")
+            # make sure tail process is launched and started to follow the file
+            bash.expect("<==")
+            bash.expect("\n")

         with When("I add the config", description=config.path):
             command = f"cat <<HEREDOC > {config.path}\n{config.content}\nHEREDOC"
@@ -170,7 +173,10 @@ def add_config(config, timeout=300, restart=False, modify=False):
         with Finally(f"I remove {config.name}"):
             with node.cluster.shell(node.name) as bash:
                 bash.expect(bash.prompt)
-                bash.send("tail -n 0 -f /var/log/clickhouse-server/clickhouse-server.log")
+                bash.send("tail -v -n 0 -f /var/log/clickhouse-server/clickhouse-server.log")
+                # make sure tail process is launched and started to follow the file
+                bash.expect("<==")
+                bash.expect("\n")

             with By("removing the config file", description=config.path):
                 node.command(f"rm -rf {config.path}", exitcode=0)

From 31e0e5cec7f437e19e10db031910f027c64291b3 Mon Sep 17 00:00:00 2001
From: Anton Popov
Date: Wed, 7 Jul 2021 16:22:15 +0300
Subject: [PATCH 850/931] pass buffers explicitly

---
 .../Serializations/SerializationMap.cpp | 28 +++++++++----------
 1 file changed, 14 insertions(+), 14 deletions(-)

diff --git a/src/DataTypes/Serializations/SerializationMap.cpp b/src/DataTypes/Serializations/SerializationMap.cpp
index ff8bc518dc0..a76784695a9 100644
--- a/src/DataTypes/Serializations/SerializationMap.cpp
+++ b/src/DataTypes/Serializations/SerializationMap.cpp
@@ -103,9 +103,9 @@ void SerializationMap::serializeTextImpl(
         if (i != offset)
             writeChar(',', ostr);

-        key_writer(key, nested_tuple.getColumn(0), i);
+        key_writer(ostr, key, nested_tuple.getColumn(0), i);
         writeChar(':', ostr);
-        value_writer(value, nested_tuple.getColumn(1), i);
+        value_writer(ostr, value, nested_tuple.getColumn(1), i);
     }
     writeChar('}', ostr);
 }
@@ -145,13 +145,13 @@ void SerializationMap::deserializeTextImpl(IColumn & column, ReadBuffer & istr,
         if (*istr.position() == '}')
             break;

-        reader(key, key_column);
+        reader(istr, key, key_column);
         skipWhitespaceIfAny(istr);
         assertChar(':', istr);
         ++size;

         skipWhitespaceIfAny(istr);
-        reader(value, value_column);
+        reader(istr, value, value_column);
         skipWhitespaceIfAny(istr);
     }
@@ -167,9 +167,9 @@ void SerializationMap::deserializeTextImpl(IColumn & column, ReadBuffer & istr,
 void SerializationMap::serializeText(const IColumn & column, size_t row_num, WriteBuffer & ostr, const FormatSettings & settings) const
 {
-    auto writer = [&](const SerializationPtr &
subcolumn_serialization, const IColumn & subcolumn, size_t pos) + auto writer = [&settings](WriteBuffer & buf, const SerializationPtr & subcolumn_serialization, const IColumn & subcolumn, size_t pos) { - subcolumn_serialization->serializeTextQuoted(subcolumn, pos, ostr, settings); + subcolumn_serialization->serializeTextQuoted(subcolumn, pos, buf, settings); }; serializeTextImpl(column, row_num, ostr, writer, writer); @@ -178,34 +178,34 @@ void SerializationMap::serializeText(const IColumn & column, size_t row_num, Wri void SerializationMap::deserializeText(IColumn & column, ReadBuffer & istr, const FormatSettings & settings) const { deserializeTextImpl(column, istr, - [&](const SerializationPtr & subcolumn_serialization, IColumn & subcolumn) + [&settings](ReadBuffer & buf, const SerializationPtr & subcolumn_serialization, IColumn & subcolumn) { - subcolumn_serialization->deserializeTextQuoted(subcolumn, istr, settings); + subcolumn_serialization->deserializeTextQuoted(subcolumn, buf, settings); }); } void SerializationMap::serializeTextJSON(const IColumn & column, size_t row_num, WriteBuffer & ostr, const FormatSettings & settings) const { serializeTextImpl(column, row_num, ostr, - [&](const SerializationPtr & subcolumn_serialization, const IColumn & subcolumn, size_t pos) + [&settings](WriteBuffer & buf, const SerializationPtr & subcolumn_serialization, const IColumn & subcolumn, size_t pos) { /// We need to double-quote all keys (including integers) to produce valid JSON. WriteBufferFromOwnString str_buf; subcolumn_serialization->serializeText(subcolumn, pos, str_buf, settings); - writeJSONString(str_buf.str(), ostr, settings); + writeJSONString(str_buf.str(), buf, settings); }, - [&](const SerializationPtr & subcolumn_serialization, const IColumn & subcolumn, size_t pos) + [&settings](WriteBuffer & buf, const SerializationPtr & subcolumn_serialization, const IColumn & subcolumn, size_t pos) { - subcolumn_serialization->serializeTextJSON(subcolumn, pos, ostr, settings); + subcolumn_serialization->serializeTextJSON(subcolumn, pos, buf, settings); }); } void SerializationMap::deserializeTextJSON(IColumn & column, ReadBuffer & istr, const FormatSettings & settings) const { deserializeTextImpl(column, istr, - [&](const SerializationPtr & subcolumn_serialization, IColumn & subcolumn) + [&settings](ReadBuffer & buf, const SerializationPtr & subcolumn_serialization, IColumn & subcolumn) { - subcolumn_serialization->deserializeTextJSON(subcolumn, istr, settings); + subcolumn_serialization->deserializeTextJSON(subcolumn, buf, settings); }); } From 7f292c600643aa763b3cde8de62ee7b61ffe8853 Mon Sep 17 00:00:00 2001 From: Vitaliy Zakaznikov Date: Wed, 7 Jul 2021 12:02:50 -0400 Subject: [PATCH 851/931] Changing output back to new fails only. 
--- docker/test/testflows/runner/Dockerfile | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/docker/test/testflows/runner/Dockerfile b/docker/test/testflows/runner/Dockerfile
index c20e742fea1..d39ec12fb82 100644
--- a/docker/test/testflows/runner/Dockerfile
+++ b/docker/test/testflows/runner/Dockerfile
@@ -73,4 +73,4 @@ RUN set -x \
 VOLUME /var/lib/docker
 EXPOSE 2375
 ENTRYPOINT ["dockerd-entrypoint.sh"]
-CMD ["sh", "-c", "python3 regression.py --no-color -o classic --local --clickhouse-binary-path ${CLICKHOUSE_TESTS_SERVER_BIN_PATH} --log test.log ${TESTFLOWS_OPTS}; cat test.log | tfs report results --format json > results.json; /usr/local/bin/process_testflows_result.py || echo -e 'failure\tCannot parse results' > check_status.tsv; find * -type f | grep _instances | grep clickhouse-server | xargs -n1 tar -rvf clickhouse_logs.tar; gzip -9 clickhouse_logs.tar"]
+CMD ["sh", "-c", "python3 regression.py --no-color -o new-fails --local --clickhouse-binary-path ${CLICKHOUSE_TESTS_SERVER_BIN_PATH} --log test.log ${TESTFLOWS_OPTS}; cat test.log | tfs report results --format json > results.json; /usr/local/bin/process_testflows_result.py || echo -e 'failure\tCannot parse results' > check_status.tsv; find * -type f | grep _instances | grep clickhouse-server | xargs -n0 tar -rvf clickhouse_logs.tar; gzip -9 clickhouse_logs.tar"]

From 9b09f215c40721b79bfe0f9afbc0628c5e487bb3 Mon Sep 17 00:00:00 2001
From: vdimir
Date: Wed, 7 Jul 2021 20:44:30 +0300
Subject: [PATCH 852/931] Fix max parallel stream for joined pipeline

---
 src/Processors/QueryPipeline.cpp | 1 +
 1 file changed, 1 insertion(+)

diff --git a/src/Processors/QueryPipeline.cpp b/src/Processors/QueryPipeline.cpp
index 14b60d0b14c..2b882ee93ab 100644
--- a/src/Processors/QueryPipeline.cpp
+++ b/src/Processors/QueryPipeline.cpp
@@ -350,6 +350,7 @@ std::unique_ptr<QueryPipeline> QueryPipeline::joinPipelines(
     left->pipe.processors.insert(left->pipe.processors.end(), right->pipe.processors.begin(), right->pipe.processors.end());
     left->pipe.holder = std::move(right->pipe.holder);
     left->pipe.header = left->pipe.output_ports.front()->getHeader();
+    left->pipe.max_parallel_streams = std::max(left->pipe.max_parallel_streams, right->pipe.max_parallel_streams);

     return left;
 }

From bff1fa1c58a39b1bd83418f07521e63d7f36e65d Mon Sep 17 00:00:00 2001
From: vdimir
Date: Wed, 7 Jul 2021 20:51:07 +0300
Subject: [PATCH 853/931] Add tests/performance/join_max_streams.xml

---
 tests/performance/join_max_streams.xml | 5 +++++
 1 file changed, 5 insertions(+)
 create mode 100644 tests/performance/join_max_streams.xml

diff --git a/tests/performance/join_max_streams.xml b/tests/performance/join_max_streams.xml
new file mode 100644
index 00000000000..1505e1d6e6e
--- /dev/null
+++ b/tests/performance/join_max_streams.xml
@@ -0,0 +1,5 @@
+<test>
+    <query>SELECT * FROM (SELECT 1 AS k FROM numbers_mt(1)) t1 LEFT JOIN (SELECT 1 AS k FROM numbers_mt(10000000000) WHERE number = 1) t2 USING k</query>
+    <query>SELECT * FROM (SELECT 1 AS k FROM numbers_mt(1)) t1 LEFT JOIN (SELECT 1 AS k FROM numbers_mt(10000000000) GROUP BY k) t2 USING k</query>
+    <query>SELECT * FROM (SELECT 1 AS k FROM numbers_mt(1)) t1 LEFT JOIN (SELECT 1 AS k FROM numbers_mt(10000000000) WHERE number = 1) t2 ON t1.k = t2.k</query>
+</test>

From b9357402e0e6c77e4357a87d40336dbbeb139606 Mon Sep 17 00:00:00 2001
From: Olga Revyakina
Date: Wed, 7 Jul 2021 20:55:03 +0300
Subject: [PATCH 854/931] Test image from the CH blog

---
 docs/en/interfaces/http.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/docs/en/interfaces/http.md b/docs/en/interfaces/http.md
index 0f497f9af80..a4e3d2f71df 100644
--- a/docs/en/interfaces/http.md
+++ b/docs/en/interfaces/http.md
@@ -18,7 +18,7 @@ Ok.

 Web UI can be accessed here: `http://localhost:8123/play`.

-![Web UI](../images/play.png)
+![Web UI](https://github.com/ClickHouse/clickhouse-blog-images/blob/master/en/2021/reading-from-external-memory/all-single-read.png)

 In health-check scripts use `GET /ping` request. This handler always returns “Ok.” (with a line feed at the end). Available from version 18.12.13.

From 02c6b55630ba988721172a127a5e1b6514918c18 Mon Sep 17 00:00:00 2001
From: Olga Revyakina
Date: Wed, 7 Jul 2021 21:29:33 +0300
Subject: [PATCH 855/931] Back

---
 docs/en/interfaces/http.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/docs/en/interfaces/http.md b/docs/en/interfaces/http.md
index a4e3d2f71df..0f497f9af80 100644
--- a/docs/en/interfaces/http.md
+++ b/docs/en/interfaces/http.md
@@ -18,7 +18,7 @@ Ok.

 Web UI can be accessed here: `http://localhost:8123/play`.

-![Web UI](https://github.com/ClickHouse/clickhouse-blog-images/blob/master/en/2021/reading-from-external-memory/all-single-read.png)
+![Web UI](../images/play.png)

 In health-check scripts use `GET /ping` request. This handler always returns “Ok.” (with a line feed at the end). Available from version 18.12.13.

From c6f8f2844c025aee5253a896a1868339661f606c Mon Sep 17 00:00:00 2001
From: ImgBotApp
Date: Wed, 7 Jul 2021 19:12:16 +0000
Subject: [PATCH 856/931] [ImgBot] Optimize images

*Total -- 79.00kb -> 57.71kb (26.95%)

/docs/ru/images/play.png -- 36.44kb -> 25.98kb (28.71%)
/docs/en/images/play.png -- 36.44kb -> 25.98kb (28.71%)
/website/images/index/hackernews.svg -- 1.08kb -> 1.00kb (7.78%)
/website/images/yandex.png -- 4.35kb -> 4.08kb (6.28%)
/website/images/logo-180x180.png -- 0.69kb -> 0.68kb (0.57%)

Signed-off-by: ImgBotApp
---
 docs/en/images/play.png             | Bin 37317 -> 26602 bytes
 docs/ru/images/play.png             | Bin 37317 -> 26602 bytes
 website/images/index/hackernews.svg |   8 +-------
 website/images/logo-180x180.png     | Bin 702 -> 698 bytes
 website/images/yandex.png           | Bin 4457 -> 4177 bytes
 5 files changed, 1 insertion(+), 7 deletions(-)

diff --git a/docs/en/images/play.png b/docs/en/images/play.png
index 583bd8a7ad9cccacefe859b0144911438b771946..b75aebe40895e1e6d92aa2189e2c910df3eaff5f 100644
GIT binary patch
[base85-encoded binary image data for docs/en/images/play.png and docs/ru/images/play.png omitted]
z*d_B|quA3TDn;H~K6mid-=(umc~*9)FrXzPe@1@znv>W+Rl^@iB`244m>-^(ba}PBeFEmuV31m``1pr5*a_{p{Y(uC^OMP$Xs?8GcTL-cs+)wwI$ zjAgYG()1tX+m(`d{-!GAuBdZlMQNk&pB5Dam2FP-%uC`c?&3)WEsnoj1} z{-0=%WPwxqh?N2GWOz9Xhg-n_IJroew!)JGhClwpssd;H^C2pWw&JxwaW~YE>RtX& zzG;NVtwQ&{d$$Jv{;DGQ4M5wNJ4AFbUkdp%ooQ~E@&3)LTmB(WRv$9$^GKUS@=UO z)cHn-UFgZoKeUnMyh=+L_G)Wqw>sWLK~<>AoknuMW}RjwZ`lBI-N1Ga2C*=}t95H$ z*&oB=PJe`NX&~yn?i9!9t9Oh`E;(2|$utJLSdF;xCZxS^V1hANzV_U=aTZ~Xex6HJ zI0lb3>d>(S<5Jga0MUh%cU!rwXAHMEhrr8O*(+0s7bxkOLIAsvGW@TctnANzk4)}s zLq4nMBD+1a^L3>FHDHvgTgwdAP~T5>W9*xdlbn8#!VwG}@XL|#{Za0!Uy|u8EBR8) zfZEk)Joti@_M$W3TRqDSL(*b+6AD0wnhE_Qx>}IX!n#&b5PI}0qa&)E5+AE@A&D1s z;r`w%zZ?G~J;ZX+-_ayxA=7}pIz@(>nth==Eh$AGvX8RoKKD%A!t-t=T%qU**06U8FA0|QUstI3;I5nzX@ZkMj^l*5wA1^%JTH92s=Hur zKWB2^@VdFV$;dBl4(!ug>S^E}=(zd~0K3=T4l$?pBZ$W^6JP4*Of1EdHePl_ zJK5Vaq%she>7?1Un0%GTLQJ38y@*GwQp6w=o~AhGMuAbHp|)jpw} z{(tINl``=x2us{}HvLL!S?orVraO9d!P3Csvvz5JK^_XEO5)!|sNsG@P3z_QZb!Y- zDg9~ZGFqH=r=t*-v@Wq2T92k*DV3CYKubkkG-hOa5DaKuSL$v)k%jIxSgcXasS zt1z5Yed3FIvn`4Zb$+tRFF?C5c>e&~1Tb$J z!S6WnC5S4l5889_7J$(7lU9~0cN$vmGYZ)-F!FV-xHq4N7&I-dwOwBa5>1iVgW7>q zqnh1ReidV05ITpZ(-!wGHi^zH*=^s{`-}Zbk8Bq`y+(!A{X~+ape;Y(nv-n3f4mZx z2~ASPsRDNNySX(xvo$2xY&`i1uPL~?vNBtgJ!ezLG`O^#Uq!m%>M0)MC;SR9^)NHe z`f~SQws<0)B~N-}HgxK=h?FoSpXOxkTzvVQw#2%MLZ6nJ7`VrgQ?Rofie0lHi!ryOuhwK?V zBG`B=!q=bh58| z_}lYIDl|)SrE_v=(Jm!RU4eR?xflWgHg46$Pvv#qjZ^7tuJ^yEaaXuWrjF`_)_2nG z*@(&(PTtQ*23C2IUc~22d!Z<40Uv&g8^C-aFhTZ!SqRvL>pJ!CQ!i<%g)nFJEtz{4 zjwBbo?km%%=opieX8dJFYO3+_bm#eICJ40i)C+Rk?U%a#MSmfgfe^1jpCEk3wSm!Gr)Wa5;7lQ|84y>#qPL}-Fd9=m1U$o>b)kI0Wf0cx%4Q-%L&) z(TdVy=ju`rfdjQuI+;q=lERtVsl&L-?fRhFlPfnGPPTyv$#D_4>i)%X=-v$v7nqaN zM#k9=PmadPvS!c^=EtQ5+J}orP;si9)ck;g2tD; zax!eVN@rzO)4cGI9trY}rVT*~rUgMR;y6n{ofBpC>RNIWGw~5F{APvyP+vd4Ayj#7 z?dbTp5h0%t$8rURfaG>{hv`T!l~&~se*!`ygKR02A_nn)7B5H#yAHmBSU(WA_-r7` ztY3`+F^}rI|C~u|sM8asS^4|Jqiq4`mn6BzWo0-HRZ40iQ&o5#b(f$d!B1Sk9jT3SZo!4L)*IeB@e<5K7mf|rdRqS44LBT}RoWX9816aC zuN83GUR%gt8q}&;IHDR_{zn~aT^O~#Ws?s_v7=!=x74g6hVc2tHDhXObon)7E6%iP zmNX{Lr31E*&Q>dT^IL9V(p0%HLL-p}h_4Zoj*))Wf%cE3NNx)7h2Klj#2OR%)FOEd*{NHL-sT=(4Q8+P#kU*+CXi~&x6iKY-_MqHEsIMxKep9X z1>$)0XY$z|F;>mKaos$EM30%-ciqRvC47w2eucZq2sYrY2(D;mW;aKd^NpD6-rPx- zba`^zbP`U_14PZ!95_S{DV-8Da|Z&On@NVk4ywFf^Tn>Y@%zW8K#a0sRCBPSrsLrFThf9g=z#}hMVuM6WB0He~6Pl=+px4*wXI%3v#f{6ROK}>x zb6W=h(iTt8I}Y4DfA(y4%lpWK`O-x`*%`onz&yV77>xTH01rn^pLQsSeCy#?;p2?_ zp4C$3Xx9Ng_Vz(5-(ih>zL-nJ5#J+He}9t|073xAf(~%#X;gap`=Kz{2!Q8k&0w2G z8c@@O0uTVi9__9IFsc{73W#A0_smBNk_QqF{`S5iFLSLCmHqAl6QaYO$e#F*qw8Xm z3hH$l5F|F=;9RhQQsjYtA*8V#^P8K_cVb410%Kp?3k?fHbljtRz!(AtzET`G#LOGrVkzv zuPrfIS$3L`YO%Cfja&dZ-Q0Yz+OTJM9tVEGjp&q;zOOthED2z}EXuJ+L;w#$SscK6 zP1Vl33Pf-L9s-zhXZTEw08giUJb=DbC^IFY*_GPF*In1vogUFY~Bn0-bUOLjWXFP(Z-AIq(!f3e5bs z+J62j%x3;Bgw8mid;9|epK|Q&?LlDhGT%AY{;9E+UwCJSp0BeD3VF*1^Us0uR-u8E ziIGs!jou?6Ey$)yHG?jYfzfzGV>-pM`4;|eB)b@>0As$uCOZQEI>QT~{uGU}?`8Vw z#ey{Ch8Om}*NnX%;ky3h((U(+UnUI4C$`8pIE_i>+FC+11!2{3m+l`chgiAVIUuf` z*SYqjKrN&gTAp)>@-*8*inliE=ZXaI^!!!1W#>>_EciYQ0zX!Ek?J$WuC9LFdz|)c7`#(noBo&*P-JKhLUm?rs7birFrmqTk~(Ajvr#!?C)utacfIdkU`h6%*-djnV0E=e0rjtOv40p zbpSqOK2hKW@i20dQo2@u^`Rkd_EqT7M)&g@Y!dWrk8nX2&>hRRgfBlss1sys+G{@5 zGcd=w3Ig3%*{4^R;@$t!D^T={F>8)A&sZtRKTbACM?3vv-K3)r3&B2SAIWr~LbA2< zKJrbJIl$nM_PhX3nN;==z=VHMeQEIe&3Rc~EonYGS}pk%-6O96vU9Y8rl#X`1|Y;) zM&0t9c|N%3*}a$-KY+Or9zzti8%zZr6M@xpywsP4{Z*Wr4!M~kMBHau+R&hw7YyC! 
zA4!LxiZ^ESfv-%zLJFnd>pm-+^50gMPP1a@3XXu)qw)m7X)mtBbG58s_tF8em?p4% zWr{W*^1=-)yNj|~E`0^6RiO%Bw9)fE*f#!m+fGrf0?SK$O2; zsys;gO{yAV-{FP3uYA;HcJ>wE9dn5dxPyI;Kj;n^l>Q_W$X$4w;Lm_=>goZ-123P0 z`!urWVMs=0DY9~(wjxOsUT?M0wL`CbrWah?!ok0l|FKFBw)hW-JZIN;87^(TmW02h znU!Z8GrAOCDUpi>6-ZD6%EzC5AxZm;IlGi7{6FnF*I3ix@dnbfW5(%u|TZz94e2@Ph$U|FpYrUa2X7*uNQX$efI8z zsCAUJJmR`(Z63O{ z#KzvFCBQ_IV%vREeRR%d8yokdCuREgrr+c1Q@phnfeVhJOII1&#tX*@WvW}F z(=1Ujd-&jpkzy2}1sLEGC}aspNYe0-W%HG?P${53>41uvBMMC8-VaI^ve)5BTe7Ne%n@_ zW4l_+YVp#Fk#U_-!j4P|vcn8wHg2}BA>^W4==+dUD?gSO@ykow^iV}pu zBeZ^D!eLW9+{erAm6uvQo06hEN$0?A)nV9j{KYo|9(ZUyci6}zs<#Am9i|=N=E!UE z2@A5~j7{wATj)Mr6R|eg9asbe$@!9zYsr&3NFf6tH_O1(vayX#=sAF6o~*|M_#5nC zmd)l7O~?m_q?Q-m)((Y35%07fI77a_kl$?^fbcLyDXDz8+8G_=?Tq$z-m2GQsLHw2 zrVWxv7*W+@0I)mPqt~$v+toL>JPYtV5mY@B z&$6*`r8JL4Uhy&ffbz|3V#2(%+4_FZK;vk9MJb>HR)9iXl71S1=0PH3|F8}g+7FIv zzUm5aXSkYgH8(cU4%5_=iD!d;gYd8#p0;^;*TG|V#M#_;^@5=B1dAV&7C*&mX~;}R zm-$s%+I*AU&7W>9A^;q-sAqJ=I#Q5gx-QB*?)DmxuLGoG!9#^^+ZE+Gxm%jOx{8t> z)H02+2an|B4bn&KoWnQb=R^`~+7i*XWePxy-(Seq8Y$Fz z13@V)VRtqh>7c`a_1p&3!SSj<-FBeVb`Yg!tFJ3JHNEq{`IQI6iojlu>?p&>`Ox zl>u!f>o;jYJh_VgsW{lkSP+5(5YpEiAWx_7qnz_w{qb4BrVIRf$i&=^TTFY`oDdq< zE-J!cE3-h#olDkecZ$!7oiX~h5fDtFf=tEwy%yCwNM3QcaT&j){hVSfp?mGct6avM ziJRfn3GgRioF2HZkcaPJcN_xlqg_W7huYHJWO5md!-ZUTlUTWeZR&4cX#+xadYi@J z4Y##9XwC`0Ss3gL`QG_jvIH31ndt z3fbhgb3_%Uz}?~caQ=33o(7}*9nQwb1hhP3=F=|u}!~&a4 z7if42HPW=%p1Sg+1d6UB@hLCE?}Qi&ULCvsIADPY+Tqaf?I_7a;Jl#D%dnjP#*4saqbbj06SoSJARW4FU+ zd$Wr%K`*CbwEG@TdC!fuB)6LMHAb`l#J}gsH{&SEKg1s*Ndp|-WW)T3q|@A|V#iTu zvxcH^rwT-{Md0kI*i_eXxOFr~q0~vl;TrjDff#@j1_iN|D{P!pZJV^?<$awqO36#S zyLZyNy$Es?M#(@cK4xYl+UC9Wv*f$EG#lgJnA>SL#=Q?=Lu| zC+nJCEx`nR;+9!@k=Dyb$@n3r^TSoX4}eOj@ps-Tz@5TTXI6SAyV`u6PSr5T`%|Z& z7`{pUbXlKRmG0i29v&6O$k@9xhsCYOv`s-pYa2c;VC7V4!-@^8F-NTnz|oeV=Ps3#Z=) znVmM6ACglJfH|%d-qyU)*ErH0%XptWi^`1}ytHzRTAV7(_t}<6PEh(AkZi>`jHMrA z&QgA9o6JR^-3;f=Q{e^r@THTrNCS2H+O4xcX12u?yu(Rc7 zD;5#GU+z8c#INb_ZM8KV+eNu! 
za|cs6fke5z5q4=$!5GA$+AJ`0%(VU+%x2xsvijH-qdD+3Mamo$6zRL5z3*kgUw<{1 zud@^J`Z?WNmmZQQUl=d1)}1wYr6t%0$g`YlcWb8`VV4!p zk;jO?lXK!6gWbLK1lx2WY^u32GjL&RqRxwM114WV{yDyQhVmEn2E=dXwH(=dXSyjl zIr%gUFDsIBm=0tUYhr>PU!y&J{a`;PvTK8L)T?q)I66X|Lq-R&UsbTRPsdOs!ANV> z?Ebo7k=RPTH*`ttc#q3>`+#6c#L9c_{5)e-%)@zY{%FVz#u-U?9TA@%+wBn*oHv)s zVwx}dMXi?rZ%@0TxuP5EN)D2vQV&%3!sO%=FiSBt`$PU4?t6cBO@-FQ04!TC$7UvascX#ridh1fDFl1c<%U(ykzXiJ8+L0`}Yz`7lJwV6GO3fTC079a+c zV_Ht18lL03f*JCL{;m7XeurCgg!I|oyc++z-bbfd)cYfs-jzC$GdDQclKievd~rMy zSLJACy7H*JpEA1$Cx_+Ht=}vCR+E_GQ zh+|%nu!Ea|xtuO)`fmW-3^=A^Kf)_Sku6ua=r+(euLG`JP9R6y`;b(2mwJyVX3C1;4y5`?R5=+B@~<2R1D z<;5VAyPVNzMzGwHvtvVJ{m)e^^Ve2Ba-7i##<%oOkN^+7J)uAct6pAeC@;Seu7v6X z?VnGPMw7a&I(+i)%E5JM5*(@#1KIl=R3CZ@QE zSgD2+xcj3rmq%qHn99A}gI0w$E-KD@7wBTO`w00zM5&_66NYXes>aNBm8l5I@WskU?G9BN(q$qWSLoIB+=ARdn~M9 z38@$BwepN8paL)rP<&e$!_Mo?XdsSa@J0T7NF)Wyp^LM0|k=b7yk6q`@@RPgz1asPQQT!5PA z=tF-(bqf?Z-;%^LuKNps&Rb7T$9p!OLU zQG&q|BjDBm4j~9G{nP}}nlY%ykUKJFGZ_9Zuf)MMWKjGTrLo{HRT>D6YN{^7vS~ME z83>E}((pY6+?3%4bH<=cywf(KD_}@&i9=<*p%^2Wh8K4465tB`TnT@(68=o*=YLgvUKN0?ihDFZmItfknyxhoZo$a|a`9h*?=hq`69if$gFC-Q_p^5miO_f9`d4ysdx;4;wx$ z7idrQND6xMY-Rmf-_O~SgGUEOS5aj+HiV_ra?S867%-rp3>!kGmf$bS1wvAYI4~;T3IRj z8-Tl@-6Mu*r%}}Bb0I2G2ScnOo>0i0DCxe@g0x3JNL5QA69svRXw$)(10Wp`QA`CH zL8b`)Rmt+GE7u?EYl48q6W&(g0f#}^iU$%K7x?}$TCt*Bl_b7=5liMq#4d2(G!%1a zA1UO={8ZgmkIhwPNNKbMUl-Mu23*lS>sU+Pq~p&YtHejdfB6#gyh1mXy@K;bDoYY8 zglax?j~5Pz)!va+1bPU>luf}gukrB!jK5;XmOP%C{upWm5v6?WD)G;eRR&b~iVxy` zY=fc>lQ%fk!#wTo9qO?URl6Rc+|l=ZShwxK;qo(YZh*1H`yYoA?F2+Js`7jCMpgm$ zLWAL=!+j9o&jeb#VfsG#BQzMkaYq|MNRE*;%H57NF?Z6dzmHAqU#iP^uxy7OxC}^d zQ~uD*UVFB&7(Gos>^>#?RVv6J%PN~up%7PBBf!tc;QVsb4jpY?9ven;Ymn(7C&5=WKEG950}(!dUZXt7z#fFoOg3N$!q=ApljOt%zQ#606aW-aXLRBf=^qs}0%z)U@Sw ztq%bN2Z)sF%wzZHBtw7I1j|cel5V*>zb+<)5(Y% zKx`tE$?TUSU6kLG2#cHt1n+@p!_zVg0VbOF4^MB+=FxjZrgtwKf0oYDDD?X7$_R#y ztVDE_vjb5Ha6p76be-xvRSj5r;0eLVDyo)G{?Fns6j7N3Ec`SBR)$_~S2c6Ti2b#z z>gylxbwPg&K+)RGyt8=&L)BKfL4QV3P3pA+C%AyXV=laW_d2GN(_M18LVTFj9R*@w zm#pyz@=kcCZHs>;J1^U%u-8rQU4L%k*!lu+*e@Arpg_8)+kQ5$b4MU08U^1Pi*|j} zG^`&C*VI0}u8B}&7`kBF;reX=n#&AiM|&KNEv%^&WKWH~{24Bd$t><*`RC9u5(UJ; zz{ox5_=>0a5Fa&%V>gGn)U0FiP=qw5W%TDlWQg%qUmzT_%d23&u-Wnsv=Vu>$PwF; z@N_-iH_J797Mjr)(HRrHnqu;YXV$s2z+-{Lf@eq|59fSnFZcO76T+|l()Sd_y516|L>_d4^W(s`2T^y z6#uu`_#Z>~@5(w1|7Q~a*CC(x=09`t-<1Ib?w95ApX+~<_)q=wZ~rp||3`uP@5=vk z!v8~}1PcC(Tl+Vm-U`%31n)0mZ(-Re|D@61^XM(lNa*_ZeVCE3K$8B&y)PBTfY|?! 
zq5me;2;>F!3JL-zZqEDH=uJN91yDV%`A)!$6Z{1oQcUA^>2pxPkrpGnlqcS&*MB#- zz03m~zZ4%21FrY^RsQ#LHzE%ZZ{p)qsm}RLK{hL}yt1-ThoLG1`3q?Hx9_Noi3YD; z`!jOD>27Q_Dvu5FkTJ*m&c8HSb`c>Vlm_wi;YwQ!KBIgpPc zvEBWw|AJnCImL+lk6ApnYeIQC-<#iF2ZvVY%ri4Oz<6PXzc8>%xBVWpsVfRzA9IBk zXUy}dgv3LQb*y07v;2XM;G79v`rpu->B~Vr=(Q>(n=a*$bMi(#ITvBs#+}Be5i0BL z+`o~wMjZ%`EU8PmE|9c1_n(ZnYSg^BrH@gp;5l@>z+BKCAX zYDcN|u3TR=!!CL#oWTJC9EDQFKdr2Z{|)Ss{><`(yArZL0eu6`354%ORDyG+rA-$M z^=6uG{Y&$AF}FP^h~rcb!Ha zRyvHHS5i?loh~o7h12v>{X0k<#{3C2n$8ZibOf|KRYxH~={J*n(s_&T6FNIMmOG3M z19$gOIv}1Q&3nI`lxt5+U#N)F=cIys+MDaxV1f{3-0|p^m`83_TYTkAXGek6{qZgE3_^zujckZf@nljk4W^TD zR5g-@o>z7SX#EFXJ?~A?wo_BObkoExY}F))EydPg`GhO}N&lX(3Twl({h?y37cC2U zsAX(}{f&RuEN5evoZ63dU1Xymh`%3g2+_+1S}WYW12nhioZZ&3P!hD?yDv<3TI~r+ z;yjT^!IO7m_Q_RJ2PN7)0Ucc#X9O=nRRsNz>6nRG*L94Rn&?@(=93CtlOI|98DkFM zvB`?eO4)>Xp?x$tQ0fMY$7p@eB>HGIfw^(7MznRYwghI=*iYMyzu-6wReN)valbEA(4dcAKtRT^ zFg_8p);zybKE9SKyU&SH0i=WGexe!%RRKzR?XN2m^EKlQFc=FuOS#6IM?1m^57vH5(pe~WFf}60-8=3?~4X}o*eer z>&Y^XC>@uUZ#FwxokQ?ARMRJLS}2r@_P?&!RkO1D9_(gPy>|xS>c`mgQD)So^2$bk z7j_?GU-ZW%A+$CpTvFzCik|y5q9&qB<^%i>pG8l4o}5e&VQ7?1t`-m{uYS~3hOl2G z4Z%@qFEUZ*v*0IJX2vZ|wmn5WJ)eX_I`UqhHF=>gj{FIU;DMcuV0&qUj>9%OgHF7r z{7JCYH1cx+y=9b@aP>)5X;t#w9)&e?u#Y!+_D3Cp%ZsySe2kdJGm(`b<`kwGh__t5 zxl0)kAeTlIH}Y%GA2>U%n^``Jb(-%Er)|Jm*?GCAFUfg{z#a{soXw<1bNs5NJhC|R zwmMy|aYJruv2=W#!=z^$foNIb@4P#e>uHp*_Z%VAK&*}q(9UU+m&93Dq_KuC$UmG@ zu&mhinIi$Kv|&-^RmJ{<{s1yZl|#J2&XD)-%hodX?Y&`ccQqKeC8mVVY)wL)%-H!25pW0^Z>g(qXlYu95j~3tU3p-}D%WwMISM2s) zR1Msl`Osyxx+QGPDHVWfb*#u}suk$X$?52r7|60#GEw9?oY>o&01Ku9^0{WK>hDfy zY#o-7Li99TOqrZHIH>gVwS3NK35NCrm7faT?xbyI5ATQ-0FHpQ&A){?I(%$fl#W29 z3WH^0pryn7+K{IDzBx%Bp^e)Ke-1PaobqtcHVAHaZy&B(;>nlrbt1DFDLCbrfa$~0 z^~QSSX_^7cK3#)veH5^M8z#40%%&C}A-Wp2+a;WLkysJgChP`{M!JwGr4fOrL9KIiIa)%|jtT5Af!=hsEtAyr z?dR*qPFU{=6JpdveSSx8gT46iR&-dve)jk(^ri2?`^mumW@L(+^G;fWhy+XBMl)lR z_nT2ao^3vVf`0-wuzX`>&D-MD$@d%tZOYsLZmrg>W8AJcaBhMS(-ale89#Yuv%aT2^A= zB!+S-=Mim6mi0rK^uSu@z1M&O+o|g)RVaTTAJFP1e;jF~$Ivi$(EgNpLx3^?_NU96 z2>ybD$6=0Fy++qKGBix<+@`p8_5&se&N?QLc&D9F2adaz+~u4-wf5DX&tbibGx-OG z59)6i2Z?$t>ogQqEN&-l$y}a8WbtrR@0m73yRA+F#M^(y?HY&K*Lc-GLRjthdg7$z zgFdG9t+<;cb3{*MG(K8PX>|7_wQLuFk_>3m*5Xt*YvdeMaXkVm9*p;Qk9(z~8>;TR zws|$7C#^P?xHythF*i~%Yx`;IMEjbIx)UvcW7-KCW9+SG96HFnsW%GC5Im!Py3F|N z8k~%#>844eh@YkJ!feh=x==%rwd+C>4Eelfge$HDWRiP&h<@oLcS+AJ$19Yh8jD6^NL1VPS2_f6n{aucXb2!;pd!pX#_94Oc$t9eTvXF*&iTmmPa@SNG`CyW#`8aB) zgKWZj=+!CEITsTYw{n;v9<)XfscUr`W z%GL={jtoM`$bQH~I2dcTk-{YVZmdO8<4_o6XOP{s+I5ASNk zjaT$aK$(!{)$?&!7mT^D@T+Tzina?Eskpcu`*Qf@1h+_40|dP3G;Ot&u5GxD+AnQt z4d)d=?CP~;9U}VJ+rz6cz`j33-C8Ial@XblP-Q$B$_Z6XUBOfVv$!DVs|qGXBHDjZ zJ_EVE7~gwwP1b>Ny-+nCG2&tS7|DPKYzpNO8z0z0)T%Dha(onvKX0u7bL?q&Z%m{4 zim-kj87&j@^)c&aww2O8I~tqIB*la?&-sT^^RS;cw9AnJw+%&VxuDwu+F#zW4DR$X zG$PI5V8Ja~XB&nEF6MU@>0?&Wl`VS({JO^@E0%AaXuL+p6}~UrsUdmg8D$==D`0Ab zd6NaQ5rGR2Si#eO>1*=IK#}$ASnilhMsoC;+gm=`2d>N8VJpM4))RSM^;yY@Av%Z* z?oCx02Wqr`oJ}(?Dv|j(SbY?vAJb1y$|})8KH4t6QG3sNB1K^cv8arTE5~MXdn;6w zdnHRt{Xtb7y7}-`dDt$DS=8$e7nBtKtvn15HC+zp`hEDC;&O>}8M8_ZF6ZlSc!g1< z=o?X=sm-bc^wmv8#R10!87?!SZAm7i88or40n7D`t1I^0B7F@=*E)L*YnhoOXlT@H zFLLoRJk8G@@#RPMx;n48`%+O^6b4ts_iab{H>eFDmuS3>Zjk)x@(!*=D7oIgbjtYL zjA6s|cGu7Gn*|a;@XM)Fe^1m@-j(MnKOfMzTWM0u+P22yVFD8ouG0j8QPJmt4OA6; z?KoI|6D4|oM5lU@@YOvYw&KEFpLAErUrWZVt$h9)Ffu{gG z`WK4SnOX9r1V61mIS}%h=_2Rju3C$J27!jW@=wXJi9(zffrdEE()EJ)fCsqkTHgzz z1Au5mvimsfY&{~OSFAo&$(f_Bpxw#aHDp2cMa5@c4x;MLTQsj`W>%`1m+5#<)?e;R z_Hm0FAtx6X${U$k3!3aa+U zN-snB%H^*FBmEyw5%oO}ORF6psfybfP+*bQa8#~Jj~d781~s_^1k2xjwOn#TDMDHa z(`;4=*}w!iF5u6brMk&-W;@9zgqe|xX_l~NCD+_-RzpupM8GuD9;&LkAKQ}BiGzTk 
z_1nr;Clc<;nsr4FE*EZUfRT~bEmOYUT@h@I0dytbkdLjP`q!Tb(^}bE`AcsaymB+r zB-s>f?526mp)IdP>&2NZ&WLCbdn$IlTXIXKmb$LvI#h-n*^MwH4tZ)(X(R>0-IIO&6H)DUy!;(4UF{JHyHVu+!6Wl z)t$YLx3k~{CO@~$ect^ab8qb?V@FB9rQYny8 zz~WT*4{Qu$WnA1&^hd4Q4j0QMPn*cr>UOYX5Qa84^T*1k(ZEbcI62ksWMvPFOd1uN zD>yw$X)vEKF9ZK9K_=2(pk`{jZn>)COG0eP&p-EagM4}GFPH?9jWKnhRV%CperZF< zZ3VMk#QtUT z#G0(%9rMlC*FfNTpN+5k+JbvYx|ZjLgmuP%jswgEN;s8aZ%{9Gb;!Y&&3XY{;*&k; zDC;5+E1P5FoCBV$42F5`d~818_C?$y0(9(q^E{PedMTPW89F*2)^2LR(o!wNR31o2 zb8!d$nE3INam^)ptFGNf(H|vUsoygmS=q5m^(k`IG?yQk@Mvo&5V*ox*`BZ1G&5^n zQK#rEG=b=-rE6lq3l=>V_Q)npFt{MhiG@Sy#Hw}fI|+MT9O>2WbKF}nV5p%#WQ~3+ zty5-JZcX`{n)Z={cVEGc1^0AcV6AO0TzfC;rn8!K&CtW;eF@_ZrBEZk{n_#v3+L`HZ6Cn@%Xo8ioO8fIUCEwSP|wrg zi7VY{77=HmUvvKx(NF~ER3dGjd(n7h&P+T1wcF$w8oRQ2}yS{x!S?*|vo2BL-3 zNqC4YN`x$H>6S!MB&HAzg*V4MRrlv3f*wTgzeQEYmmw9yba`el9?C$=-2q_;Dsbww ze_(sTE0B$}kb60Capn`*Q6Cdz92tbZYVJA8MQkZ2prh9R4BJWuaY)Vmz=7^6nP6h7 zR3N#bJ2w|1G;!G!Rc%^iALljMT@+;g<{$Nhlb8|iogP+eU7RK?w=S=|KRFf^`o2(r zq|G(o;4erIRhoOeRoi7}XCHK*d2g$3mZ?sTHWytVmiKIS8y2;F_sx59;d>t+E;meU zI1QxKh&iW|0t@cx?A7vih8bu|ViY{4xkzW1>s(+{>{%4qpIevY~2F}-D0NORIT3sb;6uH73+GByOx)5ykK4GSly3 z>KV9l;84dn2u^#k?Nr)`ZmVu9tRW{>x;T6Cmnl}mf&6YNI86bQl=oD1Z6B3t_bE{N*6gb>?o{f!hb`4`><@*MamQ6N8#n;e z9Sm6BiMh>uwHaUP!pgn5Wm~%TNJ!l(KLlY7p73lDLDO+vHI(X879?d{UAXm+wrJ?~ z{QZe@L2>6;WDWoP*Uz z449C$`m`Q&E)EKHPrX1CpLzV)XXMln$`b5WUKQ4Y2JY4_HJI&9LVfnuq$c*)D;u`G zUg0=52e*^mmo4~3_uWu&O1I?c>_9pujXRR&l-FmoF&9` zX?xwWht_`VaqD!m>+I93VkfE3G|#7Wdipk^;DgOL#g5 zyj-@XzXO_e>gzh5z4KPB%qv-;vluMVNpq*MEg!5i&;T};V22X8p?gM5wbU18g(k#EqsbKSgnD3 zZ|BuDAtGn>NrEV1;eTKW{9y62=9Vj7ry1~$lKbT*aL%>;b0mZKbILESrN1rjY}oW6 zZ@3Ow6ngDC*ymYi{S3TE@38GHSZX<`xRbVLL4tOL1^VawP+UKGyla82uT?*G=~(^&jVj87ccT1di}E?5P_J^AwsA+>%gINxA?`xeF8eEG(m z*wb8t1;u$KSh*V4374I|V(J{il*`@Qloggz(;w5&>~5RcLahU<{w9dK*z!#PgSRo$ z7UbZpT{|_rpIQr|l>EehzH4aFUHNnW?DY2!L%n7u^|G6l?a^PV%jDMAR>zZ>#IoLx zx8A$Ob`NE#c%GHiMC#et)SAsYJ;o|(0Sb$#;O=s zC@vR!ShNR*^7@kRcmW$S!N>?a6zbn1niIY^vN%KYTw%tkvWvy*tVtKue96u8-(#jI zP5C2wQjR$6XW=fYG=*yx`SvowZ9!-{bzPc5O*X_}^`uTo2sKvZK-wkbJx5-oq;~!l&~^qWf0oj*uZN1UlocXr0DbkM zRqCH$+h)@`n1>%j5p3D zx|Gb;b;ZesHlv*~wT;!XLE^g_?Ycc!%xmhjGC6MXU%NMn|G9f3dvA`fD~mL`ZZk%h zSLfvSRY=9V+P2KX1Y(KA1{mzFVnPC^t6+=qP!f9*?xb zraj|SS|xlx2PQFdOm1fX zA=o8#vNE$O3A4bjf7GoM7WbI1HHR0DE?0cCP)K`H7ePhqE~#rkqrbW;D;|D)Mp{3P0JC-y zUd_`nQirk~UH`sJWg}ut>5*3d?CItLZ=Bqi&19BT72#JPmJBi!v%Up zR;#T>m()Nc`T$3l74OIn_v^NiO`8{kX4K-SjMbP|FlD46zv2 zzDdd)vu|@-;6GT(&tA_a{mM1TzEL}Ws8Tz2%w7*98+TREI?h*FtZ6E=Od6b!_84%D z_eHFc%mP84^f2?uL)xx5_$ZlVaF`CdaG39{5F()UEw5$~$`QY0WXIk|2&w;2Ayf?d zoB;{eMl19IDRIA+mCx-ToZvOxiXzAe?JfW(dy<_C8F`7x&W8v^x$ZF^zP1PohWZJB zmxIcW&_A3B*h6@6n8_^`0vZbwoUov}k|IdDYQg^XWN5OC^qPmRhTDuT)qlBFiKa>V zs7Pj*t)E(gwXZXA0EO6#tzf4*FX+!)=UGdo8#}FgeII?`H~ku%zJBlxUBf=|&Ty0Y z8P3!!6S&%ZB^V%eyNyy43yFv?6Q`cw;WTnFE=i3z=t=80#lUj=iqg4f!J&*?ax1~> z0jtlISWYPUO-I)fviYxvswkaXRSj63U}9W2p>&RM&;4q$R`#;zZh-a=o(2@_dn=Lze%lrFO`ttgChyVN)-&@IAle;r&Z*dU5+1J)4@sZ~n zd_DR6*Q0rXOm2(YO#iJGrux!HU*@qd07RjomcXeDU5r0z)~6^Bp$po$Ga38=XNKxt zH-{(RfeR8MJ8KDk=8&i=5V=Sq6;i8hkGhf&>u8 zgX}q2R8()+nNg-NFu=*;B>XL?XOC+ph*Rpn`zQ0zeT_^_!M{cZjYEzVpB4n480q8o zsw&2gnIW>lNLcww5ewT$qu%*d!m#ftrPd`-ofEzku<{&2_n~pUSv>Ryy#-}K*4Og- zXE^@YeJ6d|CI%o6-Y)^F^C3D-b~P9jO*kETHk66`!bznYbwB#*=bf9-QL@;f^yR%d zJh%UTG2+~b@yN~_Ubg`B;R$LSgwrsOG;F5PHOi!XCg7Hk-qYFaOA1vR6IYUXUOeYE z2NRgtH-GwJvf?dQ9eyy)ecWF^fgubD{(m^5e^2J5VeN5X*nn%UkQ+r`1!(ZGzXtw0 zf%1RxKmNY1Go2=U~|f4-ctWeK@j zW8*~C-HmuF6ATDNpsKX2o|%?Y&*$5Ch=m-kPYr(}A!!LtiL6;Yg)?^O^Aio(?sjbH%r3_xn!PVPw?TJVrbhR{7XR6MY-X;}Rt z$bs1J>?4KOqhS|Z23eP(Z9?oIS_4EK{qt+tqUT;VJrJ`9dwO7Jb>Mk#V78JoU0Cl+ 
z19%AG>J2*{4F83`%Z61zZwfjMFICnQPzLo8<&REaMGo>5MbFrY%}}vcgB~Qomn`j~ z5RxET5x1cRbgDcTO?E~uW!iG@vE5#y&G!rCy?~2gOf-77jk7Gj@ypp~$_hp_h_sKh zryb13*>Eij$c3;Ib%UpZ-^gYfg`lI{rN}34m-1h`+x8?sH$C^A;W-X7V*UA&LHZgW zq#B@%zuDQ{m;)|o)7A6V70pB*dJl&^Y&-{8s@(&XC_mcTbMZZ1M&p&IB=aCNq*3ys z2bPB%9&VV5>wrA&Y=VT037@@#clC)Lxzq6yE@;4F98qOTUIpg=c@3MF8oG+(ES6^w zS+5J$Znz9hS~_wqdvd&>{W1hVw?gms`J6`n{A!ykF$c^o3;>5sw+~o7ME<|0{r(?t zmghkuPaG`SXaL&#lz7+|5gL2<>-IbZ-hbhP|Fc2huU&8(Tu8*r(fHb9tR!oo_07JU zO)C|D>=>xuV2F8A(vq3q12U6vz6-!Xj2U_(2WkD`rZaU>9uBQwHu09zn$xLU7(S44>wfJP~s;qBnLEPG^R1Hc^Ec) zDy5-M-sM6-3Z9pTea2ait?!v<0JW!IcM+}5g**JqI2PdDw*4~ZC;>1o+*dUHOZ(_!=fW!SC$)gOUnFq1$KQ%{D0r4wD^M)++o;vO^N>i@;kY{ zXdv~UTMH`Y(Qy&SK{w>~(HAt|rg*_3<-)PUbA}vGSkB})D6dpr1nOhRb~U)mZj^16 z!af>{k`w*OUe_ci1wfN7{b%^teNF;JX_1&LYlhkD;VE@O3Fl)8`Wl=y33Y<}Wag{%X*UupIF$Z{{Q1KJv! zhO?`vse5|w$8}Yps&$uqp^TWNGqyYFU#gu8!S-Atm{s*g94|c8u}U*pPiFPu^_upS z02uIJ>PZNU)%)>5ePs6;@XY%MtmU`zpUlq6SlhI z^1#+lkk7&%9Ei{h`$!!jt?`Q#Yz|QhXUKm0o6db;Z9yrj($n8`(V^LJmesS2xXI;p zl)MOTYf;QALu+$T?fQl9CLF&LmopF6JZQGn^|PHrf#e+KEMM~$8)&ewUw3n=fl?=Mz$`vR^EKnnG+>1b_kMschG_m z6uGpWrVmMxpnbo{G2dJo+6eLpgqmt zf=(*L8d~spc*omUg1>3eqk$>h(aF#B7nfLEiY_6ss2hMC`XdhV{wQl!KMSG45M>P@ zCY8f+yk`_tuNJ^0>$5m$+y!Sn_8w}x-PUP{>d3PG0^mbFQ*@07Lb^E))JEU~JX?9Z zu?g>CfKV4JaB(L;>#un_b&KwzWkI5LUaE3iKE&EoJpkV08RME2a)bk3%JiPW#U6K% z26&w3jgY8Z2&;*@3+&^-bYRYTATy42WQ`MTaL*rfQbq)6$YC>WTo*ixSSVf(sN09kq>*4tlI(rbSqb zb}#}16)@6QA7nj>(Nffvlpw}ho@3f8AUetZ5rBVtdFjx zA2@vLHQpE|C?JgvKtK@t|6ZRG zX%TUO522hTWksR(p&-x^u=^w03PC`KKqN&3RopYqHvClP)UzHxDCMR(F|0~N;t7;T z>ZgCx!JZ9+RM15z?*+0BuilFO+M4Qycj{zu>7>neX{>#_<28M#S6-cRhIAXE_nl+` z<1fZosiK=rQo_SVRQp}l7L%kKHY-6$yloSN1ZNJ-413CW1 z^uZ13^B*wEZEC`7(3;@vdXl{q2(YiZ@y?ZbrH2k6`umC)m^-)wXXW!pL|*?K^g9Tg z6>VVK{%S^?(BaRCYUiH{yIi%hz{y^Fo=lAjwfEDm_Xu|D#j9b`pPw->I4%R=dWCRt zagA+kBG;PioStq>ziqaUoUb({w`%;Gpm(Vs5J|Jek^FSA!uaWCKPV_D^mnkmeDe}3 zhZ($$PK)D@&Q1XW-$z4do{fxXVlNoe({%%Fv*4sq@1Um>Y!@y#N_S4;*e2eq(YyiIc=Yu}f z_3E{A3$`?JhN%CobikEM(Ts&X7vA@jTiMS6$V}A^`=jUr&qt(6%F39gqVdzy3Y~A~ zcDKOVU$2;F(?4EA7%~hF4nEFhzxQ)?UPrZgZnBzBt}ooe{Co6=Ms!*=`*vgm`LsbFj;((0N3h%X>N#n$ur^$Hh|kZ@81yCEHIDmIz(mK#$Jb({5Qe3pj0peJ zRPb#*venXFp%#8fF?HzTD({{i)+Q=mr*u|fW2`RFo+wHGsGkDQp&^96?t&rNGNuc^ zRQX=7`n>OE#3Ote1flyMB80px|5K+2lo>U2jM@D!Kc>C@z-4iAJ-t+K#y>11fEvjJQ4DaCFck@uoNWha?{K~^kUrz_txH|V#WwyO` zcYl64(B67pEDz3EZ*~Yw*7Jnwy54ox2k^d~YMynSrLEy^R+sAU|0y^mq{aJ*#jFk+q(bdfmxhCa0(cF)toNU;eC{cc?^ zy3hzAlAj`Ls`|ruH?z6`F9gh12g~3dfb&Lv_C?K<=bk2Q(WarB~k{5Uq0q4X}^zq>@mdk{7 z-}(WE*=)ZZ@Nf^9#mLl@e$1__i;JSZ{#21{Iu#w=UFR0CILgIx*JpK8wbo0Ontqm} z!8GBcwh8lzvBdNeih*C23`k8A)oy5{BsBM&{5-g@pQ|H$Me&YtRann>N1xo%j+XYW zOqBL?sWYSt3YVyBzy2VU#^IW% z?2p$&!G|zH&!b$;F7-{l5A$rF!PN%qz{$lFta#YS;5s zZbB#`X6D$<%}r6HZy5qUX<+eh)Lx z@-XX}LW&Q>fkGkx|Gl5AkI@q)x(8!bP*haw`NxXD=S~sWIH>OZvE&x}ZP2Ak-Tbn$ z2w=bHB@F)lW;_(_@VnbzQj;e~)b9!RiB8}O|funpsi9qYuM&tgVl}a)-uLs_-K0;Uf$vC-H zPzVm41HINs$s-J!>TMjOZWA#N`EkN;L(cbj=EiXnTh;TX$l!*#c-)RAYA=@?ZEii2 z8w(2q4c1E<9iHsJFUG^mqXrIfyRXhgw%R@70i~P)6kbPXJs`8Hp#lB-&5ffSQO|D3 zmsjGhmyJsb{)5><>4fBDr-SiSKuP|tJm;2od>|B3t?!#>Gy(spo$oC!2!Y?LgYWAu zq0{j!x$kwCujJ^*6T&+&aqPzfDW$P~WGo@!=wOLy`O9DjD~2h9a8*11dzpGw-VFGX z3JltOirt}R{*3*flLZeb4!|+xK>;qYSx{FOALPsP%+KXo(})Ma8@2re>=D@P$7H`J z%*hPy-3k>rY6XS9TLL^jcLi|HivVDkKp6DehoK0)zgk(*)YR1U4-SIfwrTx7J(x(J zUs{rfwQ>*k<$qXr`p(-8?CvK$pFgS`wyR{pg+z0DX|n!0feqw1`)po3;*)c$^HSNC zSem&a`0<1H>umL?o`I9Ob@S?+q~`cJT-3ehFCME}Ye(z|#%_`#mb=5_6NdRT^M~(< z&oXK@3=pFwSaD5UG!9pjppT-Y|-oA*ihc59o?#pQRV8lARFy_r2?@n(2J&-`kz72ofP#M8TS(#IA> zQR}wlTAxcH_gb6e)Oc+lFC>IrD4LugrQDeSp2V;|6K|GI#VN`)maZmg}h0H=tx(sU`f?P-M^`XLKY 
zV2ATn_D#TTfpmVnU4mU~Guw5&aRAD7{|*gj_bc){pfhrSS4w3y$6%}8*4%Ju&Qh9slNR>pfEcwMzfS?_&r3oo``7 z9U9KxWedyriNxIIweQPoUmn5M+L{{|UeJ$;bv&)e9LTR48p{FnFRjZxQSp@Wj<*N0 zc143K|1%6g$Vjo>fQ%*M)*Q&@Kw6S!2e<#ZT*k_Q@a}9Lm+<-d`J98(tMMj>lX>b( z%u*(>;?%{H{JYZHay3%0;!4tr&&GZ^B?fG$uNuQ6~F#qyfm2+24x(L{a1+o*4#i4OPbbcYe}o z#oM{C+WJbOgez_)gYgR*GbtLCjSWvY|ApF8f1%ua48$xsI-@-5D2Wc`cQ=~>u}<<2 zScCcTZhJK5PoR478a&)HhQjGdV@XsHjqe;<+FLn-9^d(KFa4*3twV|1M7({8M`C|F zMj9Mn7F3j`8Qv>r@Cgqn*_yo^$VF#Z@AlpCyJyGkMXbeuu6;7X;CyEuv(i&4Yy+R0lD5{P4)4;vjS%YIA6!i_gisq5#YB z-X!mjZC;SL?8|4<9ftc89RY2;lr@qDU5lZupdX6TIqj6R zwWR^?0s{l%`;al?;l{EpA z{#bI}0f7(c#CYW!)vCjg#HcTDK8YA&tQdd;GM3CZwC_04ZXAQb`ZtjQV=^^OnRv`y zN#jyH>dy%dp=`beQfn1(&dxo7$KQe9jrsXgaZqhHS`$}SS3Mv&OpNgh$@k}7^uHz% zkQe4;{2B$l+w@>6uUAh}QdqxwRmhZWu^u~ zj$&;3X5Ux^n1($RhnQ!&1r9W&&x73fAKB%%tgqm>53i9?n`<=|x?t_g_Sm;n79`(? z$r|k*)~kdu&2m46LCkR9{Sk&hI=~p5TO5Ku3G)wGp0F8WHxjp@1&JSrj*SzW@6M;9 zk@DLqtnqbMp9vuo{9s@X$y_rE!#XZFS>PaXB{Z1|z82@5aylFEJZk|QuhcCf<8;B$ z%*@hQ48DDb_ujTKk(($jQpXl~KEy zL?Nk@MQ&WywMOyec}BC#ova)9GWKc5Htr#HMTR!6*u5?$=$KHPcqVrwm+TF*yaof6 z3bk^ihS^u>JN0+d^Xom|SAyxNFWWId7_gJAzXxMx=YRT&JcTsshHQPD^BjdZ&Vijw zg6AT`!|Uo<>n4*?QZg9l`yEBT-@G9HmoUBs?AS8%K!8!Dzo(veCBgTriGhUuNTnAe zez46Of^F_>aK^b@N4mDLm4;6SXv*fSmoA6~4K1v!vu=qol^cWdeg{r*%g}US;zyjC zhP#;o&B45-6&AQt{WB=psnT+BuuCBg?d!jR;K#hV7n6(V>vTl7+VRCW1Ox9tE^YHT z=c5+Cci||3g`xg?e8iCcfr~WHi(gvw=*!iXeTnkeW-WemUo~tmyH-672z{#t0>drw zDkL3OJSpXZ;h_R$6`^Hcs-BEPE2OO%;Z{$0H*Q~89^&qG2=Bd`|U zUG6(iFxl&umBN$a(v=_<7e-C34^{~rgw<aNRhAkZ;-2O=;_D=Hvzs;j5dH+a_0o&H8dct|kFKz}eOME9Sj9O+LB zRlWlg>6}lS51pJ<-Ggg(UFWthAwPcPE?9M7rt^h|hquRfd4UoEIrNnt9Xdv`RH0DW zf)PhLkmldjV-_FMEJsHDSOf|s2Z!tWmq)~N_nk1xlFG{W6G)Cq{jSMhs%1s^Q#ZYE zjPAVi+1@uW(4aRay`00o$Ou0iz>Xr_qH zPa6>`kGZ}XV|;;-ErL^d+&qN&Mz>(oqfg$sBC4o3r>_b}0cS;FR>y6yk;;WO(07~5 zZ@$O_tCW?;yHBJ_?@>|_m5?kT8jKDf1yK^qoM0NSOC7JSC@dO|;xQe}vifH4B1N4T zWm=MGU|W^5=ZB_{#v-A+sp9c&#Ue#ZHA`EA{fmm8Hiu|c!p)M6$e3!3s7)y^r#4oK zAIn;bdSY^LaYI%Vlr0HE!ID0ws1UJZcJO!39xB?W_Z{-qUpES%aV$8GqzuVpyilN0 z2(ix0Oa{ct`NU{YX7LD;>1XFh;zj7nNs#ujyMpaOu5Dnw>?NQwp(l6^(@QHq`DvmN zP=Eb2&cTeA7XKF6wz)h3mSTe6(v~fPeqwOchp2V>lj&?#{2DF(N3iwW@JhT~5O{tO zVp`g%5+r^rRqCeyShZpMt~I-%t7z=mny^vAExyn4QekX4M6j8At*|bh9TpJ@ zZFUDUG80b_>8x@iF^_ebr7>$un`=(5%+R&jW{G-houLWbn~z!n{S(@^P>>KfQ-wZ3 zx^{K11O|0@&8M{)DzTb7*XfhCF$pB`vUIGdF;n$uNIG+m;$td1k+Uw;gsssW zAS3)blGuGoI`oFfD3RV0*21hqS`Du@Lwo{VE}CZ(QNo~TaVSE>AKijFje1sPUZK@ev&1up=`Av|(a|^4TQr^i>s% z>2hQ?ggh3Yj%c4tX&2fE_gHU>3#N#?yjfw;6XwYj7QO|wQx`$$BFnleW_cwhM1t?# zi)C5a3~6Zvtu6xJgt$Y0n@?y)N{gKdo%k#+>H)VOMXgc6636o+G7@~gyFY?}w!q@f zvttSm7k61z&DT7nI%@CYthjL-Is|fOM$+s#WM`OB4))vG8@QY_)DZ7k|DK7QglyB5 z-oBw6(r#8s?Q}fF<+eSNaM+4uq&%MtawR#1nmAd`+(3?V$LOhTW=edcd}}Y<=U=nP zyuL8H`6DDPhfL1AsAT9>D9C@NA1`s`59mIWcw8Gyu#-K(}`vU z0`OrnJ>f^CI5e`({hr2F(mNsN1Ie*EjBSvcyObSd-_aue~Q zuHWJhBZ3k$1A|c8W9I%+-TFVN_TNIu(V)QI%+Q!ikA!+=SNtWV_MqCDT6q5)DU7|{ zaYQA5p+Ll!y+h)Xkb?zks84VfPW1wBw}x_rp3!px`!GL8OqYUKNc8tIS3PCYGS4=) zztEA9P$!pf3bPSb4bdPG*)mYLD2d5q)o{1;6MANLj!7v!6y>Ed{MyDdJ#r+h$V`rP z=Jg3Cp^jO!H8C!SoX(BFGWu=L_@)-R0I5mv1GJE5Q8&B6>g>&j4Ox6g?BRA+(##X_ z!LYU$a>|SP;pTbkrAVxb^zbW;Wo&FoFNhe|nlqyHJjVTj92UoJ{nciT8r>@1eIXR| zLCLH;OI?U?1sIi)YaFX#N$AD}2zB^yipgvq;@pfSv1K}BacQ7jSz5g=56K#tjw>(X zRo0rRRbi_>1&w(&P_qbL<7aKJ5oDg<8+fdaW<>~RdCUL?{Rrd)`~2coR=>6~EVG6t zlPF^pm6dl9G-TLCRya!(RaD3*P&*?2LOUlXCjcS~pv%UJEGvHZtZqE}N)X(?Crd!5 zViJY`cv(6*leua{__FK6SK#B->7wi7o!ZU!qUEq%6Bp>;9A3U(Tw;_9jXQt)31lHq zJ~GK;KY9!doShS{uC6rcO?mOBLiP)#lg%<4nDw)AMv(j-g}cr>Gj-(EWOIgKZVzE> z00jAOfnNudRhOy|NK-chaLCJ7^Gyan+0%U}>kOH6z6Y@=c{9Ayu+~cE#<~W|Xgc!- 
zPc6kK=OMWV^(3)Kjo}`1Ad*O1k*(27z+?>arN$juu+y$qEQ2&KPq*Ben3Nx0;;b>p zi3@5XCwhDl$)1)_w4^SCz!sUZI@W-49fPiAiLQ4?$Okj4MzF8+gVwz{qnL8m;_>Kc z1zS20x}jvh&zE3aAA(%_TvTUUud3d+i89OY0P?r$fHiU*lwsZcf~K2+AzFn!M&(<2 zL_iAQe=QY+x77$hqWef*sSe?o}pWoT~V&S=*9ba z&lGR2G%0IQcF~Ej#%{c%?Akcwg!^EaDRkQ@%S4rcc->au{PNk??g#3~=JmI;crbWb z6$z0~_l9t_1)`j$djtLhJn((R+^_4FA8iSwGW^=#y(H%r4-lW}InEd$P*@lW<%6d{ zaj%l`mrvIOa;-J`Le=7{(^{TNdM{?;?sVq_dyVZ3+-;a0r&5$TkP~BV$ja zG&$f`;QORx)bzJP9g8D}@vjcD|p< z)7>yzLoixQ#OMx>t!#IOPphkz7gJO7EcFkIqCBp=VqO=OpZNHDEE z!b&VQ5^Rt-ihtx8Z($0W__y3kJX{nhyW9*7p6D5ynV34ewV>}&s7|NMt!K<I(eQ%Ly=RQ9Qpvz!Tgob%EIiGs-fXuph{JmK+W@iXM71TT*I(37VQh82RzA>c>c zvcSG8NyAe-uSVZy6~)*}b1Ft%kzWHXvlo|x!&ZI`0Fg|YFq6yV+4pI;vj@9^v-<9p zsy=0WXk=thN=Iii`x!N`hbRC?`Ut=~Nr{O+1@?5mTy{-0+OA=|;z=VpelhWsm723L>%#~*#hVdfujV%}5p{L-X~M5SC&Xy3NY+U}znVDodnEeazN(NSx{E4x z3A2*sl{(wx&ga?k0F<2Z%3cfl+{#}8;j>)@zq~PBHeB{(xl4z@4>+P-S|qlb#x{P- z%aOL!rS(lr+zp`1rO4P>$pQG~f^Bt6%#N+Unh!KNol+o&QlMIKucAlFq#Ww8d@Jf0 z{#!>18jtJX_`G1sfs;*1AZN6zpb>92P|F$9 zKRi!qL6dS$cX?zSMmZ4JP2l%JANc_l%KT?`Wk&FA8*yIqf*Yk9A{@){VgIpsik7UG z)PJU196my2f+;oMs1cOMec1e2j0kH)UUX}w6D+Ac11OLs7y^&VZE+wT?n4+TAMgd6fa8kB&Y znTZa{$s)(Ij9k4VBL#Kz>@JVv|8W5}m~+=XY>U0J4Et@O1r@iXKaRxeDNyfAqWT{b z1^o+@a&ayt%Gk8(VQCqcvJ35~Qk`KGsHO)8m{*sE!iwC(vm-Mzs0+glh0)-xIUo`1 zBYh==l?lTqz8L7sFfNGWT7<7nF?^(YC>Fner;I187 zPj!c13R(*f8%j{HWdFokkded?-Gq=owlUQIq?UQ07M&d*Hnw%l+1_uCL}xo>iMtqA z#|icHh^+O~?X$9@xgMHklAux!NH0fG{IZs;HfyuYHrYi4Ihulq1kulDA5JQg-IdgA zVI5(1ga+{#H_SRxR|f8Hy-T7tHgsK&1B?e9{2JO#-}dKAm4VJ-jmxQ;9qzA^l40It zF<;+KH@~`-GX(c>a|;Ooqsz?9)M;~PAZq4|ucy(Fay4#iy-^*DY-`%wE=I_BM zT+RdC&utr>M7PM@JCN+%{M%9*8d!iS0xc5H(YH;nOaAw3!Via@N!&=F6LnCTb$L3> zD(2|!p5phmBB1c}ZYX{|gBVPR_7GVR+&Qw85QA8|&z()ucW)$6t4lYNY z*l>V+wGGOL1IN;?tN&LJe7gtUC^+mW5XSw)ga2S9MyY)jiX;2BVlxM6Ek>_q1Dm+D zj!2W4X1^5bbu(_jwSEiM%z>$d#Z-cv0#{s&cK48~aD#6gg$hw5=DB zYM2~dUry$XgH98L5w-WSQcNNs8`W`1cL?>&j4a$f<(^u-Y+?gm(V$VX-)3bHvH~(uDo~92DJZTt3?3@o zG^lj)Y#YZ<7Y!DIA>?c;79on%O`kYDAFX^%Y5e`|WhI&Lr&2Dd`K9mu<(70bV(4t1 z(lS#jn7lK~{1YSsSlaJE98}T4*z)+}%k#pfB@uU>o5!nCCbknQCM1Kj16!mBNm4x) zhsLS??kB@MjzQ+R_9})I2&(4W#PYA$UGW`lsJWL^cFr1R-nMU1H-^fnZ{ZVsspt__ z!VE3f(@XRHPcWwHB`OvwIBd91%khPStFDo1w`Y^CZU{3j?Fi`%hm$lE1ahnuLsP^< z`*C%|SAOwxvxC^SHuD$SS#?d3gW7C>X4_`tbd~QP+AaK$o?*xA$=`ULFf4&61>sG2}0R{jlYi4dCH_8^9K$ zZ?0#{pPDIgeE?wSLhbqj$8OGOB8}bq`M6k67yTaSoU=InB?s{VD72Gr`;ZY7d-vOG zw&w}8@5|cO&Dz#`(&nZfFvvE3k|WSZ1Y*&)uAGI11#}`!isk#^CBQmpp*?(R9h;gu zTu|39tgD;jO>ZP&(T2ryD9pkA(SJKG;3SaHYIh zo50~`zS=*Yd8K*3+M|z1_@&CRY@3SsmLet5NBKsj02rL^CrN2Q-t~y^SX4Sy#fX^D zpn`g!V&-=}YD3m~)?u2vi&V@R^CL|~ElEvTHqjZDKn{M0wa(x8*{X^p&depcw)O0P zJ|`8Cubbu;H(;i-WGx>b2LHj~S&|}jI9o62HmflJ`|4kyf{YEBf5>~Bm}o&t+RYtM z*c4T1NPf+|>j9q+c9({BbmJ1w^IEx0u*%aHwj}bq)_K-@GMb0gmJdvbLO2LjuCR(Dn>vMg3) zG<@zfIV!K}=dVh+Fv-VX-UtqbIZ>#4Vm92c!o!=RthCnBERURH%V7eMKW?SAx2Lzn zv$>;HY{6-C(Zb*5w7e3?L|d^ONhY5<#E^vlWSLN4C<4Ja!IL*#0Zy=gs$Ox27nq_fY0C>&?D@Wt81(Z6jB!>Gsfat~#r5G2RgWkZ@N-{#P5#)h8Ki5o z=f;Ci^*laf$qikR%+Mc{qcW`FU5;lRk3BGEvwo;09nM9FzMWuupp|$PT>R@-IQI5X z+#abWCc=(D8TN6)X>2jM-$E>THU|fN*32f`H9$$t(=F;~B**7TkPxDvEYO3>NNU_N zf3H`^^^Y?J%TipFXXbs)jTFMQEIxI|>^q^m_fXL6(;q}lSy)TKFGJysGFmXUr+&)kA&2joX?I&})UlC35-NIK?ROmF@fBG&? 
z=#xON-B=A&dU_uF=vkg8rR18_x0SL4&)r~1(`#!HbH`+C&IfjnUwfjC)%a7R|IvKl zSfIr0>=*%rc_sjdAS=~*%llT=)5{C2G7Jyk3<3nWT~P!pnFsm3r($G`EVZ)z%fc9B}Nf%$|`e}N-Iz}WE37Hbj+EZX_!t5`UoA(PRjTb72L~Ikx z1my=_TJn8z6T;^VZCbTzsW&QWPnX|L6B=A$3E{3<3FHY$hu6uwV;xCNDoRO2pVT7h zRTCC{x)ZgVlauKy@@u;@eUWulL`}Vr_sKt^v=(nH1j$#{*5T?rU-EuA!@BWWSlJfd zSK}v>WKh}iJ^kQDrTEAQ2dlHhI_qP@ySQyL*S+{86-OFt%6iDlV}aYat0Y0Sz@y}L z7=z8*i6iRAXeJ8(?L6FP#FpE-k6mjNZ-IN%6GJT9XvJvP{k2UpK+iWj-kBy*BK zw{-4XVO2$-SvueQ`p)o1BKPLoLX4hHevw2L~NftHSkH41^J-2wr z(-DMRLKt{%)Sq@d|l?63fmW6XEj4yP6zNnl6;G3 z$2l)R3+QG_&mOdshZ-a&^0UKa% z*9s&kZWnO!SB5}d25^?*l9Cz?J_Sc$GCohVC8edE0IYz&1U(Nv5JWsKrrnr*fDKt~ zIY*I1uPr7a5&9s0^>Ca0foCr-$GxyM$8Xk&gNy6&@%BJTPr;P9P^QkfDn%Z#!ubX~ zIi^X_fYCLajZT+urqkghP(Ur_)N)`10A3U*scbgexl2?P!JFRSeSejenZH750nD+1 zfr0*kfsMjApO$gX>YC4HsLP)_1H`73-mGZ1+mLE$7eAcb1i^5liRm~D3bGH1{V^B#)@8Ib6p1Fq0r0e+00`!)o?ig-e&#Jk-$5epl~c+|Vlj*@Cdrp4}n!#~GR+g5hNFF|TisS{Y~|L+q;%Kqdwk z1bJx3nX5SXO9u44iO*giK!NF93eg{gY&JjU2NV?ub>&(OR*LcftvY*dW#unDqtSpi z54zVTH1hu?7PD{!d5v^r)zsMR1Xoux?d8P(E zi2qTD*T+P>R6iGSQdfTh*o|5Q9(%~GZEX%bn7&bNA$>h9PKWM$vEuH({h=^k9|Jzq zZ83($#C!(W(RNqGU*3)@lLSqy|05%Je%ojr0LbT*4XO}=&~yd{U32>S5pCc zv3HN$<6rqVHzxy;1hDS3x&o?~`Sj7Yx3`UK9sVO6OKQ?#*r~NaTR``Y7wICo07C$H zlDRo0(wA2zp3HBPWrhDg$~uX%aPuKRBmSEQM2eA)CD9uLn^E$Hz=gomz3t4?;E$=I8GmEE8o<eaF1%>bOmnfn=MR9FH@$vm2C(Qr-6C#bK?N|C z>9h~zYB2>74XHOy<3k|Pm#>vSF&{na0fua%m5r3 zki)P>L!-@=9(dwvjak)Z^QX$_otNDf`TwTEh^MsQZUYV1KoD(K_bvp*C6FtQpAvY9 z1h{59_FVQYI)Fjpe+SSy!XH9{02lzAcrZL)ZGh9W5Y&KIhF}vh5dJrmhE1nG!jv7L zgBR%fBi+G7-l}0a!TFM(_s4xB*e~Mc)P7n=N-{m`@g09 zyLX0-6p-via2D?GYU60_DVA%G!4SSQ|5)JxSAUo}I3VW=G@(2`cNr5M@O=&-I?1Y@ zFAApshqaeMUr%wR8dd2UvXfc$8x08W4*56p6+5S{xE*jz<8yWwwp~Be!pNQNZ@+8N z?wS+jOyx(<&o1&BQmhN4U0%i5Ni4ed#-q7Zon2rMY-VY0#;xc7H@OR{1t{Ovd1sR_ zbYA>%-Fd{d+6qx1TUBRz<(Xx>XfHfhQT+{EQk7`)>## zdkWXbB3zt{wUO7}!|>)}W9}N|b2w#kP?Xo$Z0o}jkGyAoL`jPc5{i=cNB+FhNC;e(| z2z|ikQy{Ldlg-!PGEu|B!=m&}|80b~E(B|A*X35W&pkHL>QV1&SSzJ>YGh=j40kM5 zH%p36tY{iQ1OK1UbWx?jz7ZU))yse$T)3c~<{w^~qH;c-Ik-tt{3nyXaE~(vnCt}= z^ey>G)>R>!Y^DxHp2L6ZR$X6oDiRKikOGX*X#;9%D}xix;`_6t`@es%PnK=E$vHBC zjgXw@-`3;g;NUP_4Bk>eQ`Cg`r;gj3#DJJe$S(5D$+G3yW1wfu!sJPomr3_1?10^0WGDQxBxrlw8{{LC*DT;W*@NN58k?4; zeniv%r*@Ah(DR6d=`~RV-z$RQsncdJxY2|-l#~;M>XmTp>}aShGM0a5WMi+@_3Abm zxX6FT@NXi3EQyE*!-X4GOBQ!le5rq_$1ELbTgx^IWKUS6qAsQS%0Ixc*vdj<1EG>2 z;?-oTqKWOqb;R>XMSCG7SWL0cPQr`FwMlQXfRYUHy@!V7&J^pw0ovM&9D;g=iX28# zDLzJ5YS-X*t6};0*Ep7v8rgPnv+9}zn+_amGH}UK%KQSz{7b_~kQG^*ioO2?Bh?(ljV&65kdh@g9uM{5Tn+p*UW zWIm8h3f2tL4YPlkSc9Q!4MZw+Owy4 zpIQ~JA9P~*PJ{PnN_Uu@n@Oz~FugWUvIIOYur`G?bbXmw6s3~XmL}6)`kaL`#03N` z{fVnxcZgdtwWn8l2Apa!pHYUkeC#70vS4p7c6sl>gt0xT14omjZtgD1PEUeBf5Cqe zScNYX2?>f(FwE#&4u&1o%A+nD7Umvc2xK>%R}d1NQF+2sjz?fmFnKh=-%Fff$?dYS zr6y%0#wkP#D2Sv0cB8A<O@hc+x(@B0Z>w!V9+h&)ejZRki~qRqUibdUrDn_TTaYe@bKrgg#O%%ai%2A|}Xsc@~Sq!dXdQLKD8 zOi^)jzuT1OlL0G_-F$9NHHV6V((70DmZ5#gFPc(pi(%qNH8C{%n zzdUV78k$Hnc0?mwW`$h4LrM?RanlkajZ*5eUNr^(DQHx`MvT$1C=>MrW>uk~1e+){ z)8y$)g&#c>q}tk4xxwvcuzXu!j&9i49xf7vQCPN!cV~=qbM@y zUKqx3ES3ayTuDpL%X4Lz7ja@yb&YRiVr)MRYxU=mgB`mNJ;5C@9Kl$ZTXoRl2G;B? 
z8t$J{Zmr2#JrbjVld@1$H>e-cXrm3SEX#XInqp|gkgAntqEMN!O^mHLPmpr)we1Rm zB{fEb`e?Lyx$5W<$9_HU)O5QvuF1F9_o z%De*X-5wVPVE#BvI@zqxCM#*i1!%=7m?tY0Bs1ec`P^UaB}iV-zA1eNXPx30shjG5 z+2~@C8e-*0@C+Xfcb{Js+U-Y@^Wi>!dt*yFDyFMj*OT*bgWcc9RxWD8E*mY0V94jQ z`{f;7x`I3blv4Q3^cwuQK<-zP{C1<)2I-s1tb1RwB#1az+i3}>5j2BfFg#tJQY*Ir_ z(5x6c`94HE3!fHjLQs$hin<7d;e-CgDwc~sr0jdo`mReh6v95|x3c<@;7>o)^9NNXORF}9oi;k=#>RseOmBa<& zL=~oeXjE%_Q*;hk8mSZ>K9&Z-eB?aTD|&`!@I35(E-Em!NHOdNz00MMq`MWgJ#zP^ zkwsUUaHUW+T1A>fD&^6qZq8MLa`YK8kuB=TL+O!Qh6x=#qTGaVKHPl=t32f!D(bS_ z0j>u|ddUj$Ds_28I?Yq6;mYr|40O@VgJRF6(w4ggsfeYXQklR7HAuo+zTC-)GuwwNFHa?CTR27Tx5kw5FywFBJTqUD-l%V4n_YfHY10#s7`1D&qz=!zW`{|- zc4NH0_$Ac(;1|*m$=tZ3=-Tcab6FzoagM}zof$?=Ft-SXdv3b2rz$wo)}dlT1=hJq zyI6TR({SqHJm;qIC699VY0JT0`?#G&j+1~$wA86^e@rsvG5ptvaEtO=nU%>=lx zda*oHTLy;f{H+A=E&;-aPzUXIe*oabU;XS3Hi zkVLo$@S9o2xd*D7*Eb_9qeo~rQZyx&%(HKB?B|ItZU@^$kBOQlWytw6I64(Ehe}>H z^~q*2>X@>LxtZaSFm}!BLa|yTYjzL`*ST!9&6EF3DOshSfL+a7Ja}yITNj6kb3ohq z;TNO+;02y^j2Lv`ro@mNsi8oe?+c+lCukWbOsI0V@qv_O~nuO;`DxQN%?wo zy&fJZ>qx2^=A(x~r^8p4+9sPiW+#sxt@tP{eW84Fi|{i&N9ozqH_)$fN-%-v_0HNg zE9LMw2r>jUM)@^V{s=II@oF+N=H~f?<+iK2!jV4lw(DVAfCgKeSf7-vu=1_ns=>&DF28k?(MuZzxZaLZ#}VJc4~gFAOZU`YFeT^%2f0k{iVHe!sh~g&HcTB@T+OK7xgR( z7_XjJ{9&Dm)eQ>OTOXG3B}R_U0mIm6#!`!KTjB;%vmN7@J3_?NDe@8f%_d@AyEUBt z?gS{UDYX=tC3YN7I?esbj7CpuNs;UxiM7&FcQ=|6&H_v=8d9S{#xy}8} zwL0?ij^exe4n@ut1g|-=`KLl2PDBT*s)UsC(DS?$aBw=2S0)05PsDT{kYvsu(@17w ztj|4{VOv$evCwhWDZZ%(j|0K+2i7gZF|lm%^bcB6Qq0HA$wMx$CztoLB`Iop+14MN zw+&6=SlHCt+fyIvowI1gE>84`iHU4%mlU(Fww({fm+20-ymp)I9)DrhrZt!JlZf|* zwU@^S{TEyuqqP!R1XRmq!4Y!->ArQY>uZ;QKi=Aj_2EN6qY&`K^U7_j zabaj=B(QFos`7DaC-x!&1YN&k>=vg*+8xiza-v-Ov;v9#c(i!m9tc4*ca5Wwn_`RK zKAYW&neN;#0^W-O!NNg7L5ypY1q+vp>a4PME{+>L@X9#pgm*vMgfFkI=x33(cXpuR zDE^e?^t%`hu%HP^)1?wRv~uCe#nBvmO8x1S@^okR)Wb-7wK%;(r#>E=Vul4>H+@}* z%q`9N+n!?@<-SnwoKc_N&d2$01y*{73r=w>MOGLWf;(h8E2id1#_4O|DdG#g$=37? 
z3=HI}*jJ+M!%`t#<8SPq?P%oU@>`{#&u7{0`HXcW8A+huT21dDZp~?5dL7KIBYyYe zL5R~-UX$~pavy|6|68wQ#RkLC!Lw<@TTSN`W&`h*B}F_7MLl=Vlr=cx?twdglC3GW zkG*Jr)9w!6kO}D~e^snhnCrDxagVoQ2^vVIeM9)GoJU$(dT1PwvCs3J)GFVe{kcck zAoF*GF0>t~88&Mr<_{yyF70hME_@0YJe>OOzsd#F5HLn(t&D$~$sdf+LbD%TcF~6& zIOBSbV_iXn*8GHyr;WndIx4JNqPBi<$W83emSWo-G-duTC~`vg(+{*u-}He4g8$d@cto%AcN|W@d>!J?94Ihj8tfe|5sf-w63R;?uCal{JLQ zILvxEy$AfG(Q7Bcn()n}IDvu-f(Ns$8LUq?szuFhnW4xw#rn6Zm=4UI(^D;;hP ze1EW!V(h5ZmilH_N@VcPR;8{AQ>$I4#?JamEKL^<<`Y*U`{1>In$iaGaOV?=CAOmC5_*-Pn>N3w zKRA}j?_2HGP!q4e5|+=?vQXWe(s0h(@*qukaFidEVY5(kq@$${?as!|!Lx+do_7ia z9*-xDo{uM3g1WhLyg1gTBfA-WY3?{AL^+k#LX!I5{ehvvi_=v=yw~0o2qSi@g2w4b4F-IGz&eH%X(%*+Dm2=^nz;dkyTiqq>$P zYz}L7seC-s`b+ZW%*w*E$_f^9^;an31WUa31w|!MnI7pLWf8NBZaS&%O2~dM%G;t| zILugsPA-_(p%v-DpvX@OOQBaA)R1ZCHd9bRu+SD&j`pD^2hmkGwInZMtXjv_Q1`9v z1zm&fnk4t@Z`PKp4M7&S%5HA2pknF~ak++Zscz2+Ufkt<_57+E3Z`eK-ll*up-mZ0SIF4qY>`tZNfk{g%{t88oh}GQoPpaBfE+}ZX&|(2<~Hax zzoMoqX zfhg7U?bUlMVmBA8rS>2qGwB!nSotuAXCujHZ>(TJ50RjGe^SU|V{4mdttOU0F}aw( zPf1Gk8mAoFtEH7vM?uMnUsYXbR-PJNvLs^Q;NaB1LjU+U0&}rF;^uC@M%ET>BL$at zRe~8|QdaGw2yOu{wxTx6?1H?r1X`@@4+nllCFeI`cQ&mipvn5uASIp;R2%FY4&PX8 z!}9aU@gipm>v+Q?^CspbY>r6>j8AOV)MQ-Bm^WH%pSYX-k&Jk4Z7CPQSh=w}#Jr7F z#+vli`70oo>+k$mjZcSKKNFLaWmbDss`(}*RL1Xg(CTbT^IDsO9hBgdMwb>AR<{VL zUZ5o1QNJ4@R;kXd-=nrYaW@!qrF?Wo>S%t|m8-PxgOybe$mMpT-kL9Gbjh;hB)_~p z?UcN@BtzR9O*RHkIyrxvU8LM*wJs_%_*0A5!~W4xUQvDKdj73ad8I8%nvP(}!m6^$ z+P<3`@2K0`m1B!VMQb0F)|M}z>+8w6=BV2OTe{_{jIm6}UHdfCE?6vl_cfmBCH>pakPU9~fx&7y{k zggV*@6dV8GWuKUn^L7taVi}!@!lPSVky3du+jQK_ zsmA9LTHHUj%PX;`FE@VH&aKbeIRmFGA9B-l>6u0^GFb(^-$NV0#j4R%l2WKjvea-&hacim7Eh+@j$LYP zbfsk+X#v&13f{#{G@FVb*4=i_Ln=N z8a$BtzF4wQEG|@+`?_e`@GU_t%5G7SOq}68jZ5rl#Uzo*rbw&B!h+HdQ7<~=X(g=WZaU67M1|9V9^$# z)`N%{_UYxn4oO`85~c1ZM>72n?`;;VBk*-B!-Elbf)M9yX=m#q^mWZRwoRJCGFo*@ zPHYP4(q)*{%M$PDRq6hOZnH__5c;6v?l8GVAeYJYl!Sj?{uWX-zN6zuAM zvre7=*qT^g|BetKk=GDJmK8QNi%UydZse@XvgBr4#JD`Lo60h0Mx2wBMAj)|a*O|o zC0(Vi(4S?gQpuTQr6$nURo^BoI82hSnz}8qVU|TwNVj;UE}mRkSN64a!-U%OaG`9d zsteZ%&ywZRD*NIvbzPWkc_*CEVW?#846pm-OiP3<#^Az&hJwPQg+N22E&XCciUotD z0fVBgGl6S)s|68;&wI4G)74*v^Sp;Wp4&kr^NZ8sN2g?S2+C2swYBxp6}yYomUemE z^-}Yb4S{uzpZ|J{axHIn{`z{H9FU_SM5wQqK^SA?2*!iD^uVGKh8J~P&0*DR>MAL9 zrlT7e8XphI596$`KUaNum*4ueJNRV~8P-N1c|lA@b0V96osw=^MfsA-Dd~R_{f`L# zM+@TquN1*-{%7cv?Myl0Wgd!w5Z%H#5VXA4EXJ7vQV)5>Q zC7j5ux&L3b-g4w~mA1CI++9z;$Up|#k7yn7BWufDK)Lu#PAXT_f z+Nt=W+}<}LHnzH3thR0zW=Kewog0>&lauoz_+l`2H+0e#QP*YRm;^F;WL-_(9#8}RwMo#~RmiQN`oGGRZfD{32)TY{nvqVIva0euc z7<`|Z^9g~|%*ZxP|Jqlhq!GtBPr-IYLmwXy!{L7sv_&b|-~d84{--(njt&H8FKwo`-6wy zb;=^~B}Vb^Q@WPkGiQqq^V3;9sW{iY_lCs za?;54hx6r~i}P=~;~#{=+|8x^8qkvQiJRG1r3-6{#u4CBbjwT*Yrg!G`BYTvB$1*z z0tOi{opF6=MxQoWe|))VVU_KloHI%e*Zpn$`#rj153c39rO{uLEy({^wW1c7|8>JZ z?fkE_tKW5F8B=()&yEcgP)lc{{oN#}ROKk7y3STTwH^vHNGoNA6sQ^F*z= z`D;brV5WZu#~r)5X{T8IoOQvmHaRee^UJbbP`su8SA3NCWjv!q6r+ygvJzbQSm8%0 zr>@u%p>Vj67qfD4M=?2agDUoWCCBq z)>rZ5^lLF(=TL*5TA?3HpG)(cTd#~K1OpKPqH@`70Z+Nj+Z=sE;sA+ubGfoR6;%8u z>h2yDC&!aF=ACI;Lcp7)wEHDM=C3C&2ih30D+v(nf>wKUEZRr1w$L3DgKqA_13cIg zzKQ+#n^t+84;(j#Jdm}Q&DS~Z3k46uH zk)y*B1CP9%ofS3*1s!WBo{aH36`k4dV{$?l`21Gb|5}rKBPMlVzxD zd6A&qhtfHBcRFH;UVQi&Ug+hVUd!f} z2q619+#;i`i|+flbnm&O*?if@<@5QXUf%I(-`x_jinUu&Z0_req^SZ8GXag$)9YVk z>GSy&{MOr>v#r535C4w)`Z7-wfgFEomorcQ8$XZzFjw5Q9`y@l9tV)r4FC;G+Y?ZK zq`)AXzT4N~`!vm`%vE)7Ih!<2vp%dJ-;>vOWMCeSr+g=;%a;!gF42fe8`^D)(Qd z#tBB}+S@+DMGY~sWXYku8hT>km3`;Jk`q7;!st{!Cm*T6X3Nb77jJybRB zk!k@qg+Z>scHr%9ZE%2|>U+;?<_CJ@IE^c$7l27mnvld6soa_mC4b>eiBO`6EZ27 zNmE?94)MVbbmfXi!bz&HlwUHu_e}+Ef!jtu17JoN*ofEf&s-d`&Cy&Z4Nv87GF0m> 
zjfERJXc+f(rCg*KPaya3(d|wFx572YI_Dq-i_P$s5@YM-Z}>Rwz2IqG+rQW%ur-Cz zvxA})LXv9Tnfh;$s!Royw!ZbJ z1NYdw4L9&{2!Z*B;w_&m6}QV2g4E{BRs7mGkH9>(vx2=DOV(=)c10|yh-CFEaq7u@ z!^&}2pUh-*mM!oA1-%eZy38F0`K?x+AN84G;>|M>&fn7*vI5#!F=qmq@XfK73$1@z84>=l`D2U^v>wu0LW|HL`( zY>7VftaYmk;@pUMH?_9#{Y?VTf;~jv>j*@1PxrQ>v@4A&v+IET0#jyf{cdoy-IwPh zMpgtb4D2Je>@qunIpVLci?!MVdY&Ko9hsC}OcD7y+GW_2q6p%%#>|+qE>tr)WQ)qrklhIT0IA2zI|SgzYL#oWB5Le zmN9d+;Twhe=aIGDT>5!ELO-dXdgu!N#gfR$z`&Y9s)zegO$W6-5KhnkxLf**nAp?P zm0me8eE&ZAK-4Q$AxwT1@+(GNA9rv0h^+>=R&U)>_8@8nAUykoyVutUN!1Jn>`v0p zM!L!gc3VR(tVtD^_rSob$!Z7dP`*rvwcm25zYK3>QRvKx0Sh8Fa|MOkEdxL=Wfk7a zD_(4@`s-F<-8=;%a1veVL#QOh>ptU&6?h?c@uS;~Pmx_dfh@dV<;r;n3`Yo^K<<;^ zKblBl7pV1&1ZL@Ai44+t;J4a#0y&z`%0@^V{Uh299nn88Cu2nXtN3kcHuJOoWVjmK6c_3&V5j(L| ziX_kece3g94QHkXj{`9Gof)$t#tvc-&?VC;y4s1~i26UeD`1NsdSGt>(Fc>POlIVfD|&@*FAOpD^WhdZUFo&a(tAx1eSQ^fKfMa!Cjsd4 zd^!*s+^`2mWf^F7T@J+5n272LR)vfausPA5SbSkSUY7}BYKEu-@|dipo=AQ7LX7-O zb>R6Wo9YmiGYWvAWq9}Cc_{|1_x$BR*~us5U59TSSK=I~w^`<2si>~btTir4EpgbY1dFrbS?MtqdDBfKFY>!zwKBNqw-r%<_q_{t8Zy9DS_quhFpG`efalk$yY$IwXFZ z63_oDHaTmnk3vt=)M}l3?=i>0@gSc=l~Z8uUSDfPc4b%oy+ki2Y3|uhy0&2~q?O^;d*@!LwBiFd*_g_o}Z?0n>e6A;mhRFk_ez zv3dKJ)rbiqh8y%KKhY%>0_3{vcx%_%y~+uo z+-#@k_q=;-b72~z&e8&IJ+@Oypklmx_qk=H`UmZG6tmn}f~IO5kN?m+2P`Vj87rw(s=B!MKro6S!ho`;xWL0NlmRJd#J#_*vI=nB5?Z_oCi zO--Qe=N$x?%Qbzu?x0##EOHWaYT0*E@b{XP*dFd1`+~RISc_L!^4R5|%4Z67$jriE zb+0x8N``!H2;5+beig?ZmmlapOG!shXH9Xdg*h=h_u~w2g}hExXm~%Z-u?Kv)Aqiu zM!8Z&HbHg%h0)J}j&XuYpC8xNIBg(#`f8pSY$Vi)xux;+!;lhvU+7AqluLiTQd@I! zMK2P*jZZ3C>0m>9Rrpj7f9sOLuEQDAY*Md-3jInorUJQl+~VQnq1i%@{N)?n1#fP3 zMXnY1?bn*v-vOw~#P%l2Mu@>OhU0B1lU<>oLj?{h-$?v63Vjn1wxDdc!-Tu0h5y}_m-0n5fsxvKIFq-F0iC4ejh?oQbArB&PJu?Ynuq3Gu)O1 z5?PC0a9GI?;zQ}Q7IzA5Gxwb4nqu-&pIzFGpH0i|`v&7@OR1dNS&V}NCHX|}O3ljg3@jki{S_#Zxyn%`>Pn2DKdxA%u7-%OBb^*qk0OyV(W z-^0!Pscgazph3Tk_ccc<_Y24P6Z34*+7v#O%ir2L{cAjCt|muNa&nXQx587T7AihV zus@#)r)o#Ag@ahT$Q~4G`GIlH&+mqlqi5CMsAO!&Uog#zR$1k;YBqQfeXJE7`{1w3 zS#Mpan$wEcaQw{yy0a0e^r3i(x8>NratmYA<`x)O(>S}oJ7H9uF8g^9zw?sCKp;K4 z3``_i$0|^+C*`sNhe|3U{e=INF6Qt=ksO|U)vfk50@QX+IyjbGeHm7yl_pGlI?SUX zraHbLv3KTC8sP71XncF5VDXd@YzZ5Vxl!;|OG9u+-hH*_QR~wKd~};rz@gB*RUTxt zTzL9Mpaole!~Qav1P!`l>_tD)YL1lSs-Tv%!{&i;rt^IVZ93d=?=0RCyVlp(LEsE{ z+);;*1phlj><^m;)Q9)-EGv1GszB}jvoX2wF`#|VKmT)HcAP9tvl$@U#uCr@CS&&) zJ`_qOWo6qO_9s|S=NET%#VbHjPz(45&Ko3TE;dnewTcLChPp_f&bh`q@9`7XdJEKx z2p;W3%xqYC1E+ki1*1=PE>?W)E8XdJWc!{4!8x{@Jws zr1ukZf>>W%g5bNaHHSmVv`yyWW!+hB7+O8p=X=4)rHRd|_KPKz>T~b#K z7)o?A1)HqynV9y@c81E3N%m|8=R9{TUO{aj{g4PeFIXvP>z1}^oDN|9^6PL$aCEuA zx){;TxnYl4L$mwIGy(sV422_{-~9e&2BlABZboA=&;zC{YsfMA`wrL$ghFv)Ropw?UF zr!>@#f@47d)D_s@H7&IfP@Vcpr+-SO@e#v7nIhCTN-bb`1smBZ?V za=PO$B?q{k;Apwc2pW;*3sc|Er{hml@nJg9}N?*qW_t%~I^(@r#iMF95p7U!G-rQc@@uv`U}X^Jx9+X1RWZi~In$?Au8Y-P zLrYWO#n#h6`1F(dp5NFt21k((o;3-8P7p#xf8&z{_ePnE9rugDHG=4mSw3428~LL6 zzq2}K>Y9qoom!D*K8rTUQl*I6@ULV;+S#RXhpK>~7U|i_O5*47y?wFs(K8okJbqB( zxFh0M)Banh4;= z`E(pjd*HX`8aH}^06@-WA4S6NvH^DNaq5l@Z#_MM_&Vy}x$v}IHUsl8p{ngJN%o8k zRBcUGpP)N)7$SoTYeL;^uGc`Gz#zA6e2~k}HBZr6o;ELE% zU}A6%aNvnu=gj}cqKUAjs>*vmPxtx<4(dCqvBb$>qB)vPJ<6{1_5B`!Zp?c<=;-z9 zDC)E^+u@iP1tA4Gv=fS~=HoS|!vv_4?ZM9aZOiStxlPsX2}IKb|6(iqJozwC&1i~) ziV8zuuz63$X6gaN(nRo|dWC@f-O#Ijk2^2`WNlJtnzB**Dz7peD zvu4Lw%q^N~#|7izR6s+?Z?wW$y$~FvU?k2WcH4e&tQ?BGa$gu&JV|_fho>2&O$pjQ zWq5{12ipE_ruP}0*?iT(0)r~f^=+9o)gvZ$*VNoL%e~{b?Za>12sX)o@9DSJx|&cd zVo-jIO@l_7%H1(AN@Pr(c2^&gvk)0d8Z@d@qOaS7Ww9vQVzaVm)&x0@ixI-0OIdD?fNy#?m_b7p5#{76X1*qTT2 z*1GJHCI&G1_gT~yU8V5yIz&#lOGyq$i;(lX#wp;|`t<)czALqFxZ3h(N)kKg=Sk1o z+d((DAH($BF&7|ci{SzI!LS}1Lq^!D%vIoHqGPIr9lnp*3#*jfwak^GdX 
z0vYl868fFDPcvH*Tc>_f6)#3KTjj&9@#Te)ig?SYNV+YFO4Zn!(kD&=ygLE%5nELy z$N((!nxB$^r&iFZ`)<#g>SZs`B>!Bqsl@@wlOX!L#}=n2ZU6Roh61@nq#n7+-6qAo z!Y8iAZ6*oxH5!j&M&!3T?&n6%UA-fXSwWx;RWVS4o3N*DLPAATpZ~cWD&=t8XcF|HSwaq+O5F~yx5~2*LEh}> z?pyMfbyRzPLk@U{EUWmMd(MqZT$qE{EeiQVbse^#U-yDKEeQSNWoFPThNk4pF265D z%3OP+M!V$|@(;%n-!%DZj9jTv+oje@psU9&RgGxjVhj97IS6zakTfcF25jVaa!AbW zfChWx-24;$xyG1Ty3pCKo3Y8g`cVf6?qerj@YwVciquX)$@9X)8kO=ZseZ1|*??P2 zWQ4CUIh)ww;R=X&w1z{yxE0>%7-a^MA10yf4eow4FKph)i5qTO3xPEAVYKe1rwQ_R5?(Nkhm(|ZC&+qj_e||Q+#K=-i zd%WfvIIHC;j;5#nRbFETs zWLuaCSF6zxIThe-6bJg1wub}%bsVBQO_KXoJd5LJN@R5qW z7Sb`YCd2f%z`UWeTkFaRi=YWM8f()P0GV;%MVQCc^@ouOj;8O!@o{&$`W@gdjpozgoaDXS{nGMYF}pca;t6g-!_jrk&xckBI#dL9sIwfzf0`H^+Ae}z>X4P!F=Cx@L? zyvX!>jL4^3KH2TgqC2KvxF8Ed+?-qENr~W$}HV5yTdF|E|T?E4HOkSKv`c7_T+XYYxDBZYz4T$y>9$96v4*s0?4?DQULJ*(4b5r8kQ zF5z$oYlE2t%Ix<2$iwmxNF3s8IU;@7QX0`{g4%5ju{&=vck1nh71n&~CFLcyG0@mO zHeTDOGi`{({J|pEwO{ZzUx1uca;UnCDacFckr8I#6_iyQxiBsIKOf&q)Aw474F?{X z8AhZ+r3vAiK9%v#Tnq|N6V6XnTQriV)H*(~^ON}i zty-{9inN1(_1wqv&C>Bn^0GaVKeVV>VzRfHb`_F`o+_9890kx7?d6ebbtL3Lh!VS? zv3%|p4K6c%9LW1rii>xbv%@}Yx4S{j51W8#pttG!3*3~BrFssrIM!tiVz zUDk&8od@}$o|CtinIW_1i??)%k-vS-r(WY%H5?d!6BOj1x?n*^ga6U!fOZRP;&9c` zmG0~;a`Qy`FnQ-Fit7_Qck7!BKzm>1Lo@=gHnHWd7jm}P+2Wpfr@3b!xU`9jn}oG+ z&S5pjCjSi9Z;3YahHViPT!8ox^!k74`9oq^Y&e(@^(zoBfdG)R_lH1$m%R%w05|Cq zmhZ9SEHpw^H3EBl)#Lo7YY0_P0Cf@>#)xloW!ycPLP+%X2W z44(oXf-H3J=S$lzGc?r=nJf1;R1%bYPq~mW{niy^6nmU?azV^jNa8v?bT%Uq|3;s+|w}`T5;Z1%_RW?@|&U#+x0Jwt|XE zd!v#-pN4RmTpGG6PrtFbb|*2+aKG#2y$h+PQ@N-X%iTbHopO&`l``9^&%zr(xy!za z!E`_XzVv2f^GS>kLFcMEy-_UiYTl#RgS)NqyQq(G0Z}a~9Pblpd93v6k9TRoi^cxH z1?dqfLL^8jG%^JjPsQy+pZF9QboF>1Ep_c{Qf{KsS1~oQ+37uwmK4e=R{L6SA<5Rx zLvN1m4aM3rT-`rUsL92-G$J{go{UYhLbq~WX=3ypRNk>%-h#aYo&=Lm4f|Wck8;6p zv(`_%A$azNdj`V*`<=8(``uDtO;n}mTz7RCfGx1i`-BeM53t$d0+hB_g#XDMgvN<2 z(r1$dg^d@Y_P?QJ{t4laIGp|z<(H&rtM(pc?!#@EO@_%SWeu1wtSqa76zZwn7?JBe zp3juR6I5@;nrp>2czjsY$gfSsD<}@fixC#FOk;#C^*F!PH1){&HUtNtz3g%kLQ-w3 ziG8EVtE`*eymNH7j#GQ=f~(gT9tB8yAcP4(37rKdSUi}g8F~MS0u%`cL**@c6yr5i1^HSwcEWO z3#p^mx>#5>UxwV0WX#7wewl~umBkLzCU%#riH%L zMSp|7GSp59U;KPZZAs`av6 zGWioWGAfQ3Ff#rTV~V6~FE)`2&_$ni-h#k%3rwCfDE=a`Mce~V5O(63*}I2gxA_sj z=L#yb&-hYIyufJNx@V`a}p`^3cS@}-;{Gne4Bj`@9x zoK`ad=V2B?V`}HpD43nnQ`0es`!_8}rCR#sTEny~A*)9IJm7dYh-kCcEeeAevgA!Rb=@%5)y+!CwZpBt=0DhS8qzixGeJk@K zgfj$#)C7{#u1Ca5!4G_8`Um4`!q?hPSu+K?Q6R$6XU*23_z3jL+z@9pEd{K@9SPoF zxRCXjm1V_~t?h2-?CsZUkhpO_ajW>~Hbm7q>~ zgJ^nVAGKZ5z%p#CYl_bIyGylPP>PEomAV_(_XM0v^mbQM6BCnf7bJ2qYwBJ?(jiT) z&@)h73Hr`cfL@{&@LFe{``-;q;881?B5&UUf$sfekQce{7qrG4->-%7{EB-(zswPg zXKQjj&vDSv3(2fP7o1Pq=n@&Vv`rUT9b3{bv+G>*HTCh#)WMM6-2VDCArXAB#S?d@ zX|dxRA%9geAF{Q!b@2-+tZ`X#>-j?dQRyJbsnv|Ox&JZCvI|FNL3U+HrfCX;sBmgR zNIu;^V8>@Y_VM%%mF+L@2c-PneZTxIH4Nj*2Wf&Trl*RdrNcWy0`~{7AnCEU_h!4g zs{|1+^Pn1<7)ntVxtxBx$5#*_+7Tt>3zP`?F48vy()*he&59q6v5n%-;!gFqr*l}J z37uY8OfKfzz1ysKXP|JehFRh>&=u9+e5O|DOg|;q7|XTAD`t-ITnWD9a4@Nv6alB+x(?D5mIQK4|%Xc(W62sXp!R$V0Nr6!G<6!&rl2INp`2jeX}zFMT_ zIhPHMdMHOn)RHalyI#qIsc4>O;e30Daxm||`mvk_10LbN>u6^&Ef0Ol$*LF`I6;Ih z6iRua#~HWa6W1;jy=30`yYT1PT7}@1WJHwW2Ez9m8$Bl!^r7W&>IHo)7!xQ}uI1hP z6Ssd&Ii?LEZ>$j|8^X0PS@d)|in4(pl{zR_tAQ6j&{sW<`Q*Za2BOFcH}{+h|WEbOl0*Uf9|UX zV4*bp!fKZ7e8d8xVyFCR%7I+Ui4DD*+zTPEU0vyx8rXmUcW{>Vldu08(ZnibaHkwz z>j_A|w8t*_AlL!gjFB|FzX=6DhI0H7Y*stS8g;2hAb#nRQRN0lK`wY=K4Tv=SrTEF zpYPts161UV?V1Q1f1$n)4~w_Te$nlzgqlC;T=1k^NPHFaf9ZNIEfoy$I%bNKBD5h0BLA65f45k^1UTgK{rJjJ+a^aVpcfuFGp{2k^+iVP6 z+lc8?sQ=*b>-=o-*-ZCW-B~)3$A8S!s@S?m8gp1FqTZ;qNh@mW2q>8ndepX-A9{Z} zw3G9#q2$q8behy-FJ0U1m`(vt3Ad$x%v3&guT}Qx9M;14td8z{k>!ctmyVA63ca>U 
zu_rPGf&9K<_FvLQiH1)k3z_46`4tStx}59(WzY|ewv|xMYey3(dl3HR$A47Vg0u{= zZ518P1~`eoXJnzC7nkBTDp5^v8n4GcvaedjuSV1Vh{ZT|1E5Nrz>sJ@B%;{{(Snc=wRLxC`C0aeWh1(yc__I!-#u5=0tDVP0Pp`2+q^Cy z@x1IVaM-2yxo{U7O*Y&TFQ%vhxZBN~{o27FR!g2oYVi`jZwhdjV~zuNS^&x<0te`W)J-9jPXz6C-oXX*Tg z3?b0lnf!QpOG4v7kVf}gcDrhBUWg~ToFH-_^z?G3+R@|yOF-$F^Kz1)1g0@pJb*!*zmA-@v&wuQcnm*LUehe0EAMbpun*mFMxv zhU?4@<_C{$mzN(qfcS)~n{KGR?uI*>Sry$=5pCYjwg%oOXsf_8;=Sh4$I6Z|dW=T^ z1+-j5dcgH-D1PU;_!F>ZX?LQIIhi3f;cyZ6i^G^#UV@7>&v;@~s_W`t%COeRUzs=f zg^w|JzZXZW!maLO?1}36{^LhX1#pPC$^ndRE-euZ(+%I@i6GkU8hDfir3?&02o&P- z=_CFgQc=er?s(+z*esi&db( zj*Zivu-(NyOV?+pDJog6eKr;*m)JB?hBDGc?CjdFsn|L$u&fobulE%i(#-Lv41s8dY9L)^|5&GjZh4*RoMi>-gkk;d4 zM70e?oer4T#N5No*_a)?uf!8PT51)?48e3LU5o5y2UDidP}IM7M$D002)K@C3T^jMOAfE(;fTnKWxhsFD)${T_(+H7X{LT*4ShA0z+Oe#1jPy19g{23Mw7XX+q}<;{wS|jgmCX4fMgsYHv+sgS#I%6HS7@ zv=26XkU?__`>N-Bc@Qei`i|Z9kJK^^fTrorzES&e{Y-l~RWqsO=+jHdUNmOdHj$}P zmtrSzI(D_3`;*M?%BL2wNKq}fYoDs{)QesBZ(F&;IimJz6NJ*bCB!RMjZJrqSD#K{ z_3mWARW&`s14X){cG0q>Ken<2u@XN)@71f8DUHj&QnSmgBtp5I@#ecd{SoLr;4!^Bi<2$w0lpk&KX zKA#$6#it79u<_my_x;(-EJn8DSvyIU9FDkOn|{xG(rft7c~uD|dQC$44#u&~S0>7X zd$D0qnlPC8oOuMmH*|)|9`JoTRst=1F8cyW&$CF`vJ^HSLxyGc2b1KB$0iq33Kg1M zoHC0;>zr}bHFM$8X*KU8lcKpCM|Nkm{lG3g5yH0*-1=jfgMHs5C)=4~o0jr@(@r*| zF|nD;uwv+_lSEfu-CFu4S<5J^{!>23)OS@_ac@lou8k7a%9SeDil!N=myMRlN%F%r zD*Cu1;14rI12?`Jw91RdQ>mXS65!=?=Bum}u2^tH8cVk$^zUFDpsn9cYJC(dpUZXZ!)#DzasIf~7@+1QFFpyn)PFCI>uxE<7`)_JLu zk-mLV3NsVB!FK6>gi)X}m)6)-luMG@+kU5Z@j6TuW|=Ss?pAImj{5hh=E_AwNvW@c z=rVNNTEe5M#LL0dxo-Z`hG=rhE!);;R*7#VGXQbFXL*0hs3nke(g>A(M~CY%iS}|z zu{y=Ql@}7!m>PFDT^1P25E@hfly!%Q?QIS`bM@~w(DE$EX8;NiX0jl>1hv}8JH#j5 zaLA zjb*Hzw@U^{QoW(^5kNs9zf)X?V6#Kn&iRX7r9KI&+L8-`D|S(O zoHU`F8m)BnF9V85g&RCcnBzs#MiWn z8aeoSb=bpXxIE;Qkjt$t!}l3^nz( zGsK~@p3eE*`;0UkgLJ!KcFHR&rzw>SlQvA{{++lc>++!CxWgyAw@_oe5QJt-{f0&r z?_#9roSichTRiA5@lL>l19s&ZBWl~jP4D`jh7A?kK&*nRY7>wTj$T<9_e@?jtX8Q| z%Sb8$aP|AZFTb4*MRu{a4T$Go`|L{{xxo)4Hi4lkX-+0m6{T{~xDblQE7qmL*&FxXVe~KZ<(ZMS$=$zXrENO{RFD#9nm^ zqlls%%ylyi(`)W#evdi1Bm1h)qXRRul2fWAB#yNwj}XpV9ex)Z7^R50i7pVs^u=pi z^K2C;B6EBvt}C>{SjT!kjlmcpe{HLS_mF5V4Y+j|uScEBUO)%J9c=M)I&ptWKBVB# zsq^%({d#UXBpF?wkJO5Z-zog0RJS65+Z+7b9`(nnbxBXF676wCHc=hBjvyhP7BCdb z(B4|~NqgB?+-uewRu)@vsKO+dW6_Stq64-kB6-oLI;QOy37PJlDUAo}u%bg=%%Iyd zi*P-Sh1Pl@+y)lmgOYX=@(c$ufh|1NGHAx}qwyBGr8rT|+UQ}F`bYlrmw&me%?lP&#t*X$5a{JqVg zRA$@p*8cQJO{+u=7$~RxPswqp;{6KJ@GGldg;tln9hTLv~&PqK$%5n*=AuyQYTBg%jt=wZ>;_cU|t>mzKc7il4!%D8?&tWR43MP1%L!F*v3*E~$l#Di< zWTUY@)fF`00A=T3C}Rb9#>;tMjIWQ2w1yYialH3oQ(RnR$GL@2p9Iv%b(V1Lmzu(pH{CL1QPN4I$K+?)U&bJ4e}WiwJLQoKYX3 z^UFlB(Sr4zjLouLG#$8U{8m5aE#iKGzHILrP@vh9)W`jm>6WC0%^2q6g8Jfw1c7Nz zuZdLrqu+A20{y1Vzt_Ag+B9JJJ9yKjfgJn4O9SikuXErB@^k5k_i~c!pc@YJj|(B~ EKY! 
diff --git a/website/images/logo-180x180.png b/website/images/logo-180x180.png
index ee9bae1f61ef701514b88f892655ce6b6862a5c4..d89c259e27c8aa861b82f3f0902ef9c14ff7f93a 100644
GIT binary patch
delta 365
[base85 binary patch data omitted]

diff --git a/website/images/yandex.png b/website/images/yandex.png
index 7b35a630afb7645b7d9ea5924143fe1ccaeb9fe9..ad40dcddfc7ea8f942e3d00a44ea97e51b81af2e 100644
GIT binary patch
delta 4160
[base85 binary patch data omitted]

delta 4445
[base85 binary patch data omitted]

From 9d0c2f83f898172ae06ac2003b036e0c4ed85f48 Mon Sep 17 00:00:00 2001
From: Maksim Kita
Date: Wed, 7 Jul 2021 22:14:30 +0300
Subject: [PATCH 857/931] ClickHouse dictionary source secure setting added documentation

---
 .../external-dictionaries/external-dicts-dict-sources.md | 5 ++++-
 src/Dictionaries/ClickHouseDictionarySource.cpp          | 6 +++---
 src/Dictionaries/ClickHouseDictionarySource.h            | 4 ++--
 3 files changed, 9 insertions(+), 6 deletions(-)

diff --git a/docs/en/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md b/docs/en/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md
index a841bf3bc80..8022221843b 100644
--- a/docs/en/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md
+++ b/docs/en/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md
@@ -129,7 +129,7 @@ That dictionary source can be configured only via XML configuration. Creating di
 
 ## Executable Pool {#dicts-external_dicts_dict_sources-executable_pool}
 
-Executable pool allows loading data from pool of processes.  This source does not work with dictionary layouts that need to load all data from source. Executable pool works if the dictionary [is stored](external-dicts-dict-layout.md#ways-to-store-dictionaries-in-memory) using `cache`, `complex_key_cache`, `ssd_cache`, `complex_key_ssd_cache`, `direct`, `complex_key_direct` layouts.
+Executable pool allows loading data from pool of processes. This source does not work with dictionary layouts that need to load all data from source. Executable pool works if the dictionary [is stored](external-dicts-dict-layout.md#ways-to-store-dictionaries-in-memory) using `cache`, `complex_key_cache`, `ssd_cache`, `complex_key_ssd_cache`, `direct`, `complex_key_direct` layouts.
 
 Executable pool will spawn pool of processes with specified command and keep them running until they exit. The program should read data from STDIN while it is available and output result to STDOUT, and it can wait for next block of data on STDIN. ClickHouse will not close STDIN after processing a block of data but will pipe another chunk of data when needed. The executable script should be ready for this way of data processing — it should poll STDIN and flush data to STDOUT early.
 
+Executable pool allows loading data from pool of processes. This source does not work with dictionary layouts that need to load all data from source. Executable pool works if the dictionary [is stored](external-dicts-dict-layout.md#ways-to-store-dictionaries-in-memory) using `cache`, `complex_key_cache`, `ssd_cache`, `complex_key_ssd_cache`, `direct`, `complex_key_direct` layouts.

 Executable pool will spawn pool of processes with specified command and keep them running until they exit. The program should read data from STDIN while it is available and output result to STDOUT, and it can wait for next block of data on STDIN. ClickHouse will not close STDIN after processing a block of data but will pipe another chunk of data when needed. The executable script should be ready for this way of data processing — it should poll STDIN and flush data to STDOUT early.
@@ -581,6 +581,7 @@ Example of settings:
         <db>default</db>
         <table>ids</table>
         <where>id=10</where>
+        <secure>1</secure>
     </clickhouse>
 </source>
 ```
@@ -596,6 +597,7 @@ SOURCE(CLICKHOUSE(
     db 'default'
     table 'ids'
     where 'id=10'
+    secure 1
 ))
 ```
@@ -609,6 +611,7 @@ Setting fields:
 - `table` – Name of the table.
 - `where` – The selection criteria. May be omitted.
 - `invalidate_query` – Query for checking the dictionary status. Optional parameter. Read more in the section [Updating dictionaries](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-lifetime.md).
+- `secure` – Use SSL for the connection.

 ### Mongodb {#dicts-external_dicts_dict_sources-mongodb}

diff --git a/src/Dictionaries/ClickHouseDictionarySource.cpp b/src/Dictionaries/ClickHouseDictionarySource.cpp
index fb8660b27ed..42ec73ee520 100644
--- a/src/Dictionaries/ClickHouseDictionarySource.cpp
+++ b/src/Dictionaries/ClickHouseDictionarySource.cpp
@@ -224,9 +224,7 @@ void registerDictionarySourceClickHouse(DictionarySourceFactory & factory)
         ClickHouseDictionarySource::Configuration configuration
         {
-            .secure = config.getBool(settings_config_prefix + ".secure", false),
             .host = host,
-            .port = port,
             .user = config.getString(settings_config_prefix + ".user", "default"),
             .password = config.getString(settings_config_prefix + ".password", ""),
             .db = config.getString(settings_config_prefix + ".db", default_database),
@@ -235,7 +233,9 @@ void registerDictionarySourceClickHouse(DictionarySourceFactory & factory)
             .invalidate_query = config.getString(settings_config_prefix + ".invalidate_query", ""),
             .update_field = config.getString(settings_config_prefix + ".update_field", ""),
             .update_lag = config.getUInt64(settings_config_prefix + ".update_lag", 1),
-            .is_local = isLocalAddress({host, port}, default_port)
+            .port = port,
+            .is_local = isLocalAddress({host, port}, default_port),
+            .secure = config.getBool(settings_config_prefix + ".secure", false)
         };

     /// We should set user info even for the case when the dictionary is loaded in-process (without TCP communication).
diff --git a/src/Dictionaries/ClickHouseDictionarySource.h b/src/Dictionaries/ClickHouseDictionarySource.h
index e7c6d4aa8d2..fe37610b9c4 100644
--- a/src/Dictionaries/ClickHouseDictionarySource.h
+++ b/src/Dictionaries/ClickHouseDictionarySource.h
@@ -20,9 +20,7 @@ class ClickHouseDictionarySource final : public IDictionarySource
 public:
     struct Configuration
     {
-        const bool secure;
         const std::string host;
-        const UInt16 port;
         const std::string user;
         const std::string password;
         const std::string db;
@@ -31,7 +29,9 @@ public:
         const std::string invalidate_query;
         const std::string update_field;
         const UInt64 update_lag;
+        const UInt16 port;
         const bool is_local;
+        const bool secure;
     };

     ClickHouseDictionarySource(

From 20cbca87de1e4068113205c3641ceb90178009db Mon Sep 17 00:00:00 2001
From: Azat Khuzhin
Date: Thu, 8 Jul 2021 00:48:15 +0300
Subject: [PATCH 858/931] Fix 01791_dist_INSERT_block_structure_mismatch flakiness

Add SYSTEM STOP DISTRIBUTED SENDS to force messages from SYSTEM FLUSH
DISTRIBUTED query context.
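In isolation, the stabilization pattern this fix relies on reads as follows (a minimal SQL sketch; the table name dist_table is hypothetical, the actual test below uses its own dist_01683 table):

```sql
-- Sketch: with background sends stopped, the pending batches are written
-- out by SYSTEM FLUSH DISTRIBUTED itself, so the structure-mismatch
-- messages are logged from that query's context instead of from an
-- unpredictable background sender thread.
SYSTEM STOP DISTRIBUTED SENDS dist_table;
INSERT INTO dist_table VALUES (1), (2);
SYSTEM FLUSH DISTRIBUTED dist_table;
```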
---
 .../01791_dist_INSERT_block_structure_mismatch.reference      | 2 ++
 .../0_stateless/01791_dist_INSERT_block_structure_mismatch.sh | 2 ++
 2 files changed, 4 insertions(+)

diff --git a/tests/queries/0_stateless/01791_dist_INSERT_block_structure_mismatch.reference b/tests/queries/0_stateless/01791_dist_INSERT_block_structure_mismatch.reference
index 3bba1ac23c0..9f376fb3e4f 100644
--- a/tests/queries/0_stateless/01791_dist_INSERT_block_structure_mismatch.reference
+++ b/tests/queries/0_stateless/01791_dist_INSERT_block_structure_mismatch.reference
@@ -1,5 +1,7 @@
 DistributedBlockOutputStream: Structure does not match (remote: n Int8 Int8(size = 0), local: n UInt64 UInt64(size = 1)), implicit conversion will be done.
 DistributedBlockOutputStream: Structure does not match (remote: n Int8 Int8(size = 0), local: n UInt64 UInt64(size = 1)), implicit conversion will be done.
+    default.dist_01683.DirectoryMonitor: Structure does not match (remote: n Int8 Int8(size = 0), local: n UInt64 UInt64(size = 0)), implicit conversion will be done
+    default.dist_01683.DirectoryMonitor: Structure does not match (remote: n Int8 Int8(size = 0), local: n UInt64 UInt64(size = 0)), implicit conversion will be done
 1
 1
 2
diff --git a/tests/queries/0_stateless/01791_dist_INSERT_block_structure_mismatch.sh b/tests/queries/0_stateless/01791_dist_INSERT_block_structure_mismatch.sh
index e989696da03..1a96aad3f13 100755
--- a/tests/queries/0_stateless/01791_dist_INSERT_block_structure_mismatch.sh
+++ b/tests/queries/0_stateless/01791_dist_INSERT_block_structure_mismatch.sh
@@ -18,6 +18,8 @@ $CLICKHOUSE_CLIENT --prefer_localhost_replica=0 -nm -q "
     INSERT INTO dist_01683 VALUES (1),(2);

     SET insert_distributed_sync=0;
+    -- force log messages from the 'SYSTEM FLUSH DISTRIBUTED' context
+    SYSTEM STOP DISTRIBUTED SENDS dist_01683;
     INSERT INTO dist_01683 VALUES (1),(2);
     SYSTEM FLUSH DISTRIBUTED dist_01683;

From 201bdc4ff59a76de4ddf9c154ba17233d6e6da86 Mon Sep 17 00:00:00 2001
From: Vitaliy Zakaznikov
Date: Wed, 7 Jul 2021 21:40:01 -0400
Subject: [PATCH 859/931] Disabling TestFlows LDAP module due to test failures.

---
 tests/testflows/regression.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/tests/testflows/regression.py b/tests/testflows/regression.py
index c2e143a4b1c..8932e6bcf8f 100755
--- a/tests/testflows/regression.py
+++ b/tests/testflows/regression.py
@@ -23,7 +23,7 @@ def regression(self, local, clickhouse_binary_path, stress=None, parallel=None):
     with Pool(8) as pool:
         try:
             run_scenario(pool, tasks, Feature(test=load("example.regression", "regression")), args)
-            run_scenario(pool, tasks, Feature(test=load("ldap.regression", "regression")), args)
+            #run_scenario(pool, tasks, Feature(test=load("ldap.regression", "regression")), args)
             run_scenario(pool, tasks, Feature(test=load("rbac.regression", "regression")), args)
             run_scenario(pool, tasks, Feature(test=load("aes_encryption.regression", "regression")), args)
             run_scenario(pool, tasks, Feature(test=load("map_type.regression", "regression")), args)

From 40a2fc8a18da3ffed18052952f819bb095f745d5 Mon Sep 17 00:00:00 2001
From: Vitaliy Zakaznikov
Date: Wed, 7 Jul 2021 21:45:29 -0400
Subject: [PATCH 860/931] Trying to fix collection of clickhouse server logs for TestFlows check.
--- docker/test/testflows/runner/Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docker/test/testflows/runner/Dockerfile b/docker/test/testflows/runner/Dockerfile index d39ec12fb82..9fa028fedca 100644 --- a/docker/test/testflows/runner/Dockerfile +++ b/docker/test/testflows/runner/Dockerfile @@ -73,4 +73,4 @@ RUN set -x \ VOLUME /var/lib/docker EXPOSE 2375 ENTRYPOINT ["dockerd-entrypoint.sh"] -CMD ["sh", "-c", "python3 regression.py --no-color -o new-fails --local --clickhouse-binary-path ${CLICKHOUSE_TESTS_SERVER_BIN_PATH} --log test.log ${TESTFLOWS_OPTS}; cat test.log | tfs report results --format json > results.json; /usr/local/bin/process_testflows_result.py || echo -e 'failure\tCannot parse results' > check_status.tsv; find * -type f | grep _instances | grep clickhouse-server | xargs -n0 tar -rvf clickhouse_logs.tar; gzip -9 clickhouse_logs.tar"] +CMD ["sh", "-c", "python3 regression.py --no-color -o new-fails --local --clickhouse-binary-path ${CLICKHOUSE_TESTS_SERVER_BIN_PATH} --log test.log ${TESTFLOWS_OPTS}; cat test.log | tfs report results --format json > results.json; /usr/local/bin/process_testflows_result.py || echo -e 'failure\tCannot parse results' > check_status.tsv; find * -type f | grep _instances | grep clickhouse-server | xargs -n1 tar -rvf clickhouse_logs.tar; gzip -9 clickhouse_logs.tar"] From aa84d6a91379f058e754bc6e4761783b6f6a163b Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Thu, 8 Jul 2021 05:20:24 +0300 Subject: [PATCH 861/931] Render pipelines in Play UI --- programs/server/play.html | 78 ++++++++++++++++++++++++++++++++++++++- 1 file changed, 77 insertions(+), 1 deletion(-) diff --git a/programs/server/play.html b/programs/server/play.html index 066cd09d16a..35254a29a8b 100644 --- a/programs/server/play.html +++ b/programs/server/play.html @@ -283,6 +283,23 @@ color: var(--link-color); text-decoration: none; } + + text + { + font-size: 14px; + fill: var(--text-color); + } + + .node rect + { + fill: var(--element-background-color); + filter: drop-shadow(.2rem .2rem .2rem var(--shadow-color)); + } + + .edgePath path + { + stroke: var(--text-color); + } @@ -305,6 +322,7 @@

     
+    <svg id="graph"></svg>

@@ -447,6 +465,11 @@ table.removeChild(table.lastChild); } + let graph = document.getElementById('graph'); + while (graph.firstChild) { + graph.removeChild(graph.lastChild); + } + document.getElementById('data-unparsed').innerText = ''; document.getElementById('data-unparsed').style.display = 'none'; @@ -461,12 +484,21 @@ function renderResult(response) { - //console.log(response); clear(); let stats = document.getElementById('stats'); stats.innerText = 'Elapsed: ' + response.statistics.elapsed.toFixed(3) + " sec, read " + response.statistics.rows_read + " rows."; + /// We can also render graphs if user performed EXPLAIN PIPELINE graph=1. + if (response.data.length > 3 && response.data[0][0] === "digraph" && document.getElementById('query').value.match(/^\s*EXPLAIN/i)) { + renderGraph(response); + } else { + renderTable(response); + } + } + + function renderTable(response) + { let thead = document.createElement('thead'); for (let idx in response.meta) { let th = document.createElement('th'); @@ -559,6 +591,50 @@ document.getElementById('error').style.display = 'block'; } + /// Huge JS libraries should be loaded only if needed. + function loadJS(src) { + return new Promise((resolve, reject) => { + const script = document.createElement('script'); + script.src = src; + script.addEventListener('load', function() { resolve(true); }); + document.head.appendChild(script); + }); + } + + let load_dagre_promise; + function loadDagre() { + if (load_dagre_promise) { return load_dagre_promise; } + + load_dagre_promise = Promise.all([ + loadJS('https://dagrejs.github.io/project/dagre/latest/dagre.min.js'), + loadJS('https://dagrejs.github.io/project/graphlib-dot/latest/graphlib-dot.min.js'), + loadJS('https://dagrejs.github.io/project/dagre-d3/latest/dagre-d3.min.js'), + loadJS('https://cdn.jsdelivr.net/npm/d3@7'), + ]); + + return load_dagre_promise; + } + + async function renderGraph(response) + { + await loadDagre(); + + /// https://github.com/dagrejs/dagre-d3/issues/131 + const dot = response.data.reduce((acc, row) => acc + '\n' + row[0].replace(/shape\s*=\s*box/g, 'shape=rect')); + + let graph = graphlibDot.read(dot); + graph.graph().rankdir = 'TB'; + + let render = new dagreD3.render(); + + render(d3.select("#graph"), graph); + + let svg = document.getElementById('graph'); + + svg.style.width = graph.graph().width; + svg.style.height = graph.graph().height; + } + function setColorTheme(theme) { window.localStorage.setItem('theme', theme); From e841fae85267228a9f60bbfbd4030883d81c9948 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Thu, 8 Jul 2021 05:27:40 +0300 Subject: [PATCH 862/931] Improvement --- programs/server/play.html | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/programs/server/play.html b/programs/server/play.html index 35254a29a8b..98770be27eb 100644 --- a/programs/server/play.html +++ b/programs/server/play.html @@ -469,6 +469,7 @@ while (graph.firstChild) { graph.removeChild(graph.lastChild); } + graph.style.display = 'none'; document.getElementById('data-unparsed').innerText = ''; document.getElementById('data-unparsed').style.display = 'none'; @@ -627,9 +628,10 @@ let render = new dagreD3.render(); - render(d3.select("#graph"), graph); - let svg = document.getElementById('graph'); + svg.style.display = 'block'; + + render(d3.select("#graph"), graph); svg.style.width = graph.graph().width; svg.style.height = graph.graph().height; From 15b75be59b0a279cea63097cd841ec8758d84692 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Thu, 8 Jul 2021 
05:29:21 +0300 Subject: [PATCH 863/931] Add comment --- programs/server/play.html | 1 + 1 file changed, 1 insertion(+) diff --git a/programs/server/play.html b/programs/server/play.html index 98770be27eb..da763ec2a0e 100644 --- a/programs/server/play.html +++ b/programs/server/play.html @@ -284,6 +284,7 @@ text-decoration: none; } + /* This is for graph in svg */ text { font-size: 14px; From 0e4257721ce2eba399898a8b453b2ecc60e88a1a Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Thu, 8 Jul 2021 05:50:18 +0300 Subject: [PATCH 864/931] Maybe better dependencies tracking in CMake --- cmake/embed_binary.cmake | 1 + 1 file changed, 1 insertion(+) diff --git a/cmake/embed_binary.cmake b/cmake/embed_binary.cmake index d15962c05d4..e5428c24939 100644 --- a/cmake/embed_binary.cmake +++ b/cmake/embed_binary.cmake @@ -53,5 +53,6 @@ macro(clickhouse_embed_binaries) set_property(SOURCE "${CMAKE_CURRENT_BINARY_DIR}/${ASSEMBLY_FILE_NAME}" APPEND PROPERTY INCLUDE_DIRECTORIES "${EMBED_RESOURCE_DIR}") target_sources("${EMBED_TARGET}" PRIVATE "${CMAKE_CURRENT_BINARY_DIR}/${ASSEMBLY_FILE_NAME}") + set_target_properties("${EMBED_TARGET}" PROPERTIES OBJECT_DEPENDS "${RESOURCE_FILE}") endforeach() endmacro() From 15dbb5a07a04da12a2f2f67fdfcdf2f197b98a0f Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Thu, 8 Jul 2021 06:04:35 +0300 Subject: [PATCH 865/931] Final touch --- programs/server/play.html | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/programs/server/play.html b/programs/server/play.html index da763ec2a0e..c3e8708f20b 100644 --- a/programs/server/play.html +++ b/programs/server/play.html @@ -301,6 +301,11 @@ { stroke: var(--text-color); } + + marker + { + fill: var(--text-color); + } From ee1b3696a251b3bf24086daedf7decad5391dc8e Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Thu, 8 Jul 2021 06:15:30 +0300 Subject: [PATCH 866/931] Fix error in AsynchronousMetrics --- src/Interpreters/AsynchronousMetrics.cpp | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/Interpreters/AsynchronousMetrics.cpp b/src/Interpreters/AsynchronousMetrics.cpp index 5c49adf6fe7..aca92b8866d 100644 --- a/src/Interpreters/AsynchronousMetrics.cpp +++ b/src/Interpreters/AsynchronousMetrics.cpp @@ -716,9 +716,9 @@ void AsynchronousMetrics::update(std::chrono::system_clock::time_point update_ti { ProcStatValuesOther delta_values = current_other_values - proc_stat_values_other; - new_values["OSInterrupts"] = delta_values.interrupts * multiplier; - new_values["OSContextSwitches"] = delta_values.context_switches * multiplier; - new_values["OSProcessesCreated"] = delta_values.processes_created * multiplier; + new_values["OSInterrupts"] = delta_values.interrupts; + new_values["OSContextSwitches"] = delta_values.context_switches; + new_values["OSProcessesCreated"] = delta_values.processes_created; /// Also write values normalized to 0..1 by diving to the number of CPUs. /// These values are good to be averaged across the cluster of non-uniform servers. 
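The effect of this fix can be checked from SQL; a quick sketch (assumes any running server, since these metrics are always exported):

```sql
-- Sketch: after the fix these three metrics carry the raw delta since the
-- previous update instead of a value scaled by the update interval; only
-- the explicitly normalized variants remain in the 0..1 range.
SELECT metric, value
FROM system.asynchronous_metrics
WHERE metric IN ('OSInterrupts', 'OSContextSwitches', 'OSProcessesCreated');
```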
From 39de7f8a2a18b685e89d0a276a82ede735410c91 Mon Sep 17 00:00:00 2001
From: vdimir
Date: Thu, 8 Jul 2021 11:16:57 +0300
Subject: [PATCH 867/931] Fix logical error with signed and unsigned offset in
 WindowFrame::checkValid

---
 src/Interpreters/WindowDescription.cpp                     | 4 ++--
 tests/queries/0_stateless/01571_window_functions.reference | 2 ++
 tests/queries/0_stateless/01571_window_functions.sql       | 3 +++
 3 files changed, 7 insertions(+), 2 deletions(-)

diff --git a/src/Interpreters/WindowDescription.cpp b/src/Interpreters/WindowDescription.cpp
index 46e1eb12dc5..4de15af820f 100644
--- a/src/Interpreters/WindowDescription.cpp
+++ b/src/Interpreters/WindowDescription.cpp
@@ -160,7 +160,7 @@ void WindowFrame::checkValid() const
     bool begin_less_equal_end;
     if (begin_preceding && end_preceding)
     {
-        begin_less_equal_end = begin_offset >= end_offset;
+        begin_less_equal_end = begin_offset.get() >= end_offset.get();
     }
     else if (begin_preceding && !end_preceding)
     {
@@ -172,7 +172,7 @@ void WindowFrame::checkValid() const
     }
     else /* if (!begin_preceding && !end_preceding) */
     {
-        begin_less_equal_end = begin_offset <= end_offset;
+        begin_less_equal_end = begin_offset.get() <= end_offset.get();
     }

     if (!begin_less_equal_end)
diff --git a/tests/queries/0_stateless/01571_window_functions.reference b/tests/queries/0_stateless/01571_window_functions.reference
index 47a7c062b0b..bbac8e5ac6d 100644
--- a/tests/queries/0_stateless/01571_window_functions.reference
+++ b/tests/queries/0_stateless/01571_window_functions.reference
@@ -13,3 +13,5 @@ select count() over (rows between 1 + 1 preceding and 1 + 1 following) from numb
 5
 4
 3
+-- signed and unsigned in offset do not cause logical error
+select count() over (rows between 2 following and 1 + -1 following) FROM numbers(10); -- { serverError 36 }
diff --git a/tests/queries/0_stateless/01571_window_functions.sql b/tests/queries/0_stateless/01571_window_functions.sql
index 614b98670b2..c6479044b59 100644
--- a/tests/queries/0_stateless/01571_window_functions.sql
+++ b/tests/queries/0_stateless/01571_window_functions.sql
@@ -4,3 +4,6 @@ set allow_experimental_window_functions = 1;

 -- expressions in window frame
 select count() over (rows between 1 + 1 preceding and 1 + 1 following) from numbers(10);
+
+-- signed and unsigned in offset do not cause logical error
+select count() over (rows between 2 following and 1 + -1 following) FROM numbers(10); -- { serverError 36 }

From 2304f6a31b77ea3123217f50f5cc6138f61054fd Mon Sep 17 00:00:00 2001
From: vdimir
Date: Thu, 8 Jul 2021 12:24:08 +0300
Subject: [PATCH 868/931] Use FieldVisitor to compare offsets in
 WindowFrame::checkValid

---
 src/Common/FieldVisitorsAccurateComparison.h | 12 ++++++++++++
 src/Interpreters/WindowDescription.cpp       |  6 ++++--
 2 files changed, 16 insertions(+), 2 deletions(-)

diff --git a/src/Common/FieldVisitorsAccurateComparison.h b/src/Common/FieldVisitorsAccurateComparison.h
index 0f605b7da23..ba3fabd1535 100644
--- a/src/Common/FieldVisitorsAccurateComparison.h
+++ b/src/Common/FieldVisitorsAccurateComparison.h
@@ -117,4 +117,16 @@ public:
     }
 };

+
+class FieldVisitorAccurateLessOrEqual : public StaticVisitor<bool>
+{
+public:
+    template <typename T, typename U>
+    bool operator()(const T & l, const U & r) const
+    {
+        auto less_cmp = FieldVisitorAccurateLess();
+        return !less_cmp(r, l);
+    }
+};
+
 }
diff --git a/src/Interpreters/WindowDescription.cpp b/src/Interpreters/WindowDescription.cpp
index 4de15af820f..923e10ed31b 100644
--- a/src/Interpreters/WindowDescription.cpp
+++ b/src/Interpreters/WindowDescription.cpp
@@ -1,6 +1,7 @@
 #include
#include +#include #include #include #include @@ -160,7 +161,8 @@ void WindowFrame::checkValid() const bool begin_less_equal_end; if (begin_preceding && end_preceding) { - begin_less_equal_end = begin_offset.get() >= end_offset.get(); + /// we can't compare Fields using operator<= if fields have different types + begin_less_equal_end = applyVisitor(FieldVisitorAccurateLessOrEqual(), end_offset, begin_offset); } else if (begin_preceding && !end_preceding) { @@ -172,7 +174,7 @@ void WindowFrame::checkValid() const } else /* if (!begin_preceding && !end_preceding) */ { - begin_less_equal_end = begin_offset.get() <= end_offset.get(); + begin_less_equal_end = applyVisitor(FieldVisitorAccurateLessOrEqual(), begin_offset, end_offset); } if (!begin_less_equal_end) From 2304f6a31b77ea3123217f50f5cc6138f61054fd Mon Sep 17 00:00:00 2001 From: vdimir Date: Thu, 8 Jul 2021 12:24:37 +0300 Subject: [PATCH 869/931] Remove dots from exception message in WindowDescription.cpp --- src/Interpreters/WindowDescription.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/Interpreters/WindowDescription.cpp b/src/Interpreters/WindowDescription.cpp index 923e10ed31b..32129072972 100644 --- a/src/Interpreters/WindowDescription.cpp +++ b/src/Interpreters/WindowDescription.cpp @@ -100,7 +100,7 @@ void WindowFrame::checkValid() const && begin_offset.get() < INT_MAX)) { throw Exception(ErrorCodes::BAD_ARGUMENTS, - "Frame start offset for '{}' frame must be a nonnegative 32-bit integer, '{}' of type '{}' given.", + "Frame start offset for '{}' frame must be a nonnegative 32-bit integer, '{}' of type '{}' given", toString(type), applyVisitor(FieldVisitorToString(), begin_offset), Field::Types::toString(begin_offset.getType())); @@ -113,7 +113,7 @@ void WindowFrame::checkValid() const && end_offset.get() < INT_MAX)) { throw Exception(ErrorCodes::BAD_ARGUMENTS, - "Frame end offset for '{}' frame must be a nonnegative 32-bit integer, '{}' of type '{}' given.", + "Frame end offset for '{}' frame must be a nonnegative 32-bit integer, '{}' of type '{}' given", toString(type), applyVisitor(FieldVisitorToString(), end_offset), Field::Types::toString(end_offset.getType())); From d4256a8583bd7c1f08a35c868c4455311340c500 Mon Sep 17 00:00:00 2001 From: vdimir Date: Thu, 8 Jul 2021 13:49:13 +0300 Subject: [PATCH 870/931] Minor style changes in JoinedTables --- src/Interpreters/IdentifierSemantic.cpp | 9 +++++++-- src/Interpreters/InterpreterSelectQuery.cpp | 6 +++--- src/Interpreters/JoinedTables.cpp | 18 ++++++++++++++---- src/Interpreters/JoinedTables.h | 10 ++++------ src/Interpreters/getTableExpressions.cpp | 5 ++--- src/Interpreters/getTableExpressions.h | 4 +--- 6 files changed, 31 insertions(+), 21 deletions(-) diff --git a/src/Interpreters/IdentifierSemantic.cpp b/src/Interpreters/IdentifierSemantic.cpp index 0198a92f78b..098bf033399 100644 --- a/src/Interpreters/IdentifierSemantic.cpp +++ b/src/Interpreters/IdentifierSemantic.cpp @@ -1,6 +1,8 @@ +#include + #include -#include +#include #include #include @@ -280,7 +282,10 @@ IdentifierMembershipCollector::IdentifierMembershipCollector(const ASTSelectQuer QueryAliasesNoSubqueriesVisitor(aliases).visit(with); QueryAliasesNoSubqueriesVisitor(aliases).visit(select.select()); - tables = getDatabaseAndTablesWithColumns(getTableExpressions(select), context); + const auto & settings = context->getSettingsRef(); + tables = getDatabaseAndTablesWithColumns(getTableExpressions(select), context, + settings.asterisk_include_alias_columns, + 
settings.asterisk_include_materialized_columns); } std::optional IdentifierMembershipCollector::getIdentsMembership(ASTPtr ast) const diff --git a/src/Interpreters/InterpreterSelectQuery.cpp b/src/Interpreters/InterpreterSelectQuery.cpp index f3857b138da..d820cbbae45 100644 --- a/src/Interpreters/InterpreterSelectQuery.cpp +++ b/src/Interpreters/InterpreterSelectQuery.cpp @@ -311,7 +311,7 @@ InterpreterSelectQuery::InterpreterSelectQuery( ApplyWithSubqueryVisitor().visit(query_ptr); } - JoinedTables joined_tables(getSubqueryContext(context), getSelectQuery()); + JoinedTables joined_tables(getSubqueryContext(context), getSelectQuery(), options.with_all_cols); bool got_storage_from_query = false; if (!has_input && !storage) @@ -328,7 +328,7 @@ InterpreterSelectQuery::InterpreterSelectQuery( metadata_snapshot = storage->getInMemoryMetadataPtr(); } - if (has_input || !joined_tables.resolveTables(options.with_all_cols)) + if (has_input || !joined_tables.resolveTables()) joined_tables.makeFakeTable(storage, metadata_snapshot, source_header); /// Rewrite JOINs @@ -337,7 +337,7 @@ InterpreterSelectQuery::InterpreterSelectQuery( rewriteMultipleJoins(query_ptr, joined_tables.tablesWithColumns(), context->getCurrentDatabase(), context->getSettingsRef()); joined_tables.reset(getSelectQuery()); - joined_tables.resolveTables(options.with_all_cols); + joined_tables.resolveTables(); if (storage && joined_tables.isLeftTableSubquery()) { diff --git a/src/Interpreters/JoinedTables.cpp b/src/Interpreters/JoinedTables.cpp index 5e9a285c5db..099fb5c2f44 100644 --- a/src/Interpreters/JoinedTables.cpp +++ b/src/Interpreters/JoinedTables.cpp @@ -161,9 +161,10 @@ using RenameQualifiedIdentifiersVisitor = InDepthNodeVisitorgetSettingsRef(); + bool include_alias_cols = include_all_columns || settings.asterisk_include_alias_columns; + bool include_materialized_cols = include_all_columns || settings.asterisk_include_materialized_columns; + tables_with_columns = getDatabaseAndTablesWithColumns(table_expressions, context, include_alias_cols, include_materialized_cols); if (tables_with_columns.size() != table_expressions.size()) throw Exception("Unexpected tables count", ErrorCodes::LOGICAL_ERROR); - const auto & settings = context->getSettingsRef(); if (settings.joined_subquery_requires_alias && tables_with_columns.size() > 1) { for (size_t i = 0; i < tables_with_columns.size(); ++i) @@ -312,4 +315,11 @@ std::shared_ptr JoinedTables::makeTableJoin(const ASTSelectQuery & se return table_join; } +void JoinedTables::reset(const ASTSelectQuery & select_query) +{ + table_expressions = getTableExpressions(select_query); + left_table_expression = extractTableExpression(select_query, 0); + left_db_and_table = getDatabaseAndTable(select_query, 0); +} + } diff --git a/src/Interpreters/JoinedTables.h b/src/Interpreters/JoinedTables.h index 52581c19999..9d01c081e9f 100644 --- a/src/Interpreters/JoinedTables.h +++ b/src/Interpreters/JoinedTables.h @@ -22,15 +22,12 @@ using StorageMetadataPtr = std::shared_ptr; class JoinedTables { public: - JoinedTables(ContextPtr context, const ASTSelectQuery & select_query); + JoinedTables(ContextPtr context, const ASTSelectQuery & select_query, bool include_all_columns_ = false); - void reset(const ASTSelectQuery & select_query) - { - *this = JoinedTables(Context::createCopy(context), select_query); - } + void reset(const ASTSelectQuery & select_query); StoragePtr getLeftTableStorage(); - bool resolveTables(bool include_all_columns); + bool resolveTables(); /// Make fake tables_with_columns[0] in 
case we have predefined input in InterpreterSelectQuery void makeFakeTable(StoragePtr storage, const StorageMetadataPtr & metadata_snapshot, const Block & source_header); @@ -50,6 +47,7 @@ private: ContextPtr context; std::vector table_expressions; TablesWithColumns tables_with_columns; + const bool include_all_columns; /// Legacy (duplicated left table values) ASTPtr left_table_expression; diff --git a/src/Interpreters/getTableExpressions.cpp b/src/Interpreters/getTableExpressions.cpp index 2d9391f4673..d82c7fc1332 100644 --- a/src/Interpreters/getTableExpressions.cpp +++ b/src/Interpreters/getTableExpressions.cpp @@ -116,13 +116,12 @@ static NamesAndTypesList getColumnsFromTableExpression( TablesWithColumns getDatabaseAndTablesWithColumns( const ASTTableExprConstPtrs & table_expressions, ContextPtr context, - bool include_all) + bool include_alias_cols, + bool include_materialized_cols) { TablesWithColumns tables_with_columns; String current_database = context->getCurrentDatabase(); - bool include_alias_cols = include_all || context->getSettingsRef().asterisk_include_alias_columns; - bool include_materialized_cols = include_all || context->getSettingsRef().asterisk_include_materialized_columns; for (const ASTTableExpression * table_expression : table_expressions) { diff --git a/src/Interpreters/getTableExpressions.h b/src/Interpreters/getTableExpressions.h index 6a999729a2f..c4ca01ee3c3 100644 --- a/src/Interpreters/getTableExpressions.h +++ b/src/Interpreters/getTableExpressions.h @@ -21,8 +21,6 @@ const ASTTableExpression * getTableExpression(const ASTSelectQuery & select, siz ASTPtr extractTableExpression(const ASTSelectQuery & select, size_t table_number); TablesWithColumns getDatabaseAndTablesWithColumns( - const ASTTableExprConstPtrs & table_expressions, - ContextPtr context, - bool include_all = false); + const ASTTableExprConstPtrs & table_expressions, ContextPtr context, bool include_alias_cols, bool include_materialized_cols); } From 9f1ffa777f50e8bf31fd99d4ed12a049776c80f0 Mon Sep 17 00:00:00 2001 From: Anton Popov Date: Thu, 8 Jul 2021 15:06:33 +0300 Subject: [PATCH 871/931] remove unused code --- src/Storages/MergeTree/MergeTreeData.cpp | 27 ++----- .../MergeTree/MergeTreeDataSelectExecutor.cpp | 8 -- .../StorageFromBasePartsOfProjection.h | 75 ------------------- src/Storages/SelectQueryInfo.h | 5 -- 4 files changed, 6 insertions(+), 109 deletions(-) delete mode 100644 src/Storages/MergeTree/StorageFromBasePartsOfProjection.h diff --git a/src/Storages/MergeTree/MergeTreeData.cpp b/src/Storages/MergeTree/MergeTreeData.cpp index ae3d2220936..f311d58b7af 100644 --- a/src/Storages/MergeTree/MergeTreeData.cpp +++ b/src/Storages/MergeTree/MergeTreeData.cpp @@ -3977,13 +3977,11 @@ bool MergeTreeData::getQueryProcessingStageWithAggregateProjection( candidate.where_column_name = analysis_result.where_column_name; candidate.remove_where_filter = analysis_result.remove_where_filter; candidate.before_where = analysis_result.before_where->clone(); - // std::cerr << fmt::format("before_where_actions = \n{}", candidate.before_where->dumpDAG()) << std::endl; + required_columns = candidate.before_where->foldActionsByProjection( required_columns, projection.sample_block_for_keys, candidate.where_column_name); - // std::cerr << fmt::format("before_where_actions = \n{}", candidate.before_where->dumpDAG()) << std::endl; - // std::cerr << fmt::format("where_required_columns = \n{}", fmt::join(required_columns, ", ")) << std::endl; if (required_columns.empty()) return false; @@ -3999,12 +3997,11 
@@ bool MergeTreeData::getQueryProcessingStageWithAggregateProjection( // required_columns should not contain columns generated by prewhere for (const auto & column : prewhere_actions->getResultColumns()) required_columns.erase(column.name); - // std::cerr << fmt::format("prewhere_actions = \n{}", prewhere_actions->dumpDAG()) << std::endl; + // Prewhere_action should not add missing keys. prewhere_required_columns = prewhere_actions->foldActionsByProjection( prewhere_required_columns, projection.sample_block_for_keys, candidate.prewhere_info->prewhere_column_name, false); - // std::cerr << fmt::format("prewhere_actions = \n{}", prewhere_actions->dumpDAG()) << std::endl; - // std::cerr << fmt::format("prewhere_required_columns = \n{}", fmt::join(prewhere_required_columns, ", ")) << std::endl; + if (prewhere_required_columns.empty()) return false; candidate.prewhere_info->prewhere_actions = prewhere_actions; @@ -4014,7 +4011,7 @@ bool MergeTreeData::getQueryProcessingStageWithAggregateProjection( auto row_level_filter_actions = candidate.prewhere_info->row_level_filter->clone(); prewhere_required_columns = row_level_filter_actions->foldActionsByProjection( prewhere_required_columns, projection.sample_block_for_keys, candidate.prewhere_info->row_level_column_name, false); - // std::cerr << fmt::format("row_level_filter_required_columns = \n{}", fmt::join(prewhere_required_columns, ", ")) << std::endl; + if (prewhere_required_columns.empty()) return false; candidate.prewhere_info->row_level_filter = row_level_filter_actions; @@ -4023,11 +4020,9 @@ bool MergeTreeData::getQueryProcessingStageWithAggregateProjection( if (candidate.prewhere_info->alias_actions) { auto alias_actions = candidate.prewhere_info->alias_actions->clone(); - // std::cerr << fmt::format("alias_actions = \n{}", alias_actions->dumpDAG()) << std::endl; prewhere_required_columns = alias_actions->foldActionsByProjection(prewhere_required_columns, projection.sample_block_for_keys, {}, false); - // std::cerr << fmt::format("alias_actions = \n{}", alias_actions->dumpDAG()) << std::endl; - // std::cerr << fmt::format("alias_required_columns = \n{}", fmt::join(prewhere_required_columns, ", ")) << std::endl; + if (prewhere_required_columns.empty()) return false; candidate.prewhere_info->alias_actions = alias_actions; @@ -4055,7 +4050,6 @@ bool MergeTreeData::getQueryProcessingStageWithAggregateProjection( if (projection.type == ProjectionDescription::Type::Aggregate && analysis_result.need_aggregate && can_use_aggregate_projection) { - // std::cerr << fmt::format("====== aggregate projection analysis: {} ======", projection.name) << std::endl; bool match = true; Block aggregates; // Let's first check if all aggregates are provided by current projection @@ -4081,11 +4075,8 @@ bool MergeTreeData::getQueryProcessingStageWithAggregateProjection( // needs to provide aggregation keys, and certain children DAG might be substituted by // some keys in projection. 
candidate.before_aggregation = analysis_result.before_aggregation->clone(); - // std::cerr << fmt::format("keys = {}", fmt::join(keys, ", ")) << std::endl; - // std::cerr << fmt::format("before_aggregation = \n{}", candidate.before_aggregation->dumpDAG()) << std::endl; auto required_columns = candidate.before_aggregation->foldActionsByProjection(keys, projection.sample_block_for_keys); - // std::cerr << fmt::format("before_aggregation = \n{}", candidate.before_aggregation->dumpDAG()) << std::endl; - // std::cerr << fmt::format("aggregate_required_columns = \n{}", fmt::join(required_columns, ", ")) << std::endl; + if (required_columns.empty() && !keys.empty()) continue; @@ -4110,12 +4101,10 @@ bool MergeTreeData::getQueryProcessingStageWithAggregateProjection( candidate.required_columns.push_back(aggregate.name); candidates.push_back(std::move(candidate)); } - // std::cerr << fmt::format("====== aggregate projection analysis end: {} ======", projection.name) << std::endl; } if (projection.type == ProjectionDescription::Type::Normal && (analysis_result.hasWhere() || analysis_result.hasPrewhere())) { - // std::cerr << fmt::format("====== normal projection analysis: {} ======", projection.name) << std::endl; const auto & actions = analysis_result.before_aggregation ? analysis_result.before_aggregation : analysis_result.before_order_by; NameSet required_columns; @@ -4127,16 +4116,12 @@ bool MergeTreeData::getQueryProcessingStageWithAggregateProjection( candidate.required_columns = {required_columns.begin(), required_columns.end()}; candidates.push_back(std::move(candidate)); } - // std::cerr << fmt::format("====== normal projection analysis end: {} ======", projection.name) << std::endl; } } // Let's select the best projection to execute the query. if (!candidates.empty()) { - // First build a MergeTreeDataSelectCache to check if a projection is indeed better than base - // query_info.merge_tree_data_select_cache = std::make_unique(); - std::shared_ptr max_added_blocks; if (settings.select_sequential_consistency) { diff --git a/src/Storages/MergeTree/MergeTreeDataSelectExecutor.cpp b/src/Storages/MergeTree/MergeTreeDataSelectExecutor.cpp index cffedf44823..0a05eeb966e 100644 --- a/src/Storages/MergeTree/MergeTreeDataSelectExecutor.cpp +++ b/src/Storages/MergeTree/MergeTreeDataSelectExecutor.cpp @@ -201,7 +201,6 @@ QueryPlanPtr MergeTreeDataSelectExecutor::read( // NOTE: prewhere is executed inside readFromParts if (query_info.projection->before_where) { - // std::cerr << fmt::format("projection before_where: {}", query_info.projection->before_where->dumpDAG()); auto where_step = std::make_unique( plan->getCurrentDataStream(), query_info.projection->before_where, @@ -214,7 +213,6 @@ QueryPlanPtr MergeTreeDataSelectExecutor::read( if (query_info.projection->before_aggregation) { - // std::cerr << fmt::format("projection before_aggregation: {}", query_info.projection->before_aggregation->dumpDAG()); auto expression_before_aggregation = std::make_unique(plan->getCurrentDataStream(), query_info.projection->before_aggregation); expression_before_aggregation->setStepDescription("Before GROUP BY"); @@ -268,9 +266,6 @@ QueryPlanPtr MergeTreeDataSelectExecutor::read( { const auto & header_before_aggregation = pipe.getHeader(); - // std::cerr << "============ header before aggregation" << std::endl; - // std::cerr << header_before_aggregation.dumpStructure() << std::endl; - ColumnNumbers keys; for (const auto & key : query_info.projection->aggregation_keys) 
keys.push_back(header_before_aggregation.getPositionByName(key.name)); @@ -350,9 +345,6 @@ QueryPlanPtr MergeTreeDataSelectExecutor::read( return std::make_shared( header, transform_params, many_data, counter++, merge_threads, temporary_data_merge_threads); }); - - // std::cerr << "============ header after aggregation" << std::endl; - // std::cerr << pipe.getHeader().dumpStructure() << std::endl; }; if (!projection_pipe.empty()) diff --git a/src/Storages/MergeTree/StorageFromBasePartsOfProjection.h b/src/Storages/MergeTree/StorageFromBasePartsOfProjection.h deleted file mode 100644 index 5d82716af11..00000000000 --- a/src/Storages/MergeTree/StorageFromBasePartsOfProjection.h +++ /dev/null @@ -1,75 +0,0 @@ -#pragma once - -#include -#include -#include -#include -#include -#include -#include -#include - -#include - - -namespace DB -{ -/// A Storage that allows reading from a single MergeTree data part. -class StorageFromBasePartsOfProjection final : public shared_ptr_helper, public IStorage -{ - friend struct shared_ptr_helper; - -public: - String getName() const override { return "FromBasePartsOfProjection"; } - - Pipe read( - const Names & column_names, - const StorageMetadataPtr & metadata_snapshot, - SelectQueryInfo & query_info, - ContextPtr context, - QueryProcessingStage::Enum /*processed_stage*/, - size_t max_block_size, - unsigned num_streams) override - { - // NOTE: It's used to read normal parts only - QueryPlan query_plan = std::move(*MergeTreeDataSelectExecutor(storage).readFromParts( - {}, - column_names, - metadata_snapshot, - metadata_snapshot, - query_info, - context, - max_block_size, - num_streams, - nullptr, - query_info.projection ? query_info.projection->merge_tree_data_select_base_cache.get() - : query_info.merge_tree_data_select_cache.get())); - - return query_plan.convertToPipe( - QueryPlanOptimizationSettings::fromContext(context), BuildQueryPipelineSettings::fromContext(context)); - } - - - bool supportsIndexForIn() const override { return true; } - - bool mayBenefitFromIndexForIn( - const ASTPtr & left_in_operand, ContextPtr query_context, const StorageMetadataPtr & metadata_snapshot) const override - { - return storage.mayBenefitFromIndexForIn(left_in_operand, query_context, metadata_snapshot); - } - - NamesAndTypesList getVirtuals() const override { return storage.getVirtuals(); } - -protected: - StorageFromBasePartsOfProjection(const MergeTreeData & storage_, const StorageMetadataPtr & metadata_snapshot) - : IStorage(storage_.getStorageID()), storage(storage_) - { - setInMemoryMetadata(*metadata_snapshot); - } - - -private: - const MergeTreeData & storage; -}; - -} diff --git a/src/Storages/SelectQueryInfo.h b/src/Storages/SelectQueryInfo.h index a7d2ae3e7dd..fc308667db9 100644 --- a/src/Storages/SelectQueryInfo.h +++ b/src/Storages/SelectQueryInfo.h @@ -99,8 +99,6 @@ class IMergeTreeDataPart; using ManyExpressionActions = std::vector; -struct MergeTreeDataSelectCache; - // The projection selected to execute current query struct ProjectionCandidate { @@ -119,8 +117,6 @@ struct ProjectionCandidate ReadInOrderOptimizerPtr order_optimizer; InputOrderInfoPtr input_order_info; ManyExpressionActions group_by_elements_actions; - // std::shared_ptr merge_tree_data_select_base_cache; - // std::shared_ptr merge_tree_data_select_projection_cache; }; /** Query along with some additional data, @@ -160,7 +156,6 @@ struct SelectQueryInfo /// If not null, it means we choose a projection to execute current query. 
std::optional projection; bool ignore_projections = false; - std::shared_ptr merge_tree_data_select_cache; }; } From 02977007dc7559c126e23a004eef86c717dd19a4 Mon Sep 17 00:00:00 2001 From: Vladimir Date: Thu, 8 Jul 2021 15:45:23 +0300 Subject: [PATCH 872/931] Remove queries with syntax error from 01917_distinct_on.sql --- tests/queries/0_stateless/01917_distinct_on.sql | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/tests/queries/0_stateless/01917_distinct_on.sql b/tests/queries/0_stateless/01917_distinct_on.sql index 75dd8c0b7b8..ae528b6e838 100644 --- a/tests/queries/0_stateless/01917_distinct_on.sql +++ b/tests/queries/0_stateless/01917_distinct_on.sql @@ -7,16 +7,17 @@ SELECT DISTINCT ON (a, b) a, b, c FROM t1; SELECT DISTINCT ON (a, b) * FROM t1; SELECT DISTINCT ON (a) * FROM t1; -SELECT DISTINCT ON (a, b) a, b, c FROM t1 LIMIT 1 BY a, b; -- { clientError 62 } +-- fuzzer will fail, enable when fixed +-- SELECT DISTINCT ON (a, b) a, b, c FROM t1 LIMIT 1 BY a, b; -- { clientError 62 } -SELECT DISTINCT ON a, b a, b FROM t1; -- { clientError 62 } -SELECT DISTINCT ON a a, b FROM t1; -- { clientError 62 } +-- SELECT DISTINCT ON a, b a, b FROM t1; -- { clientError 62 } +-- SELECT DISTINCT ON a a, b FROM t1; -- { clientError 62 } -- "Code: 47. DB::Exception: Missing columns: 'DISTINCT'" - error can be better -SELECT DISTINCT ON (a, b) DISTINCT a, b FROM t1; -- { serverError 47 } -SELECT DISTINCT DISTINCT ON (a, b) a, b FROM t1; -- { clientError 62 } +-- SELECT DISTINCT ON (a, b) DISTINCT a, b FROM t1; -- { serverError 47 } +-- SELECT DISTINCT DISTINCT ON (a, b) a, b FROM t1; -- { clientError 62 } -SELECT ALL DISTINCT ON (a, b) a, b FROM t1; -- { clientError 62 } -SELECT DISTINCT ON (a, b) ALL a, b FROM t1; -- { clientError 62 } +-- SELECT ALL DISTINCT ON (a, b) a, b FROM t1; -- { clientError 62 } +-- SELECT DISTINCT ON (a, b) ALL a, b FROM t1; -- { clientError 62 } DROP TABLE IF EXISTS t1; From e621fb644d33bad4196e54b3c58a2542d5366fdc Mon Sep 17 00:00:00 2001 From: alesapin Date: Thu, 8 Jul 2021 16:57:55 +0300 Subject: [PATCH 873/931] Fix abort in ZooKeeper client --- src/Common/ZooKeeper/ZooKeeperImpl.cpp | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/src/Common/ZooKeeper/ZooKeeperImpl.cpp b/src/Common/ZooKeeper/ZooKeeperImpl.cpp index a717052a1ba..b0fcafff752 100644 --- a/src/Common/ZooKeeper/ZooKeeperImpl.cpp +++ b/src/Common/ZooKeeper/ZooKeeperImpl.cpp @@ -852,7 +852,8 @@ void ZooKeeper::finalize(bool error_send, bool error_receive) } /// Send thread will exit after sending close request or on expired flag - send_thread.join(); + if (send_thread.joinable()) + send_thread.join(); } /// Set expired flag after we sent close event @@ -869,7 +870,7 @@ void ZooKeeper::finalize(bool error_send, bool error_receive) tryLogCurrentException(__PRETTY_FUNCTION__); } - if (!error_receive) + if (!error_receive && receive_thread.joinable()) receive_thread.join(); { From 818bbd653943cae56ad182ab0eb593cbd4278819 Mon Sep 17 00:00:00 2001 From: alexey-milovidov Date: Thu, 8 Jul 2021 17:06:37 +0300 Subject: [PATCH 874/931] Update programs/server/play.html Co-authored-by: Vladimir --- programs/server/play.html | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/programs/server/play.html b/programs/server/play.html index c3e8708f20b..4165a2829bd 100644 --- a/programs/server/play.html +++ b/programs/server/play.html @@ -613,10 +613,10 @@ if (load_dagre_promise) { return load_dagre_promise; } load_dagre_promise = Promise.all([ - 
loadJS('https://dagrejs.github.io/project/dagre/latest/dagre.min.js'),
-            loadJS('https://dagrejs.github.io/project/graphlib-dot/latest/graphlib-dot.min.js'),
-            loadJS('https://dagrejs.github.io/project/dagre-d3/latest/dagre-d3.min.js'),
-            loadJS('https://cdn.jsdelivr.net/npm/d3@7'),
+            loadJS('https://dagrejs.github.io/project/dagre/v0.8.5/dagre.min.js'),
+            loadJS('https://dagrejs.github.io/project/graphlib-dot/v0.6.4/graphlib-dot.min.js'),
+            loadJS('https://dagrejs.github.io/project/dagre-d3/v0.6.4/dagre-d3.min.js'),
+            loadJS('https://cdn.jsdelivr.net/npm/d3@7.0.0'),
         ]);

         return load_dagre_promise;

From 88bc3995e0509a3c67074a90dfc6cb52c9a15cb5 Mon Sep 17 00:00:00 2001
From: Anton Ivashkin
Date: Thu, 8 Jul 2021 17:41:18 +0300
Subject: [PATCH 875/931] Fix throwing exception when iterating over a
 non-existing remote directory

---
 src/Disks/IDiskRemote.cpp | 6 +++++-
 src/Disks/IDiskRemote.h   | 1 +
 2 files changed, 6 insertions(+), 1 deletion(-)

diff --git a/src/Disks/IDiskRemote.cpp b/src/Disks/IDiskRemote.cpp
index b30e9613ed8..f4c207d9cbd 100644
--- a/src/Disks/IDiskRemote.cpp
+++ b/src/Disks/IDiskRemote.cpp
@@ -417,7 +417,11 @@ void IDiskRemote::removeDirectory(const String & path)

 DiskDirectoryIteratorPtr IDiskRemote::iterateDirectory(const String & path)
 {
-    return std::make_unique<RemoteDiskDirectoryIterator>(metadata_path + path, path);
+    String meta_path = metadata_path + path;
+    if (fs::exists(meta_path) && fs::is_directory(meta_path))
+        return std::make_unique<RemoteDiskDirectoryIterator>(meta_path, path);
+    else
+        return std::make_unique<RemoteDiskDirectoryIterator>();
 }

diff --git a/src/Disks/IDiskRemote.h b/src/Disks/IDiskRemote.h
index e725e0ed744..360d4e2de33 100644
--- a/src/Disks/IDiskRemote.h
+++ b/src/Disks/IDiskRemote.h
@@ -193,6 +193,7 @@ struct IDiskRemote::Metadata
 class RemoteDiskDirectoryIterator final : public IDiskDirectoryIterator
 {
 public:
+    RemoteDiskDirectoryIterator() {}
     RemoteDiskDirectoryIterator(const String & full_path, const String & folder_path_) : iter(full_path), folder_path(folder_path_) {}

     void next() override { ++iter; }

From ac68d5ea7130429fad3b6fda62021bd8e3ab79b2 Mon Sep 17 00:00:00 2001
From: Anton Ivashkin
Date: Thu, 8 Jul 2021 18:39:41 +0300
Subject: [PATCH 876/931] Fix path concatenation

---
 src/Disks/IDiskRemote.cpp | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/Disks/IDiskRemote.cpp b/src/Disks/IDiskRemote.cpp
index f4c207d9cbd..a4dcc8037bc 100644
--- a/src/Disks/IDiskRemote.cpp
+++ b/src/Disks/IDiskRemote.cpp
@@ -417,7 +417,7 @@ void IDiskRemote::removeDirectory(const String & path)

 DiskDirectoryIteratorPtr IDiskRemote::iterateDirectory(const String & path)
 {
-    String meta_path = metadata_path + path;
+    fs::path meta_path = fs::path(metadata_path) / path;
     if (fs::exists(meta_path) && fs::is_directory(meta_path))
         return std::make_unique<RemoteDiskDirectoryIterator>(meta_path, path);
     else

From e4b1e0619c3ff9b38653bbe556265606e61bb3bf Mon Sep 17 00:00:00 2001
From: zxc111
Date: Fri, 9 Jul 2021 00:19:42 +0800
Subject: [PATCH 877/931] hex/bin functions support AggregateFunction states.
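In SQL terms, the behavior this commit adds looks like the following sketch (it mirrors the tests added below; the hex value shown is the one from the new reference file):

```sql
-- Sketch: hex()/bin() over an aggregate function state serialize the
-- state and encode the resulting bytes, e.g. avgState keeps a sum and
-- a count: sum 45 = 0x2D (8 bytes, little-endian), count 10 = 0x0A.
SELECT hex(avgState(number)) FROM numbers(10); -- 2D000000000000000A
SELECT bin(avgState(number)) FROM numbers(10);
```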
--- src/Functions/FunctionsCoding.h | 18 ++++++++++++++++-- .../0_stateless/01926_bin_unbin.reference | 4 ++++ tests/queries/0_stateless/01926_bin_unbin.sql | 6 ++++++ 3 files changed, 26 insertions(+), 2 deletions(-) diff --git a/src/Functions/FunctionsCoding.h b/src/Functions/FunctionsCoding.h index 72f2aa1be1c..4db138a12a2 100644 --- a/src/Functions/FunctionsCoding.h +++ b/src/Functions/FunctionsCoding.h @@ -25,6 +25,7 @@ #include #include #include +#include #include #include @@ -954,6 +955,8 @@ public: template class EncodeToBinaryRepr : public IFunction { +private: + ContextPtr context; public: static constexpr auto name = Impl::name; static constexpr size_t word_size = Impl::word_size; @@ -978,18 +981,29 @@ public: !which.isDateTime64() && !which.isUInt() && !which.isFloat() && - !which.isDecimal()) + !which.isDecimal() && + !which.isAggregateFunction()) throw Exception("Illegal type " + arguments[0]->getName() + " of argument of function " + getName(), ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT); return std::make_shared(); } - ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t /*input_rows_count*/) const override + ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr & result_type, size_t input_rows_count) const override { const IColumn * column = arguments[0].column.get(); ColumnPtr res_column; + WhichDataType which(column->getDataType()); + if (which.isAggregateFunction()) + { + auto to_string = FunctionFactory::instance().get("toString", context); + const ColumnPtr col = to_string->build(arguments)->execute(arguments, result_type, input_rows_count); + const auto * name_col = checkAndGetColumn(col.get()); + tryExecuteString(name_col, res_column); + return res_column; + } + if (tryExecuteUInt(column, res_column) || tryExecuteUInt(column, res_column) || tryExecuteUInt(column, res_column) || diff --git a/tests/queries/0_stateless/01926_bin_unbin.reference b/tests/queries/0_stateless/01926_bin_unbin.reference index f84a858e449..731d0223bb9 100644 --- a/tests/queries/0_stateless/01926_bin_unbin.reference +++ b/tests/queries/0_stateless/01926_bin_unbin.reference @@ -33,3 +33,7 @@ 1 1 1 +1 +1 +2D000000000000000A +001011010000000000000000000000000000000000000000000000000000000000001010 diff --git a/tests/queries/0_stateless/01926_bin_unbin.sql b/tests/queries/0_stateless/01926_bin_unbin.sql index 555770d09c6..e112f8bd8a4 100644 --- a/tests/queries/0_stateless/01926_bin_unbin.sql +++ b/tests/queries/0_stateless/01926_bin_unbin.sql @@ -37,3 +37,9 @@ select bin(unbin('0')) == '00000000'; select hex('') == bin(''); select unhex('') == unbin(''); select unhex('0') == unbin('0'); + +-- hex and bin support AggregateFunction +select hex(sumState(number)) == hex(toString(sumState(number))) from numbers(10); +select hex(avgState(number)) == hex(toString(avgState(number))) from numbers(99); +select hex(avgState(number)) from numbers(10); +select bin(avgState(number)) from numbers(10); From 334d5439c87c17d0a94e1c935ea52fc9bf06a4a9 Mon Sep 17 00:00:00 2001 From: Nikita Mikhaylov Date: Thu, 8 Jul 2021 20:29:25 +0000 Subject: [PATCH 878/931] done --- tests/integration/test_cluster_copier/test_two_nodes.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tests/integration/test_cluster_copier/test_two_nodes.py b/tests/integration/test_cluster_copier/test_two_nodes.py index 817c3571833..255af13213a 100644 --- a/tests/integration/test_cluster_copier/test_two_nodes.py +++ 
b/tests/integration/test_cluster_copier/test_two_nodes.py @@ -473,17 +473,17 @@ def execute_task(started_cluster, task, cmd_options): # Tests -@pytest.mark.timeout(600) +@pytest.mark.skip(reason="Too flaky :(") def test_different_schema(started_cluster): execute_task(started_cluster, TaskWithDifferentSchema(started_cluster), []) -@pytest.mark.timeout(600) +@pytest.mark.skip(reason="Too flaky :(") def test_ttl_columns(started_cluster): execute_task(started_cluster, TaskTTL(started_cluster), []) -@pytest.mark.timeout(600) +@pytest.mark.skip(reason="Too flaky :(") def test_skip_index(started_cluster): execute_task(started_cluster, TaskSkipIndex(started_cluster), []) From 3dfdcf4604e4c13e3b90743cede89cc5ed942b5c Mon Sep 17 00:00:00 2001 From: Vitaly Baranov Date: Thu, 8 Jul 2021 12:21:57 +0300 Subject: [PATCH 879/931] Improve implementation of leftPadString(), rename to leftPad(). Add new functions rightPad(), leftPadUTF8(), rightPadUTF8(). Add a test. --- src/Functions/GatherUtils/Sources.h | 1 + src/Functions/leftPadString.cpp | 194 ----------- src/Functions/padString.cpp | 308 ++++++++++++++++++ src/Functions/registerFunctionsString.cpp | 6 +- src/Functions/ya.make | 2 +- .../0_stateless/01940_pad_string.reference | 54 +++ .../queries/0_stateless/01940_pad_string.sql | 54 +++ 7 files changed, 420 insertions(+), 199 deletions(-) delete mode 100644 src/Functions/leftPadString.cpp create mode 100644 src/Functions/padString.cpp create mode 100644 tests/queries/0_stateless/01940_pad_string.reference create mode 100644 tests/queries/0_stateless/01940_pad_string.sql diff --git a/src/Functions/GatherUtils/Sources.h b/src/Functions/GatherUtils/Sources.h index 4dbaff9f567..9a459860a68 100644 --- a/src/Functions/GatherUtils/Sources.h +++ b/src/Functions/GatherUtils/Sources.h @@ -755,6 +755,7 @@ struct GenericValueSource : public ValueSourceImpl { using Slice = GenericValueSlice; using SinkType = GenericArraySink; + using Column = IColumn; const IColumn * column; size_t total_rows; diff --git a/src/Functions/leftPadString.cpp b/src/Functions/leftPadString.cpp deleted file mode 100644 index cdcfb46eb73..00000000000 --- a/src/Functions/leftPadString.cpp +++ /dev/null @@ -1,194 +0,0 @@ -#include -#include -#include - -#include -#include -#include - -#include - -namespace DB -{ -namespace ErrorCodes -{ - extern const int ILLEGAL_COLUMN; - extern const int ILLEGAL_TYPE_OF_ARGUMENT; - extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH; - extern const int BAD_ARGUMENTS; -} - -namespace -{ - struct LeftPadStringImpl - { - static void vector( - const ColumnString::Chars & data, - const ColumnString::Offsets & offsets, - const size_t length, - const String & padstr, - ColumnString::Chars & res_data, - ColumnString::Offsets & res_offsets) - { - size_t size = offsets.size(); - res_data.resize((length + 1 /* zero terminator */) * size); - res_offsets.resize(size); - - const size_t padstr_size = padstr.size(); - - ColumnString::Offset prev_offset = 0; - ColumnString::Offset res_prev_offset = 0; - for (size_t i = 0; i < size; ++i) - { - size_t data_length = offsets[i] - prev_offset - 1 /* zero terminator */; - if (data_length < length) - { - for (size_t j = 0; j < length - data_length; ++j) - res_data[res_prev_offset + j] = padstr[j % padstr_size]; - memcpy(&res_data[res_prev_offset + length - data_length], &data[prev_offset], data_length); - } - else - { - memcpy(&res_data[res_prev_offset], &data[prev_offset], length); - } - res_data[res_prev_offset + length] = 0; - res_prev_offset += length + 1; - res_offsets[i] = 
res_prev_offset; - } - } - - static void vectorFixed( - const ColumnFixedString::Chars & data, - const size_t n, - const size_t length, - const String & padstr, - ColumnFixedString::Chars & res_data) - { - const size_t padstr_size = padstr.size(); - const size_t size = data.size() / n; - res_data.resize(length * size); - for (size_t i = 0; i < size; ++i) - { - if (length < n) - { - memcpy(&res_data[i * length], &data[i * n], length); - } - else - { - for (size_t j = 0; j < length - n; ++j) - res_data[i * length + j] = padstr[j % padstr_size]; - memcpy(&res_data[i * length + length - n], &data[i * n], n); - } - } - } - }; - - class FunctionLeftPadString : public IFunction - { - public: - static constexpr auto name = "leftPadString"; - static FunctionPtr create(const ContextPtr) { return std::make_shared(); } - - String getName() const override { return name; } - - bool isVariadic() const override { return true; } - size_t getNumberOfArguments() const override { return 0; } - - bool useDefaultImplementationForConstants() const override { return true; } - - DataTypePtr getReturnTypeImpl(const DataTypes & arguments) const override - { - size_t number_of_arguments = arguments.size(); - - if (number_of_arguments != 2 && number_of_arguments != 3) - throw Exception( - ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH, - "Number of arguments for function {} doesn't match: passed {}, should be 2 or 3", - getName(), - toString(number_of_arguments)); - - if (!isStringOrFixedString(arguments[0])) - throw Exception( - ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Illegal type {} of argument of function {}", arguments[0]->getName(), getName()); - - if (!isNativeNumber(arguments[1])) - throw Exception( - ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, - "Illegal type {} of second argument of function {}", - arguments[1]->getName(), - getName()); - - if (number_of_arguments == 3 && !isStringOrFixedString(arguments[2])) - throw Exception( - ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, - "Illegal type {} of third argument of function {}", - arguments[2]->getName(), - getName()); - - return arguments[0]; - } - - ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t /*input_rows_count*/) const override - { - const ColumnPtr str_column = arguments[0].column; - String padstr = " "; - if (arguments.size() == 3) - { - const ColumnConst * pad_column = checkAndGetColumnConst(arguments[2].column.get()); - if (!pad_column) - throw Exception( - ErrorCodes::ILLEGAL_COLUMN, - "Illegal column {} of third ('pad') argument of function {}. Must be constant string.", - arguments[2].column->getName(), - getName()); - - padstr = pad_column->getValue(); - } - - const ColumnConst * len_column = checkAndGetColumnConst(arguments[1].column.get()); - if (!len_column) - throw Exception( - ErrorCodes::ILLEGAL_COLUMN, - "Illegal column {} of second ('len') argument of function {}. Must be a positive integer.", - arguments[1].column->getName(), - getName()); - Int64 len = len_column->getInt(0); - if (len <= 0) - throw Exception( - ErrorCodes::BAD_ARGUMENTS, - "Illegal value {} of second ('len') argument of function {}. 
Must be a positive integer.", - arguments[1].column->getName(), - getName()); - - if (const ColumnString * strings = checkAndGetColumn(str_column.get())) - { - auto col_res = ColumnString::create(); - LeftPadStringImpl::vector( - strings->getChars(), strings->getOffsets(), len, padstr, col_res->getChars(), col_res->getOffsets()); - return col_res; - } - else if (const ColumnFixedString * strings_fixed = checkAndGetColumn(str_column.get())) - { - auto col_res = ColumnFixedString::create(len); - LeftPadStringImpl::vectorFixed(strings_fixed->getChars(), strings_fixed->getN(), len, padstr, col_res->getChars()); - return col_res; - } - else - { - throw Exception( - ErrorCodes::ILLEGAL_COLUMN, - "Illegal column {} of first ('str') argument of function {}. Must be a string or fixed string.", - arguments[0].column->getName(), - getName()); - } - } - }; -} - -void registerFunctionLeftPadString(FunctionFactory & factory) -{ - factory.registerFunction(FunctionFactory::CaseInsensitive); - factory.registerAlias("lpad", "leftPadString", FunctionFactory::CaseInsensitive); -} - -} diff --git a/src/Functions/padString.cpp b/src/Functions/padString.cpp new file mode 100644 index 00000000000..7711ab1a056 --- /dev/null +++ b/src/Functions/padString.cpp @@ -0,0 +1,308 @@ +#include +#include +#include +#include +#include +#include +#include +#include + +namespace DB +{ +using namespace GatherUtils; + +namespace ErrorCodes +{ + extern const int ILLEGAL_COLUMN; + extern const int ILLEGAL_TYPE_OF_ARGUMENT; + extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH; + extern const int TOO_LARGE_STRING_SIZE; +} + +namespace +{ + /// The maximum new padded length. + constexpr size_t MAX_NEW_LENGTH = 1000000; + + /// Appends padding characters to a sink based on a pad string. + /// Depending on how many padding characters are required to add + /// the pad string can be copied only partly or be repeated multiple times. + template + class PaddingChars + { + public: + explicit PaddingChars(const String & pad_string_) : pad_string(pad_string_) { init(); } + + ALWAYS_INLINE size_t numCharsInPadString() const + { + if constexpr (is_utf8) + return utf8_offsets.size() - 1; + else + return pad_string.length(); + } + + ALWAYS_INLINE size_t numCharsToNumBytes(size_t count) const + { + if constexpr (is_utf8) + return utf8_offsets[count]; + else + return count; + } + + void appendTo(StringSink & res_sink, size_t num_chars) const + { + if (!num_chars) + return; + + const size_t step = numCharsInPadString(); + while (true) + { + if (num_chars <= step) + { + writeSlice(StringSource::Slice{bit_cast(pad_string.data()), numCharsToNumBytes(num_chars)}, res_sink); + break; + } + writeSlice(StringSource::Slice{bit_cast(pad_string.data()), numCharsToNumBytes(step)}, res_sink); + num_chars -= step; + } + } + + private: + void init() + { + if (pad_string.empty()) + pad_string = " "; + + if constexpr (is_utf8) + { + size_t offset = 0; + utf8_offsets.reserve(pad_string.length() + 1); + while (true) + { + utf8_offsets.push_back(offset); + if (offset == pad_string.length()) + break; + offset += UTF8::seqLength(pad_string[offset]); + if (offset > pad_string.length()) + offset = pad_string.length(); + } + } + + /// Not necessary, but good for performance. 
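+        /// Doubling a short pad string here (e.g. "*." becomes "*.*.*.*.*.*.*.*.") lets appendTo()
+        /// write fewer, larger slices instead of repeatedly copying a tiny pattern.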
+ while (numCharsInPadString() < 16) + { + pad_string += pad_string; + if constexpr (is_utf8) + { + size_t old_size = utf8_offsets.size(); + utf8_offsets.reserve((old_size - 1) * 2); + size_t base = utf8_offsets.back(); + for (size_t i = 1; i != old_size; ++i) + utf8_offsets.push_back(utf8_offsets[i] + base); + } + } + } + + String pad_string; + std::vector utf8_offsets; + }; + + /// Returns the number of characters in a slice. + template + inline ALWAYS_INLINE size_t getLengthOfSlice(const StringSource::Slice & slice) + { + if constexpr (is_utf8) + return UTF8::countCodePoints(slice.data, slice.size); + else + return slice.size; + } + + /// Moves the end of a slice back by n characters. + template + inline ALWAYS_INLINE StringSource::Slice removeSuffixFromSlice(const StringSource::Slice & slice, size_t suffix_length) + { + StringSource::Slice res = slice; + if constexpr (is_utf8) + res.size = UTF8StringSource::skipCodePointsBackward(slice.data + slice.size, suffix_length, slice.data) - res.data; + else + res.size -= std::min(suffix_length, res.size); + return res; + } + + /// If `is_right_pad` - it's the rightPad() function instead of leftPad(). + /// If `is_utf8` - lengths are measured in code points instead of bytes. + template + class FunctionPadString : public IFunction + { + public: + static constexpr auto name = is_right_pad ? (is_utf8 ? "rightPadUTF8" : "rightPad") : (is_utf8 ? "leftPadUTF8" : "leftPad"); + static FunctionPtr create(const ContextPtr) { return std::make_shared(); } + + String getName() const override { return name; } + + bool isVariadic() const override { return true; } + size_t getNumberOfArguments() const override { return 0; } + + bool useDefaultImplementationForConstants() const override { return false; } + + DataTypePtr getReturnTypeImpl(const DataTypes & arguments) const override + { + size_t number_of_arguments = arguments.size(); + + if (number_of_arguments != 2 && number_of_arguments != 3) + throw Exception( + ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH, + "Number of arguments for function {} doesn't match: passed {}, should be 2 or 3", + getName(), + std::to_string(number_of_arguments)); + + if (!isStringOrFixedString(arguments[0])) + throw Exception( + ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, + "Illegal type {} of the first argument of function {}, should be string", + arguments[0]->getName(), + getName()); + + if (!isUnsignedInteger(arguments[1])) + throw Exception( + ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, + "Illegal type {} of the second argument of function {}, should be unsigned integer", + arguments[1]->getName(), + getName()); + + if (number_of_arguments == 3 && !isStringOrFixedString(arguments[2])) + throw Exception( + ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, + "Illegal type {} of the third argument of function {}, should be const string", + arguments[2]->getName(), + getName()); + + return arguments[0]; + } + + ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t input_rows_count) const override + { + auto column_string = arguments[0].column; + auto column_length = arguments[1].column; + + String pad_string; + if (arguments.size() == 3) + { + auto column_pad = arguments[2].column; + const ColumnConst * column_pad_const = checkAndGetColumnConst(column_pad.get()); + if (!column_pad_const) + throw Exception( + ErrorCodes::ILLEGAL_COLUMN, + "Illegal column {}, third argument of function {} must be a constant string", + column_pad->getName(), + getName()); + + pad_string = column_pad_const->getValue(); + } + 
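+            /// The padding pattern is built once per query; e.g. leftPad('abc', 7, '*.') needs
+            /// 4 extra characters taken cyclically from the pad string: '*.*.' + 'abc' -> '*.*.abc'.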
PaddingChars padding_chars{pad_string}; + + auto col_res = ColumnString::create(); + StringSink res_sink{*col_res, input_rows_count}; + + if (const ColumnString * col = checkAndGetColumn(column_string.get())) + executeForSource(StringSource{*col}, column_length, padding_chars, res_sink); + else if (const ColumnFixedString * col_fixed = checkAndGetColumn(column_string.get())) + executeForSource(FixedStringSource{*col_fixed}, column_length, padding_chars, res_sink); + else if (const ColumnConst * col_const = checkAndGetColumnConst(column_string.get())) + executeForSource(ConstSource{*col_const}, column_length, padding_chars, res_sink); + else if (const ColumnConst * col_const_fixed = checkAndGetColumnConst(column_string.get())) + executeForSource(ConstSource{*col_const_fixed}, column_length, padding_chars, res_sink); + else + throw Exception( + ErrorCodes::ILLEGAL_COLUMN, + "Illegal column {}, first argument of function {} must be a string", + arguments[0].column->getName(), + getName()); + + return col_res; + } + + private: + template + void executeForSource( + SourceStrings && strings, + const ColumnPtr & column_length, + const PaddingChars & padding_chars, + StringSink & res_sink) const + { + if (const auto * col_const = checkAndGetColumn(column_length.get())) + executeForSourceAndLength(std::forward(strings), ConstSource{*col_const}, padding_chars, res_sink); + else + executeForSourceAndLength(std::forward(strings), GenericValueSource{*column_length}, padding_chars, res_sink); + } + + template + void executeForSourceAndLength( + SourceStrings && strings, + SourceLengths && lengths, + const PaddingChars & padding_chars, + StringSink & res_sink) const + { + bool is_const_length = lengths.isConst(); + bool need_check_length = true; + + for (; !res_sink.isEnd(); res_sink.next(), strings.next(), lengths.next()) + { + auto str = strings.getWhole(); + size_t current_length = getLengthOfSlice(str); + + auto new_length_slice = lengths.getWhole(); + size_t new_length = new_length_slice.elements->getUInt(new_length_slice.position); + + if (need_check_length) + { + if (new_length > MAX_NEW_LENGTH) + { + throw Exception( + "New padded length (" + std::to_string(new_length) + ") is too big, maximum is: " + std::to_string(MAX_NEW_LENGTH), + ErrorCodes::TOO_LARGE_STRING_SIZE); + } + if (is_const_length) + { + size_t rows_count = res_sink.offsets.size(); + res_sink.reserve((new_length + 1 /* zero terminator */) * rows_count); + need_check_length = false; + } + } + + if (new_length == current_length) + { + writeSlice(str, res_sink); + } + else if (new_length < current_length) + { + str = removeSuffixFromSlice(str, current_length - new_length); + writeSlice(str, res_sink); + } + else if (new_length > current_length) + { + if constexpr (!is_right_pad) + padding_chars.appendTo(res_sink, new_length - current_length); + + writeSlice(str, res_sink); + + if constexpr (is_right_pad) + padding_chars.appendTo(res_sink, new_length - current_length); + } + } + } + }; +} + +void registerFunctionPadString(FunctionFactory & factory) +{ + factory.registerFunction>(); /// leftPad + factory.registerFunction>(); /// leftPadUTF8 + factory.registerFunction>(); /// rightPad + factory.registerFunction>(); /// rightPadUTF8 + + factory.registerAlias("lpad", "leftPad", FunctionFactory::CaseInsensitive); + factory.registerAlias("rpad", "rightPad", FunctionFactory::CaseInsensitive); +} + +} diff --git a/src/Functions/registerFunctionsString.cpp b/src/Functions/registerFunctionsString.cpp index 1c487981844..18a30469386 100644 --- 
a/src/Functions/registerFunctionsString.cpp +++ b/src/Functions/registerFunctionsString.cpp @@ -29,13 +29,11 @@ void registerFunctionAppendTrailingCharIfAbsent(FunctionFactory &); void registerFunctionStartsWith(FunctionFactory &); void registerFunctionEndsWith(FunctionFactory &); void registerFunctionTrim(FunctionFactory &); +void registerFunctionPadString(FunctionFactory &); void registerFunctionRegexpQuoteMeta(FunctionFactory &); void registerFunctionNormalizeQuery(FunctionFactory &); void registerFunctionNormalizedQueryHash(FunctionFactory &); void registerFunctionCountMatches(FunctionFactory &); -void registerFunctionEncodeXMLComponent(FunctionFactory & factory); -void registerFunctionDecodeXMLComponent(FunctionFactory & factory); -void registerFunctionLeftPadString(FunctionFactory & factory); void registerFunctionEncodeXMLComponent(FunctionFactory &); void registerFunctionDecodeXMLComponent(FunctionFactory &); void registerFunctionExtractTextFromHTML(FunctionFactory &); @@ -71,13 +69,13 @@ void registerFunctionsString(FunctionFactory & factory) registerFunctionStartsWith(factory); registerFunctionEndsWith(factory); registerFunctionTrim(factory); + registerFunctionPadString(factory); registerFunctionRegexpQuoteMeta(factory); registerFunctionNormalizeQuery(factory); registerFunctionNormalizedQueryHash(factory); registerFunctionCountMatches(factory); registerFunctionEncodeXMLComponent(factory); registerFunctionDecodeXMLComponent(factory); - registerFunctionLeftPadString(factory); registerFunctionExtractTextFromHTML(factory); #if USE_BASE64 registerFunctionBase64Encode(factory); diff --git a/src/Functions/ya.make b/src/Functions/ya.make index ba14e9a3e02..5f84511aa52 100644 --- a/src/Functions/ya.make +++ b/src/Functions/ya.make @@ -332,7 +332,6 @@ SRCS( jumpConsistentHash.cpp lcm.cpp least.cpp - leftPadString.cpp lengthUTF8.cpp less.cpp lessOrEquals.cpp @@ -388,6 +387,7 @@ SRCS( now.cpp now64.cpp nullIf.cpp + padString.cpp partitionId.cpp pi.cpp plus.cpp diff --git a/tests/queries/0_stateless/01940_pad_string.reference b/tests/queries/0_stateless/01940_pad_string.reference new file mode 100644 index 00000000000..22cd3f9be07 --- /dev/null +++ b/tests/queries/0_stateless/01940_pad_string.reference @@ -0,0 +1,54 @@ +leftPad + +a +ab +abc + abc + abc + abc +ab +*abc +**abc +*******abc +ab +*abc +*.abc +*.*.*.*abc +leftPadUTF8 +а +аб +аб +абвг +ЧАабвг +ЧАСЧАСЧАабвг +rightPad + +a +ab +abc +abc +abc +abc +ab +abc* +abc** +abc******* +ab +abc* +abc*. 
+abc*.*.*.* +rightPadUTF8 +а +аб +аб +абвг +абвгЧА +абвгЧАСЧАСЧА +numbers + +1^ +_2^^ +__3^^^ +___4^^^^ +____5^^^^^ +_____6^^^^^^ diff --git a/tests/queries/0_stateless/01940_pad_string.sql b/tests/queries/0_stateless/01940_pad_string.sql new file mode 100644 index 00000000000..e4ba0aec6d2 --- /dev/null +++ b/tests/queries/0_stateless/01940_pad_string.sql @@ -0,0 +1,54 @@ +SELECT 'leftPad'; +SELECT leftPad('abc', 0); +SELECT leftPad('abc', 1); +SELECT leftPad('abc', 2); +SELECT leftPad('abc', 3); +SELECT leftPad('abc', 4); +SELECT leftPad('abc', 5); +SELECT leftPad('abc', 10); + +SELECT leftPad('abc', 2, '*'); +SELECT leftPad('abc', 4, '*'); +SELECT leftPad('abc', 5, '*'); +SELECT leftPad('abc', 10, '*'); +SELECT leftPad('abc', 2, '*.'); +SELECT leftPad('abc', 4, '*.'); +SELECT leftPad('abc', 5, '*.'); +SELECT leftPad('abc', 10, '*.'); + +SELECT 'leftPadUTF8'; +SELECT leftPad('абвг', 2); +SELECT leftPadUTF8('абвг', 2); +SELECT leftPad('абвг', 4); +SELECT leftPadUTF8('абвг', 4); +SELECT leftPad('абвг', 12, 'ЧАС'); +SELECT leftPadUTF8('абвг', 12, 'ЧАС'); + +SELECT 'rightPad'; +SELECT rightPad('abc', 0); +SELECT rightPad('abc', 1); +SELECT rightPad('abc', 2); +SELECT rightPad('abc', 3); +SELECT rightPad('abc', 4); +SELECT rightPad('abc', 5); +SELECT rightPad('abc', 10); + +SELECT rightPad('abc', 2, '*'); +SELECT rightPad('abc', 4, '*'); +SELECT rightPad('abc', 5, '*'); +SELECT rightPad('abc', 10, '*'); +SELECT rightPad('abc', 2, '*.'); +SELECT rightPad('abc', 4, '*.'); +SELECT rightPad('abc', 5, '*.'); +SELECT rightPad('abc', 10, '*.'); + +SELECT 'rightPadUTF8'; +SELECT rightPad('абвг', 2); +SELECT rightPadUTF8('абвг', 2); +SELECT rightPad('абвг', 4); +SELECT rightPadUTF8('абвг', 4); +SELECT rightPad('абвг', 12, 'ЧАС'); +SELECT rightPadUTF8('абвг', 12, 'ЧАС'); + +SELECT 'numbers'; +SELECT rightPad(leftPad(toString(number), number, '_'), number*2, '^') FROM numbers(7); From df90e43c3dfaf19d83e7ccabb6ed9983b6667398 Mon Sep 17 00:00:00 2001 From: Maksim Kita Date: Fri, 9 Jul 2021 02:09:51 +0300 Subject: [PATCH 880/931] Add changelog for 21.7 --- CHANGELOG.md | 167 +++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 167 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 8987082db30..5e0a0f30804 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,170 @@ +### ClickHouse release v21.7, 2021-07-09 + +#### Backward Incompatible Change + +* Forward/backward incompatible change of maximum buffer size in clickhouse-keeper. Better to do it now (before production), than later. [#25421](https://github.com/ClickHouse/ClickHouse/pull/25421) ([alesapin](https://github.com/alesapin)). +* Improved performance of queries with explicitly defined large sets. Added compatibility setting `legacy_column_name_of_tuple_literal`. It makes sense to set it to `true`, while doing rolling update of cluster from version lower than 21.7 to any higher version. Otherwise distributed queries with explicitly defined sets at `IN` clause may fail during update. [#25371](https://github.com/ClickHouse/ClickHouse/pull/25371) ([Anton Popov](https://github.com/CurtizJ)). + +#### New Feature + +* Added YAML configuration support to configuration loader. This closes [#3607](https://github.com/ClickHouse/ClickHouse/issues/3607). [#21858](https://github.com/ClickHouse/ClickHouse/pull/21858) ([BoloniniD](https://github.com/BoloniniD)). +* Provides a way to restore replicated table when the data is (possibly) present, but the ZooKeeper metadata is lost. 
Resolves [#13458](https://github.com/ClickHouse/ClickHouse/issues/13458). [#13652](https://github.com/ClickHouse/ClickHouse/pull/13652) ([Mike Kot](https://github.com/myrrc)). +* Now clickhouse-keeper supports ZooKeeper-like `digest` ACLs. [#24448](https://github.com/ClickHouse/ClickHouse/pull/24448) ([alesapin](https://github.com/alesapin)). +* Support structs and maps in Arrow/Parquet/ORC and dictionaries in Arrow input/output formats. Present new setting `output_format_arrow_low_cardinality_as_dictionary`. [#24341](https://github.com/ClickHouse/ClickHouse/pull/24341) ([Kruglov Pavel](https://github.com/Avogar)). +* Dictionaries added support for Array type. [#25119](https://github.com/ClickHouse/ClickHouse/pull/25119) ([Maksim Kita](https://github.com/kitaisreal)). +* Added function `bitPositionsToArray`. Closes [#23792](https://github.com/ClickHouse/ClickHouse/issues/23792). Author [Kevin Wan] (@MaxWk). [#25394](https://github.com/ClickHouse/ClickHouse/pull/25394) ([Maksim Kita](https://github.com/kitaisreal)). +* Added function `dateName`. Author [Daniil Kondratyev] (@dankondr). [#25372](https://github.com/ClickHouse/ClickHouse/pull/25372) ([Maksim Kita](https://github.com/kitaisreal)). +* Add `toJSONString` function to serialize columns to their JSON representations. [#25164](https://github.com/ClickHouse/ClickHouse/pull/25164) ([Amos Bird](https://github.com/amosbird)). +* Now query_log has two new columns: `initial_query_start_time`, `initial_query_start_time_microsecond` that record the starting time of a distributed query if any. [#25022](https://github.com/ClickHouse/ClickHouse/pull/25022) ([Amos Bird](https://github.com/amosbird)). +* Add aggregate function `segmentLengthSum`. [#24250](https://github.com/ClickHouse/ClickHouse/pull/24250) ([flynn](https://github.com/ucasfl)). +* Add a new boolean setting `prefer_global_in_and_join` which defaults all IN/JOIN as GLOBAL IN/JOIN. [#23434](https://github.com/ClickHouse/ClickHouse/pull/23434) ([Amos Bird](https://github.com/amosbird)). +* Support `ALTER DELETE` queries for `Join` table engine. [#23260](https://github.com/ClickHouse/ClickHouse/pull/23260) ([foolchi](https://github.com/foolchi)). +* Add `quantileBFloat16` aggregate function as well as the corresponding `quantilesBFloat16` and `medianBFloat16`. It is very simple and fast quantile estimator with relative error not more than 0.390625%. This closes [#16641](https://github.com/ClickHouse/ClickHouse/issues/16641). [#23204](https://github.com/ClickHouse/ClickHouse/pull/23204) ([Ivan Novitskiy](https://github.com/RedClusive)). +* Implement `sequenceNextNode()` function useful for `flow analysis`. [#19766](https://github.com/ClickHouse/ClickHouse/pull/19766) ([achimbab](https://github.com/achimbab)). + +#### Experimental Feature + +* Add support for VFS over HDFS. [#11058](https://github.com/ClickHouse/ClickHouse/pull/11058) ([overshov](https://github.com/overshov)). + +#### Performance Improvement + +* Added optimization, that transforms some functions to reading of subcolumns to reduce amount of read data. E.g., statement `col IS NULL` is transformed to reading of subcolumn `col.null`. Optimization can be enabled by setting `optimize_functions_to_subcolumns`. [#24406](https://github.com/ClickHouse/ClickHouse/pull/24406) ([Anton Popov](https://github.com/CurtizJ)). +* Rewrite more columns to possible alias expressions. This may enable better optimization, such as projections. 
[#24405](https://github.com/ClickHouse/ClickHouse/pull/24405) ([Amos Bird](https://github.com/amosbird)). +* Index of type bloom_filter can be used for expressions with `hasAny` function with constant arrays. This closes: [#24291](https://github.com/ClickHouse/ClickHouse/issues/24291). [#24900](https://github.com/ClickHouse/ClickHouse/pull/24900) ([Vasily Nemkov](https://github.com/Enmk)). +* Add exponential backoff to reschedule read attempt in case RabbitMQ queues are empty. Closes [#24340](https://github.com/ClickHouse/ClickHouse/issues/24340). [#24415](https://github.com/ClickHouse/ClickHouse/pull/24415) ([Kseniia Sumarokova](https://github.com/kssenii)). + +#### Improvement + +* Add standalone `clickhouse-keeper` symlink to the main `clickhouse` binary. Now it's possible to run coordination without the main clickhouse server. [#24059](https://github.com/ClickHouse/ClickHouse/pull/24059) ([alesapin](https://github.com/alesapin)). +* Use global settings for query to `VIEW`. Fixed the behavior when queries to `VIEW` use local settings, that leads to errors if setting on `CREATE VIEW` and `SELECT` were different. As for now, `VIEW` won't use these modified settings, but you can still pass additional settings in `SETTINGS` section of `CREATE VIEW` query. Close [#20551](https://github.com/ClickHouse/ClickHouse/issues/20551). [#24095](https://github.com/ClickHouse/ClickHouse/pull/24095) ([Vladimir](https://github.com/vdimir)). +* Add two Replicated*MergeTree settings: `max_replicated_fetches_network_bandwidth` and `max_replicated_sends_network_bandwidth` which allows to limit maximum speed of replicated fetches/sends for table. Add two server-wide settings (in `default` user profile): `max_replicated_fetches_network_bandwidth_for_server` and `max_replicated_sends_network_bandwidth_for_server` which limit maximum speed of replication for all tables. The settings are not followed perfectly accurately. Turned off by default. Fixes [#1821](https://github.com/ClickHouse/ClickHouse/issues/1821). [#24573](https://github.com/ClickHouse/ClickHouse/pull/24573) ([alesapin](https://github.com/alesapin)). +* On server start, parts with incorrect partition ID would not be ever removed, but always detached. [#25070](https://github.com/ClickHouse/ClickHouse/issues/25070). [#25166](https://github.com/ClickHouse/ClickHouse/pull/25166) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). +* Use separate `clickhouse-bridge` group and user for bridge processes. Set oom_score_adj so the bridges will be first subjects for OOM killer. Set set maximum RSS to 1 GiB. Closes [#23861](https://github.com/ClickHouse/ClickHouse/issues/23861). [#25280](https://github.com/ClickHouse/ClickHouse/pull/25280) ([Kseniia Sumarokova](https://github.com/kssenii)). +* Increase size of background schedule pool to 128 (`background_schedule_pool_size` setting). It allows avoiding replication queue hung on slow zookeeper connection. [#25072](https://github.com/ClickHouse/ClickHouse/pull/25072) ([alesapin](https://github.com/alesapin)). +* Add merge tree setting `max_parts_to_merge_at_once` which limits the number of parts that can be merged in the background at once. Doesn't affect `OPTIMIZE FINAL` query. Fixes [#1820](https://github.com/ClickHouse/ClickHouse/issues/1820). [#24496](https://github.com/ClickHouse/ClickHouse/pull/24496) ([alesapin](https://github.com/alesapin)). +* Allow `not in` operator to be used in partition pruning. 
[#24894](https://github.com/ClickHouse/ClickHouse/pull/24894) ([Amos Bird](https://github.com/amosbird)).
+* Recognize IPv4 addresses like `127.0.1.1` as local. This is controversial and closes [#23504](https://github.com/ClickHouse/ClickHouse/issues/23504). Michael Filimonov will test this feature. [#24316](https://github.com/ClickHouse/ClickHouse/pull/24316) ([alexey-milovidov](https://github.com/alexey-milovidov)).
+* ClickHouse database created with MaterializeMySQL now contains all column comments from the MySQL database that materialized. [#25199](https://github.com/ClickHouse/ClickHouse/pull/25199) ([Storozhuk Kostiantyn](https://github.com/sand6255)).
+* Add settings (`connection_auto_close`/`connection_max_tries`/`connection_pool_size`) for MySQL storage engine. [#24146](https://github.com/ClickHouse/ClickHouse/pull/24146) ([Azat Khuzhin](https://github.com/azat)).
+* Improve startup time of Distributed engine. [#25663](https://github.com/ClickHouse/ClickHouse/pull/25663) ([Azat Khuzhin](https://github.com/azat)).
+* Drop replicas from dirname for internal_replication=true (allows INSERT into Distributed with cluster from any number of replicas; before, only 15 replicas were supported, and anything more would fail with ENAMETOOLONG while creating the directory for async blocks). [#25513](https://github.com/ClickHouse/ClickHouse/pull/25513) ([Azat Khuzhin](https://github.com/azat)).
+* Added support for Interval type in LowCardinality. Closes [#21730](https://github.com/ClickHouse/ClickHouse/issues/21730). [#25410](https://github.com/ClickHouse/ClickHouse/pull/25410) ([Vladimir](https://github.com/vdimir)).
+* Add `==` operator on time conditions for sequenceMatch and sequenceCount functions, e.g. sequenceMatch('(?1)(?t==1)(?2)')(time, data = 1, data = 2). [#25299](https://github.com/ClickHouse/ClickHouse/pull/25299) ([Christophe Kalenzaga](https://github.com/mga-chka)).
+* Add settings `http_max_fields`, `http_max_field_name_size`, `http_max_field_value_size`. [#25296](https://github.com/ClickHouse/ClickHouse/pull/25296) ([Ivan](https://github.com/abyss7)).
+* Add support for function `if` with Decimal and Int types on its branches. This closes [#20549](https://github.com/ClickHouse/ClickHouse/issues/20549). This closes [#10142](https://github.com/ClickHouse/ClickHouse/issues/10142). [#25283](https://github.com/ClickHouse/ClickHouse/pull/25283) ([alexey-milovidov](https://github.com/alexey-milovidov)).
+* Update prompt in `clickhouse-client` and display a message when reconnecting. This closes [#10577](https://github.com/ClickHouse/ClickHouse/issues/10577). [#25281](https://github.com/ClickHouse/ClickHouse/pull/25281) ([alexey-milovidov](https://github.com/alexey-milovidov)).
+* Correct memory tracking in aggregate function `topK`. This closes [#25259](https://github.com/ClickHouse/ClickHouse/issues/25259). [#25260](https://github.com/ClickHouse/ClickHouse/pull/25260) ([alexey-milovidov](https://github.com/alexey-milovidov)).
+* Fix topLevelDomain() for IDN hosts (i.e. `example.рф`); before, it returned an empty string for such hosts. [#25103](https://github.com/ClickHouse/ClickHouse/pull/25103) ([Azat Khuzhin](https://github.com/azat)).
+* Detect the Linux version at runtime (to check that nested epoll works, since it is required for `async_socket_for_remote`/`use_hedged_requests`; otherwise remote queries may get stuck). [#25067](https://github.com/ClickHouse/ClickHouse/pull/25067) ([Azat Khuzhin](https://github.com/azat)).
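+ For example, the `topLevelDomain` fix above can be sanity-checked with a query like the following (the expected result is inferred from the entry, not taken from this patch):
+
+     SELECT topLevelDomain('https://example.рф')  -- now 'рф', previously an empty string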
+* For distributed query, when `optimize_skip_unused_shards=1`, allow to skip shard with condition like `(sharding key) IN (one-element-tuple)`. (Tuples with many elements were supported. Tuple with single element did not work because it is parsed as literal). [#24930](https://github.com/ClickHouse/ClickHouse/pull/24930) ([Amos Bird](https://github.com/amosbird)). +* Improved logging of S3 errors, no more double spaces in case of empty keys and buckets. [#24897](https://github.com/ClickHouse/ClickHouse/pull/24897) ([Vladimir Chebotarev](https://github.com/excitoon)). +* Some queries require multi-pass semantic analysis. Try reusing built sets for `IN` in this case. [#24874](https://github.com/ClickHouse/ClickHouse/pull/24874) ([Amos Bird](https://github.com/amosbird)). +* Respect `max_distributed_connections` for `insert_distributed_sync` (otherwise for huge clusters and sync insert it may run out of `max_thread_pool_size`). [#24754](https://github.com/ClickHouse/ClickHouse/pull/24754) ([Azat Khuzhin](https://github.com/azat)). +* Avoid hiding errors like `Limit for rows or bytes to read exceeded` for scalar subqueries. [#24545](https://github.com/ClickHouse/ClickHouse/pull/24545) ([nvartolomei](https://github.com/nvartolomei)). +* Make String-to-Int parser stricter so that `toInt64('+')` will throw. [#24475](https://github.com/ClickHouse/ClickHouse/pull/24475) ([Amos Bird](https://github.com/amosbird)). +* If SSDDictionary is created with DDL query, it can be created only inside user_files directory. [#24466](https://github.com/ClickHouse/ClickHouse/pull/24466) ([Maksim Kita](https://github.com/kitaisreal)). +* PostgreSQL support specifying non default schema for insert queries. Closes [#24149](https://github.com/ClickHouse/ClickHouse/issues/24149). [#24413](https://github.com/ClickHouse/ClickHouse/pull/24413) ([Kseniia Sumarokova](https://github.com/kssenii)). +* Fix IPv6 addresses resolving (i.e. fixes `select * from remote('[::1]', system.one)`). [#24319](https://github.com/ClickHouse/ClickHouse/pull/24319) ([Azat Khuzhin](https://github.com/azat)). +* Fix trailing whitespaces in FROM clause with subqueries in multiline mode, and also changes the output of the queries slightly in a more human friendly way. [#24151](https://github.com/ClickHouse/ClickHouse/pull/24151) ([Azat Khuzhin](https://github.com/azat)). +* Suppress exceptions from logger code. [#24069](https://github.com/ClickHouse/ClickHouse/pull/24069) ([Azat Khuzhin](https://github.com/azat)). +* Add ability to split distributed batch on failures (i.e. due to memory limits, corruptions), under `distributed_directory_monitor_split_batch_on_failure` (OFF by default). [#23864](https://github.com/ClickHouse/ClickHouse/pull/23864) ([Azat Khuzhin](https://github.com/azat)). +* Handle column name clashes for storage join. Closes [#20309](https://github.com/ClickHouse/ClickHouse/issues/20309). [#23769](https://github.com/ClickHouse/ClickHouse/pull/23769) ([Vladimir](https://github.com/vdimir)). +* Display progress for File table engine in clickhouse-local and on INSERT query in clickhouse-client when data is passed to stdin. Closes [#18209](https://github.com/ClickHouse/ClickHouse/issues/18209). [#23656](https://github.com/ClickHouse/ClickHouse/pull/23656) ([Kseniia Sumarokova](https://github.com/kssenii)). +* Bugfixes and improvements of clickhouse-copier. Allow to copy tables with different (but compatible schemas). Closes [#9159](https://github.com/ClickHouse/ClickHouse/issues/9159). Added test to copy ReplacingMergeTree. 
Closes [#22711](https://github.com/ClickHouse/ClickHouse/issues/22711). Support TTL on columns and Data Skipping Indices: it simply removes them when creating the internal Distributed table (the underlying table will keep the TTL and skipping indices). Closes [#19384](https://github.com/ClickHouse/ClickHouse/issues/19384). Allow to copy MATERIALIZED and ALIAS columns. There are some cases in which it could be helpful (e.g. if this column is in PRIMARY KEY). Now it can be allowed by setting the `allow_to_copy_alias_and_materialized_columns` property to true in the task configuration. Closes [#9177](https://github.com/ClickHouse/ClickHouse/issues/9177). Closes [#11007](https://github.com/ClickHouse/ClickHouse/issues/11007). Closes [#9514](https://github.com/ClickHouse/ClickHouse/issues/9514). Added a property `allow_to_drop_target_partitions` in the task configuration to drop the partition in the original table before moving helping tables. Closes [#20957](https://github.com/ClickHouse/ClickHouse/issues/20957). Get rid of `OPTIMIZE DEDUPLICATE` query. This hack was needed because `ALTER TABLE MOVE PARTITION` was retried many times and plain MergeTree tables don't have deduplication. Closes [#17966](https://github.com/ClickHouse/ClickHouse/issues/17966). Write progress to a ZooKeeper node on the path `task_path + /status` in JSON format. Closes [#20955](https://github.com/ClickHouse/ClickHouse/issues/20955). Support for ReplicatedTables without arguments. Closes [#24834](https://github.com/ClickHouse/ClickHouse/issues/24834). [#23518](https://github.com/ClickHouse/ClickHouse/pull/23518) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
+* Added sleep with backoff between read retries from S3. [#23461](https://github.com/ClickHouse/ClickHouse/pull/23461) ([Vladimir Chebotarev](https://github.com/excitoon)).
+* Respect `insert_allow_materialized_columns` (allows materialized columns) for INSERT into `Distributed` table. [#23349](https://github.com/ClickHouse/ClickHouse/pull/23349) ([Azat Khuzhin](https://github.com/azat)).
+* Add ability to push down LIMIT for distributed queries. [#23027](https://github.com/ClickHouse/ClickHouse/pull/23027) ([Azat Khuzhin](https://github.com/azat)).
+* Fix Zero-Copy replication with several S3 volumes (Fixes [#22679](https://github.com/ClickHouse/ClickHouse/issues/22679)). [#22864](https://github.com/ClickHouse/ClickHouse/pull/22864) ([ianton-ru](https://github.com/ianton-ru)).
+* Resolve the actual port number bound when a user requests any available port from the operating system. [#25569](https://github.com/ClickHouse/ClickHouse/pull/25569) ([bnaecker](https://github.com/bnaecker)).
+
+#### Bug Fix
+
+* Fix bug which can lead to the ZooKeeper client hanging inside clickhouse-server. [#24721](https://github.com/ClickHouse/ClickHouse/pull/24721) ([alesapin](https://github.com/alesapin)).
+* If the ZooKeeper connection was lost and the replica was cloned after restoring the connection, its replication queue might contain outdated entries. Fixed crash when the replication queue contains intersecting virtual parts. It may rarely happen if some data part was lost. Print an error in the log instead of terminating. [#24777](https://github.com/ClickHouse/ClickHouse/pull/24777) ([tavplubix](https://github.com/tavplubix)).
+* Fix lost `WHERE` condition in expression-push-down optimization of query plan (setting `query_plan_filter_push_down = 1` by default). Fixes [#25368](https://github.com/ClickHouse/ClickHouse/issues/25368).
[#25370](https://github.com/ClickHouse/ClickHouse/pull/25370) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). +* Fix bug which can lead to intersecting parts after merges with TTL: `Part all_40_40_0 is covered by all_40_40_1 but should be merged into all_40_41_1. This shouldn't happen often.`. [#25549](https://github.com/ClickHouse/ClickHouse/pull/25549) ([alesapin](https://github.com/alesapin)). +* Fix extremely rare bug on low-memory servers which can lead to the inability to perform merges without restart. Possibly fixes [#24603](https://github.com/ClickHouse/ClickHouse/issues/24603). [#24872](https://github.com/ClickHouse/ClickHouse/pull/24872) ([alesapin](https://github.com/alesapin)). +* Fix extremely rare error `Tagging already tagged part` in replication queue during concurrent `alter move/replace partition`. Possibly fixes [#22142](https://github.com/ClickHouse/ClickHouse/issues/22142). [#24961](https://github.com/ClickHouse/ClickHouse/pull/24961) ([alesapin](https://github.com/alesapin)). +* On ZooKeeper connection loss `ReplicatedMergeTree` table might wait for background operations to complete before trying to reconnect. It's fixed, now background operations are stopped forcefully. [#25306](https://github.com/ClickHouse/ClickHouse/pull/25306) ([tavplubix](https://github.com/tavplubix)). +* Use old modulo function version when used in partition key. Closes [#23508](https://github.com/ClickHouse/ClickHouse/issues/23508). [#24157](https://github.com/ClickHouse/ClickHouse/pull/24157) ([Kseniia Sumarokova](https://github.com/kssenii)). +* Fix error `Key expression contains comparison between inconvertible types` for queries with `ARRAY JOIN` in case if array is used in primary key. Fixes [#8247](https://github.com/ClickHouse/ClickHouse/issues/8247). [#25546](https://github.com/ClickHouse/ClickHouse/pull/25546) ([Anton Popov](https://github.com/CurtizJ)). +* Fix wrong totals for query `WITH TOTALS` and `WITH FILL`. Fixes [#20872](https://github.com/ClickHouse/ClickHouse/issues/20872). [#25539](https://github.com/ClickHouse/ClickHouse/pull/25539) ([Anton Popov](https://github.com/CurtizJ)). +* Fixed case, when sometimes conversion of postgres arrays resulted in String data type, not n-dimensional array, because `attndims` works incorrectly in some cases. Closes [#24804](https://github.com/ClickHouse/ClickHouse/issues/24804). [#25538](https://github.com/ClickHouse/ClickHouse/pull/25538) ([Kseniia Sumarokova](https://github.com/kssenii)). +* Fix data race when querying `system.clusters` while reloading the cluster configuration at the same time. [#25737](https://github.com/ClickHouse/ClickHouse/pull/25737) ([Amos Bird](https://github.com/amosbird)). +* Fixed `No such file or directory` error on moving `Distributed` table between databases. Fixes [#24971](https://github.com/ClickHouse/ClickHouse/issues/24971). [#25667](https://github.com/ClickHouse/ClickHouse/pull/25667) ([tavplubix](https://github.com/tavplubix)). +* `REPLACE PARTITION` might be ignored in rare cases if the source partition was empty. It's fixed. Fixes [#24869](https://github.com/ClickHouse/ClickHouse/issues/24869). [#25665](https://github.com/ClickHouse/ClickHouse/pull/25665) ([tavplubix](https://github.com/tavplubix)). +* Fixed a bug in `Replicated` database engine that might rarely cause some replica to skip enqueued DDL query. [#24805](https://github.com/ClickHouse/ClickHouse/pull/24805) ([tavplubix](https://github.com/tavplubix)). +* Fix null pointer dereference in `EXPLAIN AST` without query. 
[#25631](https://github.com/ClickHouse/ClickHouse/pull/25631) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
+* Fix potential crash when calculating aggregate function states by aggregation of aggregate function states of other aggregate functions (not a practical use case). See [#24523](https://github.com/ClickHouse/ClickHouse/issues/24523). [#25015](https://github.com/ClickHouse/ClickHouse/pull/25015) ([alexey-milovidov](https://github.com/alexey-milovidov)).
+* Fix waiting for automatic dropping of empty parts. It could fill up the background pool and stall replication. [#23315](https://github.com/ClickHouse/ClickHouse/pull/23315) ([Anton Popov](https://github.com/CurtizJ)).
+* Fix restoring of S3 tables. [#25601](https://github.com/ClickHouse/ClickHouse/pull/25601) ([ianton-ru](https://github.com/ianton-ru)).
+* Fix segfault in `Arrow` format when using `Decimal256`. Add arrow `Decimal256` support. [#25531](https://github.com/ClickHouse/ClickHouse/pull/25531) ([Kruglov Pavel](https://github.com/Avogar)).
+* Fix conversion of datetime with timezone for MySQL, PostgreSQL, ODBC. Closes [#5057](https://github.com/ClickHouse/ClickHouse/issues/5057). [#25528](https://github.com/ClickHouse/ClickHouse/pull/25528) ([Kseniia Sumarokova](https://github.com/kssenii)).
+* Fix excessive underscore before the names of the preprocessed configuration files. [#25431](https://github.com/ClickHouse/ClickHouse/pull/25431) ([Vitaly Baranov](https://github.com/vitlibar)).
+* Fix segfault when sharding_key is absent in the task config for copier. [#25419](https://github.com/ClickHouse/ClickHouse/pull/25419) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
+* Fix `REPLACE` column transformer when used in DDL by correctly quoting the formatted query. This fixes [#23925](https://github.com/ClickHouse/ClickHouse/issues/23925). [#25391](https://github.com/ClickHouse/ClickHouse/pull/25391) ([Amos Bird](https://github.com/amosbird)).
+* Fix the possibility of non-deterministic behaviour of the `quantileDeterministic` function and similar. This closes [#20480](https://github.com/ClickHouse/ClickHouse/issues/20480). [#25313](https://github.com/ClickHouse/ClickHouse/pull/25313) ([alexey-milovidov](https://github.com/alexey-milovidov)).
+* Support `SimpleAggregateFunction(LowCardinality)` for `SummingMergeTree`. Fixes [#25134](https://github.com/ClickHouse/ClickHouse/issues/25134). [#25300](https://github.com/ClickHouse/ClickHouse/pull/25300) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
+* Fix logical error `Cannot sum Array/Tuple` in min/maxMap. [#25298](https://github.com/ClickHouse/ClickHouse/pull/25298) ([Kruglov Pavel](https://github.com/Avogar)).
+* Fix error `Bad cast from type DB::ColumnLowCardinality to DB::ColumnVector` for queries where `LowCardinality` argument was used for IN (this bug appeared in 21.6). Fixes [#25187](https://github.com/ClickHouse/ClickHouse/issues/25187). [#25290](https://github.com/ClickHouse/ClickHouse/pull/25290) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
+* Fix joinGetOrNull with not-nullable columns. This fixes [#24261](https://github.com/ClickHouse/ClickHouse/issues/24261). [#25288](https://github.com/ClickHouse/ClickHouse/pull/25288) ([Amos Bird](https://github.com/amosbird)).
+* Fix incorrect behaviour and UBSan report in big integers. In previous versions `CAST(1e19 AS UInt128)` returned zero. [#25279](https://github.com/ClickHouse/ClickHouse/pull/25279) ([alexey-milovidov](https://github.com/alexey-milovidov)).
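+ A quick way to observe the big-integer `CAST` fix above (the expected value is inferred from the entry):
+
+     SELECT CAST(1e19 AS UInt128)  -- now 10000000000000000000, previously 0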
+* Fixed an error which occurred while inserting a subset of columns using CSVWithNames format. Fixes [#25129](https://github.com/ClickHouse/ClickHouse/issues/25129). [#25169](https://github.com/ClickHouse/ClickHouse/pull/25169) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
+* Do not use table's projection for `SELECT` with `FINAL`. It is not supported yet. [#25163](https://github.com/ClickHouse/ClickHouse/pull/25163) ([Amos Bird](https://github.com/amosbird)).
+* Fix possible parts loss after updating to 21.5 in case the table used `UUID` in the partition key. (It is not recommended to use `UUID` in the partition key). Fixes [#25070](https://github.com/ClickHouse/ClickHouse/issues/25070). [#25127](https://github.com/ClickHouse/ClickHouse/pull/25127) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
+* Fix crash in query with cross join and `joined_subquery_requires_alias = 0`. Fixes [#24011](https://github.com/ClickHouse/ClickHouse/issues/24011). [#25082](https://github.com/ClickHouse/ClickHouse/pull/25082) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
+* Fix bug with constant maps in mapContains that led to the error `empty column was returned by function mapContains`. Closes [#25077](https://github.com/ClickHouse/ClickHouse/issues/25077). [#25080](https://github.com/ClickHouse/ClickHouse/pull/25080) ([Kruglov Pavel](https://github.com/Avogar)).
+* Fix bug which allowed creating tables with columns referencing themselves like `a UInt32 ALIAS a + 1` or `b UInt32 MATERIALIZED b`. Fixes [#24910](https://github.com/ClickHouse/ClickHouse/issues/24910), [#24292](https://github.com/ClickHouse/ClickHouse/issues/24292). [#25059](https://github.com/ClickHouse/ClickHouse/pull/25059) ([alesapin](https://github.com/alesapin)).
+* Fix wrong result when using an aggregate projection with a **non-empty** `GROUP BY` key to execute a query with an **empty** `GROUP BY` key. [#25055](https://github.com/ClickHouse/ClickHouse/pull/25055) ([Amos Bird](https://github.com/amosbird)).
+* Distinguish KILL MUTATION for different tables (fixes unexpected `Cancelled mutating parts` error). [#25025](https://github.com/ClickHouse/ClickHouse/pull/25025) ([Azat Khuzhin](https://github.com/azat)).
+* Fix serialization of split nested messages in Protobuf format. This PR fixes [#24647](https://github.com/ClickHouse/ClickHouse/issues/24647). [#25000](https://github.com/ClickHouse/ClickHouse/pull/25000) ([Vitaly Baranov](https://github.com/vitlibar)).
+* Fix limit/offset settings for distributed queries (ignore them on the remote nodes). [#24940](https://github.com/ClickHouse/ClickHouse/pull/24940) ([Azat Khuzhin](https://github.com/azat)).
+* Fix possible heap-buffer-overflow in Arrow. [#24922](https://github.com/ClickHouse/ClickHouse/pull/24922) ([Kruglov Pavel](https://github.com/Avogar)).
+* Fixed bug with declaring an S3 disk at the root of a bucket. [#24898](https://github.com/ClickHouse/ClickHouse/pull/24898) ([Vladimir Chebotarev](https://github.com/excitoon)).
+* Fixed possible error 'Cannot read from istream at offset 0' when reading a file from DiskS3. [#24885](https://github.com/ClickHouse/ClickHouse/pull/24885) ([Pavel Kovalenko](https://github.com/Jokser)).
+* Fix "Missing columns" exception when joining Distributed Materialized View. [#24870](https://github.com/ClickHouse/ClickHouse/pull/24870) ([Azat Khuzhin](https://github.com/azat)).
+* Allow NULL values in the PostgreSQL protocol. Closes [#22622](https://github.com/ClickHouse/ClickHouse/issues/22622).
[#24857](https://github.com/ClickHouse/ClickHouse/pull/24857) ([Kseniia Sumarokova](https://github.com/kssenii)).
+* Fix bug where the exception `Mutation was killed` could be thrown to the client while waiting for a mutation that was not yet loaded into memory. [#24809](https://github.com/ClickHouse/ClickHouse/pull/24809) ([alesapin](https://github.com/alesapin)).
+* Fixed bug in deserialization of random generator state which might cause some data types such as `AggregateFunction(groupArraySample(N), T)` to behave in a non-deterministic way. [#24538](https://github.com/ClickHouse/ClickHouse/pull/24538) ([tavplubix](https://github.com/tavplubix)).
+* Disallow building uniqXXXXStates of other aggregation states. [#24523](https://github.com/ClickHouse/ClickHouse/pull/24523) ([Raúl Marín](https://github.com/Algunenano)).
+* Enable reading of subcolumns for distributed tables. [#24472](https://github.com/ClickHouse/ClickHouse/pull/24472) ([Anton Popov](https://github.com/CurtizJ)).
+* Fix usage of tuples in `CREATE .. AS SELECT` queries. [#24464](https://github.com/ClickHouse/ClickHouse/pull/24464) ([Anton Popov](https://github.com/CurtizJ)).
+* Fixed the behavior when the queries `SYSTEM RESTART REPLICA` or `SYSTEM SYNC REPLICA` were being processed infinitely. This was detected on a server with an extremely small amount of RAM. [#24457](https://github.com/ClickHouse/ClickHouse/pull/24457) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
+* Fix totalBytes computation in StorageBuffer. In the current CH version the total_writes.bytes counter decreases too much during the buffer flush, which leads to counter overflow and totalBytes returning something around 17.44 EB some time after the flush. [#24450](https://github.com/ClickHouse/ClickHouse/pull/24450) ([DimasKovas](https://github.com/DimasKovas)).
+* Fix incorrect monotonicity of the toWeek function. This fixes [#24422](https://github.com/ClickHouse/ClickHouse/issues/24422). This bug was introduced in https://github.com/ClickHouse/ClickHouse/pull/5212 and was exposed later by a smarter partition pruner. [#24446](https://github.com/ClickHouse/ClickHouse/pull/24446) ([Amos Bird](https://github.com/amosbird)).
+* Fixed the deadlock that can happen during LDAP role (re)mapping, when an LDAP group is mapped to a nonexistent local role. [#24431](https://github.com/ClickHouse/ClickHouse/pull/24431) ([Denis Glazachev](https://github.com/traceon)).
+* In a "multipart/form-data" message, consider the CRLF preceding a boundary as part of it. Fixes [#23905](https://github.com/ClickHouse/ClickHouse/issues/23905). [#24399](https://github.com/ClickHouse/ClickHouse/pull/24399) ([Ivan](https://github.com/abyss7)).
+* Fix dropping a partition with intersecting fake parts. In rare cases there might be parts with a mutation version greater than the current block number. [#24321](https://github.com/ClickHouse/ClickHouse/pull/24321) ([Amos Bird](https://github.com/amosbird)).
+* Fixed a bug in moving a Materialized View from an Ordinary to an Atomic database (`RENAME TABLE` query). Now the inner table is moved to the new database together with the Materialized View. Fixes [#23926](https://github.com/ClickHouse/ClickHouse/issues/23926). [#24309](https://github.com/ClickHouse/ClickHouse/pull/24309) ([tavplubix](https://github.com/tavplubix)).
+* Allow empty HTTP headers. Fixes [#23901](https://github.com/ClickHouse/ClickHouse/issues/23901). [#24285](https://github.com/ClickHouse/ClickHouse/pull/24285) ([Ivan](https://github.com/abyss7)).
+* Set `max_threads = 1` to fix a mutation failure of StorageMemory. Closes [#24274](https://github.com/ClickHouse/ClickHouse/issues/24274). [#24275](https://github.com/ClickHouse/ClickHouse/pull/24275) ([flynn](https://github.com/ucasfl)).
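+ A hypothetical illustration of the Materialized View move fix above (database and view names are made up, and the exact failing case is not shown in this patch):
+
+     RENAME TABLE ordinary_db.mv TO atomic_db.mv  -- the inner table now moves to the new database together with the view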
+* Keep column cardinality in the join output the same as at the input. Close [#23351](https://github.com/ClickHouse/ClickHouse/issues/23351), close [#20315](https://github.com/ClickHouse/ClickHouse/issues/20315). [#24061](https://github.com/ClickHouse/ClickHouse/pull/24061) ([Vladimir](https://github.com/vdimir)).
+* Fix the bug in failover behavior when Engine=Kafka was not able to start consumption if the same consumer had an empty assignment previously. Closes [#21118](https://github.com/ClickHouse/ClickHouse/issues/21118). [#21267](https://github.com/ClickHouse/ClickHouse/pull/21267) ([filimonov](https://github.com/filimonov)).
+* Fix MySQL `select user()` returning an empty result. Closes [#25697](https://github.com/ClickHouse/ClickHouse/pull/25697). [#25697](https://github.com/ClickHouse/ClickHouse/pull/25697) ([sundyli](https://github.com/sundy-li)).
+
+#### Build/Testing/Packaging Improvement
+
+* Adds cross-platform embedding of binary resources into executables. [#25146](https://github.com/ClickHouse/ClickHouse/pull/25146) ([bnaecker](https://github.com/bnaecker)).
+* Flatbuffers library updated to v2.0.0. Improvements list: https://github.com/google/flatbuffers/releases/tag/v2.0.0. [#25474](https://github.com/ClickHouse/ClickHouse/pull/25474) ([Ilya Yatsishin](https://github.com/qoega)).
+* Add CI check for darwin-aarch64 cross-compilation. [#25560](https://github.com/ClickHouse/ClickHouse/pull/25560) ([Ivan](https://github.com/abyss7)).
+* Ubuntu 20.04 is now used to run integration tests; the docker-compose version used to run integration tests is updated to 1.28.2. Environment variables now take effect on docker-compose. Rework test_dictionaries_all_layouts_separate_sources to allow parallel runs. [#20393](https://github.com/ClickHouse/ClickHouse/pull/20393) ([Ilya Yatsishin](https://github.com/qoega)).
+* Add join-related options to stress tests. [#25200](https://github.com/ClickHouse/ClickHouse/pull/25200) ([Vladimir](https://github.com/vdimir)).
+* Enabling TestFlows RBAC tests. [#25498](https://github.com/ClickHouse/ClickHouse/pull/25498) ([vzakaznikov](https://github.com/vzakaznikov)).
+* Increase LDAP verification cooldown performance tests timeout to 600 sec. [#25374](https://github.com/ClickHouse/ClickHouse/pull/25374) ([vzakaznikov](https://github.com/vzakaznikov)).
+* Added rounding to mathematical and arithmetic function tests for consistent snapshot comparison. Cleaned up test names so they're more uniform. [#25297](https://github.com/ClickHouse/ClickHouse/pull/25297) ([MyroTk](https://github.com/MyroTk)).
+* Enable build with the s3 module on macOS [#25217](https://github.com/ClickHouse/ClickHouse/issues/25217). [#25218](https://github.com/ClickHouse/ClickHouse/pull/25218) ([kevin wan](https://github.com/MaxWk)).
+* Adding `leadInFrame` and `lagInFrame` window functions TestFlows tests. [#25144](https://github.com/ClickHouse/ClickHouse/pull/25144) ([vzakaznikov](https://github.com/vzakaznikov)).
+* Fix using Yandex dockerhub registries for TestFlows. [#25133](https://github.com/ClickHouse/ClickHouse/pull/25133) ([vzakaznikov](https://github.com/vzakaznikov)).
+* Disabling extended precision data types TestFlows tests. [#25125](https://github.com/ClickHouse/ClickHouse/pull/25125) ([vzakaznikov](https://github.com/vzakaznikov)).
+* Add integration test cases to cover JDBC bridge.
[#25047](https://github.com/ClickHouse/ClickHouse/pull/25047) ([Zhichun Wu](https://github.com/zhicwu)). +* Integration tests configuration has special treatment for dictionaries. Removed remaining dictionaries manual setup. [#24728](https://github.com/ClickHouse/ClickHouse/pull/24728) ([Ilya Yatsishin](https://github.com/qoega)). +* Adding support to save clickhouse server logs in TestFlows check. [#24504](https://github.com/ClickHouse/ClickHouse/pull/24504) ([vzakaznikov](https://github.com/vzakaznikov)). +* Add libfuzzer tests for YAMLParser class. [#24480](https://github.com/ClickHouse/ClickHouse/pull/24480) ([BoloniniD](https://github.com/BoloniniD)). +* Testing for big ints using the following functions: * Arithmetic * Array, tuple, and map * Bit * Comparison * Conversion * Logical * Mathematical * Null * Rounding - Creating a table with columns that use the data types. [#24350](https://github.com/ClickHouse/ClickHouse/pull/24350) ([MyroTk](https://github.com/MyroTk)). +* Fix TOCTOU error in installation script. [#25277](https://github.com/ClickHouse/ClickHouse/pull/25277) ([alexey-milovidov](https://github.com/alexey-milovidov)). +* Changed CSS theme to dark for better code highlighting. [#25682](https://github.com/ClickHouse/ClickHouse/pull/25682) ([Mike Kot](https://github.com/myrrc)). + + ### ClickHouse release 21.6, 2021-06-05 #### Upgrade Notes From de90cc0e8ffcba4e7e4d44ed4fd01cbb0dde6232 Mon Sep 17 00:00:00 2001 From: alexey-milovidov Date: Fri, 9 Jul 2021 03:00:49 +0300 Subject: [PATCH 881/931] Update CHANGELOG.md --- CHANGELOG.md | 139 ++++++++++++++++++++++++--------------------------- 1 file changed, 64 insertions(+), 75 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 5e0a0f30804..34d11c6a2cd 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,20 +2,19 @@ #### Backward Incompatible Change -* Forward/backward incompatible change of maximum buffer size in clickhouse-keeper. Better to do it now (before production), than later. [#25421](https://github.com/ClickHouse/ClickHouse/pull/25421) ([alesapin](https://github.com/alesapin)). * Improved performance of queries with explicitly defined large sets. Added compatibility setting `legacy_column_name_of_tuple_literal`. It makes sense to set it to `true`, while doing rolling update of cluster from version lower than 21.7 to any higher version. Otherwise distributed queries with explicitly defined sets at `IN` clause may fail during update. [#25371](https://github.com/ClickHouse/ClickHouse/pull/25371) ([Anton Popov](https://github.com/CurtizJ)). +* Forward/backward incompatible change of maximum buffer size in clickhouse-keeper (an experimental alternative to ZooKeeper). Better to do it now (before production), than later. [#25421](https://github.com/ClickHouse/ClickHouse/pull/25421) ([alesapin](https://github.com/alesapin)). #### New Feature -* Added YAML configuration support to configuration loader. This closes [#3607](https://github.com/ClickHouse/ClickHouse/issues/3607). [#21858](https://github.com/ClickHouse/ClickHouse/pull/21858) ([BoloniniD](https://github.com/BoloniniD)). +* Support configuration in YAML format as alternative to XML. This closes [#3607](https://github.com/ClickHouse/ClickHouse/issues/3607). [#21858](https://github.com/ClickHouse/ClickHouse/pull/21858) ([BoloniniD](https://github.com/BoloniniD)). * Provides a way to restore replicated table when the data is (possibly) present, but the ZooKeeper metadata is lost. Resolves [#13458](https://github.com/ClickHouse/ClickHouse/issues/13458). 
[#13652](https://github.com/ClickHouse/ClickHouse/pull/13652) ([Mike Kot](https://github.com/myrrc)). -* Now clickhouse-keeper supports ZooKeeper-like `digest` ACLs. [#24448](https://github.com/ClickHouse/ClickHouse/pull/24448) ([alesapin](https://github.com/alesapin)). * Support structs and maps in Arrow/Parquet/ORC and dictionaries in Arrow input/output formats. Present new setting `output_format_arrow_low_cardinality_as_dictionary`. [#24341](https://github.com/ClickHouse/ClickHouse/pull/24341) ([Kruglov Pavel](https://github.com/Avogar)). -* Dictionaries added support for Array type. [#25119](https://github.com/ClickHouse/ClickHouse/pull/25119) ([Maksim Kita](https://github.com/kitaisreal)). +* Added support for `Array` type in dictionaries. [#25119](https://github.com/ClickHouse/ClickHouse/pull/25119) ([Maksim Kita](https://github.com/kitaisreal)). * Added function `bitPositionsToArray`. Closes [#23792](https://github.com/ClickHouse/ClickHouse/issues/23792). Author [Kevin Wan] (@MaxWk). [#25394](https://github.com/ClickHouse/ClickHouse/pull/25394) ([Maksim Kita](https://github.com/kitaisreal)). -* Added function `dateName`. Author [Daniil Kondratyev] (@dankondr). [#25372](https://github.com/ClickHouse/ClickHouse/pull/25372) ([Maksim Kita](https://github.com/kitaisreal)). +* Added function `dateName` to return names like 'Friday' or 'April'. Author [Daniil Kondratyev] (@dankondr). [#25372](https://github.com/ClickHouse/ClickHouse/pull/25372) ([Maksim Kita](https://github.com/kitaisreal)). * Add `toJSONString` function to serialize columns to their JSON representations. [#25164](https://github.com/ClickHouse/ClickHouse/pull/25164) ([Amos Bird](https://github.com/amosbird)). -* Now query_log has two new columns: `initial_query_start_time`, `initial_query_start_time_microsecond` that record the starting time of a distributed query if any. [#25022](https://github.com/ClickHouse/ClickHouse/pull/25022) ([Amos Bird](https://github.com/amosbird)). +* Now `query_log` has two new columns: `initial_query_start_time`, `initial_query_start_time_microsecond` that record the starting time of a distributed query if any. [#25022](https://github.com/ClickHouse/ClickHouse/pull/25022) ([Amos Bird](https://github.com/amosbird)). * Add aggregate function `segmentLengthSum`. [#24250](https://github.com/ClickHouse/ClickHouse/pull/24250) ([flynn](https://github.com/ucasfl)). * Add a new boolean setting `prefer_global_in_and_join` which defaults all IN/JOIN as GLOBAL IN/JOIN. [#23434](https://github.com/ClickHouse/ClickHouse/pull/23434) ([Amos Bird](https://github.com/amosbird)). * Support `ALTER DELETE` queries for `Join` table engine. [#23260](https://github.com/ClickHouse/ClickHouse/pull/23260) ([foolchi](https://github.com/foolchi)). @@ -24,145 +23,135 @@ #### Experimental Feature -* Add support for VFS over HDFS. [#11058](https://github.com/ClickHouse/ClickHouse/pull/11058) ([overshov](https://github.com/overshov)). +* Add support for virtual filesystem over HDFS. [#11058](https://github.com/ClickHouse/ClickHouse/pull/11058) ([overshov](https://github.com/overshov)) ([Kseniia Sumarokova](https://github.com/kssenii)). +* Now clickhouse-keeper (an experimental alternative to ZooKeeper) supports ZooKeeper-like `digest` ACLs. [#24448](https://github.com/ClickHouse/ClickHouse/pull/24448) ([alesapin](https://github.com/alesapin)). #### Performance Improvement -* Added optimization, that transforms some functions to reading of subcolumns to reduce amount of read data. 
E.g., statement `col IS NULL` is transformed to reading of subcolumn `col.null`. Optimization can be enabled by setting `optimize_functions_to_subcolumns`. [#24406](https://github.com/ClickHouse/ClickHouse/pull/24406) ([Anton Popov](https://github.com/CurtizJ)).
+* Added optimization that transforms some functions to reading of subcolumns to reduce amount of read data. E.g., statement `col IS NULL` is transformed to reading of subcolumn `col.null`. Optimization can be enabled by setting `optimize_functions_to_subcolumns` which is currently off by default. [#24406](https://github.com/ClickHouse/ClickHouse/pull/24406) ([Anton Popov](https://github.com/CurtizJ)).
* Rewrite more columns to possible alias expressions. This may enable better optimization, such as projections. [#24405](https://github.com/ClickHouse/ClickHouse/pull/24405) ([Amos Bird](https://github.com/amosbird)).
-* Index of type bloom_filter can be used for expressions with `hasAny` function with constant arrays. This closes: [#24291](https://github.com/ClickHouse/ClickHouse/issues/24291). [#24900](https://github.com/ClickHouse/ClickHouse/pull/24900) ([Vasily Nemkov](https://github.com/Enmk)).
-* Add exponential backoff to reschedule read attempt in case RabbitMQ queues are empty. Closes [#24340](https://github.com/ClickHouse/ClickHouse/issues/24340). [#24415](https://github.com/ClickHouse/ClickHouse/pull/24415) ([Kseniia Sumarokova](https://github.com/kssenii)).
+* Index of type `bloom_filter` can be used for expressions with `hasAny` function with constant arrays. This closes: [#24291](https://github.com/ClickHouse/ClickHouse/issues/24291). [#24900](https://github.com/ClickHouse/ClickHouse/pull/24900) ([Vasily Nemkov](https://github.com/Enmk)).
+* Add exponential backoff to reschedule read attempt in case RabbitMQ queues are empty. (ClickHouse has support for importing data from RabbitMQ). Closes [#24340](https://github.com/ClickHouse/ClickHouse/issues/24340). [#24415](https://github.com/ClickHouse/ClickHouse/pull/24415) ([Kseniia Sumarokova](https://github.com/kssenii)).

#### Improvement

+* Allow to limit bandwidth for replication. Add two Replicated\*MergeTree settings: `max_replicated_fetches_network_bandwidth` and `max_replicated_sends_network_bandwidth` which allow limiting the maximum speed of replicated fetches/sends for a table. Add two server-wide settings (in `default` user profile): `max_replicated_fetches_network_bandwidth_for_server` and `max_replicated_sends_network_bandwidth_for_server` which limit maximum speed of replication for all tables. The settings are not followed perfectly accurately. Turned off by default. Fixes [#1821](https://github.com/ClickHouse/ClickHouse/issues/1821). [#24573](https://github.com/ClickHouse/ClickHouse/pull/24573) ([alesapin](https://github.com/alesapin)).
+* Resource constraints and isolation for ODBC and Library bridges. Use separate `clickhouse-bridge` group and user for bridge processes. Set oom_score_adj so the bridges will be the first subjects for the OOM killer. Set maximum RSS to 1 GiB. Closes [#23861](https://github.com/ClickHouse/ClickHouse/issues/23861). [#25280](https://github.com/ClickHouse/ClickHouse/pull/25280) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Add standalone `clickhouse-keeper` symlink to the main `clickhouse` binary. Now it's possible to run coordination without the main clickhouse server. [#24059](https://github.com/ClickHouse/ClickHouse/pull/24059) ([alesapin](https://github.com/alesapin)).
* Use global settings for query to `VIEW`.
Fixed the behavior when queries to `VIEW` use local settings, which led to errors if settings on `CREATE VIEW` and `SELECT` were different. As of now, `VIEW` won't use these modified settings, but you can still pass additional settings in `SETTINGS` section of `CREATE VIEW` query. Close [#20551](https://github.com/ClickHouse/ClickHouse/issues/20551). [#24095](https://github.com/ClickHouse/ClickHouse/pull/24095) ([Vladimir](https://github.com/vdimir)).
-* Add two Replicated*MergeTree settings: `max_replicated_fetches_network_bandwidth` and `max_replicated_sends_network_bandwidth` which allows to limit maximum speed of replicated fetches/sends for table. Add two server-wide settings (in `default` user profile): `max_replicated_fetches_network_bandwidth_for_server` and `max_replicated_sends_network_bandwidth_for_server` which limit maximum speed of replication for all tables. The settings are not followed perfectly accurately. Turned off by default. Fixes [#1821](https://github.com/ClickHouse/ClickHouse/issues/1821). [#24573](https://github.com/ClickHouse/ClickHouse/pull/24573) ([alesapin](https://github.com/alesapin)).
* On server start, parts with incorrect partition ID will never be removed, but always detached. [#25070](https://github.com/ClickHouse/ClickHouse/issues/25070). [#25166](https://github.com/ClickHouse/ClickHouse/pull/25166) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
-* Use separate `clickhouse-bridge` group and user for bridge processes. Set oom_score_adj so the bridges will be first subjects for OOM killer. Set set maximum RSS to 1 GiB. Closes [#23861](https://github.com/ClickHouse/ClickHouse/issues/23861). [#25280](https://github.com/ClickHouse/ClickHouse/pull/25280) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Increase size of background schedule pool to 128 (`background_schedule_pool_size` setting). It allows avoiding replication queue hang on slow zookeeper connection. [#25072](https://github.com/ClickHouse/ClickHouse/pull/25072) ([alesapin](https://github.com/alesapin)).
* Add merge tree setting `max_parts_to_merge_at_once` which limits the number of parts that can be merged in the background at once. Doesn't affect `OPTIMIZE FINAL` query. Fixes [#1820](https://github.com/ClickHouse/ClickHouse/issues/1820). [#24496](https://github.com/ClickHouse/ClickHouse/pull/24496) ([alesapin](https://github.com/alesapin)).
-* Allow `not in` operator to be used in partition pruning. [#24894](https://github.com/ClickHouse/ClickHouse/pull/24894) ([Amos Bird](https://github.com/amosbird)).
+* Allow `NOT IN` operator to be used in partition pruning. [#24894](https://github.com/ClickHouse/ClickHouse/pull/24894) ([Amos Bird](https://github.com/amosbird)).
* Recognize IPv4 addresses like `127.0.1.1` as local. This is controversial and closes [#23504](https://github.com/ClickHouse/ClickHouse/issues/23504). Michael Filimonov will test this feature. [#24316](https://github.com/ClickHouse/ClickHouse/pull/24316) ([alexey-milovidov](https://github.com/alexey-milovidov)).
-* ClickHouse database created with MaterializeMySQL now contains all column comments from the MySQL database that materialized. [#25199](https://github.com/ClickHouse/ClickHouse/pull/25199) ([Storozhuk Kostiantyn](https://github.com/sand6255)).
+* ClickHouse database created with MaterializeMySQL (it is an experimental feature) now contains all column comments from the materialized MySQL database.
[#25199](https://github.com/ClickHouse/ClickHouse/pull/25199) ([Storozhuk Kostiantyn](https://github.com/sand6255)).
* Add settings (`connection_auto_close`/`connection_max_tries`/`connection_pool_size`) for MySQL storage engine. [#24146](https://github.com/ClickHouse/ClickHouse/pull/24146) ([Azat Khuzhin](https://github.com/azat)).
* Improve startup time of Distributed engine. [#25663](https://github.com/ClickHouse/ClickHouse/pull/25663) ([Azat Khuzhin](https://github.com/azat)).
-* Drop replicas from dirname for internal_replication=true (allows INSERT into Distributed with cluster from any number of replicas, before only 15 replicas was supported, everything more will fail with ENAMETOOLONG while creating directory for async blocks). [#25513](https://github.com/ClickHouse/ClickHouse/pull/25513) ([Azat Khuzhin](https://github.com/azat)).
-* Added support Interval type for LowCardinality. Closes [#21730](https://github.com/ClickHouse/ClickHouse/issues/21730). [#25410](https://github.com/ClickHouse/ClickHouse/pull/25410) ([Vladimir](https://github.com/vdimir)).
-* Add == operator on time conditions for sequenceMatch and sequenceCount functions. For eg: sequenceMatch('(?1)(?t==1)(?2)')(time, data = 1, data = 2). [#25299](https://github.com/ClickHouse/ClickHouse/pull/25299) ([Christophe Kalenzaga](https://github.com/mga-chka)).
+* Improvement for Distributed tables. Drop replicas from dirname for internal_replication=true (allows INSERT into Distributed with cluster from any number of replicas; previously only 15 replicas were supported, and anything more would fail with ENAMETOOLONG while creating directory for async blocks). [#25513](https://github.com/ClickHouse/ClickHouse/pull/25513) ([Azat Khuzhin](https://github.com/azat)).
+* Added support for the `Interval` type in `LowCardinality`. It is needed for intermediate values of some expressions. Closes [#21730](https://github.com/ClickHouse/ClickHouse/issues/21730). [#25410](https://github.com/ClickHouse/ClickHouse/pull/25410) ([Vladimir](https://github.com/vdimir)).
+* Add `==` operator on time conditions for `sequenceMatch` and `sequenceCount` functions. For example: `sequenceMatch('(?1)(?t==1)(?2)')(time, data = 1, data = 2)` (see the sketch below). [#25299](https://github.com/ClickHouse/ClickHouse/pull/25299) ([Christophe Kalenzaga](https://github.com/mga-chka)).
* Add settings `http_max_fields`, `http_max_field_name_size`, `http_max_field_value_size`. [#25296](https://github.com/ClickHouse/ClickHouse/pull/25296) ([Ivan](https://github.com/abyss7)).
-* Add support for function `if` with Decimal and Int types on its branches. This closes [#20549](https://github.com/ClickHouse/ClickHouse/issues/20549). This closes [#10142](https://github.com/ClickHouse/ClickHouse/issues/10142). [#25283](https://github.com/ClickHouse/ClickHouse/pull/25283) ([alexey-milovidov](https://github.com/alexey-milovidov)).
+* Add support for function `if` with `Decimal` and `Int` types on its branches. This closes [#20549](https://github.com/ClickHouse/ClickHouse/issues/20549). This closes [#10142](https://github.com/ClickHouse/ClickHouse/issues/10142). [#25283](https://github.com/ClickHouse/ClickHouse/pull/25283) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Update prompt in `clickhouse-client` and display a message when reconnecting. This closes [#10577](https://github.com/ClickHouse/ClickHouse/issues/10577). [#25281](https://github.com/ClickHouse/ClickHouse/pull/25281) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Correct memory tracking in aggregate function `topK`.
This closes [#25259](https://github.com/ClickHouse/ClickHouse/issues/25259). [#25260](https://github.com/ClickHouse/ClickHouse/pull/25260) ([alexey-milovidov](https://github.com/alexey-milovidov)).
-* Fix topLevelDomain() for IDN hosts (i.e. `example.рф`), before it returns empty string for such hosts. [#25103](https://github.com/ClickHouse/ClickHouse/pull/25103) ([Azat Khuzhin](https://github.com/azat)).
-* Detect linux version at runtime (for worked nested epoll, that is required for `async_socket_for_remote`/`use_hedged_requests`, otherwise remote queries may stuck). [#25067](https://github.com/ClickHouse/ClickHouse/pull/25067) ([Azat Khuzhin](https://github.com/azat)).
+* Fix `topLevelDomain` for IDN hosts (i.e. `example.рф`); previously it returned an empty string for such hosts. [#25103](https://github.com/ClickHouse/ClickHouse/pull/25103) ([Azat Khuzhin](https://github.com/azat)).
+* Detect Linux kernel version at runtime (needed for correctly working nested epoll, which is required for `async_socket_for_remote`/`use_hedged_requests`; otherwise remote queries may get stuck). [#25067](https://github.com/ClickHouse/ClickHouse/pull/25067) ([Azat Khuzhin](https://github.com/azat)).
* For distributed query, when `optimize_skip_unused_shards=1`, allow to skip shard with condition like `(sharding key) IN (one-element-tuple)`. (Tuples with many elements were supported. Tuple with single element did not work because it is parsed as literal). [#24930](https://github.com/ClickHouse/ClickHouse/pull/24930) ([Amos Bird](https://github.com/amosbird)).
-* Improved logging of S3 errors, no more double spaces in case of empty keys and buckets. [#24897](https://github.com/ClickHouse/ClickHouse/pull/24897) ([Vladimir Chebotarev](https://github.com/excitoon)).
+* Improved log messages of S3 errors, no more double whitespaces in case of empty keys and buckets. [#24897](https://github.com/ClickHouse/ClickHouse/pull/24897) ([Vladimir Chebotarev](https://github.com/excitoon)).
* Some queries require multi-pass semantic analysis. Try reusing built sets for `IN` in this case. [#24874](https://github.com/ClickHouse/ClickHouse/pull/24874) ([Amos Bird](https://github.com/amosbird)).
* Respect `max_distributed_connections` for `insert_distributed_sync` (otherwise for huge clusters and sync insert it may run out of `max_thread_pool_size`). [#24754](https://github.com/ClickHouse/ClickHouse/pull/24754) ([Azat Khuzhin](https://github.com/azat)).
* Avoid hiding errors like `Limit for rows or bytes to read exceeded` for scalar subqueries. [#24545](https://github.com/ClickHouse/ClickHouse/pull/24545) ([nvartolomei](https://github.com/nvartolomei)).
* Make String-to-Int parser stricter so that `toInt64('+')` will throw. [#24475](https://github.com/ClickHouse/ClickHouse/pull/24475) ([Amos Bird](https://github.com/amosbird)).
-* If SSDDictionary is created with DDL query, it can be created only inside user_files directory. [#24466](https://github.com/ClickHouse/ClickHouse/pull/24466) ([Maksim Kita](https://github.com/kitaisreal)).
-* PostgreSQL support specifying non default schema for insert queries. Closes [#24149](https://github.com/ClickHouse/ClickHouse/issues/24149). [#24413](https://github.com/ClickHouse/ClickHouse/pull/24413) ([Kseniia Sumarokova](https://github.com/kssenii)).
+* If an `SSD_CACHE` dictionary is created with a DDL query, it can be created only inside the `user_files` directory. [#24466](https://github.com/ClickHouse/ClickHouse/pull/24466) ([Maksim Kita](https://github.com/kitaisreal)).
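To make the `SSD_CACHE` restriction above concrete, a minimal sketch of a DDL-created dictionary (the dictionary name, source table, and path are hypothetical; only the requirement that `PATH` points inside `user_files` comes from the change):

``` sql
-- Hypothetical dictionary: the PATH of the SSD cache must stay inside the user_files directory.
CREATE DICTIONARY ssd_dict
(
    key UInt64,
    value String
)
PRIMARY KEY key
SOURCE(CLICKHOUSE(TABLE 'dict_source'))
LIFETIME(MIN 300 MAX 600)
LAYOUT(SSD_CACHE(BLOCK_SIZE 4096 FILE_SIZE 16777216 PATH '/var/lib/clickhouse/user_files/ssd_dict'));
```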
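And a runnable sketch of the `(?t==N)` time condition for `sequenceMatch` mentioned earlier in this list (synthetic data; the pattern matches because the two events are exactly one second apart):

``` sql
SELECT sequenceMatch('(?1)(?t==1)(?2)')(time, data = 1, data = 2) AS matched
FROM
(
    SELECT toDateTime('2021-07-09 00:00:00') + number AS time, toUInt8(number + 1) AS data
    FROM numbers(2)
);
```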
+* PostgreSQL support for specifying a non-default schema for insert queries. Closes [#24149](https://github.com/ClickHouse/ClickHouse/issues/24149). [#24413](https://github.com/ClickHouse/ClickHouse/pull/24413) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Fix IPv6 addresses resolving (i.e. fixes `select * from remote('[::1]', system.one)`). [#24319](https://github.com/ClickHouse/ClickHouse/pull/24319) ([Azat Khuzhin](https://github.com/azat)).
* Fix trailing whitespaces in FROM clause with subqueries in multiline mode, and also change the output of the queries slightly in a more human-friendly way. [#24151](https://github.com/ClickHouse/ClickHouse/pull/24151) ([Azat Khuzhin](https://github.com/azat)).
-* Suppress exceptions from logger code. [#24069](https://github.com/ClickHouse/ClickHouse/pull/24069) ([Azat Khuzhin](https://github.com/azat)).
-* Add ability to split distributed batch on failures (i.e. due to memory limits, corruptions), under `distributed_directory_monitor_split_batch_on_failure` (OFF by default). [#23864](https://github.com/ClickHouse/ClickHouse/pull/23864) ([Azat Khuzhin](https://github.com/azat)).
-* Handle column name clashes for storage join. Closes [#20309](https://github.com/ClickHouse/ClickHouse/issues/20309). [#23769](https://github.com/ClickHouse/ClickHouse/pull/23769) ([Vladimir](https://github.com/vdimir)).
-* Display progress for File table engine in clickhouse-local and on INSERT query in clickhouse-client when data is passed to stdin. Closes [#18209](https://github.com/ClickHouse/ClickHouse/issues/18209). [#23656](https://github.com/ClickHouse/ClickHouse/pull/23656) ([Kseniia Sumarokova](https://github.com/kssenii)).
-* Bugfixes and improvements of clickhouse-copier. Allow to copy tables with different (but compatible schemas). Closes [#9159](https://github.com/ClickHouse/ClickHouse/issues/9159). Added test to copy ReplacingMergeTree. Closes [#22711](https://github.com/ClickHouse/ClickHouse/issues/22711). Support TTL on columns and Data Skipping Indices. It simply removes it to create internal Distributed table (underlying table will have TTL and skipping indices). Closes [#19384](https://github.com/ClickHouse/ClickHouse/issues/19384). Allow to copy MATERIALIZED and ALIAS columns. There are some cases in which it could be helpful (e.g. if this column is in PRIMARY KEY). Now it could be allowed by setting `allow_to_copy_alias_and_materialized_columns` property to true in task configuration. Closes [#9177](https://github.com/ClickHouse/ClickHouse/issues/9177). Closes [#11007] (https://github.com/ClickHouse/ClickHouse/issues/11007). Closes [#9514](https://github.com/ClickHouse/ClickHouse/issues/9514). Added a property `allow_to_drop_target_partitions` in task configuration to drop partition in original table before moving helping tables. Closes [#20957](https://github.com/ClickHouse/ClickHouse/issues/20957). Get rid of `OPTIMIZE DEDUPLICATE` query. This hack was needed, because `ALTER TABLE MOVE PARTITION` was retried many times and plain MergeTree tables don't have deduplication. Closes [#17966](https://github.com/ClickHouse/ClickHouse/issues/17966). Write progress to ZooKeeper node on path `task_path + /status` in JSON format. Closes [#20955](https://github.com/ClickHouse/ClickHouse/issues/20955). Support for ReplicatedTables without arguments. Closes [#24834](https://github.com/ClickHouse/ClickHouse/issues/24834) .[#23518](https://github.com/ClickHouse/ClickHouse/pull/23518) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
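A minimal sketch of the non-default-schema support for PostgreSQL inserts noted above (the connection parameters, table, and schema names are placeholders; the optional trailing schema argument is the new part):

``` sql
INSERT INTO TABLE FUNCTION postgresql('localhost:5432', 'pg_db', 'pg_table', 'pg_user', 'password', 'custom_schema')
VALUES (1, 'one');
```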
+* Improvement for Distributed tables. Add ability to split distributed batch on failures (i.e. due to memory limits, corruptions), under `distributed_directory_monitor_split_batch_on_failure` (OFF by default). [#23864](https://github.com/ClickHouse/ClickHouse/pull/23864) ([Azat Khuzhin](https://github.com/azat)).
+* Handle column name clashes for `Join` table engine. Closes [#20309](https://github.com/ClickHouse/ClickHouse/issues/20309). [#23769](https://github.com/ClickHouse/ClickHouse/pull/23769) ([Vladimir](https://github.com/vdimir)).
+* Display progress for `File` table engine in `clickhouse-local` and on INSERT query in `clickhouse-client` when data is passed to stdin. Closes [#18209](https://github.com/ClickHouse/ClickHouse/issues/18209). [#23656](https://github.com/ClickHouse/ClickHouse/pull/23656) ([Kseniia Sumarokova](https://github.com/kssenii)).
+* Bugfixes and improvements of `clickhouse-copier`. Allow to copy tables with different (but compatible) schemas. Closes [#9159](https://github.com/ClickHouse/ClickHouse/issues/9159). Added test to copy ReplacingMergeTree. Closes [#22711](https://github.com/ClickHouse/ClickHouse/issues/22711). Support TTL on columns and Data Skipping Indices. It simply removes them when creating the internal Distributed table (the underlying table will have TTL and skipping indices). Closes [#19384](https://github.com/ClickHouse/ClickHouse/issues/19384). Allow to copy MATERIALIZED and ALIAS columns. There are some cases in which it could be helpful (e.g. if this column is in PRIMARY KEY). Now it could be allowed by setting `allow_to_copy_alias_and_materialized_columns` property to true in task configuration. Closes [#9177](https://github.com/ClickHouse/ClickHouse/issues/9177). Closes [#11007](https://github.com/ClickHouse/ClickHouse/issues/11007). Closes [#9514](https://github.com/ClickHouse/ClickHouse/issues/9514). Added a property `allow_to_drop_target_partitions` in task configuration to drop partition in original table before moving helping tables. Closes [#20957](https://github.com/ClickHouse/ClickHouse/issues/20957). Get rid of `OPTIMIZE DEDUPLICATE` query. This hack was needed, because `ALTER TABLE MOVE PARTITION` was retried many times and plain MergeTree tables don't have deduplication. Closes [#17966](https://github.com/ClickHouse/ClickHouse/issues/17966). Write progress to ZooKeeper node on path `task_path + /status` in JSON format. Closes [#20955](https://github.com/ClickHouse/ClickHouse/issues/20955). Support for ReplicatedTables without arguments. Closes [#24834](https://github.com/ClickHouse/ClickHouse/issues/24834). [#23518](https://github.com/ClickHouse/ClickHouse/pull/23518) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
* Added sleep with backoff between read retries from S3. [#23461](https://github.com/ClickHouse/ClickHouse/pull/23461) ([Vladimir Chebotarev](https://github.com/excitoon)).
* Respect `insert_allow_materialized_columns` (allows materialized columns) for INSERT into `Distributed` table. [#23349](https://github.com/ClickHouse/ClickHouse/pull/23349) ([Azat Khuzhin](https://github.com/azat)).
* Add ability to push down LIMIT for distributed queries. [#23027](https://github.com/ClickHouse/ClickHouse/pull/23027) ([Azat Khuzhin](https://github.com/azat)).
-* Fix Zero-Copy replication with several S3 volumes (Fixes [#22679](https://github.com/ClickHouse/ClickHouse/issues/22679)). [#22864](https://github.com/ClickHouse/ClickHouse/pull/22864) ([ianton-ru](https://github.com/ianton-ru)).
-* Resolve the actual port number bound when a user requests any available port from the operating system. [#25569](https://github.com/ClickHouse/ClickHouse/pull/25569) ([bnaecker](https://github.com/bnaecker)).
+* Fix zero-copy replication with several S3 volumes (Fixes [#22679](https://github.com/ClickHouse/ClickHouse/issues/22679)). [#22864](https://github.com/ClickHouse/ClickHouse/pull/22864) ([ianton-ru](https://github.com/ianton-ru)).
+* Resolve the actual port number bound when a user requests any available port from the operating system to show it in the log message. [#25569](https://github.com/ClickHouse/ClickHouse/pull/25569) ([bnaecker](https://github.com/bnaecker)).
+* Fixed a case when conversion of PostgreSQL arrays sometimes resulted in the `String` data type instead of an n-dimensional array, because `attndims` works incorrectly in some cases. Closes [#24804](https://github.com/ClickHouse/ClickHouse/issues/24804). [#25538](https://github.com/ClickHouse/ClickHouse/pull/25538) ([Kseniia Sumarokova](https://github.com/kssenii)).
+* Fix conversion of DateTime with timezone for MySQL, PostgreSQL, ODBC. Closes [#5057](https://github.com/ClickHouse/ClickHouse/issues/5057). [#25528](https://github.com/ClickHouse/ClickHouse/pull/25528) ([Kseniia Sumarokova](https://github.com/kssenii)).
+* Distinguish KILL MUTATION for different tables (fixes unexpected `Cancelled mutating parts` error). [#25025](https://github.com/ClickHouse/ClickHouse/pull/25025) ([Azat Khuzhin](https://github.com/azat)).
+* Allow to declare S3 disk at root of bucket (S3 virtual filesystem is an experimental feature under development). [#24898](https://github.com/ClickHouse/ClickHouse/pull/24898) ([Vladimir Chebotarev](https://github.com/excitoon)).
+* Enable reading of subcolumns (e.g. components of Tuples) for distributed tables. [#24472](https://github.com/ClickHouse/ClickHouse/pull/24472) ([Anton Popov](https://github.com/CurtizJ)).
+* A feature for MySQL compatibility protocol: make the `user` function return correct output. Closes [#25697](https://github.com/ClickHouse/ClickHouse/pull/25697). [#25697](https://github.com/ClickHouse/ClickHouse/pull/25697) ([sundyli](https://github.com/sundy-li)).

#### Bug Fix

-* Fix bug which can lead to ZooKeeper client hung inside clickhouse-server. [#24721](https://github.com/ClickHouse/ClickHouse/pull/24721) ([alesapin](https://github.com/alesapin)).
-* If ZooKeeper connection was lost and replica was cloned after restoring the connection, its replication queue might contain outdated entries. Fixed crash when replication queue contains intersecting virtual parts. It may rarely happen if some data part was lost. Print error in log instead of terminating. [#24777](https://github.com/ClickHouse/ClickHouse/pull/24777) ([tavplubix](https://github.com/tavplubix)).
-* Fix lost `WHERE` condition in expression-push-down optimization of query plan (setting `query_plan_filter_push_down = 1` by default). Fixes [#25368](https://github.com/ClickHouse/ClickHouse/issues/25368). [#25370](https://github.com/ClickHouse/ClickHouse/pull/25370) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
-* Fix bug which can lead to intersecting parts after merges with TTL: `Part all_40_40_0 is covered by all_40_40_1 but should be merged into all_40_41_1. This shouldn't happen often.`. [#25549](https://github.com/ClickHouse/ClickHouse/pull/25549) ([alesapin](https://github.com/alesapin)).
+* Improvement for backward compatibility. Use old modulo function version when used in partition key.
Closes [#23508](https://github.com/ClickHouse/ClickHouse/issues/23508). [#24157](https://github.com/ClickHouse/ClickHouse/pull/24157) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Fix extremely rare bug on low-memory servers which can lead to the inability to perform merges without restart. Possibly fixes [#24603](https://github.com/ClickHouse/ClickHouse/issues/24603). [#24872](https://github.com/ClickHouse/ClickHouse/pull/24872) ([alesapin](https://github.com/alesapin)).
* Fix extremely rare error `Tagging already tagged part` in replication queue during concurrent `alter move/replace partition`. Possibly fixes [#22142](https://github.com/ClickHouse/ClickHouse/issues/22142). [#24961](https://github.com/ClickHouse/ClickHouse/pull/24961) ([alesapin](https://github.com/alesapin)).
+* Fix potential crash when calculating aggregate function states by aggregation of aggregate function states of other aggregate functions (not a practical use case). See [#24523](https://github.com/ClickHouse/ClickHouse/issues/24523). [#25015](https://github.com/ClickHouse/ClickHouse/pull/25015) ([alexey-milovidov](https://github.com/alexey-milovidov)).
+* Fixed the behavior when query `SYSTEM RESTART REPLICA` or `SYSTEM SYNC REPLICA` does not finish. This was detected on a server with an extremely low amount of RAM. [#24457](https://github.com/ClickHouse/ClickHouse/pull/24457) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
+* Fix bug which can lead to the ZooKeeper client hanging inside clickhouse-server. [#24721](https://github.com/ClickHouse/ClickHouse/pull/24721) ([alesapin](https://github.com/alesapin)).
+* If ZooKeeper connection was lost and replica was cloned after restoring the connection, its replication queue might contain outdated entries. Fixed failed assertion when replication queue contains intersecting virtual parts. It may rarely happen if some data part was lost. Print error in log instead of terminating. [#24777](https://github.com/ClickHouse/ClickHouse/pull/24777) ([tavplubix](https://github.com/tavplubix)).
+* Fix lost `WHERE` condition in expression-push-down optimization of query plan (setting `query_plan_filter_push_down = 1` by default). Fixes [#25368](https://github.com/ClickHouse/ClickHouse/issues/25368). [#25370](https://github.com/ClickHouse/ClickHouse/pull/25370) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
+* Fix bug which can lead to intersecting parts after merges with TTL: `Part all_40_40_0 is covered by all_40_40_1 but should be merged into all_40_41_1. This shouldn't happen often.`. [#25549](https://github.com/ClickHouse/ClickHouse/pull/25549) ([alesapin](https://github.com/alesapin)).
* On ZooKeeper connection loss `ReplicatedMergeTree` table might wait for background operations to complete before trying to reconnect. It's fixed, now background operations are stopped forcefully. [#25306](https://github.com/ClickHouse/ClickHouse/pull/25306) ([tavplubix](https://github.com/tavplubix)).
-* Use old modulo function version when used in partition key. Closes [#23508](https://github.com/ClickHouse/ClickHouse/issues/23508). [#24157](https://github.com/ClickHouse/ClickHouse/pull/24157) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Fix error `Key expression contains comparison between inconvertible types` for queries with `ARRAY JOIN` in case an array is used in the primary key (a sketch of the affected pattern follows below). Fixes [#8247](https://github.com/ClickHouse/ClickHouse/issues/8247). [#25546](https://github.com/ClickHouse/ClickHouse/pull/25546) ([Anton Popov](https://github.com/CurtizJ)).
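A hedged sketch of the query shape affected by the `ARRAY JOIN` fix above (the table is hypothetical; before the fix, key analysis over an array column in the primary key could raise the `inconvertible types` error):

``` sql
CREATE TABLE t_array_pk (arr Array(UInt32)) ENGINE = MergeTree ORDER BY arr;
INSERT INTO t_array_pk VALUES ([1, 2]), ([3]);
SELECT a FROM t_array_pk ARRAY JOIN arr AS a WHERE a = 1;
```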
* Fix wrong totals for query `WITH TOTALS` and `WITH FILL`. Fixes [#20872](https://github.com/ClickHouse/ClickHouse/issues/20872). [#25539](https://github.com/ClickHouse/ClickHouse/pull/25539) ([Anton Popov](https://github.com/CurtizJ)).
-* Fixed case, when sometimes conversion of postgres arrays resulted in String data type, not n-dimensional array, because `attndims` works incorrectly in some cases. Closes [#24804](https://github.com/ClickHouse/ClickHouse/issues/24804). [#25538](https://github.com/ClickHouse/ClickHouse/pull/25538) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Fix data race when querying `system.clusters` while reloading the cluster configuration at the same time. [#25737](https://github.com/ClickHouse/ClickHouse/pull/25737) ([Amos Bird](https://github.com/amosbird)).
* Fixed `No such file or directory` error on moving `Distributed` table between databases. Fixes [#24971](https://github.com/ClickHouse/ClickHouse/issues/24971). [#25667](https://github.com/ClickHouse/ClickHouse/pull/25667) ([tavplubix](https://github.com/tavplubix)).
* `REPLACE PARTITION` might be ignored in rare cases if the source partition was empty. It's fixed. Fixes [#24869](https://github.com/ClickHouse/ClickHouse/issues/24869). [#25665](https://github.com/ClickHouse/ClickHouse/pull/25665) ([tavplubix](https://github.com/tavplubix)).
* Fixed a bug in `Replicated` database engine that might rarely cause some replica to skip enqueued DDL query. [#24805](https://github.com/ClickHouse/ClickHouse/pull/24805) ([tavplubix](https://github.com/tavplubix)).
* Fix null pointer dereference in `EXPLAIN AST` without query. [#25631](https://github.com/ClickHouse/ClickHouse/pull/25631) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
-* Fix potential crash when calculating aggregate function states by aggregation of aggregate function states of other aggregate functions (not a practical use case). See [#24523](https://github.com/ClickHouse/ClickHouse/issues/24523). [#25015](https://github.com/ClickHouse/ClickHouse/pull/25015) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Fix waiting of automatic dropping of empty parts. It could lead to the background pool filling up completely and replication getting stuck. [#23315](https://github.com/ClickHouse/ClickHouse/pull/23315) ([Anton Popov](https://github.com/CurtizJ)).
-* Fix restore S3 table. [#25601](https://github.com/ClickHouse/ClickHouse/pull/25601) ([ianton-ru](https://github.com/ianton-ru)).
-* Fix segfault in `Arrow` format when using `Decimal256`. Add arrow `Decimal256` support. [#25531](https://github.com/ClickHouse/ClickHouse/pull/25531) ([Kruglov Pavel](https://github.com/Avogar)).
-* Fix convertion of datetime with timezone for MySQL, PostgreSQL, ODBC. Closes [#5057](https://github.com/ClickHouse/ClickHouse/issues/5057). [#25528](https://github.com/ClickHouse/ClickHouse/pull/25528) ([Kseniia Sumarokova](https://github.com/kssenii)).
+* Fix restore of a table stored in S3 virtual filesystem (it is an experimental feature not ready for production). [#25601](https://github.com/ClickHouse/ClickHouse/pull/25601) ([ianton-ru](https://github.com/ianton-ru)).
+* Fix nullptr dereference in `Arrow` format when using `Decimal256`. Add `Decimal256` support for `Arrow` format. [#25531](https://github.com/ClickHouse/ClickHouse/pull/25531) ([Kruglov Pavel](https://github.com/Avogar)).
* Fix excessive underscore before the names of the preprocessed configuration files.
[#25431](https://github.com/ClickHouse/ClickHouse/pull/25431) ([Vitaly Baranov](https://github.com/vitlibar)).
-* Fix segfault when sharding_key is absent in task config for copier. [#25419](https://github.com/ClickHouse/ClickHouse/pull/25419) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
+* A fix for the `clickhouse-copier` tool: fix a segfault when `sharding_key` is absent in the task config. [#25419](https://github.com/ClickHouse/ClickHouse/pull/25419) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
* Fix `REPLACE` column transformer when used in DDL by correctly quoting the formatted query. This fixes [#23925](https://github.com/ClickHouse/ClickHouse/issues/23925). [#25391](https://github.com/ClickHouse/ClickHouse/pull/25391) ([Amos Bird](https://github.com/amosbird)).
* Fix the possibility of non-deterministic behaviour of the `quantileDeterministic` function and similar. This closes [#20480](https://github.com/ClickHouse/ClickHouse/issues/20480). [#25313](https://github.com/ClickHouse/ClickHouse/pull/25313) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Support `SimpleAggregateFunction(LowCardinality)` for `SummingMergeTree`. Fixes [#25134](https://github.com/ClickHouse/ClickHouse/issues/25134). [#25300](https://github.com/ClickHouse/ClickHouse/pull/25300) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
-* Fix Logical Error Cannot sum Array/Tuple in min/maxMap. [#25298](https://github.com/ClickHouse/ClickHouse/pull/25298) ([Kruglov Pavel](https://github.com/Avogar)).
+* Fix logical error with exception message "Cannot sum Array/Tuple in min/maxMap". [#25298](https://github.com/ClickHouse/ClickHouse/pull/25298) ([Kruglov Pavel](https://github.com/Avogar)).
* Fix error `Bad cast from type DB::ColumnLowCardinality to DB::ColumnVector` for queries where `LowCardinality` argument was used for IN (this bug appeared in 21.6). Fixes [#25187](https://github.com/ClickHouse/ClickHouse/issues/25187). [#25290](https://github.com/ClickHouse/ClickHouse/pull/25290) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
-* Fix joinGetOrNull with not-nullable columns. This fixes [#24261](https://github.com/ClickHouse/ClickHouse/issues/24261). [#25288](https://github.com/ClickHouse/ClickHouse/pull/25288) ([Amos Bird](https://github.com/amosbird)).
+* Fix incorrect behaviour of `joinGetOrNull` with not-nullable columns. This fixes [#24261](https://github.com/ClickHouse/ClickHouse/issues/24261). [#25288](https://github.com/ClickHouse/ClickHouse/pull/25288) ([Amos Bird](https://github.com/amosbird)).
* Fix incorrect behaviour and UBSan report in big integers. In previous versions `CAST(1e19 AS UInt128)` returned zero. [#25279](https://github.com/ClickHouse/ClickHouse/pull/25279) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Fixed an error which occurred while inserting a subset of columns using CSVWithNames format. Fixes [#25129](https://github.com/ClickHouse/ClickHouse/issues/25129). [#25169](https://github.com/ClickHouse/ClickHouse/pull/25169) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
* Do not use table's projection for `SELECT` with `FINAL`. It is not supported yet. [#25163](https://github.com/ClickHouse/ClickHouse/pull/25163) ([Amos Bird](https://github.com/amosbird)).
* Fix possible parts loss after updating up to 21.5 in case table used `UUID` in partition key. (It is not recommended to use `UUID` in partition key). Fixes [#25070](https://github.com/ClickHouse/ClickHouse/issues/25070).
[#25127](https://github.com/ClickHouse/ClickHouse/pull/25127) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Fix crash in query with cross join and `joined_subquery_requires_alias = 0`. Fixes [#24011](https://github.com/ClickHouse/ClickHouse/issues/24011). [#25082](https://github.com/ClickHouse/ClickHouse/pull/25082) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
-* Fix bug with constant maps in mapContains that lead to error `empty column was returned by function mapContains`. Closes [#25077](https://github.com/ClickHouse/ClickHouse/issues/25077). [#25080](https://github.com/ClickHouse/ClickHouse/pull/25080) ([Kruglov Pavel](https://github.com/Avogar)).
-* Fix bug which allows creating tables with columns referencing themselves like `a UInt32 ALIAS a + 1` or `b UInt32 MATERIALIZED b`. Fixes [#24910](https://github.com/ClickHouse/ClickHouse/issues/24910), [#24292](https://github.com/ClickHouse/ClickHouse/issues/24292). [#25059](https://github.com/ClickHouse/ClickHouse/pull/25059) ([alesapin](https://github.com/alesapin)).
-* Fix wrong result when using aggregate projection with **not empty** `GROUP BY` key to execute query with `GROUP BY` by **empty** key. [#25055](https://github.com/ClickHouse/ClickHouse/pull/25055) ([Amos Bird](https://github.com/amosbird)).
-* Distinguish KILL MUTATION for different tables (fixes unexpected `Cancelled mutating parts` error). [#25025](https://github.com/ClickHouse/ClickHouse/pull/25025) ([Azat Khuzhin](https://github.com/azat)).
+* Fix bug with constant maps in the `mapContains` function that led to the error `empty column was returned by function mapContains`. Closes [#25077](https://github.com/ClickHouse/ClickHouse/issues/25077). [#25080](https://github.com/ClickHouse/ClickHouse/pull/25080) ([Kruglov Pavel](https://github.com/Avogar)).
+* Remove possibility to create tables with columns referencing themselves like `a UInt32 ALIAS a + 1` or `b UInt32 MATERIALIZED b`. Fixes [#24910](https://github.com/ClickHouse/ClickHouse/issues/24910), [#24292](https://github.com/ClickHouse/ClickHouse/issues/24292). [#25059](https://github.com/ClickHouse/ClickHouse/pull/25059) ([alesapin](https://github.com/alesapin)).
+* Fix wrong result when using aggregate projection with *not empty* `GROUP BY` key to execute query with `GROUP BY` by *empty* key. [#25055](https://github.com/ClickHouse/ClickHouse/pull/25055) ([Amos Bird](https://github.com/amosbird)).
* Fix serialization of split nested messages in Protobuf format. This PR fixes [#24647](https://github.com/ClickHouse/ClickHouse/issues/24647). [#25000](https://github.com/ClickHouse/ClickHouse/pull/25000) ([Vitaly Baranov](https://github.com/vitlibar)).
* Fix limit/offset settings for distributed queries (ignore on the remote nodes). [#24940](https://github.com/ClickHouse/ClickHouse/pull/24940) ([Azat Khuzhin](https://github.com/azat)).
-* Fix possible heap-buffer-overflow in Arrow. [#24922](https://github.com/ClickHouse/ClickHouse/pull/24922) ([Kruglov Pavel](https://github.com/Avogar)).
-* Fixed bug with declaring S3 disk at root of bucket. [#24898](https://github.com/ClickHouse/ClickHouse/pull/24898) ([Vladimir Chebotarev](https://github.com/excitoon)).
-* Fixed possible error 'Cannot read from istream at offset 0' when reading a file from DiskS3. [#24885](https://github.com/ClickHouse/ClickHouse/pull/24885) ([Pavel Kovalenko](https://github.com/Jokser)).
+* Fix possible heap-buffer-overflow in `Arrow` format.
[#24922](https://github.com/ClickHouse/ClickHouse/pull/24922) ([Kruglov Pavel](https://github.com/Avogar)).
+* Fixed possible error 'Cannot read from istream at offset 0' when reading a file from DiskS3 (S3 virtual filesystem is an experimental feature under development that should not be used in production). [#24885](https://github.com/ClickHouse/ClickHouse/pull/24885) ([Pavel Kovalenko](https://github.com/Jokser)).
* Fix "Missing columns" exception when joining Distributed Materialized View. [#24870](https://github.com/ClickHouse/ClickHouse/pull/24870) ([Azat Khuzhin](https://github.com/azat)).
-* Allow NULL values in postgresql protocol. Closes [#22622](https://github.com/ClickHouse/ClickHouse/issues/22622). [#24857](https://github.com/ClickHouse/ClickHouse/pull/24857) ([Kseniia Sumarokova](https://github.com/kssenii)).
+* Allow `NULL` values in postgresql compatibility protocol. Closes [#22622](https://github.com/ClickHouse/ClickHouse/issues/22622). [#24857](https://github.com/ClickHouse/ClickHouse/pull/24857) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Fix bug when exception `Mutation was killed` can be thrown to the client on mutation wait when the mutation is not yet loaded into memory. [#24809](https://github.com/ClickHouse/ClickHouse/pull/24809) ([alesapin](https://github.com/alesapin)).
* Fixed bug in deserialization of random generator state which might cause some data types such as `AggregateFunction(groupArraySample(N), T)` to behave in a non-deterministic way. [#24538](https://github.com/ClickHouse/ClickHouse/pull/24538) ([tavplubix](https://github.com/tavplubix)).
-* Disallow building uniqXXXXStates of other aggregation states. [#24523](https://github.com/ClickHouse/ClickHouse/pull/24523) ([Raúl Marín](https://github.com/Algunenano)).
-* Enable reading of subcolumns for distributed tables. [#24472](https://github.com/ClickHouse/ClickHouse/pull/24472) ([Anton Popov](https://github.com/CurtizJ)).
+* Disallow building uniqXXXXStates of other aggregation states. [#24523](https://github.com/ClickHouse/ClickHouse/pull/24523) ([Raúl Marín](https://github.com/Algunenano)). Then allow it back by actually eliminating the root cause of the related issue. ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Fix usage of tuples in `CREATE .. AS SELECT` queries (see the sketch below). [#24464](https://github.com/ClickHouse/ClickHouse/pull/24464) ([Anton Popov](https://github.com/CurtizJ)).
-* Fixed the behavior when query `SYSTEM RESTART REPLICA` or `SYSTEM SYNC REPLICA` is being processed infinitely. This was detected on server with extremely little amount of RAM. [#24457](https://github.com/ClickHouse/ClickHouse/pull/24457) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
-* Fix totalBytes computation in StorageBuffer. In current CH version total_writes.bytes counter decreases too much during the buffer flush. It leads to counter overflow and totalBytes return something around 17.44 EB some time after the flush. [#24450](https://github.com/ClickHouse/ClickHouse/pull/24450) ([DimasKovas](https://github.com/DimasKovas)).
-* Fix incorrect monotonicity of toWeek function. This fixes [#24422](https://github.com/ClickHouse/ClickHouse/issues/24422) . This bug was introduced in https://github.com/ClickHouse/ClickHouse/pull/5212 , and was exposed later by smarter partition pruner. [#24446](https://github.com/ClickHouse/ClickHouse/pull/24446) ([Amos Bird](https://github.com/amosbird)).
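A hedged sketch of the pattern covered by the `CREATE .. AS SELECT` tuple fix above (the table name is hypothetical; the tuple literal in the select list is the relevant part):

``` sql
CREATE TABLE t_tuple ENGINE = Memory AS SELECT (1, 'a') AS t;
SELECT t.1, t.2 FROM t_tuple;
```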
-* Fixed the deadlock that can happen during LDAP role (re)mapping, when LDAP group is mapped to a nonexistent local role. [#24431](https://github.com/ClickHouse/ClickHouse/pull/24431) ([Denis Glazachev](https://github.com/traceon)).
+* Fix computation of total bytes in `Buffer` table. In current ClickHouse version total_writes.bytes counter decreases too much during the buffer flush. It leads to counter overflow and `totalBytes` returning something around 17.44 EB some time after the flush. [#24450](https://github.com/ClickHouse/ClickHouse/pull/24450) ([DimasKovas](https://github.com/DimasKovas)).
+* Fix incorrect information about the monotonicity of the `toWeek` function. This fixes [#24422](https://github.com/ClickHouse/ClickHouse/issues/24422). This bug was introduced in https://github.com/ClickHouse/ClickHouse/pull/5212, and was exposed later by a smarter partition pruner. [#24446](https://github.com/ClickHouse/ClickHouse/pull/24446) ([Amos Bird](https://github.com/amosbird)).
+* A fix for the case when user authentication is managed by LDAP: fixed a potential deadlock that can happen during LDAP role (re)mapping, when an LDAP group is mapped to a nonexistent local role. [#24431](https://github.com/ClickHouse/ClickHouse/pull/24431) ([Denis Glazachev](https://github.com/traceon)).
* In "multipart/form-data" message consider the CRLF preceding a boundary as part of it. Fixes [#23905](https://github.com/ClickHouse/ClickHouse/issues/23905). [#24399](https://github.com/ClickHouse/ClickHouse/pull/24399) ([Ivan](https://github.com/abyss7)).
* Fix drop partition with intersecting fake parts. In rare cases there might be parts with mutation version greater than current block number. [#24321](https://github.com/ClickHouse/ClickHouse/pull/24321) ([Amos Bird](https://github.com/amosbird)).
* Fixed a bug in moving Materialized View from Ordinary to Atomic database (`RENAME TABLE` query). Now inner table is moved to new database together with Materialized View. Fixes [#23926](https://github.com/ClickHouse/ClickHouse/issues/23926). [#24309](https://github.com/ClickHouse/ClickHouse/pull/24309) ([tavplubix](https://github.com/tavplubix)).
* Allow empty HTTP headers. Fixes [#23901](https://github.com/ClickHouse/ClickHouse/issues/23901). [#24285](https://github.com/ClickHouse/ClickHouse/pull/24285) ([Ivan](https://github.com/abyss7)).
+* Correct processing of mutations (ALTER UPDATE/DELETE) in Memory tables. Closes [#24274](https://github.com/ClickHouse/ClickHouse/issues/24274).
[#24275](https://github.com/ClickHouse/ClickHouse/pull/24275) ([flynn](https://github.com/ucasfl)).
+* Make column LowCardinality property in JOIN output the same as in the input, close [#23351](https://github.com/ClickHouse/ClickHouse/issues/23351), close [#20315](https://github.com/ClickHouse/ClickHouse/issues/20315). [#24061](https://github.com/ClickHouse/ClickHouse/pull/24061) ([Vladimir](https://github.com/vdimir)).
+* A fix for Kafka tables: fix the bug in failover behavior when `Engine = Kafka` was not able to start consumption if the same consumer had an empty assignment previously. Closes [#21118](https://github.com/ClickHouse/ClickHouse/issues/21118). [#21267](https://github.com/ClickHouse/ClickHouse/pull/21267) ([filimonov](https://github.com/filimonov)).

#### Build/Testing/Packaging Improvement

-* Adds cross-platform embedding of binary resources into executables. [#25146](https://github.com/ClickHouse/ClickHouse/pull/25146) ([bnaecker](https://github.com/bnaecker)).
-* Flatbuffers library updated to v.2.0.0. Improvements list https://github.com/google/flatbuffers/releases/tag/v2.0.0. [#25474](https://github.com/ClickHouse/ClickHouse/pull/25474) ([Ilya Yatsishin](https://github.com/qoega)).
-* Add CI check for darwin-aarch64 cross-compilation. [#25560](https://github.com/ClickHouse/ClickHouse/pull/25560) ([Ivan](https://github.com/abyss7)).
-* Ubuntu 20.04 is now used to run integration tests, docker-compose version used to run integration tests is updated to 1.28.2. Environment variables now take effect on docker-compose. Rework test_dictionaries_all_layouts_separate_sources to allow parallel run. [#20393](https://github.com/ClickHouse/ClickHouse/pull/20393) ([Ilya Yatsishin](https://github.com/qoega)).
-* Add join related options to stress tests. [#25200](https://github.com/ClickHouse/ClickHouse/pull/25200) ([Vladimir](https://github.com/vdimir)).
-* Enabling TestFlows RBAC tests. [#25498](https://github.com/ClickHouse/ClickHouse/pull/25498) ([vzakaznikov](https://github.com/vzakaznikov)).
-* Increase LDAP verification cooldown performance tests timeout to 600 sec. [#25374](https://github.com/ClickHouse/ClickHouse/pull/25374) ([vzakaznikov](https://github.com/vzakaznikov)).
-* Added rounding to mathematical and arithmetic function tests for consistent snapshot comparison. Cleaned up test names so they're more uniform. [#25297](https://github.com/ClickHouse/ClickHouse/pull/25297) ([MyroTk](https://github.com/MyroTk)).
+* Add `darwin-aarch64` (Mac M1 / Apple Silicon) builds in CI [#25560](https://github.com/ClickHouse/ClickHouse/pull/25560) ([Ivan](https://github.com/abyss7)) and put the links to the docs and website ([alexey-milovidov](https://github.com/alexey-milovidov)).
+* Adds cross-platform embedding of binary resources into executables. It works on Illumos. [#25146](https://github.com/ClickHouse/ClickHouse/pull/25146) ([bnaecker](https://github.com/bnaecker)).
+* Add join related options to stress tests to improve fuzzing. [#25200](https://github.com/ClickHouse/ClickHouse/pull/25200) ([Vladimir](https://github.com/vdimir)).
* Enable build with the S3 module on macOS [#25217](https://github.com/ClickHouse/ClickHouse/issues/25217). [#25218](https://github.com/ClickHouse/ClickHouse/pull/25218) ([kevin wan](https://github.com/MaxWk)).
-* Adding `leadInFrame` and `lagInFrame` window functions TestFlows tests. [#25144](https://github.com/ClickHouse/ClickHouse/pull/25144) ([vzakaznikov](https://github.com/vzakaznikov)).
* Fix using Yandex dockerhub registries for TestFlows.
[#25133](https://github.com/ClickHouse/ClickHouse/pull/25133) ([vzakaznikov](https://github.com/vzakaznikov)).
-* Disabling extended precision data types TestFlows tests. [#25125](https://github.com/ClickHouse/ClickHouse/pull/25125) ([vzakaznikov](https://github.com/vzakaznikov)).
* Add integration test cases to cover JDBC bridge. [#25047](https://github.com/ClickHouse/ClickHouse/pull/25047) ([Zhichun Wu](https://github.com/zhicwu)).
* Integration tests configuration has special treatment for dictionaries. Removed remaining dictionaries manual setup. [#24728](https://github.com/ClickHouse/ClickHouse/pull/24728) ([Ilya Yatsishin](https://github.com/qoega)).
-* Adding support to save clickhouse server logs in TestFlows check. [#24504](https://github.com/ClickHouse/ClickHouse/pull/24504) ([vzakaznikov](https://github.com/vzakaznikov)).
* Add libfuzzer tests for YAMLParser class. [#24480](https://github.com/ClickHouse/ClickHouse/pull/24480) ([BoloniniD](https://github.com/BoloniniD)).
-* Testing for big ints using the following functions: * Arithmetic * Array, tuple, and map * Bit * Comparison * Conversion * Logical * Mathematical * Null * Rounding - Creating a table with columns that use the data types. [#24350](https://github.com/ClickHouse/ClickHouse/pull/24350) ([MyroTk](https://github.com/MyroTk)).
+* Ubuntu 20.04 is now used to run integration tests; the docker-compose version used to run them is updated to 1.28.2. Environment variables now take effect on docker-compose. Rework test_dictionaries_all_layouts_separate_sources to allow parallel run. [#20393](https://github.com/ClickHouse/ClickHouse/pull/20393) ([Ilya Yatsishin](https://github.com/qoega)).
* Fix TOCTOU error in installation script. [#25277](https://github.com/ClickHouse/ClickHouse/pull/25277) ([alexey-milovidov](https://github.com/alexey-milovidov)).
-* Changed CSS theme to dark for better code highlighting. [#25682](https://github.com/ClickHouse/ClickHouse/pull/25682) ([Mike Kot](https://github.com/myrrc)).
### ClickHouse release 21.6, 2021-06-05 From 325e0764fe3e42560f8ccbed22c4d392f343cf28 Mon Sep 17 00:00:00 2001 From: ywill3 <87159180+ywill3@users.noreply.github.com> Date: Fri, 9 Jul 2021 10:05:27 +0800 Subject: [PATCH 882/931] Update bitmap-functions.md bitmapSubsetLimit example sql is wrong --- docs/zh/sql-reference/functions/bitmap-functions.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/zh/sql-reference/functions/bitmap-functions.md b/docs/zh/sql-reference/functions/bitmap-functions.md index 5a6baf2f217..57bf69cb5c3 100644 --- a/docs/zh/sql-reference/functions/bitmap-functions.md +++ b/docs/zh/sql-reference/functions/bitmap-functions.md @@ -81,7 +81,7 @@ SELECT bitmapToArray(bitmapSubsetInRange(bitmapBuild([0,1,2,3,4,5,6,7,8,9,10,11, **示例** ``` sql -SELECT bitmapToArray(bitmapSubsetInRange(bitmapBuild([0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,100,200,500]), toUInt32(30), toUInt32(200))) AS res +SELECT bitmapToArray(bitmapSubsetLimit(bitmapBuild([0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,100,200,500]), toUInt32(30), toUInt32(200))) AS res ``` ┌─res───────────────────────┐ From 89c72da7f3a5ff03464a8be8e749d75507c50867 Mon Sep 17 00:00:00 2001 From: ywill3 <87159180+ywill3@users.noreply.github.com> Date: Fri, 9 Jul 2021 10:14:10 +0800 Subject: [PATCH 883/931] Update bitmap-functions.md may be inappropriate sub-title --- docs/zh/sql-reference/functions/bitmap-functions.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/zh/sql-reference/functions/bitmap-functions.md b/docs/zh/sql-reference/functions/bitmap-functions.md index 5a6baf2f217..ee3cb712f38 100644 --- a/docs/zh/sql-reference/functions/bitmap-functions.md +++ b/docs/zh/sql-reference/functions/bitmap-functions.md @@ -174,7 +174,7 @@ SELECT bitmapToArray(bitmapAnd(bitmapBuild([1,2,3]),bitmapBuild([3,4,5]))) AS re │ [3] │ └─────┘ -## 位图 {#bitmapor} +## 位图或 {#bitmapor} 为两个位图对象进行或操作,返回一个新的位图对象。 From 9290d6f112a47e923616ce104d5e3295016d90ad Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Fri, 9 Jul 2021 06:06:21 +0300 Subject: [PATCH 884/931] Fix UBSan report in pointInPolygon --- src/Functions/PolygonUtils.h | 10 +++++++++- .../0_stateless/01940_point_in_polygon_ubsan.reference | 1 + .../0_stateless/01940_point_in_polygon_ubsan.sql | 2 ++ 3 files changed, 12 insertions(+), 1 deletion(-) create mode 100644 tests/queries/0_stateless/01940_point_in_polygon_ubsan.reference create mode 100644 tests/queries/0_stateless/01940_point_in_polygon_ubsan.sql diff --git a/src/Functions/PolygonUtils.h b/src/Functions/PolygonUtils.h index 3367b52cc36..ea91a187229 100644 --- a/src/Functions/PolygonUtils.h +++ b/src/Functions/PolygonUtils.h @@ -6,6 +6,7 @@ #include #include #include +#include #include #include @@ -304,6 +305,13 @@ void PointInPolygonWithGrid::calcGridAttributes( y_scale = 1 / cell_height; x_shift = -min_corner.x(); y_shift = -min_corner.y(); + + if (!(isFinite(x_scale) + && isFinite(y_scale) + && isFinite(x_shift) + && isFinite(y_shift) + && isFinite(grid_size))) + throw Exception("Polygon is not valid: bounding box is unbounded", ErrorCodes::BAD_ARGUMENTS); } template @@ -358,7 +366,7 @@ bool PointInPolygonWithGrid::contains(CoordinateType x, Coordina if (has_empty_bound) return false; - if (std::isnan(x) || std::isnan(y)) + if (!isFinite(x) || !isFinite(y)) return false; CoordinateType float_row = (y + y_shift) * y_scale; diff --git 
a/tests/queries/0_stateless/01940_point_in_polygon_ubsan.reference b/tests/queries/0_stateless/01940_point_in_polygon_ubsan.reference
new file mode 100644
index 00000000000..573541ac970
--- /dev/null
+++ b/tests/queries/0_stateless/01940_point_in_polygon_ubsan.reference
@@ -0,0 +1 @@
+0
diff --git a/tests/queries/0_stateless/01940_point_in_polygon_ubsan.sql b/tests/queries/0_stateless/01940_point_in_polygon_ubsan.sql
new file mode 100644
index 00000000000..d011725691f
--- /dev/null
+++ b/tests/queries/0_stateless/01940_point_in_polygon_ubsan.sql
@@ -0,0 +1,2 @@
+SET validate_polygons = 0;
+SELECT pointInPolygon((-inf, 1023), [(10.000100135803223, 10000000000.), (inf, 0.9998999834060669), (1.1920928955078125e-7, 100.0000991821289), (1.000100016593933, 100.0000991821289)]);

From 09c20d5d0854693fc556b127a7a4ff2456ac9cc2 Mon Sep 17 00:00:00 2001
From: alexey-milovidov
Date: Fri, 9 Jul 2021 06:15:53 +0300
Subject: [PATCH 885/931] Update geoToH3.cpp

---
 src/Functions/geoToH3.cpp | 12 ++++--------
 1 file changed, 4 insertions(+), 8 deletions(-)

diff --git a/src/Functions/geoToH3.cpp b/src/Functions/geoToH3.cpp
index 57973ab94fe..16f8de72eb0 100644
--- a/src/Functions/geoToH3.cpp
+++ b/src/Functions/geoToH3.cpp
@@ -84,14 +84,10 @@ public:
 coord.lng = degsToRads(lon);
 coord.lat = degsToRads(lat);

-            H3Index hindex;
-            H3Error err = latLngToCell(&coord, res, &hindex);
-            if (err)
-            {
-                throw Exception(
-                    "Incorrect coordinates lat:" + std::to_string(coord.lat) + " lng:" + std::to_string(coord.lng) + " err:" + std::to_string(err),
-                    ErrorCodes::INCORRECT_DATA);
-            }
+        H3Index hindex;
+        H3Error err = latLngToCell(&coord, res, &hindex);
+        if (err)
+            throw Exception(ErrorCodes::INCORRECT_DATA, "Incorrect coordinates latitude: {}, longitude: {}, error: {}", coord.lat, coord.lng, err);

 dst_data[row] = hindex;
 }

From b264c3f192c16f48fd5ad095fd67b307eb4749cf Mon Sep 17 00:00:00 2001
From: Alexey Milovidov
Date: Fri, 9 Jul 2021 06:26:35 +0300
Subject: [PATCH 886/931] Make graph pipeline rendering compatible with Dagre.JS

---
 src/Processors/printPipeline.cpp | 2 +-
 src/Processors/printPipeline.h | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/src/Processors/printPipeline.cpp b/src/Processors/printPipeline.cpp
index 5cdab1ed3ff..cbf8cb3a77d 100644
--- a/src/Processors/printPipeline.cpp
+++ b/src/Processors/printPipeline.cpp
@@ -103,7 +103,7 @@ void printPipelineCompact(const Processors & processors, WriteBuffer & out, bool
 out << "digraph\n{\n";
 out << " rankdir=\"LR\";\n";
- out << " { node [shape = box]\n";
+ out << " { node [shape = rect]\n";
 /// Nodes // TODO quoting and escaping
 size_t next_step = 0;
diff --git a/src/Processors/printPipeline.h b/src/Processors/printPipeline.h
index 9497bc3cc3c..6ff5fb24c37 100644
--- a/src/Processors/printPipeline.h
+++ b/src/Processors/printPipeline.h
@@ -16,7 +16,7 @@ void printPipeline(const Processors & processors, const Statuses & statuses, Wri
 {
 out << "digraph\n{\n";
 out << " rankdir=\"LR\";\n";
- out << " { node [shape = box]\n";
+ out << " { node [shape = rect]\n";
 auto get_proc_id = [](const IProcessor & proc) -> UInt64
 {

From 0bad9453924dedade34329493e64e7703e06f1fb Mon Sep 17 00:00:00 2001
From: alexey-milovidov
Date: Fri, 9 Jul 2021 06:49:08 +0300
Subject: [PATCH 887/931] Update PolygonUtils.h

---
 src/Functions/PolygonUtils.h | 1 +
 1 file changed, 1 insertion(+)

diff --git a/src/Functions/PolygonUtils.h b/src/Functions/PolygonUtils.h
index ea91a187229..a050de2edb6 100644
--- a/src/Functions/PolygonUtils.h
+++ b/src/Functions/PolygonUtils.h
@@
-41,6 +41,7 @@ namespace DB namespace ErrorCodes { extern const int LOGICAL_ERROR; + extern const int BAD_ARGUMENTS; } From 95f8ca4e037acd7dacb1667edddb096f2ee28d99 Mon Sep 17 00:00:00 2001 From: vdimir Date: Fri, 9 Jul 2021 12:16:03 +0300 Subject: [PATCH 888/931] Do not use default impl for low cardinality for joinGet --- src/Functions/FunctionJoinGet.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Functions/FunctionJoinGet.h b/src/Functions/FunctionJoinGet.h index 2250fa3ccf0..c701625e9cd 100644 --- a/src/Functions/FunctionJoinGet.h +++ b/src/Functions/FunctionJoinGet.h @@ -28,7 +28,7 @@ public: static constexpr auto name = or_null ? "joinGetOrNull" : "joinGet"; bool useDefaultImplementationForNulls() const override { return false; } - bool useDefaultImplementationForLowCardinalityColumns() const override { return true; } + bool useDefaultImplementationForLowCardinalityColumns() const override { return false; } bool useDefaultImplementationForConstants() const override { return true; } ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t input_rows_count) const override; From 43880af722d9a136ace02bf8b4e5f68b32ae3f42 Mon Sep 17 00:00:00 2001 From: vdimir Date: Fri, 9 Jul 2021 12:20:36 +0300 Subject: [PATCH 889/931] Add test to join_get_low_card_fix --- .../01735_join_get_low_card_fix.reference | 7 ++++++- .../0_stateless/01735_join_get_low_card_fix.sql | 15 ++++++++++----- 2 files changed, 16 insertions(+), 6 deletions(-) diff --git a/tests/queries/0_stateless/01735_join_get_low_card_fix.reference b/tests/queries/0_stateless/01735_join_get_low_card_fix.reference index 0b20aead00e..a9e2f17562a 100644 --- a/tests/queries/0_stateless/01735_join_get_low_card_fix.reference +++ b/tests/queries/0_stateless/01735_join_get_low_card_fix.reference @@ -1 +1,6 @@ -yyy +1 +1 +1 +1 +1 +1 diff --git a/tests/queries/0_stateless/01735_join_get_low_card_fix.sql b/tests/queries/0_stateless/01735_join_get_low_card_fix.sql index bdc979bc11e..e2002112360 100644 --- a/tests/queries/0_stateless/01735_join_get_low_card_fix.sql +++ b/tests/queries/0_stateless/01735_join_get_low_card_fix.sql @@ -1,9 +1,14 @@ -drop table if exists join_tbl; +DROP TABLE IF EXISTS join_tbl; -create table join_tbl (`id` String, `name` String) engine Join(any, left, id); +CREATE TABLE join_tbl (`id` String, `name` String, lcname LowCardinality(String)) ENGINE = Join(any, left, id); -insert into join_tbl values ('xxx', 'yyy'); +INSERT INTO join_tbl VALUES ('xxx', 'yyy', 'yyy'); -select joinGet('join_tbl', 'name', toLowCardinality('xxx')); +SELECT joinGet('join_tbl', 'name', 'xxx') == 'yyy'; +SELECT joinGet('join_tbl', 'name', toLowCardinality('xxx')) == 'yyy'; +SELECT joinGet('join_tbl', 'name', toLowCardinality(materialize('xxx'))) == 'yyy'; +SELECT joinGet('join_tbl', 'lcname', 'xxx') == 'yyy'; +SELECT joinGet('join_tbl', 'lcname', toLowCardinality('xxx')) == 'yyy'; +SELECT joinGet('join_tbl', 'lcname', toLowCardinality(materialize('xxx'))) == 'yyy'; -drop table if exists join_tbl; +DROP TABLE IF EXISTS join_tbl; From 0ec402ff64a4b3d02b3d03bbe056f0b05bc80a9b Mon Sep 17 00:00:00 2001 From: zxc111 Date: Fri, 9 Jul 2021 18:35:26 +0800 Subject: [PATCH 890/931] castColumn instead of execute toString function --- src/Functions/FunctionsCoding.h | 13 +++++-------- 1 file changed, 5 insertions(+), 8 deletions(-) diff --git a/src/Functions/FunctionsCoding.h b/src/Functions/FunctionsCoding.h index 4db138a12a2..00b09acea1f 100644 --- a/src/Functions/FunctionsCoding.h +++ 
b/src/Functions/FunctionsCoding.h @@ -19,13 +19,13 @@ #include #include #include +#include #include #include #include #include #include #include -#include #include #include @@ -955,8 +955,6 @@ public: template class EncodeToBinaryRepr : public IFunction { -private: - ContextPtr context; public: static constexpr auto name = Impl::name; static constexpr size_t word_size = Impl::word_size; @@ -989,7 +987,7 @@ public: return std::make_shared(); } - ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr & result_type, size_t input_rows_count) const override + ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t /*input_rows_count*/) const override { const IColumn * column = arguments[0].column.get(); ColumnPtr res_column; @@ -997,10 +995,9 @@ public: WhichDataType which(column->getDataType()); if (which.isAggregateFunction()) { - auto to_string = FunctionFactory::instance().get("toString", context); - const ColumnPtr col = to_string->build(arguments)->execute(arguments, result_type, input_rows_count); - const auto * name_col = checkAndGetColumn(col.get()); - tryExecuteString(name_col, res_column); + const ColumnPtr to_string = castColumn(arguments[0], std::make_shared()); + const auto * str_column = checkAndGetColumn(to_string.get()); + tryExecuteString(str_column, res_column); return res_column; } From b2fb551bd4cf82d77c66935eb4b3ff4ed7ea77b4 Mon Sep 17 00:00:00 2001 From: alesapin Date: Fri, 9 Jul 2021 16:00:50 +0300 Subject: [PATCH 891/931] Fix several bugs in ZooKeeper snapshots deserialization --- src/Common/ZooKeeper/ZooKeeperCommon.h | 10 ++-- src/Coordination/KeeperStorage.cpp | 55 +++++++++++++++----- src/Coordination/ZooKeeperDataReader.cpp | 65 +++++++++++++++++------- 3 files changed, 95 insertions(+), 35 deletions(-) diff --git a/src/Common/ZooKeeper/ZooKeeperCommon.h b/src/Common/ZooKeeper/ZooKeeperCommon.h index a816c1eb8bb..eb7f42f900a 100644 --- a/src/Common/ZooKeeper/ZooKeeperCommon.h +++ b/src/Common/ZooKeeper/ZooKeeperCommon.h @@ -45,6 +45,8 @@ struct ZooKeeperRequest : virtual Request /// If the request was sent and we didn't get the response and the error happens, then we cannot be sure was it processed or not. 
bool probably_sent = false; + bool restored_from_zookeeper_log = false; + ZooKeeperRequest() = default; ZooKeeperRequest(const ZooKeeperRequest &) = default; virtual ~ZooKeeperRequest() override = default; @@ -172,6 +174,9 @@ struct ZooKeeperCloseResponse final : ZooKeeperResponse struct ZooKeeperCreateRequest final : public CreateRequest, ZooKeeperRequest { + /// used only during restore from zookeeper log + int32_t parent_cversion = -1; + ZooKeeperCreateRequest() = default; explicit ZooKeeperCreateRequest(const CreateRequest & base) : CreateRequest(base) {} @@ -183,9 +188,6 @@ struct ZooKeeperCreateRequest final : public CreateRequest, ZooKeeperRequest bool isReadRequest() const override { return false; } size_t bytesSize() const override { return CreateRequest::bytesSize() + sizeof(xid) + sizeof(has_watch); } - - /// During recovery from log we don't rehash ACLs - bool need_to_hash_acls = true; }; struct ZooKeeperCreateResponse final : CreateResponse, ZooKeeperResponse @@ -362,8 +364,6 @@ struct ZooKeeperSetACLRequest final : SetACLRequest, ZooKeeperRequest bool isReadRequest() const override { return false; } size_t bytesSize() const override { return SetACLRequest::bytesSize() + sizeof(xid); } - - bool need_to_hash_acls = true; }; struct ZooKeeperSetACLResponse final : SetACLResponse, ZooKeeperResponse diff --git a/src/Coordination/KeeperStorage.cpp b/src/Coordination/KeeperStorage.cpp index 97c78e04f05..6a57dd63ff3 100644 --- a/src/Coordination/KeeperStorage.cpp +++ b/src/Coordination/KeeperStorage.cpp @@ -267,13 +267,12 @@ struct KeeperStorageCreateRequest final : public KeeperStorageRequest } else { - auto & session_auth_ids = storage.session_and_auth[session_id]; KeeperStorage::Node created_node; Coordination::ACLs node_acls; - if (!fixupACL(request.acls, session_auth_ids, node_acls, request.need_to_hash_acls)) + if (!fixupACL(request.acls, session_auth_ids, node_acls, !request.restored_from_zookeeper_log)) { response.error = Coordination::Error::ZINVALIDACL; return {response_ptr, {}}; @@ -307,16 +306,28 @@ struct KeeperStorageCreateRequest final : public KeeperStorageRequest path_created += seq_num_str.str(); } + int32_t parent_cversion = request.parent_cversion; auto child_path = getBaseName(path_created); int64_t prev_parent_zxid; - container.updateValue(parent_path, [child_path, zxid, &prev_parent_zxid] (KeeperStorage::Node & parent) + int32_t prev_parent_cversion; + container.updateValue(parent_path, [child_path, zxid, &prev_parent_zxid, + parent_cversion, &prev_parent_cversion] (KeeperStorage::Node & parent) { + + parent.children.insert(child_path); + prev_parent_cversion = parent.stat.cversion; + prev_parent_zxid = parent.stat.pzxid; + /// Increment sequential number even if node is not sequential ++parent.seq_num; - parent.children.insert(child_path); - ++parent.stat.cversion; - prev_parent_zxid = parent.stat.pzxid; - parent.stat.pzxid = zxid; + + if (parent_cversion == -1) + ++parent.stat.cversion; + else if (parent_cversion > parent.stat.cversion) + parent.stat.cversion = parent_cversion; + + if (zxid > parent.stat.pzxid) + parent.stat.pzxid = zxid; ++parent.stat.numChildren; }); @@ -326,7 +337,7 @@ struct KeeperStorageCreateRequest final : public KeeperStorageRequest if (request.is_ephemeral) ephemerals[session_id].emplace(path_created); - undo = [&storage, prev_parent_zxid, session_id, path_created, is_ephemeral = request.is_ephemeral, parent_path, child_path, acl_id] + undo = [&storage, prev_parent_zxid, prev_parent_cversion, session_id, path_created, is_ephemeral = 
request.is_ephemeral, parent_path, child_path, acl_id] { storage.container.erase(path_created); storage.acl_map.removeUsage(acl_id); @@ -334,11 +345,11 @@ struct KeeperStorageCreateRequest final : public KeeperStorageRequest if (is_ephemeral) storage.ephemerals[session_id].erase(path_created); - storage.container.updateValue(parent_path, [child_path, prev_parent_zxid] (KeeperStorage::Node & undo_parent) + storage.container.updateValue(parent_path, [child_path, prev_parent_zxid, prev_parent_cversion] (KeeperStorage::Node & undo_parent) { - --undo_parent.stat.cversion; --undo_parent.stat.numChildren; --undo_parent.seq_num; + undo_parent.stat.cversion = prev_parent_cversion; undo_parent.stat.pzxid = prev_parent_zxid; undo_parent.children.erase(child_path); }); @@ -411,8 +422,23 @@ struct KeeperStorageRemoveRequest final : public KeeperStorageRequest return checkACL(Coordination::ACL::Delete, node_acls, session_auths); } + /// Garbage required to apply log to "fuzzy" zookeeper snapshot + void updateParentPzxid(const std::string & child_path, int64_t zxid, KeeperStorage::Container & container) const + { + auto parent_path = parentPath(child_path); + auto parent_it = container.find(parent_path); + if (parent_it != container.end()) + { + container.updateValue(parent_path, [zxid](KeeperStorage::Node & parent) + { + if (parent.stat.pzxid < zxid) + parent.stat.pzxid = zxid; + }); + } + } + using KeeperStorageRequest::KeeperStorageRequest; - std::pair process(KeeperStorage & storage, int64_t /*zxid*/, int64_t /*session_id*/) const override + std::pair process(KeeperStorage & storage, int64_t zxid, int64_t /*session_id*/) const override { auto & container = storage.container; auto & ephemerals = storage.ephemerals; @@ -425,6 +451,8 @@ struct KeeperStorageRemoveRequest final : public KeeperStorageRequest auto it = container.find(request.path); if (it == container.end()) { + if (request.restored_from_zookeeper_log) + updateParentPzxid(request.path, zxid, container); response.error = Coordination::Error::ZNONODE; } else if (request.version != -1 && request.version != it->value.stat.version) @@ -437,6 +465,9 @@ struct KeeperStorageRemoveRequest final : public KeeperStorageRequest } else { + if (request.restored_from_zookeeper_log) + updateParentPzxid(request.path, zxid, container); + auto prev_node = it->value; if (prev_node.stat.ephemeralOwner != 0) { @@ -719,7 +750,7 @@ struct KeeperStorageSetACLRequest final : public KeeperStorageRequest auto & session_auth_ids = storage.session_and_auth[session_id]; Coordination::ACLs node_acls; - if (!fixupACL(request.acls, session_auth_ids, node_acls, request.need_to_hash_acls)) + if (!fixupACL(request.acls, session_auth_ids, node_acls, !request.restored_from_zookeeper_log)) { response.error = Coordination::Error::ZINVALIDACL; return {response_ptr, {}}; diff --git a/src/Coordination/ZooKeeperDataReader.cpp b/src/Coordination/ZooKeeperDataReader.cpp index 8bcce25cfee..9bc9ae71209 100644 --- a/src/Coordination/ZooKeeperDataReader.cpp +++ b/src/Coordination/ZooKeeperDataReader.cpp @@ -174,7 +174,22 @@ void deserializeKeeperStorageFromSnapshot(KeeperStorage & storage, const std::st LOG_INFO(log, "Deserializing data from snapshot"); int64_t zxid_from_nodes = deserializeStorageData(storage, reader, log); - storage.zxid = std::max(zxid, zxid_from_nodes); + /// In ZooKeeper Snapshots can contain inconsistent state of storage. They call + /// this inconsistent state "fuzzy". 
So it's guaranteed that snapshots contain all
+    /// records up to the zxid from the snapshot name and also some records from the future.
+    /// But it doesn't mean that we have just some state of storage from the future (like zxid + 100 log records).
+    /// We have an incorrect state of storage where some random log entries from the future were applied.
+    ///
+    /// In ZooKeeper they say that their transaction log is idempotent and can be applied to a "fuzzy" state as is.
+    /// It's true, but there is no general invariant which produces this property. They just have a bunch of ad-hoc "if"s which detect
+    /// "fuzzy" state inconsistencies and apply log records in a special way. Several examples:
+    /// https://github.com/apache/zookeeper/blob/master/zookeeper-server/src/main/java/org/apache/zookeeper/server/DataTree.java#L453-L463
+    /// https://github.com/apache/zookeeper/blob/master/zookeeper-server/src/main/java/org/apache/zookeeper/server/DataTree.java#L476-L480
+    /// https://github.com/apache/zookeeper/blob/master/zookeeper-server/src/main/java/org/apache/zookeeper/server/DataTree.java#L547-L549
+    if (zxid_from_nodes > zxid)
+        LOG_WARNING(log, "ZooKeeper snapshot was in inconsistent (fuzzy) state. Will try to apply log.");
+
+    storage.zxid = zxid;

     LOG_INFO(log, "Finished, snapshot ZXID {}", storage.zxid);
 }
@@ -210,16 +225,18 @@ void deserializeLogMagic(ReadBuffer & in)
     static constexpr int32_t LOG_HEADER = 1514884167; /// "ZKLG"

     if (magic_header != LOG_HEADER)
-        throw Exception(ErrorCodes::CORRUPTED_DATA ,"Incorrect magic header in file, expected {}, got {}", LOG_HEADER, magic_header);
+        throw Exception(ErrorCodes::CORRUPTED_DATA, "Incorrect magic header in file, expected {}, got {}", LOG_HEADER, magic_header);

     if (version != 2)
-        throw Exception(ErrorCodes::NOT_IMPLEMENTED,"Cannot deserialize ZooKeeper data other than version 2, got version {}", version);
+        throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Cannot deserialize ZooKeeper data other than version 2, got version {}", version);
 }

-/// For some reason zookeeper stores slightly different records in log then
-/// requests. For example:
-/// class CreateTxn {
+/// The ZooKeeper transaction log differs from requests. The main reason: records are stored in the log
+/// in a "finalized" state (for example, with concrete versions).
+///
+/// Example:
+/// class CreateTxn {
 ///      ustring path;
 ///      buffer data;
 ///      vector acl;
@@ -289,10 +306,9 @@ Coordination::ZooKeeperRequestPtr deserializeCreateTxn(ReadBuffer & in)
     Coordination::read(result->data, in);
     Coordination::read(result->acls, in);
     Coordination::read(result->is_ephemeral, in);
-    result->need_to_hash_acls = false;
-    /// How we should use it?
It should just increment on request execution - int32_t parent_c_version; - Coordination::read(parent_c_version, in); + Coordination::read(result->parent_cversion, in); + + result->restored_from_zookeeper_log = true; return result; } @@ -300,6 +316,7 @@ Coordination::ZooKeeperRequestPtr deserializeDeleteTxn(ReadBuffer & in) { std::shared_ptr result = std::make_shared(); Coordination::read(result->path, in); + result->restored_from_zookeeper_log = true; return result; } @@ -309,6 +326,7 @@ Coordination::ZooKeeperRequestPtr deserializeSetTxn(ReadBuffer & in) Coordination::read(result->path, in); Coordination::read(result->data, in); Coordination::read(result->version, in); + result->restored_from_zookeeper_log = true; /// It stores version + 1 (which should be, not for request) result->version -= 1; @@ -320,6 +338,7 @@ Coordination::ZooKeeperRequestPtr deserializeCheckVersionTxn(ReadBuffer & in) std::shared_ptr result = std::make_shared(); Coordination::read(result->path, in); Coordination::read(result->version, in); + result->restored_from_zookeeper_log = true; return result; } @@ -329,14 +348,19 @@ Coordination::ZooKeeperRequestPtr deserializeCreateSession(ReadBuffer & in) int32_t timeout; Coordination::read(timeout, in); result->session_timeout_ms = timeout; + result->restored_from_zookeeper_log = true; return result; } -Coordination::ZooKeeperRequestPtr deserializeCloseSession(ReadBuffer & in) +Coordination::ZooKeeperRequestPtr deserializeCloseSession(ReadBuffer & in, bool empty) { std::shared_ptr result = std::make_shared(); - std::vector data; - Coordination::read(data, in); + if (!empty) + { + std::vector data; + Coordination::read(data, in); + } + result->restored_from_zookeeper_log = true; return result; } @@ -356,14 +380,14 @@ Coordination::ZooKeeperRequestPtr deserializeSetACLTxn(ReadBuffer & in) Coordination::read(result->version, in); /// It stores version + 1 (which should be, not for request) result->version -= 1; - result->need_to_hash_acls = false; + result->restored_from_zookeeper_log = true; return result; } Coordination::ZooKeeperRequestPtr deserializeMultiTxn(ReadBuffer & in); -Coordination::ZooKeeperRequestPtr deserializeTxnImpl(ReadBuffer & in, bool subtxn) +Coordination::ZooKeeperRequestPtr deserializeTxnImpl(ReadBuffer & in, bool subtxn, int64_t txn_length = 0) { int32_t type; Coordination::read(type, in); @@ -372,6 +396,11 @@ Coordination::ZooKeeperRequestPtr deserializeTxnImpl(ReadBuffer & in, bool subtx if (subtxn) Coordination::read(sub_txn_length, in); + bool empty_txn = !subtxn && txn_length == 32; /// Possible for old-style CloseTxn's + + if (empty_txn && type != -11) + throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Empty non close-session transaction found"); + int64_t in_count_before = in.count(); switch (type) @@ -398,7 +427,7 @@ Coordination::ZooKeeperRequestPtr deserializeTxnImpl(ReadBuffer & in, bool subtx result = deserializeCreateSession(in); break; case -11: - result = deserializeCloseSession(in); + result = deserializeCloseSession(in, empty_txn); break; case -1: result = deserializeErrorTxn(in); @@ -442,7 +471,7 @@ bool hasErrorsInMultiRequest(Coordination::ZooKeeperRequestPtr request) if (request == nullptr) return true; - for (const auto & subrequest : dynamic_cast(request.get())->requests) //-V522 + for (const auto & subrequest : dynamic_cast(request.get())->requests) // -V522 if (subrequest == nullptr) return true; return false; @@ -470,7 +499,7 @@ bool deserializeTxn(KeeperStorage & storage, ReadBuffer & in, Poco::Logger * /*l int64_t time; 
Coordination::read(time, in);

-    Coordination::ZooKeeperRequestPtr request = deserializeTxnImpl(in, false);
+    Coordination::ZooKeeperRequestPtr request = deserializeTxnImpl(in, false, txn_len);

     /// Skip all other bytes
     int64_t bytes_read = in.count() - count_before;

From 60b22aaac8cdf1010ea542d07c52f8435af64c14 Mon Sep 17 00:00:00 2001
From: alesapin
Date: Fri, 9 Jul 2021 16:03:23 +0300
Subject: [PATCH 892/931] Better message

---
 src/Coordination/ZooKeeperDataReader.cpp | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/Coordination/ZooKeeperDataReader.cpp b/src/Coordination/ZooKeeperDataReader.cpp
index 9bc9ae71209..bb2eb550ddf 100644
--- a/src/Coordination/ZooKeeperDataReader.cpp
+++ b/src/Coordination/ZooKeeperDataReader.cpp
@@ -187,7 +187,7 @@ void deserializeKeeperStorageFromSnapshot(KeeperStorage & storage, const std::st
     /// https://github.com/apache/zookeeper/blob/master/zookeeper-server/src/main/java/org/apache/zookeeper/server/DataTree.java#L476-L480
     /// https://github.com/apache/zookeeper/blob/master/zookeeper-server/src/main/java/org/apache/zookeeper/server/DataTree.java#L547-L549
     if (zxid_from_nodes > zxid)
-        LOG_WARNING(log, "ZooKeeper snapshot was in inconsistent (fuzzy) state. Will try to apply log.");
+        LOG_WARNING(log, "ZooKeeper snapshot was in inconsistent (fuzzy) state. Will try to apply log. ZooKeeper creates a non-fuzzy snapshot on restart, so you can just restart the ZooKeeper server to get a consistent version.");

     storage.zxid = zxid;

From 947bb4a9421cf8932b66c2126423a855f7abfea1 Mon Sep 17 00:00:00 2001
From: kssenii
Date: Fri, 9 Jul 2021 16:43:00 +0300
Subject: [PATCH 893/931] Fix progress bar for local

---
 programs/local/LocalServer.cpp           | 18 +++++++++++++-----
 src/Common/ProgressIndication.cpp        |  3 ---
 src/Interpreters/executeQuery.cpp        |  5 ++++-
 src/Interpreters/executeQuery.h          |  3 ++-
 src/Processors/Formats/IOutputFormat.cpp |  4 +++-
 src/Processors/Formats/IOutputFormat.h   |  6 +++++-
 6 files changed, 27 insertions(+), 12 deletions(-)

diff --git a/programs/local/LocalServer.cpp b/programs/local/LocalServer.cpp
index 2633f0e9426..6be7ba1ad73 100644
--- a/programs/local/LocalServer.cpp
+++ b/programs/local/LocalServer.cpp
@@ -388,24 +388,32 @@ void LocalServer::processQueries()
     /// Use the same query_id (and thread group) for all queries
     CurrentThread::QueryScope query_scope_holder(context);

-    ///Set progress show
+    /// Set progress show
     need_render_progress = config().getBool("progress", false);

+    std::function finalize_progress;
     if (need_render_progress)
     {
+        /// Set progress callback, which can be run from multiple threads.
         context->setProgressCallback([&](const Progress & value)
         {
             /// Write progress only if progress was updated
             if (progress_indication.updateProgress(value))
                 progress_indication.writeProgress();
         });
+
+        /// Set finalizing callback for progress, which is called right before finalizing query output.
+        finalize_progress = [&]()
+        {
+            progress_indication.clearProgressOutput();
+        };
+
+        /// Set callback for file processing progress.
+ progress_indication.setFileProgressCallback(context); } bool echo_queries = config().hasOption("echo") || config().hasOption("verbose"); - if (need_render_progress) - progress_indication.setFileProgressCallback(context); - std::exception_ptr exception; for (const auto & query : queries) @@ -425,7 +433,7 @@ void LocalServer::processQueries() try { - executeQuery(read_buf, write_buf, /* allow_into_outfile = */ true, context, {}); + executeQuery(read_buf, write_buf, /* allow_into_outfile = */ true, context, {}, finalize_progress); } catch (...) { diff --git a/src/Common/ProgressIndication.cpp b/src/Common/ProgressIndication.cpp index e1a7c420c54..0d65eaece86 100644 --- a/src/Common/ProgressIndication.cpp +++ b/src/Common/ProgressIndication.cpp @@ -4,9 +4,6 @@ #include #include -/// FIXME: progress bar in clickhouse-local needs to be cleared after query execution -/// - same as it is now in clickhouse-client. Also there is no writeFinalProgress call -/// in clickhouse-local. namespace DB { diff --git a/src/Interpreters/executeQuery.cpp b/src/Interpreters/executeQuery.cpp index 5b55754f00a..99c08c70b7c 100644 --- a/src/Interpreters/executeQuery.cpp +++ b/src/Interpreters/executeQuery.cpp @@ -948,7 +948,8 @@ void executeQuery( WriteBuffer & ostr, bool allow_into_outfile, ContextMutablePtr context, - std::function set_result_details) + std::function set_result_details, + std::function before_finalize_callback) { PODArray parse_buf; const char * begin; @@ -1079,6 +1080,8 @@ void executeQuery( out->onProgress(progress); }); + out->setBeforeFinalizeCallback(before_finalize_callback); + if (set_result_details) set_result_details( context->getClientInfo().current_query_id, out->getContentType(), format_name, DateLUT::instance().getTimeZone()); diff --git a/src/Interpreters/executeQuery.h b/src/Interpreters/executeQuery.h index 6448b26a652..77f142de121 100644 --- a/src/Interpreters/executeQuery.h +++ b/src/Interpreters/executeQuery.h @@ -17,7 +17,8 @@ void executeQuery( WriteBuffer & ostr, /// Where to write query output to. bool allow_into_outfile, /// If true and the query contains INTO OUTFILE section, redirect output to that file. ContextMutablePtr context, /// DB, tables, data types, storage engines, functions, aggregate functions... - std::function set_result_details /// If a non-empty callback is passed, it will be called with the query id, the content-type, the format, and the timezone. + std::function set_result_details, /// If a non-empty callback is passed, it will be called with the query id, the content-type, the format, and the timezone. + std::function before_finalize_callback = {} /// Will be set in output format to be called before finalize. 
); diff --git a/src/Processors/Formats/IOutputFormat.cpp b/src/Processors/Formats/IOutputFormat.cpp index 88649d9ca25..7d82c267f36 100644 --- a/src/Processors/Formats/IOutputFormat.cpp +++ b/src/Processors/Formats/IOutputFormat.cpp @@ -76,6 +76,9 @@ void IOutputFormat::work() if (rows_before_limit_counter && rows_before_limit_counter->hasAppliedLimit()) setRowsBeforeLimit(rows_before_limit_counter->get()); + if (before_finalize_callback) + before_finalize_callback(); + finalize(); finalized = true; return; @@ -117,4 +120,3 @@ void IOutputFormat::write(const Block & block) } } - diff --git a/src/Processors/Formats/IOutputFormat.h b/src/Processors/Formats/IOutputFormat.h index 4c2b3f30070..4d86d18f70e 100644 --- a/src/Processors/Formats/IOutputFormat.h +++ b/src/Processors/Formats/IOutputFormat.h @@ -67,6 +67,9 @@ public: /// Passed value are delta, that must be summarized. virtual void onProgress(const Progress & /*progress*/) {} + /// Set callback, which will be called before call to finalize(). + void setBeforeFinalizeCallback(std::function callback) { before_finalize_callback = callback; } + /// Content-Type to set when sending HTTP response. virtual std::string getContentType() const { return "text/plain; charset=UTF-8"; } @@ -91,6 +94,7 @@ private: size_t result_bytes = 0; bool prefix_written = false; + + std::function before_finalize_callback; }; } - From 03c4853451664c07ecb1e88adcd75038ab5f16be Mon Sep 17 00:00:00 2001 From: Maksim Kita Date: Fri, 9 Jul 2021 17:20:02 +0300 Subject: [PATCH 894/931] Functions dictGet, dictHas complex key dictionary key argument tuple fix --- .../functions/ext-dict-functions.md | 22 ++--- src/Functions/FunctionsExternalDictionaries.h | 94 +++++++++++-------- ..._dict_get_has_complex_single_key.reference | 10 ++ .../01941_dict_get_has_complex_single_key.sql | 26 +++++ 4 files changed, 103 insertions(+), 49 deletions(-) create mode 100644 tests/queries/0_stateless/01941_dict_get_has_complex_single_key.reference create mode 100644 tests/queries/0_stateless/01941_dict_get_has_complex_single_key.sql diff --git a/docs/en/sql-reference/functions/ext-dict-functions.md b/docs/en/sql-reference/functions/ext-dict-functions.md index 7c0fe11ae64..d7f142dd8b1 100644 --- a/docs/en/sql-reference/functions/ext-dict-functions.md +++ b/docs/en/sql-reference/functions/ext-dict-functions.md @@ -12,7 +12,7 @@ For information on connecting and configuring external dictionaries, see [Extern ## dictGet, dictGetOrDefault, dictGetOrNull {#dictget} -Retrieves values from an external dictionary. +Retrieves values from an external dictionary. ``` sql dictGet('dict_name', attr_names, id_expr) @@ -24,7 +24,7 @@ dictGetOrNull('dict_name', attr_name, id_expr) - `dict_name` — Name of the dictionary. [String literal](../../sql-reference/syntax.md#syntax-string-literal). - `attr_names` — Name of the column of the dictionary, [String literal](../../sql-reference/syntax.md#syntax-string-literal), or tuple of column names, [Tuple](../../sql-reference/data-types/tuple.md)([String literal](../../sql-reference/syntax.md#syntax-string-literal)). -- `id_expr` — Key value. [Expression](../../sql-reference/syntax.md#syntax-expressions) returning a [UInt64](../../sql-reference/data-types/int-uint.md) or [Tuple](../../sql-reference/data-types/tuple.md)-type value depending on the dictionary configuration. +- `id_expr` — Key value. 
[Expression](../../sql-reference/syntax.md#syntax-expressions) returning dictionary key-type value or [Tuple](../../sql-reference/data-types/tuple.md)-type value depending on the dictionary configuration. - `default_value_expr` — Values returned if the dictionary does not contain a row with the `id_expr` key. [Expression](../../sql-reference/syntax.md#syntax-expressions) or [Tuple](../../sql-reference/data-types/tuple.md)([Expression](../../sql-reference/syntax.md#syntax-expressions)), returning the value (or values) in the data types configured for the `attr_names` attribute. **Returned value** @@ -138,7 +138,7 @@ Configure the external dictionary: c2 String - + 0 @@ -237,7 +237,7 @@ dictHas('dict_name', id_expr) **Arguments** - `dict_name` — Name of the dictionary. [String literal](../../sql-reference/syntax.md#syntax-string-literal). -- `id_expr` — Key value. [Expression](../../sql-reference/syntax.md#syntax-expressions) returning a [UInt64](../../sql-reference/data-types/int-uint.md) or [Tuple](../../sql-reference/data-types/tuple.md)-type value depending on the dictionary configuration. +- `id_expr` — Key value. [Expression](../../sql-reference/syntax.md#syntax-expressions) returning dictionary key-type value or [Tuple](../../sql-reference/data-types/tuple.md)-type value depending on the dictionary configuration. **Returned value** @@ -292,16 +292,16 @@ Type: `UInt8`. Returns first-level children as an array of indexes. It is the inverse transformation for [dictGetHierarchy](#dictgethierarchy). -**Syntax** +**Syntax** ``` sql dictGetChildren(dict_name, key) ``` -**Arguments** +**Arguments** -- `dict_name` — Name of the dictionary. [String literal](../../sql-reference/syntax.md#syntax-string-literal). -- `key` — Key value. [Expression](../../sql-reference/syntax.md#syntax-expressions) returning a [UInt64](../../sql-reference/data-types/int-uint.md)-type value. +- `dict_name` — Name of the dictionary. [String literal](../../sql-reference/syntax.md#syntax-string-literal). +- `key` — Key value. [Expression](../../sql-reference/syntax.md#syntax-expressions) returning a [UInt64](../../sql-reference/data-types/int-uint.md)-type value. **Returned values** @@ -339,7 +339,7 @@ SELECT dictGetChildren('hierarchy_flat_dictionary', number) FROM system.numbers ## dictGetDescendant {#dictgetdescendant} -Returns all descendants as if [dictGetChildren](#dictgetchildren) function was applied `level` times recursively. +Returns all descendants as if [dictGetChildren](#dictgetchildren) function was applied `level` times recursively. **Syntax** @@ -347,9 +347,9 @@ Returns all descendants as if [dictGetChildren](#dictgetchildren) function was a dictGetDescendants(dict_name, key, level) ``` -**Arguments** +**Arguments** -- `dict_name` — Name of the dictionary. [String literal](../../sql-reference/syntax.md#syntax-string-literal). +- `dict_name` — Name of the dictionary. [String literal](../../sql-reference/syntax.md#syntax-string-literal). - `key` — Key value. [Expression](../../sql-reference/syntax.md#syntax-expressions) returning a [UInt64](../../sql-reference/data-types/int-uint.md)-type value. - `level` — Hierarchy level. If `level = 0` returns all descendants to the end. [UInt8](../../sql-reference/data-types/int-uint.md). 
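For context, here is a minimal SQL sketch of the behavior this patch enables: with a complex-key dictionary that has a single key attribute, `dictGet`/`dictHas` now accept the bare key as well as the one-element tuple. The dictionary and source table below are hypothetical; the test shipped with this patch exercises the same paths.

```sql
-- Hypothetical complex-key dictionary with a single String key attribute.
CREATE DICTIONARY db.tags_dict (tag String, title String)
PRIMARY KEY tag
LAYOUT(COMPLEX_KEY_HASHED())
SOURCE(CLICKHOUSE(TABLE 'tags'))
LIFETIME(0);

SELECT dictGet('db.tags_dict', 'title', tuple('news')); -- tuple form, accepted before this patch
SELECT dictGet('db.tags_dict', 'title', 'news');        -- bare key, wrapped into a 1-tuple by this patch
SELECT dictHas('db.tags_dict', 'news');
```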
diff --git a/src/Functions/FunctionsExternalDictionaries.h b/src/Functions/FunctionsExternalDictionaries.h index 381401be2c5..118855b4bf8 100644 --- a/src/Functions/FunctionsExternalDictionaries.h +++ b/src/Functions/FunctionsExternalDictionaries.h @@ -163,13 +163,6 @@ public: arguments[0]->getName(), getName()); - if (!WhichDataType(arguments[1]).isUInt64() && - !isTuple(arguments[1])) - throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, - "Illegal type {} of second argument of function {} must be UInt64 or tuple(...)", - arguments[1]->getName(), - getName()); - return std::make_shared(); } @@ -189,8 +182,8 @@ public: auto dictionary_key_type = dictionary->getKeyType(); const ColumnWithTypeAndName & key_column_with_type = arguments[1]; - const auto key_column = key_column_with_type.column; - const auto key_column_type = WhichDataType(key_column_with_type.type); + auto key_column = key_column_with_type.column; + auto key_column_type = key_column_with_type.type; ColumnPtr range_col = nullptr; DataTypePtr range_col_type = nullptr; @@ -214,7 +207,7 @@ public: if (dictionary_key_type == DictionaryKeyType::simple) { - if (!key_column_type.isUInt64()) + if (!WhichDataType(key_column_type).isUInt64()) throw Exception( ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Second argument of function {} must be UInt64 when dictionary is simple. Actual type {}.", @@ -225,24 +218,39 @@ public: } else if (dictionary_key_type == DictionaryKeyType::complex) { - if (!key_column_type.isTuple()) - throw Exception( - ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, - "Second argument of function {} must be tuple when dictionary is complex. Actual type {}.", - getName(), - key_column_with_type.type->getName()); - /// Functions in external dictionaries_loader only support full-value (not constant) columns with keys. - ColumnPtr key_column_full = key_column->convertToFullColumnIfConst(); + key_column = key_column->convertToFullColumnIfConst(); + size_t keys_size = dictionary->getStructure().getKeysSize(); - const auto & key_columns = typeid_cast(*key_column_full).getColumnsCopy(); - const auto & key_types = static_cast(*key_column_with_type.type).getElements(); + if (!isTuple(key_column_type)) + { + if (keys_size > 1) + { + throw Exception( + ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, + "Third argument of function {} must be tuple when dictionary is complex and key contains more than 1 attribute." + "Actual type {}.", + getName(), + key_column_type->getName()); + } + else + { + Columns tuple_columns = {std::move(key_column)}; + key_column = ColumnTuple::create(tuple_columns); + + DataTypes tuple_types = {key_column_type}; + key_column_type = std::make_shared(tuple_types); + } + } + + const auto & key_columns = assert_cast(*key_column).getColumnsCopy(); + const auto & key_types = assert_cast(*key_column_type).getElements(); return dictionary->hasKeys(key_columns, key_types); } else { - if (!key_column_type.isUInt64()) + if (!WhichDataType(key_column_type).isUInt64()) throw Exception( ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Second argument of function {} must be UInt64 when dictionary is range. 
Actual type {}.", @@ -346,13 +354,6 @@ public: Strings attribute_names = getAttributeNamesFromColumn(arguments[1].column, arguments[1].type); auto dictionary = helper.getDictionary(dictionary_name); - - if (!WhichDataType(arguments[2].type).isUInt64() && !isTuple(arguments[2].type)) - throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, - "Illegal type {} of third argument of function {}, must be UInt64 or tuple(...).", - arguments[2].type->getName(), - getName()); - auto dictionary_key_type = dictionary->getKeyType(); size_t current_arguments_index = 3; @@ -446,18 +447,35 @@ public: } else if (dictionary_key_type == DictionaryKeyType::complex) { - if (!isTuple(key_col_with_type.type)) - throw Exception( - ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, - "Third argument of function {} must be tuple when dictionary is complex. Actual type {}.", - getName(), - key_col_with_type.type->getName()); - /// Functions in external dictionaries_loader only support full-value (not constant) columns with keys. - ColumnPtr key_column_full = key_col_with_type.column->convertToFullColumnIfConst(); + ColumnPtr key_column = key_col_with_type.column->convertToFullColumnIfConst(); + DataTypePtr key_column_type = key_col_with_type.type; - const auto & key_columns = typeid_cast(*key_column_full).getColumnsCopy(); - const auto & key_types = static_cast(*key_col_with_type.type).getElements(); + size_t keys_size = dictionary->getStructure().getKeysSize(); + + if (!isTuple(key_column_type)) + { + if (keys_size > 1) + { + throw Exception( + ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, + "Third argument of function {} must be tuple when dictionary is complex and key contains more than 1 attribute." + "Actual type {}.", + getName(), + key_col_with_type.type->getName()); + } + else + { + Columns tuple_columns = {std::move(key_column)}; + key_column = ColumnTuple::create(tuple_columns); + + DataTypes tuple_types = {key_column_type}; + key_column_type = std::make_shared(tuple_types); + } + } + + const auto & key_columns = assert_cast(*key_column).getColumnsCopy(); + const auto & key_types = assert_cast(*key_column_type).getElements(); result = executeDictionaryRequest( dictionary, diff --git a/tests/queries/0_stateless/01941_dict_get_has_complex_single_key.reference b/tests/queries/0_stateless/01941_dict_get_has_complex_single_key.reference new file mode 100644 index 00000000000..c7e9cb788cb --- /dev/null +++ b/tests/queries/0_stateless/01941_dict_get_has_complex_single_key.reference @@ -0,0 +1,10 @@ +dictGet +Value +Value +Value +Value +dictHas +1 +1 +1 +1 diff --git a/tests/queries/0_stateless/01941_dict_get_has_complex_single_key.sql b/tests/queries/0_stateless/01941_dict_get_has_complex_single_key.sql new file mode 100644 index 00000000000..a44107d6882 --- /dev/null +++ b/tests/queries/0_stateless/01941_dict_get_has_complex_single_key.sql @@ -0,0 +1,26 @@ +DROP TABLE IF EXISTS test_dictionary_source; +CREATE TABLE test_dictionary_source (key String, value String) ENGINE=TinyLog; + +INSERT INTO test_dictionary_source VALUES ('Key', 'Value'); + +DROP DICTIONARY IF EXISTS test_dictionary; +CREATE DICTIONARY test_dictionary(key String, value String) +PRIMARY KEY key +LAYOUT(COMPLEX_KEY_HASHED()) +SOURCE(CLICKHOUSE(TABLE 'test_dictionary_source')) +LIFETIME(0); + +SELECT 'dictGet'; +SELECT dictGet('test_dictionary', 'value', tuple('Key')); +SELECT dictGet('test_dictionary', 'value', tuple(materialize('Key'))); +SELECT dictGet('test_dictionary', 'value', 'Key'); +SELECT dictGet('test_dictionary', 'value', materialize('Key')); + +SELECT 
'dictHas'; +SELECT dictHas('test_dictionary', tuple('Key')); +SELECT dictHas('test_dictionary', tuple(materialize('Key'))); +SELECT dictHas('test_dictionary', 'Key'); +SELECT dictHas('test_dictionary', materialize('Key')); + +DROP DICTIONARY test_dictionary; +DROP TABLE test_dictionary_source; From 19a83b75b751395ace20eea49f9d605f62721cb5 Mon Sep 17 00:00:00 2001 From: robot-clickhouse Date: Fri, 9 Jul 2021 18:20:45 +0300 Subject: [PATCH 895/931] Update version_date.tsv after release 21.7.2.7 --- utils/list-versions/version_date.tsv | 1 + 1 file changed, 1 insertion(+) diff --git a/utils/list-versions/version_date.tsv b/utils/list-versions/version_date.tsv index 541dea23698..3b12363712a 100644 --- a/utils/list-versions/version_date.tsv +++ b/utils/list-versions/version_date.tsv @@ -1,3 +1,4 @@ +v21.7.2.7-stable 2021-07-09 v21.6.6.51-stable 2021-07-02 v21.6.5.37-stable 2021-06-19 v21.6.4.26-stable 2021-06-11 From 2fc16dd69280532b22b0347ff4987aa78ccbc43e Mon Sep 17 00:00:00 2001 From: vdimir Date: Fri, 9 Jul 2021 18:53:32 +0300 Subject: [PATCH 896/931] Add minus sign in prometheus metric name in test --- tests/integration/test_prometheus_endpoint/test.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/integration/test_prometheus_endpoint/test.py b/tests/integration/test_prometheus_endpoint/test.py index 06276803c3d..c2b5a57218b 100644 --- a/tests/integration/test_prometheus_endpoint/test.py +++ b/tests/integration/test_prometheus_endpoint/test.py @@ -30,7 +30,7 @@ def parse_response_line(line): if line.startswith("#"): return {} - match = re.match('^([a-zA-Z_:][a-zA-Z0-9_:]+)(\{.*\})? (\d)', line) + match = re.match('^([a-zA-Z_:][a-zA-Z0-9_:-]+)(\{.*\})? (\d)', line) assert match, line name, _, val = match.groups() return {name: int(val)} From c01f4588b4647068a085cf58aab31d1f57785fa2 Mon Sep 17 00:00:00 2001 From: vdimir Date: Fri, 9 Jul 2021 19:12:26 +0300 Subject: [PATCH 897/931] Validate prometheus metric name with regex --- src/Server/PrometheusMetricsWriter.cpp | 22 +++++++++++++------ .../test_prometheus_endpoint/test.py | 2 +- 2 files changed, 16 insertions(+), 8 deletions(-) diff --git a/src/Server/PrometheusMetricsWriter.cpp b/src/Server/PrometheusMetricsWriter.cpp index 787f0fcd95e..30ae6f6fe42 100644 --- a/src/Server/PrometheusMetricsWriter.cpp +++ b/src/Server/PrometheusMetricsWriter.cpp @@ -4,7 +4,7 @@ #include #include -#include +#include namespace { @@ -24,9 +24,13 @@ void writeOutLine(DB::WriteBuffer & wb, T && val, TArgs &&... 
args)
 {
     writeOutLine(wb, std::forward(args)...);
 }

-void replaceInvalidChars(std::string & metric_name)
+/// Returns false if name is not valid
+bool replaceInvalidChars(std::string & metric_name)
 {
-    std::replace(metric_name.begin(), metric_name.end(), '.', '_');
+    /// dirty solution
+    metric_name = std::regex_replace(metric_name, std::regex("[^a-zA-Z0-9_:]"), "_");
+    metric_name = std::regex_replace(metric_name, std::regex("^[^a-zA-Z]*"), "");
+    return !metric_name.empty();
 }

 }
@@ -57,7 +61,8 @@ void PrometheusMetricsWriter::write(WriteBuffer & wb) const
             std::string metric_name{ProfileEvents::getName(static_cast(i))};
             std::string metric_doc{ProfileEvents::getDocumentation(static_cast(i))};

-            replaceInvalidChars(metric_name);
+            if (!replaceInvalidChars(metric_name))
+                continue;
             std::string key{profile_events_prefix + metric_name};

             writeOutLine(wb, "# HELP", key, metric_doc);
@@ -75,7 +80,8 @@ void PrometheusMetricsWriter::write(WriteBuffer & wb) const
             std::string metric_name{CurrentMetrics::getName(static_cast(i))};
             std::string metric_doc{CurrentMetrics::getDocumentation(static_cast(i))};

-            replaceInvalidChars(metric_name);
+            if (!replaceInvalidChars(metric_name))
+                continue;
             std::string key{current_metrics_prefix + metric_name};

             writeOutLine(wb, "# HELP", key, metric_doc);
@@ -91,7 +97,8 @@ void PrometheusMetricsWriter::write(WriteBuffer & wb) const
         {
             std::string key{asynchronous_metrics_prefix + name_value.first};

-            replaceInvalidChars(key);
+            if (!replaceInvalidChars(key))
+                continue;
             auto value = name_value.second;

             // TODO: add HELP section? asynchronous_metrics contains only key and value
@@ -108,7 +115,8 @@ void PrometheusMetricsWriter::write(WriteBuffer & wb) const
             std::string metric_name{CurrentStatusInfo::getName(static_cast(i))};
             std::string metric_doc{CurrentStatusInfo::getDocumentation(static_cast(i))};

-            replaceInvalidChars(metric_name);
+            if (!replaceInvalidChars(metric_name))
+                continue;
             std::string key{current_status_prefix + metric_name};

             writeOutLine(wb, "# HELP", key, metric_doc);
diff --git a/tests/integration/test_prometheus_endpoint/test.py b/tests/integration/test_prometheus_endpoint/test.py
index c2b5a57218b..06276803c3d 100644
--- a/tests/integration/test_prometheus_endpoint/test.py
+++ b/tests/integration/test_prometheus_endpoint/test.py
@@ -30,7 +30,7 @@ def parse_response_line(line):
     if line.startswith("#"):
         return {}

-    match = re.match('^([a-zA-Z_:][a-zA-Z0-9_:-]+)(\{.*\})? (\d)', line)
+    match = re.match('^([a-zA-Z_:][a-zA-Z0-9_:]+)(\{.*\})? (\d)', line)
     assert match, line
     name, _, val = match.groups()
     return {name: int(val)}

From b7cc1904ccd6856e14be8aafcb0d02ce2202047a Mon Sep 17 00:00:00 2001
From: Vitaly Baranov
Date: Fri, 9 Jul 2021 21:04:31 +0300
Subject: [PATCH 898/931] Add comments to the implementations of the pad
 functions

---
 src/Functions/padString.cpp | 27 +++++++++++++++++++--------
 1 file changed, 19 insertions(+), 8 deletions(-)

diff --git a/src/Functions/padString.cpp b/src/Functions/padString.cpp
index 7711ab1a056..c03733a1198 100644
--- a/src/Functions/padString.cpp
+++ b/src/Functions/padString.cpp
@@ -89,6 +89,9 @@ namespace
         }

         /// Not necessary, but good for performance.
+        /// We repeat `pad_string` multiple times until its length becomes 16 or more.
+        /// It speeds up the function appendTo() because it allows copying padding characters in portions of at least
+        /// 16 bytes instead of single bytes.
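+        /// Each iteration doubles `pad_string`, so the 16-character threshold is reached after only a few steps.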
while (numCharsInPadString() < 16)
         {
             pad_string += pad_string;
@@ -104,6 +107,12 @@ namespace
     }

     String pad_string;
+
+    /// Offsets of code points in `pad_string`:
+    /// utf8_offsets[0] is the offset of the first code point in `pad_string`; it's always 0;
+    /// utf8_offsets[1] is the offset of the second code point in `pad_string`;
+    /// utf8_offsets[2] is the offset of the third code point in `pad_string`;
+    /// ...
     std::vector utf8_offsets;
 };

@@ -243,30 +252,32 @@ namespace
         const PaddingChars & padding_chars,
         StringSink & res_sink) const
     {
-        bool is_const_length = lengths.isConst();
-        bool need_check_length = true;
+        bool is_const_new_length = lengths.isConst();
+        size_t new_length = 0;

+        /// Insert padding characters into each string from `strings` and write the result strings into `res_sink`.
+        /// If for some input string its current length is greater than the specified new length, then that string
+        /// will be trimmed to the specified new length instead of being padded.
         for (; !res_sink.isEnd(); res_sink.next(), strings.next(), lengths.next())
         {
             auto str = strings.getWhole();
             size_t current_length = getLengthOfSlice(str);

-            auto new_length_slice = lengths.getWhole();
-            size_t new_length = new_length_slice.elements->getUInt(new_length_slice.position);
-
-            if (need_check_length)
+            if (!res_sink.rowNum() || !is_const_new_length)
             {
+                /// If `is_const_new_length` is true, we can get and check the new length only once.
+                auto new_length_slice = lengths.getWhole();
+                new_length = new_length_slice.elements->getUInt(new_length_slice.position);
                 if (new_length > MAX_NEW_LENGTH)
                 {
                     throw Exception(
                         "New padded length (" + std::to_string(new_length) + ") is too big, maximum is: " + std::to_string(MAX_NEW_LENGTH),
                         ErrorCodes::TOO_LARGE_STRING_SIZE);
                 }
-                if (is_const_length)
+                if (is_const_new_length)
                 {
                     size_t rows_count = res_sink.offsets.size();
                     res_sink.reserve((new_length + 1 /* zero terminator */) * rows_count);
-                    need_check_length = false;
                 }
             }

From 53f5c63e2cd991658b52da34bdecf5aef4a6719e Mon Sep 17 00:00:00 2001
From: alexey-milovidov
Date: Fri, 9 Jul 2021 22:16:57 +0300
Subject: [PATCH 899/931] Update geoToH3.cpp

---
 src/Functions/geoToH3.cpp | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/Functions/geoToH3.cpp b/src/Functions/geoToH3.cpp
index 16f8de72eb0..d269f9a3a24 100644
--- a/src/Functions/geoToH3.cpp
+++ b/src/Functions/geoToH3.cpp
@@ -87,7 +87,7 @@ public:
             H3Index hindex;
             H3Error err = latLngToCell(&coord, res, &hindex);
             if (err)
-                throw Exception(ErrorCodes::INCORRECT_DATA, "Incorrect coordinates latitude: {}, longitude: {}, error: {}", coord.lat, coord.lon, err);
+                throw Exception(ErrorCodes::INCORRECT_DATA, "Incorrect coordinates latitude: {}, longitude: {}, error: {}", coord.lat, coord.lng, err);

             dst_data[row] = hindex;
         }

From 623b368a707cf17713cf38079161439b9b0c7a6e Mon Sep 17 00:00:00 2001
From: alesapin
Date: Fri, 9 Jul 2021 22:58:21 +0300
Subject: [PATCH 900/931] Add draft for clickhouse-keeper docs

---
 docs/en/operations/clickhouse-keeper.md | 114 ++++++++++++++++++++++++
 1 file changed, 114 insertions(+)
 create mode 100644 docs/en/operations/clickhouse-keeper.md

diff --git a/docs/en/operations/clickhouse-keeper.md b/docs/en/operations/clickhouse-keeper.md
new file mode 100644
index 00000000000..3dec4c74088
--- /dev/null
+++ b/docs/en/operations/clickhouse-keeper.md
@@ -0,0 +1,114 @@
+---
+toc_priority: 66
+toc_title: ClickHouse Keeper
+---
+
+# [pre-production] clickhouse-keeper
+
+ClickHouse server uses the [ZooKeeper](https://zookeeper.apache.org/) coordination system for data [replication](../../engines/table-engines/mergetree-family/replication/) and [distributed DDL](../../sql-reference/distributed-ddl/) query execution. ClickHouse Keeper is an alternative coordination system compatible with ZooKeeper.
+
+!!! warning "Warning"
+    This feature is currently in the pre-production stage. We test it in our CI and on small internal installations.
+
+## Implementation details
+
+ZooKeeper is one of the first well-known open-source coordination systems. It's implemented in Java and has a quite simple and powerful data model. ZooKeeper's coordination algorithm, ZAB (ZooKeeper Atomic Broadcast), doesn't provide linearizability guarantees for reads, because each ZooKeeper node serves reads locally. Unlike ZooKeeper, `clickhouse-keeper` is written in C++ and uses the [RAFT algorithm](https://raft.github.io/) [implementation](https://github.com/eBay/NuRaft). This algorithm allows linearizability for reads and writes and has several open-source implementations in different languages.
+
+By default, `clickhouse-keeper` provides the same guarantees as ZooKeeper (linearizable writes, non-linearizable reads). It has a compatible client-server protocol, so any standard ZooKeeper client can be used to interact with `clickhouse-keeper`. Snapshots and logs have a format incompatible with ZooKeeper, but the `clickhouse-keeper-converter` tool allows converting ZooKeeper data to a `clickhouse-keeper` snapshot. The interserver protocol in `clickhouse-keeper` is also incompatible with ZooKeeper, so a mixed ZooKeeper/clickhouse-keeper cluster is impossible.
+
+## Configuration
+
+`clickhouse-keeper` can be used as a standalone replacement for ZooKeeper or as an internal part of the `clickhouse-server`, but in both cases the configuration is almost the same `.xml` file. The main `clickhouse-keeper` configuration tag is `<keeper_server>`. Keeper configuration has the following parameters:
+
+- `tcp_port` — the port for a client to connect (default for ZooKeeper is `2181`)
+- `tcp_port_secure` — the secure port for a client to connect
+- `server_id` — unique server id; each participant of the clickhouse-keeper cluster must have a unique number (1, 2, 3, and so on)
+- `log_storage_path` — path to coordination logs; as with ZooKeeper, it is best to store logs on a non-busy device
+- `snapshot_storage_path` — path to coordination snapshots
+
+Other common parameters are inherited from the clickhouse-server config (`listen_host`, `logger`, and so on).
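+
+As a quick sanity check (only a sketch, assuming a running `clickhouse-server` whose `<zookeeper>` section already points at the keeper quorum), you can browse the coordination tree through the `system.zookeeper` table:
+
+```sql
+-- List the root of the coordination namespace served by clickhouse-keeper.
+SELECT name, value FROM system.zookeeper WHERE path = '/';
+```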
+
+Internal coordination settings are located in the `<keeper_server>.<coordination_settings>` section:
+
+- `operation_timeout_ms` — timeout for a single client operation
+- `session_timeout_ms` — timeout for a client session
+- `dead_session_check_period_ms` — how often clickhouse-keeper checks for dead sessions and removes them
+- `heart_beat_interval_ms` — how often a clickhouse-keeper leader will send heartbeats to followers
+- `election_timeout_lower_bound_ms` — if a follower didn't receive heartbeats from the leader in this interval, then it can initiate a leader election
+- `election_timeout_upper_bound_ms` — if a follower didn't receive heartbeats from the leader in this interval, then it must initiate a leader election
+- `rotate_log_storage_interval` — how many log records to store in a single file
+- `reserved_log_items` — how many coordination log records to store before compaction
+- `snapshot_distance` — how often clickhouse-keeper will create new snapshots (in the number of log records)
+- `snapshots_to_keep` — how many snapshots to keep
+- `stale_log_gap` — the threshold at which the leader considers a follower stale and sends a snapshot to it instead of logs
+- `force_sync` — call `fsync` on each write to the coordination log
+- `raft_logs_level` — text logging level about coordination (trace, debug, and so on)
+- `shutdown_timeout` — wait to finish internal connections and shut down
+- `startup_timeout` — if the server doesn't connect to other quorum participants within the specified timeout, it will terminate
+
+Quorum configuration is located in the `<keeper_server>.<raft_configuration>` section and contains a description of the servers. The only parameter for the whole quorum is `secure`, which enables an encrypted connection for communication between quorum participants. The main parameters for each `<server>` are:
+
+- `id` — server id in the quorum
+- `hostname` — hostname where this server is placed
+- `port` — port where this server listens for connections
+
+
+Examples of configuration for a quorum with three nodes can be found in [integration tests](https://github.com/ClickHouse/ClickHouse/tree/master/tests/integration) with the `test_keeper_` prefix. Example configuration for server #1:
+
+```xml
+<keeper_server>
+    <tcp_port>2181</tcp_port>
+    <server_id>1</server_id>
+    <log_storage_path>/var/lib/clickhouse/coordination/log</log_storage_path>
+    <snapshot_storage_path>/var/lib/clickhouse/coordination/snapshots</snapshot_storage_path>
+
+    <coordination_settings>
+        <operation_timeout_ms>10000</operation_timeout_ms>
+        <session_timeout_ms>30000</session_timeout_ms>
+        <raft_logs_level>trace</raft_logs_level>
+    </coordination_settings>
+
+    <raft_configuration>
+        <server>
+            <id>1</id>
+            <hostname>zoo1</hostname>
+            <port>9444</port>
+        </server>
+        <server>
+            <id>2</id>
+            <hostname>zoo2</hostname>
+            <port>9444</port>
+        </server>
+        <server>
+            <id>3</id>
+            <hostname>zoo3</hostname>
+            <port>9444</port>
+        </server>
+    </raft_configuration>
+</keeper_server>
+```
+
+## How to run
+
+`clickhouse-keeper` is bundled into the `clickhouse-server` package; just add the configuration of `<keeper_server>` and start clickhouse-server as always. If you want to run a standalone `clickhouse-keeper`, you can start it in a similar way with:
+
+```bash
+clickhouse-keeper --config /etc/your_path_to_config/config.xml --daemon
+```
+
+## [experimental] Migration from ZooKeeper
+
+Seamless migration from ZooKeeper to `clickhouse-keeper` is impossible: you have to stop your ZooKeeper cluster, convert the data, and start `clickhouse-keeper`. The `clickhouse-keeper-converter` tool allows converting ZooKeeper logs and snapshots to a `clickhouse-keeper` snapshot. It works only with ZooKeeper > 3.4. Steps for migration:
+
+1. Stop all ZooKeeper nodes.
+
+2. [optional, but recommended] Find the ZooKeeper leader node, then start and stop it again. This forces ZooKeeper to create a consistent snapshot.
+
+3. Run `clickhouse-keeper-converter` on the leader, for example:
+
+```bash
+clickhouse-keeper-converter --zookeeper-logs-dir /var/lib/zookeeper/version-2 --zookeeper-snapshots-dir /var/lib/zookeeper/version-2 --output-dir /path/to/clickhouse/keeper/snapshots
+```
+
+4. Copy the snapshot to `clickhouse-server` nodes with a configured `keeper` or start `clickhouse-keeper` instead of ZooKeeper. The snapshot must persist only on the leader node; the leader will sync it automatically to the other nodes.
+
From 3588c5ad1722e378f07a8d9c03e191dbf7ba8311 Mon Sep 17 00:00:00 2001
From: Anton Popov
Date: Fri, 9 Jul 2021 23:32:14 +0300
Subject: [PATCH 901/931] add color for double colon

---
 programs/client/Client.cpp | 1 +
 1 file changed, 1 insertion(+)

diff --git a/programs/client/Client.cpp b/programs/client/Client.cpp
index c4aef014971..9c1c8338321 100644
--- a/programs/client/Client.cpp
+++ b/programs/client/Client.cpp
@@ -430,6 +430,7 @@ private:
             {TokenType::ClosingRoundBracket, Replxx::Color::BROWN},
             {TokenType::OpeningSquareBracket, Replxx::Color::BROWN},
             {TokenType::ClosingSquareBracket, Replxx::Color::BROWN},
+            {TokenType::DoubleColon, Replxx::Color::BROWN},

             {TokenType::OpeningCurlyBrace, Replxx::Color::INTENSE},
             {TokenType::ClosingCurlyBrace, Replxx::Color::INTENSE},

From 56c04c604e72d7b9ce3a8e7c020d08ac5c0bb830 Mon Sep 17 00:00:00 2001
From: kssenii
Date: Fri, 9 Jul 2021 23:44:57 +0300
Subject: [PATCH 902/931] Remove misleading stderr

---
 src/Common/Config/configReadClient.cpp | 10 ++--------
 1 file changed, 2 insertions(+), 8 deletions(-)

diff --git a/src/Common/Config/configReadClient.cpp b/src/Common/Config/configReadClient.cpp
index cbe5b3f7bc2..e7bc0b72814 100644
--- a/src/Common/Config/configReadClient.cpp
+++ b/src/Common/Config/configReadClient.cpp
@@ -10,16 +10,10 @@ namespace fs = std::filesystem;
 namespace DB
 {

-/// Checks if file exists without throwing an exception but with message in console.
-bool safeFsExists(const auto & path)
+bool safeFsExists(const String & path)
 {
     std::error_code ec;
-    bool res = fs::exists(path, ec);
-    if (ec)
-    {
-        std::cerr << "Can't check '" << path << "': [" << ec.value() << "] " << ec.message() << std::endl;
-    }
-    return res;
+    return fs::exists(path, ec);
 };

 bool configReadClient(Poco::Util::LayeredConfiguration & config, const std::string & home_path)

From dc81ae7c6ee83dc7717b5b0615e6c2087a984647 Mon Sep 17 00:00:00 2001
From: Maksim Kita
Date: Sat, 10 Jul 2021 01:51:34 +0300
Subject: [PATCH 903/931] Compile AggregateFunctionBitwise

---
 .../AggregateFunctionBitwise.h                | 117 ++++++++-
 src/Core/Settings.h                           |   2 +-
 tests/performance/jit_aggregate_functions.xml |  19 +-
 .../00165_jit_aggregate_functions.reference   | 240 +++++++++---------
 .../00165_jit_aggregate_functions.sql         |  80 +++++-
 5 files changed, 322 insertions(+), 136 deletions(-)

diff --git a/src/AggregateFunctions/AggregateFunctionBitwise.h b/src/AggregateFunctions/AggregateFunctionBitwise.h
index 5582a200921..90db2469828 100644
--- a/src/AggregateFunctions/AggregateFunctionBitwise.h
+++ b/src/AggregateFunctions/AggregateFunctionBitwise.h
@@ -9,6 +9,14 @@
 #include

+#if !defined(ARCADIA_BUILD)
+#    include
+#endif
+
+#if USE_EMBEDDED_COMPILER
+#    include
+#    include
+#endif

 namespace DB
 {
@@ -21,6 +29,21 @@ struct AggregateFunctionGroupBitOrData
     T value = 0;
     static const char * name() { return "groupBitOr"; }
     void update(T x) { value |= x; }
+
+#if USE_EMBEDDED_COMPILER
+
+    static void compileCreate(llvm::IRBuilderBase & builder, llvm::Value * value_ptr)
+    {
+        auto type = toNativeType(builder);
+        builder.CreateStore(llvm::Constant::getNullValue(type), value_ptr);
+    }
+
+    static llvm::Value* compileUpdate(llvm::IRBuilderBase & builder, llvm::Value * lhs, llvm::Value * rhs)
+    {
+        return builder.CreateOr(lhs, rhs);
+    }
+
+#endif
 };

 template
@@ -29,6 +52,21 @@ struct
@@ -29,6 +52,21 @@ struct AggregateFunctionGroupBitAndData
     T value = -1;   /// Two's complement arithmetic, sign extension.
     static const char * name() { return "groupBitAnd"; }
     void update(T x) { value &= x; }
+
+#if USE_EMBEDDED_COMPILER
+
+    static void compileCreate(llvm::IRBuilderBase & builder, llvm::Value * value_ptr)
+    {
+        auto type = toNativeType<T>(builder);
+        builder.CreateStore(llvm::ConstantInt::get(type, -1), value_ptr);
+    }
+
+    static llvm::Value* compileUpdate(llvm::IRBuilderBase & builder, llvm::Value * lhs, llvm::Value * rhs)
+    {
+        return builder.CreateAnd(lhs, rhs);
+    }
+
+#endif
 };
 
 template <typename T>
@@ -37,6 +75,21 @@ struct AggregateFunctionGroupBitXorData
     T value = 0;
     static const char * name() { return "groupBitXor"; }
     void update(T x) { value ^= x; }
+
+#if USE_EMBEDDED_COMPILER
+
+    static void compileCreate(llvm::IRBuilderBase & builder, llvm::Value * value_ptr)
+    {
+        auto type = toNativeType<T>(builder);
+        builder.CreateStore(llvm::Constant::getNullValue(type), value_ptr);
+    }
+
+    static llvm::Value* compileUpdate(llvm::IRBuilderBase & builder, llvm::Value * lhs, llvm::Value * rhs)
+    {
+        return builder.CreateXor(lhs, rhs);
+    }
+
+#endif
 };
 
 
@@ -45,7 +98,7 @@
 template <typename T, typename Data>
 class AggregateFunctionBitwise final : public IAggregateFunctionDataHelper<Data, AggregateFunctionBitwise<T, Data>>
 {
 public:
-    AggregateFunctionBitwise(const DataTypePtr & type)
+    explicit AggregateFunctionBitwise(const DataTypePtr & type)
         : IAggregateFunctionDataHelper<Data, AggregateFunctionBitwise<T, Data>>({type}, {}) {}
 
     String getName() const override { return Data::name(); }
@@ -81,6 +134,68 @@ public:
     {
         assert_cast<ColumnVector<T> &>(to).getData().push_back(this->data(place).value);
     }
+
+#if USE_EMBEDDED_COMPILER
+
+    bool isCompilable() const override
+    {
+        auto return_type = getReturnType();
+        return canBeNativeType(*return_type);
+    }
+
+    void compileCreate(llvm::IRBuilderBase & builder, llvm::Value * aggregate_data_ptr) const override
+    {
+        llvm::IRBuilder<> & b = static_cast<llvm::IRBuilder<> &>(builder);
+
+        auto * return_type = toNativeType(b, getReturnType());
+        auto * value_ptr = b.CreatePointerCast(aggregate_data_ptr, return_type->getPointerTo());
+        Data::compileCreate(builder, value_ptr);
+    }
+
+    void compileAdd(llvm::IRBuilderBase & builder, llvm::Value * aggregate_data_ptr, const DataTypes &, const std::vector<llvm::Value *> & argument_values) const override
+    {
+        llvm::IRBuilder<> & b = static_cast<llvm::IRBuilder<> &>(builder);
+
+        auto * return_type = toNativeType(b, getReturnType());
+
+        auto * value_ptr = b.CreatePointerCast(aggregate_data_ptr, return_type->getPointerTo());
+        auto * value = b.CreateLoad(return_type, value_ptr);
+
+        const auto & argument_value = argument_values[0];
+        auto * result_value = Data::compileUpdate(builder, value, argument_value);
+
+        b.CreateStore(result_value, value_ptr);
+    }
+
+    void compileMerge(llvm::IRBuilderBase & builder, llvm::Value * aggregate_data_dst_ptr, llvm::Value * aggregate_data_src_ptr) const override
+    {
+        llvm::IRBuilder<> & b = static_cast<llvm::IRBuilder<> &>(builder);
+
+        auto * return_type = toNativeType(b, getReturnType());
+
+        auto * value_dst_ptr = b.CreatePointerCast(aggregate_data_dst_ptr, return_type->getPointerTo());
+        auto * value_dst = b.CreateLoad(return_type, value_dst_ptr);
+
+        auto * value_src_ptr = b.CreatePointerCast(aggregate_data_src_ptr, return_type->getPointerTo());
+        auto * value_src = b.CreateLoad(return_type, value_src_ptr);
+
+        auto * result_value = Data::compileUpdate(builder, value_dst, value_src);
+
+        b.CreateStore(result_value, value_dst_ptr);
+    }
+
+    llvm::Value * compileGetResult(llvm::IRBuilderBase & builder, llvm::Value * aggregate_data_ptr) const override
+    {
+        llvm::IRBuilder<> & b = static_cast<llvm::IRBuilder<> &>(builder);
+
+        auto * return_type = toNativeType(b, getReturnType());
+        auto * value_ptr = b.CreatePointerCast(aggregate_data_ptr, return_type->getPointerTo());
+
+        return b.CreateLoad(return_type, value_ptr);
+    }
+
+#endif
+
 };
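To see this code path from SQL, something like the following should exercise the freshly compilable bitwise aggregates — a sketch only: the two settings are real (the second one is changed in the Settings.h hunk that follows), but the data here is made up for illustration:

```sql
-- Force JIT compilation of aggregate expressions, then run the three
-- aggregates this patch teaches the compiler about.
SET compile_aggregate_expressions = 1;
SET min_count_to_compile_aggregate_expression = 0;

SELECT
    groupBitOr(n),   -- bitwise OR of all values
    groupBitAnd(n),  -- bitwise AND of all values
    groupBitXor(n)   -- bitwise XOR of all values
FROM (SELECT toUInt64(number) AS n FROM numbers(8));
```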
diff --git a/src/Core/Settings.h b/src/Core/Settings.h
index 28e46160a98..8c733415dec 100644
--- a/src/Core/Settings.h
+++ b/src/Core/Settings.h
@@ -108,7 +108,7 @@ class IColumn;
     M(Bool, compile_expressions, true, "Compile some scalar functions and operators to native code.", 0) \
     M(UInt64, min_count_to_compile_expression, 3, "The number of identical expressions before they are JIT-compiled", 0) \
     M(Bool, compile_aggregate_expressions, true, "Compile aggregate functions to native code.", 0) \
-    M(UInt64, min_count_to_compile_aggregate_expression, 3, "The number of identical aggregate expressions before they are JIT-compiled", 0) \
+    M(UInt64, min_count_to_compile_aggregate_expression, 0, "The number of identical aggregate expressions before they are JIT-compiled", 0) \
     M(UInt64, group_by_two_level_threshold, 100000, "From what number of keys, a two-level aggregation starts. 0 - the threshold is not set.", 0) \
     M(UInt64, group_by_two_level_threshold_bytes, 50000000, "From what size of the aggregation state in bytes, a two-level aggregation begins to be used. 0 - the threshold is not set. Two-level aggregation is used when at least one of the thresholds is triggered.", 0) \
     M(Bool, distributed_aggregation_memory_efficient, true, "Is the memory-saving mode of distributed aggregation enabled.", 0) \
diff --git a/tests/performance/jit_aggregate_functions.xml b/tests/performance/jit_aggregate_functions.xml
index 21683ef2004..88ac46b2da8 100644
--- a/tests/performance/jit_aggregate_functions.xml
+++ b/tests/performance/jit_aggregate_functions.xml
@@ -69,6 +69,9 @@
                 <value>any</value>
                 <value>anyLast</value>
                 <value>count</value>
+                <value>groupBitOr</value>
+                <value>groupBitAnd</value>
+                <value>groupBitXor</value>
@@ -119,7 +122,7 @@
         SELECT
             {function}(value_1),
             {function}(value_2),
-            groupBitAnd(value_3),
+            sum(toUInt256(value_3)),
             {function}(value_3)
         FROM {table}
         GROUP BY key
@@ -140,7 +143,7 @@
         SELECT
             {function}If(value_1, predicate),
            {function}If(value_2, predicate),
-            groupBitAndIf(value_3, predicate),
+            sumIf(toUInt256(value_3), predicate),
             {function}If(value_3, predicate)
         FROM {table}
         GROUP BY key
@@ -163,7 +166,7 @@
         SELECT
             {function}(value_1),
             {function}(value_2),
-            groupBitAnd(value_3),
+            sum(toUInt256(value_3)),
             {function}(value_3),
             {function}(value_4),
             {function}(value_5)
@@ -188,7 +191,7 @@
         SELECT
             {function}If(value_1, predicate),
             {function}If(value_2, predicate),
-            groupBitAndIf(value_3, predicate),
+            sumIf(toUInt256(value_3), predicate),
             {function}If(value_3, predicate),
             {function}If(value_4, predicate),
             {function}If(value_5, predicate)
@@ -212,7 +215,7 @@
         SELECT
             {function}(WatchID),
             {function}(CounterID),
-            groupBitAnd(ClientIP),
+            sum(toUInt256(ClientIP)),
             {function}(ClientIP)
         FROM hits_100m_single
         GROUP BY intHash32(UserID) % {group_scale}
@@ -235,7 +238,7 @@
         SELECT
             {function}(WatchID),
             {function}(CounterID),
-            groupBitAnd(ClientIP),
+            sum(toUInt256(ClientIP)),
             {function}(ClientIP),
             {function}(GoodEvent),
             {function}(CounterClass)
@@ -260,7 +263,7 @@
         SELECT
             {function}If(WatchID, predicate),
             {function}If(CounterID, predicate),
-            groupBitAndIf(ClientIP, predicate),
+            sumIf(toUInt256(ClientIP), predicate),
             {function}If(ClientIP, predicate)
         FROM hits_100m_single
         GROUP BY intHash32(UserID) % {group_scale}
@@ -285,7 +288,7 @@
         SELECT
             {function}If(WatchID, predicate),
             {function}If(CounterID, predicate),
-            groupBitAndIf(ClientIP, predicate),
+
sumIf(toUInt256(ClientIP), predicate), {function}If(ClientIP, predicate), {function}If(GoodEvent, predicate), {function}If(CounterClass, predicate) diff --git a/tests/queries/1_stateful/00165_jit_aggregate_functions.reference b/tests/queries/1_stateful/00165_jit_aggregate_functions.reference index 2d94ad190ca..451a676754c 100644 --- a/tests/queries/1_stateful/00165_jit_aggregate_functions.reference +++ b/tests/queries/1_stateful/00165_jit_aggregate_functions.reference @@ -1,128 +1,128 @@ Aggregation using JIT compilation Simple functions -1704509 4611700827100483880 9223360787015464643 10441337359398154812 19954243669348.844 9648741.579254271 523264 -732797 4611701940806302259 9223355550934604746 977192643464016658 2054229034942.3723 51998323.94457991 475698 -598875 4611701407242345792 9223362250391155632 9312163881623734456 27615161624211.875 12261797.824844675 337212 -792887 4611699550286611812 9223290551912005343 6930300520201292824 27479710385933.586 53095331.60360441 252197 -3807842 4611710821592843606 9223326163906184987 16710274896338005145 85240848090850.69 22373416.533275086 196036 -25703952 4611709443519524003 9223353913449113943 9946868158853570839 67568783303242.086 3154349.826950714 147211 -716829 4611852156092872082 9223361623076951140 15381015774917924786 170693446547158.72 201431892.4773785 90109 -59183 4611730685242027332 9223354909338698162 8078812522502896568 94622946187035.42 1425270865.0901496 85379 -33010362 4611704682869732882 9223268545373999677 2064452191838585926 26532987929602.555 3695122.4062526934 77807 -800784 4611752907938305166 9223340418389788041 18082918611792817587 233352070043266.62 36535786.81446395 77492 -20810645 4611712185532639162 9223218900001937412 4996531385439292694 68246505203164.63 6316535.831023813 73213 -25843850 4611690025407720929 9223346023778617822 12755881190906812868 185015319325648.16 9962165.34831339 68945 -23447120 4611796031755620254 9223329309291309758 17231649548755339966 255019232629204.38 7937191.271698021 67570 -14739804 4611692230555590277 9223313509005166531 2458378896777063244 38308020331864.36 14590240.469105456 64174 -32077710 4611884228437061959 9223352444952988904 12965822147651192908 214467085941034.7 7257521.096258734 60456 -22446879 4611846229717089436 9223124373140579096 13530160492087688838 231724477077663.4 4737362.521046629 58389 -170282 4611833225706935900 9223371583739401906 8076893424988479310 141657635880324.8 1613795518.1065989 57017 -11482817 4611708000353743073 9223337838355779113 14841435427430843458 283531099960470.8 9938452.835998287 52345 -63469 4611695097019173921 9223353530156141191 6296784708578574520 120762239817777.88 579655378.4603049 52142 -29103473 4611744585914335132 9223333530281362537 5908285283932344933 123712996438970.34 867841.595541967 47758 +1704509 4611700827100483880 9223360787015464643 10441337359398154812 19954243669348.844 9648741.579254271 523264 9223372036854775807 4611686018427387904 4544239379628300646 +732797 4611701940806302259 9223355550934604746 977192643464016658 2054229034942.3723 51998323.94457991 475698 9223372036854775807 4611686018427387904 4091184823334377716 +598875 4611701407242345792 9223362250391155632 9312163881623734456 27615161624211.875 12261797.824844675 337212 9223372036854775807 4611686018427387904 3725992504798702670 +792887 4611699550286611812 9223290551912005343 6930300520201292824 27479710385933.586 53095331.60360441 252197 9223372036854775807 4611686018427387904 6536441508464694614 +3807842 4611710821592843606 9223326163906184987 16710274896338005145 
85240848090850.69 22373416.533275086 196036 9223372036854775807 4611686018427387904 1797862753609257231 +25703952 4611709443519524003 9223353913449113943 9946868158853570839 67568783303242.086 3154349.826950714 147211 9223372036854775807 4611686018427387904 8737124378202300429 +716829 4611852156092872082 9223361623076951140 15381015774917924786 170693446547158.72 201431892.4773785 90109 9223372036854775807 4611686018427387904 8209915323001116338 +59183 4611730685242027332 9223354909338698162 8078812522502896568 94622946187035.42 1425270865.0901496 85379 9223372036854775807 4611686018427387904 8909082036598843562 +33010362 4611704682869732882 9223268545373999677 2064452191838585926 26532987929602.555 3695122.4062526934 77807 9223372036854775807 4611686018427387904 5411365383789552292 +800784 4611752907938305166 9223340418389788041 18082918611792817587 233352070043266.62 36535786.81446395 77492 9223372036854775807 4611686018427387904 2059255810151375435 +20810645 4611712185532639162 9223218900001937412 4996531385439292694 68246505203164.63 6316535.831023813 73213 9223372036854775807 4611686018427387904 8852740550386113674 +25843850 4611690025407720929 9223346023778617822 12755881190906812868 185015319325648.16 9962165.34831339 68945 9223372036854775807 4611686018427387904 7849665866595760148 +23447120 4611796031755620254 9223329309291309758 17231649548755339966 255019232629204.38 7937191.271698021 67570 9223372036854775807 4611686018427387904 3435410911925610424 +14739804 4611692230555590277 9223313509005166531 2458378896777063244 38308020331864.36 14590240.469105456 64174 9223372036854775807 4611686018427387904 511910855240035342 +32077710 4611884228437061959 9223352444952988904 12965822147651192908 214467085941034.7 7257521.096258734 60456 9223372036854775807 4611686018427387904 2256071920672551964 +22446879 4611846229717089436 9223124373140579096 13530160492087688838 231724477077663.4 4737362.521046629 58389 9223372036854775807 4611686018427387904 6236276364886386410 +170282 4611833225706935900 9223371583739401906 8076893424988479310 141657635880324.8 1613795518.1065989 57017 9223372036854775807 4611686018427387904 4755775861151848768 +11482817 4611708000353743073 9223337838355779113 14841435427430843458 283531099960470.8 9938452.835998287 52345 9223372036854775807 4611686018427387904 5371586112642152558 +63469 4611695097019173921 9223353530156141191 6296784708578574520 120762239817777.88 579655378.4603049 52142 9223372036854775807 4611686018427387904 4150567963952988110 +29103473 4611744585914335132 9223333530281362537 5908285283932344933 123712996438970.34 867841.595541967 47758 9223372036854775807 4611686018427387904 3238284030821087319 Simple functions with non compilable function -1704509 4611700827100483880 9223360787015464643 10441337359398154812 4611686018427387904 19954243669348.844 9648741.579254271 523264 -732797 4611701940806302259 9223355550934604746 977192643464016658 4611686018427387904 2054229034942.3723 51998323.94457991 475698 -598875 4611701407242345792 9223362250391155632 9312163881623734456 4611686018427387904 27615161624211.875 12261797.824844675 337212 -792887 4611699550286611812 9223290551912005343 6930300520201292824 4611686018427387904 27479710385933.586 53095331.60360441 252197 -3807842 4611710821592843606 9223326163906184987 16710274896338005145 4611686018427387904 85240848090850.69 22373416.533275086 196036 -25703952 4611709443519524003 9223353913449113943 9946868158853570839 4611686018427387904 67568783303242.086 3154349.826950714 147211 -716829 
4611852156092872082 9223361623076951140 15381015774917924786 4611686018427387904 170693446547158.72 201431892.4773785 90109 -59183 4611730685242027332 9223354909338698162 8078812522502896568 4611686018427387904 94622946187035.42 1425270865.0901496 85379 -33010362 4611704682869732882 9223268545373999677 2064452191838585926 4611686018427387904 26532987929602.555 3695122.4062526934 77807 -800784 4611752907938305166 9223340418389788041 18082918611792817587 4611686018427387904 233352070043266.62 36535786.81446395 77492 -20810645 4611712185532639162 9223218900001937412 4996531385439292694 4611686018427387904 68246505203164.63 6316535.831023813 73213 -25843850 4611690025407720929 9223346023778617822 12755881190906812868 4611686018427387904 185015319325648.16 9962165.34831339 68945 -23447120 4611796031755620254 9223329309291309758 17231649548755339966 4611686018427387904 255019232629204.38 7937191.271698021 67570 -14739804 4611692230555590277 9223313509005166531 2458378896777063244 4611686018427387904 38308020331864.36 14590240.469105456 64174 -32077710 4611884228437061959 9223352444952988904 12965822147651192908 4611686018427387904 214467085941034.7 7257521.096258734 60456 -22446879 4611846229717089436 9223124373140579096 13530160492087688838 4611686018427387904 231724477077663.4 4737362.521046629 58389 -170282 4611833225706935900 9223371583739401906 8076893424988479310 4611686018427387904 141657635880324.8 1613795518.1065989 57017 -11482817 4611708000353743073 9223337838355779113 14841435427430843458 4611686018427387904 283531099960470.8 9938452.835998287 52345 -63469 4611695097019173921 9223353530156141191 6296784708578574520 4611686018427387904 120762239817777.88 579655378.4603049 52142 -29103473 4611744585914335132 9223333530281362537 5908285283932344933 4611686018427387904 123712996438970.34 867841.595541967 47758 +1704509 4611700827100483880 9223360787015464643 10441337359398154812 3620921835565807284859452 19954243669348.844 9648741.579254271 523264 9223372036854775807 4611686018427387904 4544239379628300646 +732797 4611701940806302259 9223355550934604746 977192643464016658 3289442827160604417733394 2054229034942.3723 51998323.94457991 475698 9223372036854775807 4611686018427387904 4091184823334377716 +598875 4611701407242345792 9223362250391155632 9312163881623734456 2330921446573746856380600 27615161624211.875 12261797.824844675 337212 9223372036854775807 4611686018427387904 3725992504798702670 +792887 4611699550286611812 9223290551912005343 6930300520201292824 1745179600137886041476120 27479710385933.586 53095331.60360441 252197 9223372036854775807 4611686018427387904 6536441508464694614 +3807842 4611710821592843606 9223326163906184987 16710274896338005145 1356295121550317411019929 85240848090850.69 22373416.533275086 196036 9223372036854775807 4611686018427387904 1797862753609257231 +25703952 4611709443519524003 9223353913449113943 9946868158853570839 1018731388338768841564439 67568783303242.086 3154349.826950714 147211 9223372036854775807 4611686018427387904 8737124378202300429 +716829 4611852156092872082 9223361623076951140 15381015774917924786 623810478612337115371442 170693446547158.72 201431892.4773785 90109 9223372036854775807 4611686018427387904 8209915323001116338 +59183 4611730685242027332 9223354909338698162 8078812522502896568 589916507545680254024632 94622946187035.42 1425270865.0901496 85379 9223372036854775807 4611686018427387904 8909082036598843562 +33010362 4611704682869732882 9223268545373999677 2064452191838585926 538517864195994778911814 26532987929602.555 
3695122.4062526934 77807 9223372036854775807 4611686018427387904 5411365383789552292 +800784 4611752907938305166 9223340418389788041 18082918611792817587 535545510122473785781683 233352070043266.62 36535786.81446395 77492 9223372036854775807 4611686018427387904 2059255810151375435 +20810645 4611712185532639162 9223218900001937412 4996531385439292694 506405014842860050255126 68246505203164.63 6316535.831023813 73213 9223372036854775807 4611686018427387904 8852740550386113674 +25843850 4611690025407720929 9223346023778617822 12755881190906812868 476547495537329753708996 185015319325648.16 9962165.34831339 68945 9223372036854775807 4611686018427387904 7849665866595760148 +23447120 4611796031755620254 9223329309291309758 17231649548755339966 467236365548464278670014 255019232629204.38 7937191.271698021 67570 9223372036854775807 4611686018427387904 3435410911925610424 +14739804 4611692230555590277 9223313509005166531 2458378896777063244 444126268697527941770060 38308020331864.36 14590240.469105456 64174 9223372036854775807 4611686018427387904 511910855240035342 +32077710 4611884228437061959 9223352444952988904 12965822147651192908 417407443977973675608140 214467085941034.7 7257521.096258734 60456 9223372036854775807 4611686018427387904 2256071920672551964 +22446879 4611846229717089436 9223124373140579096 13530160492087688838 403462269796593691082374 231724477077663.4 4737362.521046629 58389 9223372036854775807 4611686018427387904 6236276364886386410 +170282 4611833225706935900 9223371583739401906 8076893424988479310 394417911933408911581006 141657635880324.8 1613795518.1065989 57017 9223372036854775807 4611686018427387904 4755775861151848768 +11482817 4611708000353743073 9223337838355779113 14841435427430843458 361995300393829962204226 283531099960470.8 9938452.835998287 52345 9223372036854775807 4611686018427387904 5371586112642152558 +63469 4611695097019173921 9223353530156141191 6296784708578574520 360843057610541117735096 120762239817777.88 579655378.4603049 52142 9223372036854775807 4611686018427387904 4150567963952988110 +29103473 4611744585914335132 9223333530281362537 5908285283932344933 330534668598011678200421 123712996438970.34 867841.595541967 47758 9223372036854775807 4611686018427387904 3238284030821087319 Simple functions if combinator -1704509 4611700827100483880 9223310246721229500 16398241567152875142 62618822667209.71 2224726.7626273884 261874 -732797 4611721382223060002 9223355550934604746 16281585268876620522 68472164943295.68 5898616.931652982 237784 -598875 4611701407242345792 9223362250391155632 3577699408183553052 21300140553347.42 53771550.26565126 167966 -792887 4611699550286611812 9223164887726235740 7088177025760385824 56461952267903.89 92835869.96920013 125539 -3807842 4611710821592843606 9223283397553859544 5756765290752687660 58835559208469.4 39794091.419183925 97845 -25703952 4611784761593342388 9223241341744449690 4782279928971192568 65182094768443.91 9276773.708181158 73368 -716829 4611852156092872082 9223361623076951140 8613712481895484190 191445613359755.62 291083243.75407773 44993 -59183 4611730685242027332 9223354909338698162 18369075291092794110 429013599530392 5925109959.715378 42817 -33010362 4611704682869732882 9223092117352620518 9991152681891671022 257099731913529.5 12412830.045471078 38861 -800784 4611752907938305166 9223309994342931384 5251877538869750510 135472890315726.03 53535427.52018088 38767 -20810645 4611712185532639162 9223218900001937412 11803718472901310700 323593455407553 10496765.20741332 36477 -25843850 4611744529689964352 
9223346023778617822 127137885677350808 3700925266420.715 18966925.191309396 34353 -23447120 4611796031755620254 9223329309291309758 1841522159325376278 54534534450526.42 6271211.193812284 33768 -14739804 4611762063154116632 9223007205463222212 16302703534054321116 506987919332451.8 6885575.861759452 32156 -32077710 4612033458080771112 9223352444952988904 421072759851674408 13955745719596.793 12220152.393889504 30172 -22446879 4611846229717089436 9223124373140579096 6577134317587565298 224866980668999.47 2482202.163802278 29249 -170282 4611833225706935900 9223371583739401906 15764226366913732386 551447384017691 2515144222.953728 28587 -11482817 4611990575414646848 9223302669582414438 9828522700609834800 378121905921203.2 34845264.2080656 25993 -63469 4612175339998036670 9222961628400798084 17239621485933250238 663164390134376.5 7825349797.6059 25996 -29103473 4611744585914335132 9223035551850347954 12590190375872647672 525927999326314.7 26049107.15514301 23939 +1704509 4611700827100483880 9223310246721229500 16398241567152875142 62618822667209.71 2224726.7626273884 261874 9223372036854775806 4611686018427387904 4518874482384062894 +732797 4611721382223060002 9223355550934604746 16281585268876620522 68472164943295.68 5898616.931652982 237784 9223372036854775806 4611686018427387904 3641900047478154650 +598875 4611701407242345792 9223362250391155632 3577699408183553052 21300140553347.42 53771550.26565126 167966 9223372036854775806 4611686018427387904 1688477495230210408 +792887 4611699550286611812 9223164887726235740 7088177025760385824 56461952267903.89 92835869.96920013 125539 9223372036854775806 4611686018427387904 4850868151095058072 +3807842 4611710821592843606 9223283397553859544 5756765290752687660 58835559208469.4 39794091.419183925 97845 9223372036854775806 4611686018427387904 6845214684357194564 +25703952 4611784761593342388 9223241341744449690 4782279928971192568 65182094768443.91 9276773.708181158 73368 9223372036854775806 4611686018427387904 1384302533387727316 +716829 4611852156092872082 9223361623076951140 8613712481895484190 191445613359755.62 291083243.75407773 44993 9223372036854775806 4611686018427387904 6344483471397203854 +59183 4611730685242027332 9223354909338698162 18369075291092794110 429013599530392 5925109959.715378 42817 9223372036854775806 4611686018427387904 5909305558020042898 +33010362 4611704682869732882 9223092117352620518 9991152681891671022 257099731913529.5 12412830.045471078 38861 9223372036854775806 4611686018427387904 4672855013852508626 +800784 4611752907938305166 9223309994342931384 5251877538869750510 135472890315726.03 53535427.52018088 38767 9223372036854775806 4611686018427387904 7801864489649220514 +20810645 4611712185532639162 9223218900001937412 11803718472901310700 323593455407553 10496765.20741332 36477 9223372036854775806 4611686018427387904 5941995311893397960 +25843850 4611744529689964352 9223346023778617822 127137885677350808 3700925266420.715 18966925.191309396 34353 9223372036854775806 4611686018427387904 6700111718676827412 +23447120 4611796031755620254 9223329309291309758 1841522159325376278 54534534450526.42 6271211.193812284 33768 9223372036854775806 4611686018427387904 2325654077031843898 +14739804 4611762063154116632 9223007205463222212 16302703534054321116 506987919332451.8 6885575.861759452 32156 9223372036854775806 4611686018427387904 2114922310535979832 +32077710 4612033458080771112 9223352444952988904 421072759851674408 13955745719596.793 12220152.393889504 30172 9223372036854775806 4611686018427387904 4399934528735249092 
+22446879 4611846229717089436 9223124373140579096 6577134317587565298 224866980668999.47 2482202.163802278 29249 9223372036854775806 4611686018427387904 8763910740678180498 +170282 4611833225706935900 9223371583739401906 15764226366913732386 551447384017691 2515144222.953728 28587 9223372036854775806 4611686018427387904 8217388408377809010 +11482817 4611990575414646848 9223302669582414438 9828522700609834800 378121905921203.2 34845264.2080656 25993 9223372036854775806 4611686018427387904 4689180182672571856 +63469 4612175339998036670 9222961628400798084 17239621485933250238 663164390134376.5 7825349797.6059 25996 9223372036854775806 4611686018427387904 2067736879306995526 +29103473 4611744585914335132 9223035551850347954 12590190375872647672 525927999326314.7 26049107.15514301 23939 9223372036854775806 4611686018427387904 8318055464870862444 Aggregation without JIT compilation Simple functions -1704509 4611700827100483880 9223360787015464643 10441337359398154812 19954243669348.844 9648741.579254271 523264 -732797 4611701940806302259 9223355550934604746 977192643464016658 2054229034942.3723 51998323.94457991 475698 -598875 4611701407242345792 9223362250391155632 9312163881623734456 27615161624211.875 12261797.824844675 337212 -792887 4611699550286611812 9223290551912005343 6930300520201292824 27479710385933.586 53095331.60360441 252197 -3807842 4611710821592843606 9223326163906184987 16710274896338005145 85240848090850.69 22373416.533275086 196036 -25703952 4611709443519524003 9223353913449113943 9946868158853570839 67568783303242.086 3154349.826950714 147211 -716829 4611852156092872082 9223361623076951140 15381015774917924786 170693446547158.72 201431892.4773785 90109 -59183 4611730685242027332 9223354909338698162 8078812522502896568 94622946187035.42 1425270865.0901496 85379 -33010362 4611704682869732882 9223268545373999677 2064452191838585926 26532987929602.555 3695122.4062526934 77807 -800784 4611752907938305166 9223340418389788041 18082918611792817587 233352070043266.62 36535786.81446395 77492 -20810645 4611712185532639162 9223218900001937412 4996531385439292694 68246505203164.63 6316535.831023813 73213 -25843850 4611690025407720929 9223346023778617822 12755881190906812868 185015319325648.16 9962165.34831339 68945 -23447120 4611796031755620254 9223329309291309758 17231649548755339966 255019232629204.38 7937191.271698021 67570 -14739804 4611692230555590277 9223313509005166531 2458378896777063244 38308020331864.36 14590240.469105456 64174 -32077710 4611884228437061959 9223352444952988904 12965822147651192908 214467085941034.7 7257521.096258734 60456 -22446879 4611846229717089436 9223124373140579096 13530160492087688838 231724477077663.4 4737362.521046629 58389 -170282 4611833225706935900 9223371583739401906 8076893424988479310 141657635880324.8 1613795518.1065989 57017 -11482817 4611708000353743073 9223337838355779113 14841435427430843458 283531099960470.8 9938452.835998287 52345 -63469 4611695097019173921 9223353530156141191 6296784708578574520 120762239817777.88 579655378.4603049 52142 -29103473 4611744585914335132 9223333530281362537 5908285283932344933 123712996438970.34 867841.595541967 47758 +1704509 4611700827100483880 9223360787015464643 10441337359398154812 19954243669348.844 9648741.579254271 523264 9223372036854775807 4611686018427387904 4544239379628300646 +732797 4611701940806302259 9223355550934604746 977192643464016658 2054229034942.3723 51998323.94457991 475698 9223372036854775807 4611686018427387904 4091184823334377716 +598875 4611701407242345792 9223362250391155632 
9312163881623734456 27615161624211.875 12261797.824844675 337212 9223372036854775807 4611686018427387904 3725992504798702670 +792887 4611699550286611812 9223290551912005343 6930300520201292824 27479710385933.586 53095331.60360441 252197 9223372036854775807 4611686018427387904 6536441508464694614 +3807842 4611710821592843606 9223326163906184987 16710274896338005145 85240848090850.69 22373416.533275086 196036 9223372036854775807 4611686018427387904 1797862753609257231 +25703952 4611709443519524003 9223353913449113943 9946868158853570839 67568783303242.086 3154349.826950714 147211 9223372036854775807 4611686018427387904 8737124378202300429 +716829 4611852156092872082 9223361623076951140 15381015774917924786 170693446547158.72 201431892.4773785 90109 9223372036854775807 4611686018427387904 8209915323001116338 +59183 4611730685242027332 9223354909338698162 8078812522502896568 94622946187035.42 1425270865.0901496 85379 9223372036854775807 4611686018427387904 8909082036598843562 +33010362 4611704682869732882 9223268545373999677 2064452191838585926 26532987929602.555 3695122.4062526934 77807 9223372036854775807 4611686018427387904 5411365383789552292 +800784 4611752907938305166 9223340418389788041 18082918611792817587 233352070043266.62 36535786.81446395 77492 9223372036854775807 4611686018427387904 2059255810151375435 +20810645 4611712185532639162 9223218900001937412 4996531385439292694 68246505203164.63 6316535.831023813 73213 9223372036854775807 4611686018427387904 8852740550386113674 +25843850 4611690025407720929 9223346023778617822 12755881190906812868 185015319325648.16 9962165.34831339 68945 9223372036854775807 4611686018427387904 7849665866595760148 +23447120 4611796031755620254 9223329309291309758 17231649548755339966 255019232629204.38 7937191.271698021 67570 9223372036854775807 4611686018427387904 3435410911925610424 +14739804 4611692230555590277 9223313509005166531 2458378896777063244 38308020331864.36 14590240.469105456 64174 9223372036854775807 4611686018427387904 511910855240035342 +32077710 4611884228437061959 9223352444952988904 12965822147651192908 214467085941034.7 7257521.096258734 60456 9223372036854775807 4611686018427387904 2256071920672551964 +22446879 4611846229717089436 9223124373140579096 13530160492087688838 231724477077663.4 4737362.521046629 58389 9223372036854775807 4611686018427387904 6236276364886386410 +170282 4611833225706935900 9223371583739401906 8076893424988479310 141657635880324.8 1613795518.1065989 57017 9223372036854775807 4611686018427387904 4755775861151848768 +11482817 4611708000353743073 9223337838355779113 14841435427430843458 283531099960470.8 9938452.835998287 52345 9223372036854775807 4611686018427387904 5371586112642152558 +63469 4611695097019173921 9223353530156141191 6296784708578574520 120762239817777.88 579655378.4603049 52142 9223372036854775807 4611686018427387904 4150567963952988110 +29103473 4611744585914335132 9223333530281362537 5908285283932344933 123712996438970.34 867841.595541967 47758 9223372036854775807 4611686018427387904 3238284030821087319 Simple functions with non compilable function -1704509 4611700827100483880 9223360787015464643 10441337359398154812 4611686018427387904 19954243669348.844 9648741.579254271 523264 -732797 4611701940806302259 9223355550934604746 977192643464016658 4611686018427387904 2054229034942.3723 51998323.94457991 475698 -598875 4611701407242345792 9223362250391155632 9312163881623734456 4611686018427387904 27615161624211.875 12261797.824844675 337212 -792887 4611699550286611812 9223290551912005343 
6930300520201292824 4611686018427387904 27479710385933.586 53095331.60360441 252197 -3807842 4611710821592843606 9223326163906184987 16710274896338005145 4611686018427387904 85240848090850.69 22373416.533275086 196036 -25703952 4611709443519524003 9223353913449113943 9946868158853570839 4611686018427387904 67568783303242.086 3154349.826950714 147211 -716829 4611852156092872082 9223361623076951140 15381015774917924786 4611686018427387904 170693446547158.72 201431892.4773785 90109 -59183 4611730685242027332 9223354909338698162 8078812522502896568 4611686018427387904 94622946187035.42 1425270865.0901496 85379 -33010362 4611704682869732882 9223268545373999677 2064452191838585926 4611686018427387904 26532987929602.555 3695122.4062526934 77807 -800784 4611752907938305166 9223340418389788041 18082918611792817587 4611686018427387904 233352070043266.62 36535786.81446395 77492 -20810645 4611712185532639162 9223218900001937412 4996531385439292694 4611686018427387904 68246505203164.63 6316535.831023813 73213 -25843850 4611690025407720929 9223346023778617822 12755881190906812868 4611686018427387904 185015319325648.16 9962165.34831339 68945 -23447120 4611796031755620254 9223329309291309758 17231649548755339966 4611686018427387904 255019232629204.38 7937191.271698021 67570 -14739804 4611692230555590277 9223313509005166531 2458378896777063244 4611686018427387904 38308020331864.36 14590240.469105456 64174 -32077710 4611884228437061959 9223352444952988904 12965822147651192908 4611686018427387904 214467085941034.7 7257521.096258734 60456 -22446879 4611846229717089436 9223124373140579096 13530160492087688838 4611686018427387904 231724477077663.4 4737362.521046629 58389 -170282 4611833225706935900 9223371583739401906 8076893424988479310 4611686018427387904 141657635880324.8 1613795518.1065989 57017 -11482817 4611708000353743073 9223337838355779113 14841435427430843458 4611686018427387904 283531099960470.8 9938452.835998287 52345 -63469 4611695097019173921 9223353530156141191 6296784708578574520 4611686018427387904 120762239817777.88 579655378.4603049 52142 -29103473 4611744585914335132 9223333530281362537 5908285283932344933 4611686018427387904 123712996438970.34 867841.595541967 47758 +1704509 4611700827100483880 9223360787015464643 10441337359398154812 3620921835565807284859452 19954243669348.844 9648741.579254271 523264 9223372036854775807 4611686018427387904 4544239379628300646 +732797 4611701940806302259 9223355550934604746 977192643464016658 3289442827160604417733394 2054229034942.3723 51998323.94457991 475698 9223372036854775807 4611686018427387904 4091184823334377716 +598875 4611701407242345792 9223362250391155632 9312163881623734456 2330921446573746856380600 27615161624211.875 12261797.824844675 337212 9223372036854775807 4611686018427387904 3725992504798702670 +792887 4611699550286611812 9223290551912005343 6930300520201292824 1745179600137886041476120 27479710385933.586 53095331.60360441 252197 9223372036854775807 4611686018427387904 6536441508464694614 +3807842 4611710821592843606 9223326163906184987 16710274896338005145 1356295121550317411019929 85240848090850.69 22373416.533275086 196036 9223372036854775807 4611686018427387904 1797862753609257231 +25703952 4611709443519524003 9223353913449113943 9946868158853570839 1018731388338768841564439 67568783303242.086 3154349.826950714 147211 9223372036854775807 4611686018427387904 8737124378202300429 +716829 4611852156092872082 9223361623076951140 15381015774917924786 623810478612337115371442 170693446547158.72 201431892.4773785 90109 9223372036854775807 
4611686018427387904 8209915323001116338 +59183 4611730685242027332 9223354909338698162 8078812522502896568 589916507545680254024632 94622946187035.42 1425270865.0901496 85379 9223372036854775807 4611686018427387904 8909082036598843562 +33010362 4611704682869732882 9223268545373999677 2064452191838585926 538517864195994778911814 26532987929602.555 3695122.4062526934 77807 9223372036854775807 4611686018427387904 5411365383789552292 +800784 4611752907938305166 9223340418389788041 18082918611792817587 535545510122473785781683 233352070043266.62 36535786.81446395 77492 9223372036854775807 4611686018427387904 2059255810151375435 +20810645 4611712185532639162 9223218900001937412 4996531385439292694 506405014842860050255126 68246505203164.63 6316535.831023813 73213 9223372036854775807 4611686018427387904 8852740550386113674 +25843850 4611690025407720929 9223346023778617822 12755881190906812868 476547495537329753708996 185015319325648.16 9962165.34831339 68945 9223372036854775807 4611686018427387904 7849665866595760148 +23447120 4611796031755620254 9223329309291309758 17231649548755339966 467236365548464278670014 255019232629204.38 7937191.271698021 67570 9223372036854775807 4611686018427387904 3435410911925610424 +14739804 4611692230555590277 9223313509005166531 2458378896777063244 444126268697527941770060 38308020331864.36 14590240.469105456 64174 9223372036854775807 4611686018427387904 511910855240035342 +32077710 4611884228437061959 9223352444952988904 12965822147651192908 417407443977973675608140 214467085941034.7 7257521.096258734 60456 9223372036854775807 4611686018427387904 2256071920672551964 +22446879 4611846229717089436 9223124373140579096 13530160492087688838 403462269796593691082374 231724477077663.4 4737362.521046629 58389 9223372036854775807 4611686018427387904 6236276364886386410 +170282 4611833225706935900 9223371583739401906 8076893424988479310 394417911933408911581006 141657635880324.8 1613795518.1065989 57017 9223372036854775807 4611686018427387904 4755775861151848768 +11482817 4611708000353743073 9223337838355779113 14841435427430843458 361995300393829962204226 283531099960470.8 9938452.835998287 52345 9223372036854775807 4611686018427387904 5371586112642152558 +63469 4611695097019173921 9223353530156141191 6296784708578574520 360843057610541117735096 120762239817777.88 579655378.4603049 52142 9223372036854775807 4611686018427387904 4150567963952988110 +29103473 4611744585914335132 9223333530281362537 5908285283932344933 330534668598011678200421 123712996438970.34 867841.595541967 47758 9223372036854775807 4611686018427387904 3238284030821087319 Simple functions if combinator -1704509 4611700827100483880 9223310246721229500 16398241567152875142 2224726.7626273884 261874 -732797 4611721382223060002 9223355550934604746 16281585268876620522 5898616.931652982 237784 -598875 4611701407242345792 9223362250391155632 3577699408183553052 53771550.26565126 167966 -792887 4611699550286611812 9223164887726235740 7088177025760385824 92835869.96920013 125539 -3807842 4611710821592843606 9223283397553859544 5756765290752687660 39794091.419183925 97845 -25703952 4611784761593342388 9223241341744449690 4782279928971192568 9276773.708181158 73368 -716829 4611852156092872082 9223361623076951140 8613712481895484190 291083243.75407773 44993 -59183 4611730685242027332 9223354909338698162 18369075291092794110 5925109959.715378 42817 -33010362 4611704682869732882 9223092117352620518 9991152681891671022 12412830.045471078 38861 -800784 4611752907938305166 9223309994342931384 5251877538869750510 
53535427.52018088 38767 -20810645 4611712185532639162 9223218900001937412 11803718472901310700 10496765.20741332 36477 -25843850 4611744529689964352 9223346023778617822 127137885677350808 18966925.191309396 34353 -23447120 4611796031755620254 9223329309291309758 1841522159325376278 6271211.193812284 33768 -14739804 4611762063154116632 9223007205463222212 16302703534054321116 6885575.861759452 32156 -32077710 4612033458080771112 9223352444952988904 421072759851674408 12220152.393889504 30172 -22446879 4611846229717089436 9223124373140579096 6577134317587565298 2482202.163802278 29249 -170282 4611833225706935900 9223371583739401906 15764226366913732386 2515144222.953728 28587 -11482817 4611990575414646848 9223302669582414438 9828522700609834800 34845264.2080656 25993 -63469 4612175339998036670 9222961628400798084 17239621485933250238 7825349797.6059 25996 -29103473 4611744585914335132 9223035551850347954 12590190375872647672 26049107.15514301 23939 +1704509 4611700827100483880 9223310246721229500 16398241567152875142 62618822667209.71 2224726.7626273884 261874 9223372036854775806 4611686018427387904 4518874482384062894 +732797 4611721382223060002 9223355550934604746 16281585268876620522 68472164943295.68 5898616.931652982 237784 9223372036854775806 4611686018427387904 3641900047478154650 +598875 4611701407242345792 9223362250391155632 3577699408183553052 21300140553347.42 53771550.26565126 167966 9223372036854775806 4611686018427387904 1688477495230210408 +792887 4611699550286611812 9223164887726235740 7088177025760385824 56461952267903.89 92835869.96920013 125539 9223372036854775806 4611686018427387904 4850868151095058072 +3807842 4611710821592843606 9223283397553859544 5756765290752687660 58835559208469.4 39794091.419183925 97845 9223372036854775806 4611686018427387904 6845214684357194564 +25703952 4611784761593342388 9223241341744449690 4782279928971192568 65182094768443.91 9276773.708181158 73368 9223372036854775806 4611686018427387904 1384302533387727316 +716829 4611852156092872082 9223361623076951140 8613712481895484190 191445613359755.62 291083243.75407773 44993 9223372036854775806 4611686018427387904 6344483471397203854 +59183 4611730685242027332 9223354909338698162 18369075291092794110 429013599530392 5925109959.715378 42817 9223372036854775806 4611686018427387904 5909305558020042898 +33010362 4611704682869732882 9223092117352620518 9991152681891671022 257099731913529.5 12412830.045471078 38861 9223372036854775806 4611686018427387904 4672855013852508626 +800784 4611752907938305166 9223309994342931384 5251877538869750510 135472890315726.03 53535427.52018088 38767 9223372036854775806 4611686018427387904 7801864489649220514 +20810645 4611712185532639162 9223218900001937412 11803718472901310700 323593455407553 10496765.20741332 36477 9223372036854775806 4611686018427387904 5941995311893397960 +25843850 4611744529689964352 9223346023778617822 127137885677350808 3700925266420.715 18966925.191309396 34353 9223372036854775806 4611686018427387904 6700111718676827412 +23447120 4611796031755620254 9223329309291309758 1841522159325376278 54534534450526.42 6271211.193812284 33768 9223372036854775806 4611686018427387904 2325654077031843898 +14739804 4611762063154116632 9223007205463222212 16302703534054321116 506987919332451.8 6885575.861759452 32156 9223372036854775806 4611686018427387904 2114922310535979832 +32077710 4612033458080771112 9223352444952988904 421072759851674408 13955745719596.793 12220152.393889504 30172 9223372036854775806 4611686018427387904 4399934528735249092 +22446879 
4611846229717089436 9223124373140579096 6577134317587565298 224866980668999.47 2482202.163802278 29249 9223372036854775806 4611686018427387904 8763910740678180498 +170282 4611833225706935900 9223371583739401906 15764226366913732386 551447384017691 2515144222.953728 28587 9223372036854775806 4611686018427387904 8217388408377809010 +11482817 4611990575414646848 9223302669582414438 9828522700609834800 378121905921203.2 34845264.2080656 25993 9223372036854775806 4611686018427387904 4689180182672571856 +63469 4612175339998036670 9222961628400798084 17239621485933250238 663164390134376.5 7825349797.6059 25996 9223372036854775806 4611686018427387904 2067736879306995526 +29103473 4611744585914335132 9223035551850347954 12590190375872647672 525927999326314.7 26049107.15514301 23939 9223372036854775806 4611686018427387904 8318055464870862444 diff --git a/tests/queries/1_stateful/00165_jit_aggregate_functions.sql b/tests/queries/1_stateful/00165_jit_aggregate_functions.sql index 90917209d1b..6c13c6e4d42 100644 --- a/tests/queries/1_stateful/00165_jit_aggregate_functions.sql +++ b/tests/queries/1_stateful/00165_jit_aggregate_functions.sql @@ -5,18 +5,52 @@ SELECT 'Aggregation using JIT compilation'; SELECT 'Simple functions'; -SELECT CounterID, min(WatchID), max(WatchID), sum(WatchID), avg(WatchID), avgWeighted(WatchID, CounterID), count(WatchID) FROM test.hits +SELECT + CounterID, + min(WatchID), + max(WatchID), + sum(WatchID), + avg(WatchID), + avgWeighted(WatchID, CounterID), + count(WatchID), + groupBitOr(WatchID), + groupBitAnd(WatchID), + groupBitXor(WatchID) +FROM test.hits GROUP BY CounterID ORDER BY count() DESC LIMIT 20; SELECT 'Simple functions with non compilable function'; -SELECT CounterID, min(WatchID), max(WatchID), sum(WatchID), groupBitAnd(WatchID), avg(WatchID), avgWeighted(WatchID, CounterID), count(WatchID) FROM test.hits +SELECT + CounterID, + min(WatchID), + max(WatchID), + sum(WatchID), + sum(toUInt128(WatchID)), + avg(WatchID), + avgWeighted(WatchID, CounterID), + count(WatchID), + groupBitOr(WatchID), + groupBitAnd(WatchID), + groupBitXor(WatchID) +FROM test.hits GROUP BY CounterID ORDER BY count() DESC LIMIT 20; SELECT 'Simple functions if combinator'; WITH (WatchID % 2 == 0) AS predicate -SELECT CounterID, minIf(WatchID,predicate), maxIf(WatchID, predicate), sumIf(WatchID, predicate), avgIf(WatchID, predicate), avgWeightedIf(WatchID, CounterID, predicate), countIf(WatchID, predicate) FROM test.hits +SELECT + CounterID, + minIf(WatchID,predicate), + maxIf(WatchID, predicate), + sumIf(WatchID, predicate), + avgIf(WatchID, predicate), + avgWeightedIf(WatchID, CounterID, predicate), + countIf(WatchID, predicate), + groupBitOrIf(WatchID, predicate), + groupBitAndIf(WatchID, predicate), + groupBitXorIf(WatchID, predicate) +FROM test.hits GROUP BY CounterID ORDER BY count() DESC LIMIT 20; SET compile_aggregate_expressions = 0; @@ -25,15 +59,49 @@ SELECT 'Aggregation without JIT compilation'; SELECT 'Simple functions'; -SELECT CounterID, min(WatchID), max(WatchID), sum(WatchID), avg(WatchID), avgWeighted(WatchID, CounterID), count(WatchID) FROM test.hits +SELECT + CounterID, + min(WatchID), + max(WatchID), + sum(WatchID), + avg(WatchID), + avgWeighted(WatchID, CounterID), + count(WatchID), + groupBitOr(WatchID), + groupBitAnd(WatchID), + groupBitXor(WatchID) +FROM test.hits GROUP BY CounterID ORDER BY count() DESC LIMIT 20; SELECT 'Simple functions with non compilable function'; -SELECT CounterID, min(WatchID), max(WatchID), sum(WatchID), groupBitAnd(WatchID), avg(WatchID), 
avgWeighted(WatchID, CounterID), count(WatchID) FROM test.hits +SELECT + CounterID, + min(WatchID), + max(WatchID), + sum(WatchID), + sum(toUInt128(WatchID)), + avg(WatchID), + avgWeighted(WatchID, CounterID), + count(WatchID), + groupBitOr(WatchID), + groupBitAnd(WatchID), + groupBitXor(WatchID) +FROM test.hits GROUP BY CounterID ORDER BY count() DESC LIMIT 20; SELECT 'Simple functions if combinator'; WITH (WatchID % 2 == 0) AS predicate -SELECT CounterID, minIf(WatchID,predicate), maxIf(WatchID, predicate), sumIf(WatchID, predicate), avgWeightedIf(WatchID, CounterID, predicate), countIf(WatchID, predicate) FROM test.hits +SELECT + CounterID, + minIf(WatchID,predicate), + maxIf(WatchID, predicate), + sumIf(WatchID, predicate), + avgIf(WatchID, predicate), + avgWeightedIf(WatchID, CounterID, predicate), + countIf(WatchID, predicate), + groupBitOrIf(WatchID, predicate), + groupBitAndIf(WatchID, predicate), + groupBitXorIf(WatchID, predicate) +FROM test.hits GROUP BY CounterID ORDER BY count() DESC LIMIT 20; From 856892a81754b73ad54030860f24d6060e3f7fb6 Mon Sep 17 00:00:00 2001 From: alexey-milovidov Date: Sat, 10 Jul 2021 05:46:11 +0300 Subject: [PATCH 904/931] Update geoToH3.cpp --- src/Functions/geoToH3.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Functions/geoToH3.cpp b/src/Functions/geoToH3.cpp index d269f9a3a24..2dad8fc13f2 100644 --- a/src/Functions/geoToH3.cpp +++ b/src/Functions/geoToH3.cpp @@ -83,7 +83,7 @@ public: LatLng coord; coord.lng = degsToRads(lon); coord.lat = degsToRads(lat); - + H3Index hindex; H3Error err = latLngToCell(&coord, res, &hindex); if (err) From 354a57aea87fe57627e53571d89a180909657972 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sat, 10 Jul 2021 05:49:36 +0300 Subject: [PATCH 905/931] Drop Arcadia --- src/Functions/ya.make | 3 +-- src/Functions/ya.make.in | 3 +-- 2 files changed, 2 insertions(+), 4 deletions(-) diff --git a/src/Functions/ya.make b/src/Functions/ya.make index d6da7eadd35..c78ef1908d1 100644 --- a/src/Functions/ya.make +++ b/src/Functions/ya.make @@ -4,13 +4,12 @@ OWNER(g:clickhouse) LIBRARY() CFLAGS( - -DUSE_H3 -DUSE_SSL -DUSE_XXHASH + -DUSE_SSL -DUSE_XXHASH ) ADDINCL( library/cpp/consistent_hashing contrib/libs/farmhash - contrib/libs/h3/h3lib/include contrib/libs/hyperscan/src contrib/libs/libdivide contrib/libs/rapidjson/include diff --git a/src/Functions/ya.make.in b/src/Functions/ya.make.in index f75773fb47e..cfc58b7bf5d 100644 --- a/src/Functions/ya.make.in +++ b/src/Functions/ya.make.in @@ -3,13 +3,12 @@ OWNER(g:clickhouse) LIBRARY() CFLAGS( - -DUSE_H3 -DUSE_SSL -DUSE_XXHASH + -DUSE_SSL -DUSE_XXHASH ) ADDINCL( library/cpp/consistent_hashing contrib/libs/farmhash - contrib/libs/h3/h3lib/include contrib/libs/hyperscan/src contrib/libs/libdivide contrib/libs/rapidjson/include From ed3b30115845052c799f7d81b08d787ad3ac1e3c Mon Sep 17 00:00:00 2001 From: robot-clickhouse Date: Sat, 10 Jul 2021 08:18:57 +0300 Subject: [PATCH 906/931] Auto version update to [21.8.1.7409] [54453] --- cmake/autogenerated_versions.txt | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/cmake/autogenerated_versions.txt b/cmake/autogenerated_versions.txt index 49cf30d2556..3249eb765c5 100644 --- a/cmake/autogenerated_versions.txt +++ b/cmake/autogenerated_versions.txt @@ -6,7 +6,7 @@ SET(VERSION_REVISION 54453) SET(VERSION_MAJOR 21) SET(VERSION_MINOR 8) SET(VERSION_PATCH 1) -SET(VERSION_GITHASH fb895056568e26200629c7d19626e92d2dedc70d) -SET(VERSION_DESCRIBE v21.8.1.1-prestable) -SET(VERSION_STRING 21.8.1.1) 
+SET(VERSION_GITHASH f48c5af90c2ad51955d1ee3b6b05d006b03e4238) +SET(VERSION_DESCRIBE v21.8.1.7409-prestable) +SET(VERSION_STRING 21.8.1.7409) # end of autochange From 8ae8b26954d07ec1bdadaeae73b2aaf0e19c2ea0 Mon Sep 17 00:00:00 2001 From: robot-clickhouse Date: Sat, 10 Jul 2021 08:22:52 +0300 Subject: [PATCH 907/931] Auto version update to [21.9.1.1] [54454] --- cmake/autogenerated_versions.txt | 8 ++++---- debian/changelog | 4 ++-- docker/client/Dockerfile | 2 +- docker/server/Dockerfile | 2 +- docker/test/Dockerfile | 2 +- .../System/StorageSystemContributors.generated.cpp | 13 +++++++++++++ 6 files changed, 22 insertions(+), 9 deletions(-) diff --git a/cmake/autogenerated_versions.txt b/cmake/autogenerated_versions.txt index 3249eb765c5..18072566d04 100644 --- a/cmake/autogenerated_versions.txt +++ b/cmake/autogenerated_versions.txt @@ -2,11 +2,11 @@ # NOTE: has nothing common with DBMS_TCP_PROTOCOL_VERSION, # only DBMS_TCP_PROTOCOL_VERSION should be incremented on protocol changes. -SET(VERSION_REVISION 54453) +SET(VERSION_REVISION 54454) SET(VERSION_MAJOR 21) -SET(VERSION_MINOR 8) +SET(VERSION_MINOR 9) SET(VERSION_PATCH 1) SET(VERSION_GITHASH f48c5af90c2ad51955d1ee3b6b05d006b03e4238) -SET(VERSION_DESCRIBE v21.8.1.7409-prestable) -SET(VERSION_STRING 21.8.1.7409) +SET(VERSION_DESCRIBE v21.9.1.1-prestable) +SET(VERSION_STRING 21.9.1.1) # end of autochange diff --git a/debian/changelog b/debian/changelog index 36c29fce1d0..38f740ae062 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,5 +1,5 @@ -clickhouse (21.8.1.1) unstable; urgency=low +clickhouse (21.9.1.1) unstable; urgency=low * Modified source code - -- clickhouse-release Mon, 28 Jun 2021 00:50:15 +0300 + -- clickhouse-release Sat, 10 Jul 2021 08:22:49 +0300 diff --git a/docker/client/Dockerfile b/docker/client/Dockerfile index 19cadccb926..f17fa8ade16 100644 --- a/docker/client/Dockerfile +++ b/docker/client/Dockerfile @@ -1,7 +1,7 @@ FROM ubuntu:18.04 ARG repository="deb https://repo.clickhouse.tech/deb/stable/ main/" -ARG version=21.8.1.* +ARG version=21.9.1.* RUN apt-get update \ && apt-get install --yes --no-install-recommends \ diff --git a/docker/server/Dockerfile b/docker/server/Dockerfile index 65d90bf52ce..5da9e703f4d 100644 --- a/docker/server/Dockerfile +++ b/docker/server/Dockerfile @@ -1,7 +1,7 @@ FROM ubuntu:20.04 ARG repository="deb https://repo.clickhouse.tech/deb/stable/ main/" -ARG version=21.8.1.* +ARG version=21.9.1.* ARG gosu_ver=1.10 # set non-empty deb_location_url url to create a docker image diff --git a/docker/test/Dockerfile b/docker/test/Dockerfile index 687393025f0..5768753cd7c 100644 --- a/docker/test/Dockerfile +++ b/docker/test/Dockerfile @@ -1,7 +1,7 @@ FROM ubuntu:18.04 ARG repository="deb https://repo.clickhouse.tech/deb/stable/ main/" -ARG version=21.8.1.* +ARG version=21.9.1.* RUN apt-get update && \ apt-get install -y apt-transport-https dirmngr && \ diff --git a/src/Storages/System/StorageSystemContributors.generated.cpp b/src/Storages/System/StorageSystemContributors.generated.cpp index f45acb0efd9..bed8eadc19c 100644 --- a/src/Storages/System/StorageSystemContributors.generated.cpp +++ b/src/Storages/System/StorageSystemContributors.generated.cpp @@ -95,6 +95,7 @@ const char * auto_contributors[] { "Anatoly Pugachev", "ana-uvarova", "AnaUvarova", + "Andreas Hunkeler", "AndreevDm", "Andrei Bodrov", "Andrei Chulkov", @@ -280,6 +281,7 @@ const char * auto_contributors[] { "Dongdong Yang", "DoomzD", "Dr. 
Strange Looker",
+    "d.v.semenov",
     "eaxdev",
     "eejoin",
     "egatov",
@@ -290,6 +292,7 @@ const char * auto_contributors[] {
     "Eldar Zaitov",
     "Elena Baskakova",
     "elenaspb2019",
+    "elevankoff",
     "Elghazal Ahmed",
     "Elizaveta Mironyuk",
     "emakarov",
@@ -434,6 +437,7 @@ const char * auto_contributors[] {
     "Ivan Starkov",
     "ivanzhukov",
     "Ivan Zhukov",
+    "Jack Song",
     "JackyWoo",
     "Jacob Hayes",
     "jakalletti",
@@ -476,6 +480,7 @@ const char * auto_contributors[] {
     "Konstantin Lebedev",
     "Konstantin Malanchev",
     "Konstantin Podshumok",
+    "Konstantin Rudenskii",
     "Korenevskiy Denis",
     "Korviakov Andrey",
     "koshachy",
@@ -488,6 +493,7 @@ const char * auto_contributors[] {
     "kshvakov",
     "kssenii",
     "l",
+    "l1tsolaiki",
     "lalex",
     "Latysheva Alexandra",
     "lehasm",
@@ -515,6 +521,7 @@ const char * auto_contributors[] {
     "long2ice",
     "Lopatin Konstantin",
     "Loud_Scream",
+    "ltybc-coder",
     "luc1ph3r",
     "Lucid Dreams",
     "Luis Bosque",
@@ -633,6 +640,7 @@ const char * auto_contributors[] {
     "nicelulu",
     "Nickita",
     "Nickolay Yastrebov",
+    "nickzhwang",
     "Nicolae Vartolomei",
     "Nico Mandery",
     "Nico Piderman",
@@ -871,6 +879,7 @@ const char * auto_contributors[] {
     "Veselkov Konstantin",
     "vic",
     "vicdashkov",
+    "Victor",
     "Victor Tarnavsky",
     "Viktor Taranenko",
     "vinity",
@@ -947,6 +956,7 @@ const char * auto_contributors[] {
     "Yuriy Korzhenevskiy",
     "Yury Karpovich",
     "Yury Stankevich",
+    "ywill3",
     "zamulla",
     "zhang2014",
     "zhangshengyu",
@@ -957,11 +967,13 @@ const char * auto_contributors[] {
     "Zhichun Wu",
     "Zhipeng",
     "zhukai",
+    "Zijie Lu",
     "zlx19950903",
     "Zoran Pandovski",
     "zvonand",
     "zvrr",
     "zvvr",
+    "zxc111",
     "zzsmdfj",
     "Артем Стрельцов",
     "Владислав Тихонов",
@@ -980,6 +992,7 @@ const char * auto_contributors[] {
     "张风啸",
     "徐炘",
     "曲正鹏",
+    "未来星___费",
     "极客青年",
     "谢磊",
     "贾顺名(Jarvis)",

From a6d0cda7c19f5284c26535eb5ed5efa26f9a95d6 Mon Sep 17 00:00:00 2001
From: Alexey Milovidov
Date: Sat, 10 Jul 2021 10:12:18 +0300
Subject: [PATCH 908/931] Merging #24404

---
 docs/en/operations/configuration-files.md | 2 +-
 src/Common/Config/ConfigProcessor.cpp     | 4 ++--
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/docs/en/operations/configuration-files.md b/docs/en/operations/configuration-files.md
index 28e520fece5..5c942efc77f 100644
--- a/docs/en/operations/configuration-files.md
+++ b/docs/en/operations/configuration-files.md
@@ -32,7 +32,7 @@ XML substitution example:
-
+
diff --git a/src/Common/Config/ConfigProcessor.cpp b/src/Common/Config/ConfigProcessor.cpp
index 346abce8600..03ee76240cb 100644
--- a/src/Common/Config/ConfigProcessor.cpp
+++ b/src/Common/Config/ConfigProcessor.cpp
@@ -302,14 +302,14 @@ void ConfigProcessor::doIncludesRecursive(
     }
 
     if (substs_count > 1) /// only one substitution is allowed
-        throw Poco::Exception("several substitutions attributes set for element <" + node->nodeName() + ">");
+        throw Poco::Exception("More than one substitution attribute is set for element <" + node->nodeName() + ">");
 
     if (node->nodeName() == "include")
     {
         if (node->hasChildNodes())
             throw Poco::Exception("<include> element must have no children");
 
         if (substs_count == 0)
-            throw Poco::Exception("no substitution attributes set for element <include>, must have one");
+            throw Poco::Exception("No substitution attributes set for element <include>, must have exactly one");
     }
 
     /// Replace the original contents, not add to it.
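For context, a substitution attribute tells the config processor where an element's content comes from, and the check changed above enforces that at most one such attribute appears on a single element. A minimal illustration — the `incl` and `from_zk` attributes are real ClickHouse config attributes, but the element names and paths here are made up, and the last line is exactly the case the new error message rejects:

```xml
<!-- OK: content is taken from the substitution named in `incl`. -->
<macros incl="macros_for_this_host" />

<!-- OK: subtree is fetched from ZooKeeper at the given path. -->
<profiles from_zk="/clickhouse/profiles" />

<!-- Rejected: "More than one substitution attribute is set for element <users>". -->
<users incl="users_config" from_zk="/clickhouse/users" />
```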
From 4ed170a652c3eb0d470c061b4385e8fad2a505d7 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sat, 10 Jul 2021 10:14:21 +0300 Subject: [PATCH 909/931] Added failing test #24404 --- .../0_stateless/01942_untuple_transformers_msan.reference | 0 tests/queries/0_stateless/01942_untuple_transformers_msan.sql | 1 + 2 files changed, 1 insertion(+) create mode 100644 tests/queries/0_stateless/01942_untuple_transformers_msan.reference create mode 100644 tests/queries/0_stateless/01942_untuple_transformers_msan.sql diff --git a/tests/queries/0_stateless/01942_untuple_transformers_msan.reference b/tests/queries/0_stateless/01942_untuple_transformers_msan.reference new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/queries/0_stateless/01942_untuple_transformers_msan.sql b/tests/queries/0_stateless/01942_untuple_transformers_msan.sql new file mode 100644 index 00000000000..c1be25d34ac --- /dev/null +++ b/tests/queries/0_stateless/01942_untuple_transformers_msan.sql @@ -0,0 +1 @@ +SELECT untuple(tuple(100.0000991821289)), NULL, untuple((toDateTime(9223372036854775806, -1, NULL, NULL, toDateTime(NULL, NULL)), * EXCEPT b)), NULL FROM (SELECT 1 AS a, 1024, NULL AS b); From efbc3087394ae82b38a41e797ecfd6cc2275b1fc Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sat, 10 Jul 2021 10:27:48 +0300 Subject: [PATCH 910/931] Remove harmful code and fix crash --- src/Interpreters/ActionsVisitor.cpp | 7 +------ .../0_stateless/01942_untuple_transformers_msan.reference | 1 + 2 files changed, 2 insertions(+), 6 deletions(-) diff --git a/src/Interpreters/ActionsVisitor.cpp b/src/Interpreters/ActionsVisitor.cpp index 7aad11252cb..03fa756276e 100644 --- a/src/Interpreters/ActionsVisitor.cpp +++ b/src/Interpreters/ActionsVisitor.cpp @@ -686,7 +686,7 @@ ASTs ActionsMatcher::doUntuple(const ASTFunction * function, ActionsMatcher::Dat ASTs columns; size_t tid = 0; - for (const auto & name : tuple_type->getElementNames()) + for (const auto & name [[maybe_unused]] : tuple_type->getElementNames()) { auto tuple_ast = function->arguments->children[0]; if (tid != 0) @@ -697,11 +697,6 @@ ASTs ActionsMatcher::doUntuple(const ASTFunction * function, ActionsMatcher::Dat auto func = makeASTFunction("tupleElement", tuple_ast, literal); - if (tuple_type->haveExplicitNames()) - func->setAlias(name); - else - func->setAlias(data.getUniqueName("_ut_" + name)); - auto function_builder = FunctionFactory::instance().get(func->name, data.getContext()); data.addFunction(function_builder, {tuple_name_type->name, literal->getColumnName(data.getContext()->getSettingsRef())}, func->getColumnName(data.getContext()->getSettingsRef())); diff --git a/tests/queries/0_stateless/01942_untuple_transformers_msan.reference b/tests/queries/0_stateless/01942_untuple_transformers_msan.reference index e69de29bb2d..82dea36febd 100644 --- a/tests/queries/0_stateless/01942_untuple_transformers_msan.reference +++ b/tests/queries/0_stateless/01942_untuple_transformers_msan.reference @@ -0,0 +1 @@ +100.0000991821289 \N \N 1 1024 \N From e95d67b8e23e2906c18895a8593b89df20b417bf Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sat, 10 Jul 2021 10:29:30 +0300 Subject: [PATCH 911/931] Update tests after removing harmful code --- tests/queries/0_stateless/01232_untuple.reference | 2 +- tests/queries/0_stateless/01616_untuple_access_field.reference | 2 +- tests/queries/0_stateless/01616_untuple_access_field.sql | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/tests/queries/0_stateless/01232_untuple.reference 
b/tests/queries/0_stateless/01232_untuple.reference index 44f96e1decd..21fd0c4a8a5 100644 --- a/tests/queries/0_stateless/01232_untuple.reference +++ b/tests/queries/0_stateless/01232_untuple.reference @@ -2,7 +2,7 @@ hello 1 3 world 9 9 (0,1) -key v1 v2 v3 v4 v5 +key tupleElement(argMax(tuple(v1, v2, v3, v4, v5), v1), 1) tupleElement(argMax(tuple(v1, v2, v3, v4, v5), v1), 2) tupleElement(argMax(tuple(v1, v2, v3, v4, v5), v1), 3) tupleElement(argMax(tuple(v1, v2, v3, v4, v5), v1), 4) tupleElement(argMax(tuple(v1, v2, v3, v4, v5), v1), 5) 4 10 20 10 20 30 3 70 20 10 20 30 2 11 20 10 20 30 diff --git a/tests/queries/0_stateless/01616_untuple_access_field.reference b/tests/queries/0_stateless/01616_untuple_access_field.reference index d00491fd7e5..9874d6464ab 100644 --- a/tests/queries/0_stateless/01616_untuple_access_field.reference +++ b/tests/queries/0_stateless/01616_untuple_access_field.reference @@ -1 +1 @@ -1 +1 2 diff --git a/tests/queries/0_stateless/01616_untuple_access_field.sql b/tests/queries/0_stateless/01616_untuple_access_field.sql index 569efca5349..82cdf80c8bc 100644 --- a/tests/queries/0_stateless/01616_untuple_access_field.sql +++ b/tests/queries/0_stateless/01616_untuple_access_field.sql @@ -1 +1 @@ -select _ut_1 from (select untuple((1,2))); +select * from (select untuple((1,2))); From 9ca38235aa2f0e1d6b552625da40f4ee3d5e5ff7 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sat, 10 Jul 2021 11:29:08 +0300 Subject: [PATCH 912/931] Correct fix for #26041 --- src/Functions/URL/FirstSignificantSubdomainCustomImpl.h | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/src/Functions/URL/FirstSignificantSubdomainCustomImpl.h b/src/Functions/URL/FirstSignificantSubdomainCustomImpl.h index 4670d610725..ba39eeb5e69 100644 --- a/src/Functions/URL/FirstSignificantSubdomainCustomImpl.h +++ b/src/Functions/URL/FirstSignificantSubdomainCustomImpl.h @@ -41,6 +41,9 @@ public: String getName() const override { return name; } size_t getNumberOfArguments() const override { return 2; } + bool useDefaultImplementationForConstants() const override { return true; } + ColumnNumbers getArgumentsThatAreAlwaysConstant() const override { return {1}; } + DataTypePtr getReturnTypeImpl(const ColumnsWithTypeAndName & arguments) const override { if (!isString(arguments[0].type)) @@ -65,8 +68,6 @@ public: const ColumnConst * column_tld_list_name = checkAndGetColumnConstStringOrFixedString(arguments[1].column.get()); FirstSignificantSubdomainCustomLookup tld_lookup(column_tld_list_name->getValue()); - /// FIXME: convertToFullColumnIfConst() is suboptimal - auto column = arguments[0].column->convertToFullColumnIfConst(); if (const ColumnString * col = checkAndGetColumn(*column)) { auto col_res = ColumnString::create(); From ed34844d021df8293140e7101601ee4194b8c7f8 Mon Sep 17 00:00:00 2001 From: alesapin Date: Sat, 10 Jul 2021 11:42:25 +0300 Subject: [PATCH 913/931] Fix build --- src/Coordination/KeeperStorage.cpp | 33 +++++++++++++----------- src/Coordination/ZooKeeperDataReader.cpp | 2 +- 2 files changed, 19 insertions(+), 16 deletions(-) diff --git a/src/Coordination/KeeperStorage.cpp b/src/Coordination/KeeperStorage.cpp index 6a57dd63ff3..4c3f649a6b6 100644 --- a/src/Coordination/KeeperStorage.cpp +++ b/src/Coordination/KeeperStorage.cpp @@ -405,6 +405,24 @@ struct KeeperStorageGetRequest final : public KeeperStorageRequest } }; +namespace +{ + /// Garbage required to apply log to "fuzzy" zookeeper snapshot + void updateParentPzxid(const std::string & child_path, int64_t zxid, 
KeeperStorage::Container & container) + { + auto parent_path = parentPath(child_path); + auto parent_it = container.find(parent_path); + if (parent_it != container.end()) + { + container.updateValue(parent_path, [zxid](KeeperStorage::Node & parent) + { + if (parent.stat.pzxid < zxid) + parent.stat.pzxid = zxid; + }); + } + } +} + struct KeeperStorageRemoveRequest final : public KeeperStorageRequest { bool checkAuth(KeeperStorage & storage, int64_t session_id) const override @@ -422,21 +440,6 @@ struct KeeperStorageRemoveRequest final : public KeeperStorageRequest return checkACL(Coordination::ACL::Delete, node_acls, session_auths); } - /// Garbage required to apply log to "fuzzy" zookeeper snapshot - void updateParentPzxid(const std::string & child_path, int64_t zxid, KeeperStorage::Container & container) const - { - auto parent_path = parentPath(child_path); - auto parent_it = container.find(parent_path); - if (parent_it != container.end()) - { - container.updateValue(parent_path, [zxid](KeeperStorage::Node & parent) - { - if (parent.stat.pzxid < zxid) - parent.stat.pzxid = zxid; - }); - } - } - using KeeperStorageRequest::KeeperStorageRequest; std::pair process(KeeperStorage & storage, int64_t zxid, int64_t /*session_id*/) const override { diff --git a/src/Coordination/ZooKeeperDataReader.cpp b/src/Coordination/ZooKeeperDataReader.cpp index bb2eb550ddf..cf644110786 100644 --- a/src/Coordination/ZooKeeperDataReader.cpp +++ b/src/Coordination/ZooKeeperDataReader.cpp @@ -181,7 +181,7 @@ void deserializeKeeperStorageFromSnapshot(KeeperStorage & storage, const std::st /// We have incorrect state of storage where some random log entries from future were applied.... /// /// In ZooKeeper they say that their transactions log is idempotent and can be applied to "fuzzy" state as is. - /// It's true but there is no any general invariant which produces this property. They just have banch of ad-hoc "if" which detects + /// It's true but there is no any general invariant which produces this property. They just have ad-hoc "if's" which detects /// "fuzzy" state inconsistencies and apply log records in special way. 
Several examples: /// https://github.com/apache/zookeeper/blob/master/zookeeper-server/src/main/java/org/apache/zookeeper/server/DataTree.java#L453-L463 /// https://github.com/apache/zookeeper/blob/master/zookeeper-server/src/main/java/org/apache/zookeeper/server/DataTree.java#L476-L480 From ba1442532b4e59007ecda55785aeadaa5ab3eb5a Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sat, 10 Jul 2021 11:43:28 +0300 Subject: [PATCH 914/931] Fix build --- src/Functions/URL/FirstSignificantSubdomainCustomImpl.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Functions/URL/FirstSignificantSubdomainCustomImpl.h b/src/Functions/URL/FirstSignificantSubdomainCustomImpl.h index ba39eeb5e69..08576fe59ec 100644 --- a/src/Functions/URL/FirstSignificantSubdomainCustomImpl.h +++ b/src/Functions/URL/FirstSignificantSubdomainCustomImpl.h @@ -68,7 +68,7 @@ public: const ColumnConst * column_tld_list_name = checkAndGetColumnConstStringOrFixedString(arguments[1].column.get()); FirstSignificantSubdomainCustomLookup tld_lookup(column_tld_list_name->getValue()); - if (const ColumnString * col = checkAndGetColumn(*column)) + if (const ColumnString * col = checkAndGetColumn(*arguments[0].column)) { auto col_res = ColumnString::create(); vector(tld_lookup, col->getChars(), col->getOffsets(), col_res->getChars(), col_res->getOffsets()); From 0d4e0bb8fd8429c01e19c121bdc18c521d4a1ee5 Mon Sep 17 00:00:00 2001 From: alesapin Date: Sat, 10 Jul 2021 11:50:03 +0300 Subject: [PATCH 915/931] Fix links to nowhere --- docs/en/operations/clickhouse-keeper.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/en/operations/clickhouse-keeper.md b/docs/en/operations/clickhouse-keeper.md index 3dec4c74088..6af12eb9b01 100644 --- a/docs/en/operations/clickhouse-keeper.md +++ b/docs/en/operations/clickhouse-keeper.md @@ -5,7 +5,7 @@ toc_title: ClickHouse Keeper # [pre-production] clickhouse-keeper -ClickHouse server use [ZooKeeper](https://zookeeper.apache.org/) coordination system for data [replication](../../engines/table-engines/mergetree-family/replication/) and [distributed DDL](../../sql-reference/distributed-ddl/) queries execution. ClickHouse Keeper is an alternative coordination system compatible with ZooKeeper. +ClickHouse server use [ZooKeeper](https://zookeeper.apache.org/) coordination system for data [replication](../engines/table-engines/mergetree-family/replication.md) and [distributed DDL](../sql-reference/distributed-ddl.md) queries execution. ClickHouse Keeper is an alternative coordination system compatible with ZooKeeper. !!! warning "Warning" This feature currently in pre-production stage. We test it in our CI and on small internal installations. 
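The FirstSignificantSubdomainCustom change a few patches back relies on a convention worth spelling out: when a function declares useDefaultImplementationForConstants() and every argument turns out to be constant, the framework unfolds the constants (except the indices returned by getArgumentsThatAreAlwaysConstant()) to one-row full columns, calls executeImpl, and re-wraps the result as const; in all other cases argument 0 already arrives as a full column. Either way executeImpl never needs the deleted convertToFullColumnIfConst call. The sketch below models that contract with simplified stand-in types — Column and execute here are hypothetical, not ClickHouse's real IFunction interface.

// Simplified model of the constants convention used in patches 912/914.
#include <cstdio>
#include <set>
#include <string>
#include <vector>

struct Column
{
    bool is_const = false;
    std::vector<std::string> values; // a const column stores a single value

    Column materialize(size_t rows) const
    {
        if (!is_const)
            return *this;
        return Column{false, std::vector<std::string>(rows, values.at(0))};
    }
};

// executeImpl may assume argument 0 is a full column and argument 1 is const,
// so no convertToFullColumnIfConst is needed inside the function body.
Column executeImpl(const std::vector<Column> & args, size_t rows)
{
    const std::string & tld_list_name = args.at(1).values.at(0); // always-constant argument, read once
    Column result;
    for (size_t row = 0; row < rows; ++row)
        result.values.push_back(args.at(0).values.at(row) + " / " + tld_list_name);
    return result;
}

// Roughly what the framework's default implementation for constants does:
// materialize every const argument except the always-constant indices ({1}).
Column execute(std::vector<Column> args, size_t rows)
{
    const std::set<size_t> always_const{1};
    for (size_t i = 0; i < args.size(); ++i)
        if (!always_const.count(i))
            args[i] = args[i].materialize(rows);
    return executeImpl(args, rows);
}

int main()
{
    Column urls{true, {"https://example.com"}};    // const URL column, will be unfolded
    Column tld_list{true, {"public_suffix_list"}}; // stays const: argument 1 is always constant
    Column out = execute({urls, tld_list}, 3);
    std::printf("%zu rows\n", out.values.size());  // 3 rows
    return 0;
}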
From d93fb6c93fa7e818685788cff85f8fab78f3b8d9 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sat, 10 Jul 2021 11:54:43 +0300 Subject: [PATCH 916/931] Drop Arcadia --- src/Functions/ya.make | 1 + src/Functions/ya.make.in | 1 + 2 files changed, 2 insertions(+) diff --git a/src/Functions/ya.make b/src/Functions/ya.make index 600b34e4bbf..2db4a7645a1 100644 --- a/src/Functions/ya.make +++ b/src/Functions/ya.make @@ -10,6 +10,7 @@ CFLAGS( ADDINCL( library/cpp/consistent_hashing contrib/libs/farmhash + contrib/libs/h3/h3lib/include contrib/libs/hyperscan/src contrib/libs/libdivide contrib/libs/rapidjson/include diff --git a/src/Functions/ya.make.in b/src/Functions/ya.make.in index cfc58b7bf5d..b21bf64304a 100644 --- a/src/Functions/ya.make.in +++ b/src/Functions/ya.make.in @@ -9,6 +9,7 @@ CFLAGS( ADDINCL( library/cpp/consistent_hashing contrib/libs/farmhash + contrib/libs/h3/h3lib/include contrib/libs/hyperscan/src contrib/libs/libdivide contrib/libs/rapidjson/include From eb5ca241ecaf1ae6ccdb4394dda64d9e1d858c2d Mon Sep 17 00:00:00 2001 From: robot-clickhouse Date: Sat, 10 Jul 2021 13:51:03 +0300 Subject: [PATCH 917/931] Update version_date.tsv after release 21.6.7.57 --- utils/list-versions/version_date.tsv | 1 + 1 file changed, 1 insertion(+) diff --git a/utils/list-versions/version_date.tsv b/utils/list-versions/version_date.tsv index 3b12363712a..1cb9d58ec2d 100644 --- a/utils/list-versions/version_date.tsv +++ b/utils/list-versions/version_date.tsv @@ -1,4 +1,5 @@ v21.7.2.7-stable 2021-07-09 +v21.6.7.57-stable 2021-07-09 v21.6.6.51-stable 2021-07-02 v21.6.5.37-stable 2021-06-19 v21.6.4.26-stable 2021-06-11 From f45869ab44cf8c93dd012f35fb6edc224b4d0cc2 Mon Sep 17 00:00:00 2001 From: robot-clickhouse Date: Sat, 10 Jul 2021 15:04:34 +0300 Subject: [PATCH 918/931] Update version_date.tsv after release 21.3.15.4 --- utils/list-versions/version_date.tsv | 1 + 1 file changed, 1 insertion(+) diff --git a/utils/list-versions/version_date.tsv b/utils/list-versions/version_date.tsv index 1cb9d58ec2d..a8079ec89f6 100644 --- a/utils/list-versions/version_date.tsv +++ b/utils/list-versions/version_date.tsv @@ -13,6 +13,7 @@ v21.4.6.55-stable 2021-04-30 v21.4.5.46-stable 2021-04-24 v21.4.4.30-stable 2021-04-16 v21.4.3.21-stable 2021-04-12 +v21.3.15.4-stable 2021-07-10 v21.3.14.1-lts 2021-07-01 v21.3.13.9-lts 2021-06-22 v21.3.12.2-lts 2021-05-25 From 36de61c6d04da6554f3902ebeb0fe73521ed4104 Mon Sep 17 00:00:00 2001 From: Maksim Kita Date: Sat, 10 Jul 2021 16:37:32 +0300 Subject: [PATCH 919/931] Fixed performance test --- tests/performance/jit_aggregate_functions.xml | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/tests/performance/jit_aggregate_functions.xml b/tests/performance/jit_aggregate_functions.xml index 88ac46b2da8..3e99f6d9615 100644 --- a/tests/performance/jit_aggregate_functions.xml +++ b/tests/performance/jit_aggregate_functions.xml @@ -227,8 +227,8 @@ {function}(WatchID), {function}(CounterID), {function}(ClientIP), - {function}(GoodEvent), - {function}(CounterClass) + {function}(IPNetworkID), + {function}(SearchEngineID) FROM hits_100m_single GROUP BY intHash32(UserID) % {group_scale} FORMAT Null @@ -240,8 +240,8 @@ {function}(CounterID), sum(toUInt256(ClientIP)), {function}(ClientIP), - {function}(GoodEvent), - {function}(CounterClass) + {function}(IPNetworkID), + {function}(SearchEngineID) FROM hits_100m_single GROUP BY intHash32(UserID) % {group_scale} FORMAT Null @@ -276,8 +276,8 @@ {function}If(WatchID, predicate), {function}If(CounterID, 
predicate), {function}If(ClientIP, predicate), - {function}If(GoodEvent, predicate), - {function}If(CounterClass, predicate) + {function}If(IPNetworkID, predicate), + {function}If(SearchEngineID, predicate) FROM hits_100m_single GROUP BY intHash32(UserID) % {group_scale} FORMAT Null @@ -290,8 +290,8 @@ {function}If(CounterID, predicate), sumIf(toUInt256(ClientIP), predicate), {function}If(ClientIP, predicate), - {function}If(GoodEvent, predicate), - {function}If(CounterClass, predicate) + {function}If(IPNetworkID, predicate), + {function}If(SearchEngineID, predicate) FROM hits_100m_single GROUP BY intHash32(UserID) % {group_scale} FORMAT Null From f0cd4dd467a5c721954a2e6bc95f28171ab02b40 Mon Sep 17 00:00:00 2001 From: robot-clickhouse Date: Sat, 10 Jul 2021 17:01:18 +0300 Subject: [PATCH 920/931] Update version_date.tsv after release 21.5.9.4 --- utils/list-versions/version_date.tsv | 1 + 1 file changed, 1 insertion(+) diff --git a/utils/list-versions/version_date.tsv b/utils/list-versions/version_date.tsv index a8079ec89f6..c46c393c630 100644 --- a/utils/list-versions/version_date.tsv +++ b/utils/list-versions/version_date.tsv @@ -4,6 +4,7 @@ v21.6.6.51-stable 2021-07-02 v21.6.5.37-stable 2021-06-19 v21.6.4.26-stable 2021-06-11 v21.6.3.14-stable 2021-06-04 +v21.5.9.4-stable 2021-07-10 v21.5.8.21-stable 2021-07-02 v21.5.7.9-stable 2021-06-22 v21.5.6.6-stable 2021-05-29 From 103b860555d52787d90da2e8f07a87bde2c6e6fb Mon Sep 17 00:00:00 2001 From: alexey-milovidov Date: Sat, 10 Jul 2021 23:12:32 +0300 Subject: [PATCH 921/931] Update tuple-functions.md --- docs/en/sql-reference/functions/tuple-functions.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/docs/en/sql-reference/functions/tuple-functions.md b/docs/en/sql-reference/functions/tuple-functions.md index 4189d0feeb5..39e59ae2ba9 100644 --- a/docs/en/sql-reference/functions/tuple-functions.md +++ b/docs/en/sql-reference/functions/tuple-functions.md @@ -87,6 +87,8 @@ Result: └───────┴───────┘ ``` +Note: the names are implementation specific and are subject to change. You should not assume specific names of the columns after application of the `untuple`. + Example of using an `EXCEPT` expression: Query: From 30826c7d7d871a4def46fb4732f13ad3c708e7d0 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sun, 11 Jul 2021 03:33:14 +0300 Subject: [PATCH 922/931] Return O_DIRECT --- src/Common/ProfileEvents.cpp | 2 + src/IO/createReadBufferFromFileBase.cpp | 63 ++++++++++++++++++------- 2 files changed, 49 insertions(+), 16 deletions(-) diff --git a/src/Common/ProfileEvents.cpp b/src/Common/ProfileEvents.cpp index 0b91db5b972..915d14466b6 100644 --- a/src/Common/ProfileEvents.cpp +++ b/src/Common/ProfileEvents.cpp @@ -43,6 +43,8 @@ M(MarkCacheHits, "") \ M(MarkCacheMisses, "") \ M(CreatedReadBufferOrdinary, "") \ + M(CreatedReadBufferDirectIO, "") \ + M(CreatedReadBufferDirectIOFailed, "") \ M(CreatedReadBufferMMap, "") \ M(CreatedReadBufferMMapFailed, "") \ M(DiskReadElapsedMicroseconds, "Total time spent waiting for read syscall. 
This include reads from page cache.") \ diff --git a/src/IO/createReadBufferFromFileBase.cpp b/src/IO/createReadBufferFromFileBase.cpp index c6c284f888a..4882b4a2068 100644 --- a/src/IO/createReadBufferFromFileBase.cpp +++ b/src/IO/createReadBufferFromFileBase.cpp @@ -3,8 +3,6 @@ #include #include -#include - namespace ProfileEvents { @@ -38,30 +36,63 @@ std::unique_ptr createReadBufferFromFileBase( } } - ProfileEvents::increment(ProfileEvents::CreatedReadBufferOrdinary); - auto res = std::make_unique(filename, buffer_size, flags, existing_memory, alignment); - +#if defined(OS_LINUX) || defined(__FreeBSD__) if (direct_io_threshold && estimated_size >= direct_io_threshold) { -#if defined(OS_LINUX) - /** We don't use O_DIRECT because it is tricky and previous implementation has a bug. - * Instead, we advise the OS that the data should not be cached. - * This is not exactly the same for two reasons: - * - extra copying from page cache to userspace is not eliminated; - * - if data is already in cache, it is purged. + /** O_DIRECT + * The O_DIRECT flag may impose alignment restrictions on the length and address of user-space buffers and the file offset of I/Os. + * In Linux alignment restrictions vary by filesystem and kernel version and might be absent entirely. + * However there is currently no filesystem-independent interface for an application to discover these restrictions + * for a given file or filesystem. Some filesystems provide their own interfaces for doing so, for example the + * XFS_IOC_DIOINFO operation in xfsctl(3). * - * NOTE: Better to rewrite it with userspace page cache. + * Under Linux 2.4, transfer sizes, and the alignment of the user buffer and the file offset must all be + * multiples of the logical block size of the filesystem. Since Linux 2.6.0, alignment to the logical block size + * of the underlying storage (typically 512 bytes) suffices. + * + * - man 2 open */ + constexpr size_t min_alignment = DEFAULT_AIO_FILE_BLOCK_SIZE; - if (0 != posix_fadvise(res->getFD(), 0, 0, POSIX_FADV_DONTNEED)) - LOG_WARNING(&Poco::Logger::get("createReadBufferFromFileBase"), - "Cannot request 'posix_fadvise' with POSIX_FADV_DONTNEED for file {}", filename); + auto align_up = [=](size_t value) { return (value + min_alignment - 1) / min_alignment * min_alignment; }; + + if (alignment % min_alignment) + { + alignment = align_up(alignment); + } + + if (buffer_size % min_alignment) + { + existing_memory = nullptr; /// Cannot reuse existing memory is it has unaligned size. + buffer_size = align_up(buffer_size); + } + + if (reinterpret_cast(existing_memory) % min_alignment) + { + existing_memory = nullptr; /// Cannot reuse existing memory is it has unaligned offset. + } + + /// Attempt to open a file with O_DIRECT + try + { + auto res = std::make_unique( + filename, buffer_size, (flags == -1 ? O_RDONLY | O_CLOEXEC : flags) | O_DIRECT, existing_memory, alignment); + ProfileEvents::increment(ProfileEvents::CreatedReadBufferDirectIO); + return res; + } + catch (const ErrnoException &) + { + /// Fallback to cached IO if O_DIRECT is not supported. 
+ ProfileEvents::increment(ProfileEvents::CreatedReadBufferDirectIOFailed); + } } #else (void)direct_io_threshold; + (void)estimated_size; #endif - return res; + ProfileEvents::increment(ProfileEvents::CreatedReadBufferOrdinary); + return std::make_unique(filename, buffer_size, flags, existing_memory, alignment); } } From 4df8de76c85154429e8cbb4ba46af6c87048f8af Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sun, 11 Jul 2021 03:34:00 +0300 Subject: [PATCH 923/931] Fix #24124 --- src/Storages/System/StorageSystemStackTrace.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Storages/System/StorageSystemStackTrace.cpp b/src/Storages/System/StorageSystemStackTrace.cpp index 8b119492340..7a8ee75803f 100644 --- a/src/Storages/System/StorageSystemStackTrace.cpp +++ b/src/Storages/System/StorageSystemStackTrace.cpp @@ -223,7 +223,7 @@ void StorageSystemStackTrace::fillData(MutableColumns & res_columns, ContextPtr, { constexpr size_t comm_buf_size = 32; /// More than enough for thread name ReadBufferFromFile comm(thread_name_path.string(), comm_buf_size); - readStringUntilEOF(thread_name, comm); + readEscapedStringUntilEOL(thread_name, comm); comm.close(); } From 29624436ff578b97a5e749dd012ade00f0634b10 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sun, 11 Jul 2021 03:35:43 +0300 Subject: [PATCH 924/931] Return back O_DIRECT --- src/IO/ReadBufferFromFileDescriptor.cpp | 20 ++++++++++++++++---- src/IO/ReadBufferFromFileDescriptor.h | 5 +++-- 2 files changed, 19 insertions(+), 6 deletions(-) diff --git a/src/IO/ReadBufferFromFileDescriptor.cpp b/src/IO/ReadBufferFromFileDescriptor.cpp index 893c2bcb5d8..4a3194e99c8 100644 --- a/src/IO/ReadBufferFromFileDescriptor.cpp +++ b/src/IO/ReadBufferFromFileDescriptor.cpp @@ -128,8 +128,8 @@ off_t ReadBufferFromFileDescriptor::seek(off_t offset, int whence) if (new_pos + (working_buffer.end() - pos) == file_offset_of_buffer_end) return new_pos; - // file_offset_of_buffer_end corresponds to working_buffer.end(); it's a past-the-end pos, - // so the second inequality is strict. + /// file_offset_of_buffer_end corresponds to working_buffer.end(); it's a past-the-end pos, + /// so the second inequality is strict. if (file_offset_of_buffer_end - working_buffer.size() <= static_cast(new_pos) && new_pos < file_offset_of_buffer_end) { @@ -142,19 +142,31 @@ off_t ReadBufferFromFileDescriptor::seek(off_t offset, int whence) } else { + size_t seek_pos = required_alignment > 1 + ? new_pos / required_alignment * required_alignment + : new_pos; + + size_t offset_after_seek_pos = new_pos - seek_pos; + + assert(new_offset_in_buffer < required_alignment); + ProfileEvents::increment(ProfileEvents::Seek); Stopwatch watch(profile_callback ? 
clock_type : CLOCK_MONOTONIC); pos = working_buffer.end(); - off_t res = ::lseek(fd, new_pos, SEEK_SET); + off_t res = ::lseek(fd, seek_pos, SEEK_SET); if (-1 == res) throwFromErrnoWithPath("Cannot seek through file " + getFileName(), getFileName(), ErrorCodes::CANNOT_SEEK_THROUGH_FILE); - file_offset_of_buffer_end = new_pos; watch.stop(); ProfileEvents::increment(ProfileEvents::DiskReadElapsedMicroseconds, watch.elapsedMicroseconds()); + file_offset_of_buffer_end = new_pos; + + if (offset_after_seek_pos > 0) + ignore(offset_after_seek_pos); + return res; } } diff --git a/src/IO/ReadBufferFromFileDescriptor.h b/src/IO/ReadBufferFromFileDescriptor.h index 1883c6802bc..5a79193445e 100644 --- a/src/IO/ReadBufferFromFileDescriptor.h +++ b/src/IO/ReadBufferFromFileDescriptor.h @@ -14,8 +14,9 @@ namespace DB class ReadBufferFromFileDescriptor : public ReadBufferFromFileBase { protected: + const size_t required_alignment = 0; /// For O_DIRECT both file offsets and memory addresses have to be aligned. + size_t file_offset_of_buffer_end = 0; /// What offset in file corresponds to working_buffer.end(). int fd; - size_t file_offset_of_buffer_end; /// What offset in file corresponds to working_buffer.end(). bool nextImpl() override; @@ -24,7 +25,7 @@ protected: public: ReadBufferFromFileDescriptor(int fd_, size_t buf_size = DBMS_DEFAULT_BUFFER_SIZE, char * existing_memory = nullptr, size_t alignment = 0) - : ReadBufferFromFileBase(buf_size, existing_memory, alignment), fd(fd_), file_offset_of_buffer_end(0) {} + : ReadBufferFromFileBase(buf_size, existing_memory, alignment), required_alignment(alignment), fd(fd_) {} int getFD() const { From 1ce40d6629fda992abe3935bde00370ccadf0293 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sun, 11 Jul 2021 04:01:23 +0300 Subject: [PATCH 925/931] Fix bad code (default function argument) --- src/Interpreters/AsynchronousMetrics.cpp | 2 +- src/Storages/MergeTree/MergeTreeData.cpp | 6 ++---- src/Storages/MergeTree/MergeTreeData.h | 5 ++--- src/Storages/MergeTree/ReplicatedMergeTreeCleanupThread.cpp | 2 +- src/Storages/StorageMergeTree.cpp | 2 +- 5 files changed, 7 insertions(+), 10 deletions(-) diff --git a/src/Interpreters/AsynchronousMetrics.cpp b/src/Interpreters/AsynchronousMetrics.cpp index aca92b8866d..88e5c72a01a 100644 --- a/src/Interpreters/AsynchronousMetrics.cpp +++ b/src/Interpreters/AsynchronousMetrics.cpp @@ -1179,7 +1179,7 @@ void AsynchronousMetrics::update(std::chrono::system_clock::time_point update_ti total_number_of_parts += table_merge_tree->getPartsCount(); } - if (StorageReplicatedMergeTree * table_replicated_merge_tree = dynamic_cast(table.get())) + if (StorageReplicatedMergeTree * table_replicated_merge_tree = typeid_cast(table.get())) { StorageReplicatedMergeTree::Status status; table_replicated_merge_tree->getStatus(status, false); diff --git a/src/Storages/MergeTree/MergeTreeData.cpp b/src/Storages/MergeTree/MergeTreeData.cpp index f311d58b7af..06838a077f7 100644 --- a/src/Storages/MergeTree/MergeTreeData.cpp +++ b/src/Storages/MergeTree/MergeTreeData.cpp @@ -1088,7 +1088,7 @@ static bool isOldPartDirectory(const DiskPtr & disk, const String & directory_pa } -void MergeTreeData::clearOldTemporaryDirectories(ssize_t custom_directories_lifetime_seconds) +void MergeTreeData::clearOldTemporaryDirectories(size_t custom_directories_lifetime_seconds) { /// If the method is already called from another thread, then we don't need to do anything. 
std::unique_lock lock(clear_old_temporary_directories_mutex, std::defer_lock); @@ -1097,9 +1097,7 @@ void MergeTreeData::clearOldTemporaryDirectories(ssize_t custom_directories_life const auto settings = getSettings(); time_t current_time = time(nullptr); - ssize_t deadline = (custom_directories_lifetime_seconds >= 0) - ? current_time - custom_directories_lifetime_seconds - : current_time - settings->temporary_directories_lifetime.totalSeconds(); + ssize_t deadline = current_time - custom_directories_lifetime_seconds; /// Delete temporary directories older than a day. for (const auto & [path, disk] : getRelativeDataPathsWithDisks()) diff --git a/src/Storages/MergeTree/MergeTreeData.h b/src/Storages/MergeTree/MergeTreeData.h index a6ece4a7a98..55739a4d009 100644 --- a/src/Storages/MergeTree/MergeTreeData.h +++ b/src/Storages/MergeTree/MergeTreeData.h @@ -526,9 +526,8 @@ public: void clearOldWriteAheadLogs(); /// Delete all directories which names begin with "tmp" - /// Set non-negative parameter value to override MergeTreeSettings temporary_directories_lifetime - /// Must be called with locked lockForShare() because use relative_data_path. - void clearOldTemporaryDirectories(ssize_t custom_directories_lifetime_seconds = -1); + /// Must be called with locked lockForShare() because it's using relative_data_path. + void clearOldTemporaryDirectories(size_t custom_directories_lifetime_seconds); void clearEmptyParts(); diff --git a/src/Storages/MergeTree/ReplicatedMergeTreeCleanupThread.cpp b/src/Storages/MergeTree/ReplicatedMergeTreeCleanupThread.cpp index 10e2d77eb27..06856c73888 100644 --- a/src/Storages/MergeTree/ReplicatedMergeTreeCleanupThread.cpp +++ b/src/Storages/MergeTree/ReplicatedMergeTreeCleanupThread.cpp @@ -62,7 +62,7 @@ void ReplicatedMergeTreeCleanupThread::iterate() /// Both use relative_data_path which changes during rename, so we /// do it under share lock storage.clearOldWriteAheadLogs(); - storage.clearOldTemporaryDirectories(); + storage.clearOldTemporaryDirectories(storage.getSettings()->temporary_directories_lifetime.totalSeconds()); } /// This is loose condition: no problem if we actually had lost leadership at this moment diff --git a/src/Storages/StorageMergeTree.cpp b/src/Storages/StorageMergeTree.cpp index 55ccd60ea38..05d18e65068 100644 --- a/src/Storages/StorageMergeTree.cpp +++ b/src/Storages/StorageMergeTree.cpp @@ -1068,7 +1068,7 @@ bool StorageMergeTree::scheduleDataProcessingJob(IBackgroundJobExecutor & execut /// All use relative_data_path which changes during rename /// so execute under share lock. 
clearOldPartsFromFilesystem(); - clearOldTemporaryDirectories(); + clearOldTemporaryDirectories(getSettings()->temporary_directories_lifetime.totalSeconds()); clearOldWriteAheadLogs(); clearOldMutations(); clearEmptyParts(); From 2d02a3a45dfc9fd801f93bc8e781e78c8f5aa04e Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sun, 11 Jul 2021 05:43:54 +0300 Subject: [PATCH 926/931] Fix errors --- src/IO/ReadBufferFromFileDescriptor.cpp | 2 +- src/IO/createReadBufferFromFileBase.cpp | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/src/IO/ReadBufferFromFileDescriptor.cpp b/src/IO/ReadBufferFromFileDescriptor.cpp index 4a3194e99c8..093c160730f 100644 --- a/src/IO/ReadBufferFromFileDescriptor.cpp +++ b/src/IO/ReadBufferFromFileDescriptor.cpp @@ -162,7 +162,7 @@ off_t ReadBufferFromFileDescriptor::seek(off_t offset, int whence) watch.stop(); ProfileEvents::increment(ProfileEvents::DiskReadElapsedMicroseconds, watch.elapsedMicroseconds()); - file_offset_of_buffer_end = new_pos; + file_offset_of_buffer_end = seek_pos; if (offset_after_seek_pos > 0) ignore(offset_after_seek_pos); diff --git a/src/IO/createReadBufferFromFileBase.cpp b/src/IO/createReadBufferFromFileBase.cpp index 4882b4a2068..11a0937ee48 100644 --- a/src/IO/createReadBufferFromFileBase.cpp +++ b/src/IO/createReadBufferFromFileBase.cpp @@ -56,10 +56,10 @@ std::unique_ptr createReadBufferFromFileBase( auto align_up = [=](size_t value) { return (value + min_alignment - 1) / min_alignment * min_alignment; }; - if (alignment % min_alignment) - { + if (alignment == 0) + alignment = min_alignment; + else if (alignment % min_alignment) alignment = align_up(alignment); - } if (buffer_size % min_alignment) { From d3cdae121125cc37427d5d7ef678c53e4c8d47f4 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sun, 11 Jul 2021 22:07:17 +0300 Subject: [PATCH 927/931] Fix build --- src/IO/ReadBufferFromFileDescriptor.cpp | 2 -- 1 file changed, 2 deletions(-) diff --git a/src/IO/ReadBufferFromFileDescriptor.cpp b/src/IO/ReadBufferFromFileDescriptor.cpp index 093c160730f..c3b7f33f533 100644 --- a/src/IO/ReadBufferFromFileDescriptor.cpp +++ b/src/IO/ReadBufferFromFileDescriptor.cpp @@ -148,8 +148,6 @@ off_t ReadBufferFromFileDescriptor::seek(off_t offset, int whence) size_t offset_after_seek_pos = new_pos - seek_pos; - assert(new_offset_in_buffer < required_alignment); - ProfileEvents::increment(ProfileEvents::Seek); Stopwatch watch(profile_callback ? 
clock_type : CLOCK_MONOTONIC); From 7e932c2504a0c382a39b3bc0f270af4c4f832c36 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sun, 11 Jul 2021 22:07:46 +0300 Subject: [PATCH 928/931] Fix Arcadia --- src/IO/ya.make | 1 - 1 file changed, 1 deletion(-) diff --git a/src/IO/ya.make b/src/IO/ya.make index d8bdfa95295..bca108ca426 100644 --- a/src/IO/ya.make +++ b/src/IO/ya.make @@ -44,7 +44,6 @@ SRCS( NullWriteBuffer.cpp PeekableReadBuffer.cpp Progress.cpp - ReadBufferAIO.cpp ReadBufferFromFile.cpp ReadBufferFromFileBase.cpp ReadBufferFromFileDecorator.cpp From 9e4f6a01ac5bf519b1dbfc0025ad8b219f4ceb59 Mon Sep 17 00:00:00 2001 From: robot-clickhouse Date: Sun, 11 Jul 2021 22:53:38 +0300 Subject: [PATCH 929/931] Update version_date.tsv after release 20.8.19.4 --- utils/list-versions/version_date.tsv | 1 + 1 file changed, 1 insertion(+) diff --git a/utils/list-versions/version_date.tsv b/utils/list-versions/version_date.tsv index c46c393c630..afa6b9c8a25 100644 --- a/utils/list-versions/version_date.tsv +++ b/utils/list-versions/version_date.tsv @@ -71,6 +71,7 @@ v20.9.5.5-stable 2020-11-13 v20.9.4.76-stable 2020-10-29 v20.9.3.45-stable 2020-10-09 v20.9.2.20-stable 2020-09-22 +v20.8.19.4-stable 2021-07-10 v20.8.18.32-lts 2021-04-16 v20.8.17.25-lts 2021-04-08 v20.8.16.20-lts 2021-04-06 From e0effad4d108efa96239bfc651e72df0fe72560b Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Mon, 12 Jul 2021 03:24:38 +0300 Subject: [PATCH 930/931] Fix tiny issue --- src/IO/ReadBufferFromFileDescriptor.cpp | 1 + 1 file changed, 1 insertion(+) diff --git a/src/IO/ReadBufferFromFileDescriptor.cpp b/src/IO/ReadBufferFromFileDescriptor.cpp index c3b7f33f533..d124a4bad66 100644 --- a/src/IO/ReadBufferFromFileDescriptor.cpp +++ b/src/IO/ReadBufferFromFileDescriptor.cpp @@ -181,6 +181,7 @@ void ReadBufferFromFileDescriptor::rewind() /// Clearing the buffer with existing data. New data will be read on subsequent call to 'next'. working_buffer.resize(0); pos = working_buffer.begin(); + file_offset_of_buffer_end = 0; } From 822cc0fec3e8450f98f2cbfcd8a598b9f74bf800 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Mon, 12 Jul 2021 04:12:34 +0300 Subject: [PATCH 931/931] Lowered amount of syscalls in AsynchronousMetrics --- src/IO/ReadBufferFromFile.h | 14 ++++++ src/IO/ReadBufferFromFileDescriptor.cpp | 63 +++++++++++++++++------- src/IO/ReadBufferFromFileDescriptor.h | 24 +++++++-- src/Interpreters/AsynchronousMetrics.cpp | 22 ++++----- src/Interpreters/AsynchronousMetrics.h | 24 ++++----- 5 files changed, 102 insertions(+), 45 deletions(-) diff --git a/src/IO/ReadBufferFromFile.h b/src/IO/ReadBufferFromFile.h index 33365bc7ceb..676f53afeb8 100644 --- a/src/IO/ReadBufferFromFile.h +++ b/src/IO/ReadBufferFromFile.h @@ -46,4 +46,18 @@ public: } }; + +/** Similar to ReadBufferFromFile but it is using 'pread' instead of 'read'. 
+ */ +class ReadBufferFromFilePRead : public ReadBufferFromFile +{ +public: + ReadBufferFromFilePRead(const std::string & file_name_, size_t buf_size = DBMS_DEFAULT_BUFFER_SIZE, int flags = -1, + char * existing_memory = nullptr, size_t alignment = 0) + : ReadBufferFromFile(file_name_, buf_size, flags, existing_memory, alignment) + { + use_pread = true; + } +}; + } diff --git a/src/IO/ReadBufferFromFileDescriptor.cpp b/src/IO/ReadBufferFromFileDescriptor.cpp index d124a4bad66..fdb538d4a49 100644 --- a/src/IO/ReadBufferFromFileDescriptor.cpp +++ b/src/IO/ReadBufferFromFileDescriptor.cpp @@ -59,7 +59,11 @@ bool ReadBufferFromFileDescriptor::nextImpl() ssize_t res = 0; { CurrentMetrics::Increment metric_increment{CurrentMetrics::Read}; - res = ::read(fd, internal_buffer.begin(), internal_buffer.size()); + + if (use_pread) + res = ::pread(fd, internal_buffer.begin(), internal_buffer.size(), file_offset_of_buffer_end); + else + res = ::read(fd, internal_buffer.begin(), internal_buffer.size()); } if (!res) break; @@ -133,7 +137,8 @@ off_t ReadBufferFromFileDescriptor::seek(off_t offset, int whence) if (file_offset_of_buffer_end - working_buffer.size() <= static_cast(new_pos) && new_pos < file_offset_of_buffer_end) { - /// Position is still inside buffer. + /// Position is still inside the buffer. + pos = working_buffer.end() - file_offset_of_buffer_end + new_pos; assert(pos >= working_buffer.begin()); assert(pos < working_buffer.end()); @@ -142,41 +147,61 @@ off_t ReadBufferFromFileDescriptor::seek(off_t offset, int whence) } else { - size_t seek_pos = required_alignment > 1 + /// Position is out of the buffer, we need to do real seek. + off_t seek_pos = required_alignment > 1 ? new_pos / required_alignment * required_alignment : new_pos; - size_t offset_after_seek_pos = new_pos - seek_pos; - - ProfileEvents::increment(ProfileEvents::Seek); - Stopwatch watch(profile_callback ? clock_type : CLOCK_MONOTONIC); + off_t offset_after_seek_pos = new_pos - seek_pos; + /// First put position at the end of the buffer so the next read will fetch new data to the buffer. pos = working_buffer.end(); - off_t res = ::lseek(fd, seek_pos, SEEK_SET); - if (-1 == res) - throwFromErrnoWithPath("Cannot seek through file " + getFileName(), getFileName(), - ErrorCodes::CANNOT_SEEK_THROUGH_FILE); - watch.stop(); - ProfileEvents::increment(ProfileEvents::DiskReadElapsedMicroseconds, watch.elapsedMicroseconds()); + /// In case of using 'pread' we just update the info about the next position in file. + /// In case of using 'read' we call 'lseek'. + + /// We account both cases as seek event as it leads to non-contiguous reads from file. + ProfileEvents::increment(ProfileEvents::Seek); + + if (!use_pread) + { + Stopwatch watch(profile_callback ? clock_type : CLOCK_MONOTONIC); + + off_t res = ::lseek(fd, seek_pos, SEEK_SET); + if (-1 == res) + throwFromErrnoWithPath("Cannot seek through file " + getFileName(), getFileName(), + ErrorCodes::CANNOT_SEEK_THROUGH_FILE); + + /// Also note that seeking past the file size is not allowed. 
+ if (res != seek_pos) + throw Exception(ErrorCodes::CANNOT_SEEK_THROUGH_FILE, + "The 'lseek' syscall returned value ({}) that is not expected ({})", res, seek_pos); + + watch.stop(); + ProfileEvents::increment(ProfileEvents::DiskReadElapsedMicroseconds, watch.elapsedMicroseconds()); + } file_offset_of_buffer_end = seek_pos; if (offset_after_seek_pos > 0) ignore(offset_after_seek_pos); - return res; + return seek_pos; } } void ReadBufferFromFileDescriptor::rewind() { - ProfileEvents::increment(ProfileEvents::Seek); - off_t res = ::lseek(fd, 0, SEEK_SET); - if (-1 == res) - throwFromErrnoWithPath("Cannot seek through file " + getFileName(), getFileName(), - ErrorCodes::CANNOT_SEEK_THROUGH_FILE); + if (!use_pread) + { + ProfileEvents::increment(ProfileEvents::Seek); + off_t res = ::lseek(fd, 0, SEEK_SET); + if (-1 == res) + throwFromErrnoWithPath("Cannot seek through file " + getFileName(), getFileName(), + ErrorCodes::CANNOT_SEEK_THROUGH_FILE); + } + /// In case of pread, the ProfileEvents::Seek is not accounted, but it's Ok. /// Clearing the buffer with existing data. New data will be read on subsequent call to 'next'. working_buffer.resize(0); diff --git a/src/IO/ReadBufferFromFileDescriptor.h b/src/IO/ReadBufferFromFileDescriptor.h index 5a79193445e..84970820abf 100644 --- a/src/IO/ReadBufferFromFileDescriptor.h +++ b/src/IO/ReadBufferFromFileDescriptor.h @@ -14,8 +14,10 @@ namespace DB class ReadBufferFromFileDescriptor : public ReadBufferFromFileBase { protected: - const size_t required_alignment = 0; /// For O_DIRECT both file offsets and memory addresses have to be aligned. - size_t file_offset_of_buffer_end = 0; /// What offset in file corresponds to working_buffer.end(). + const size_t required_alignment = 0; /// For O_DIRECT both file offsets and memory addresses have to be aligned. + bool use_pread = false; /// To access one fd from multiple threads, use 'pread' syscall instead of 'read'. + + size_t file_offset_of_buffer_end = 0; /// What offset in file corresponds to working_buffer.end(). int fd; bool nextImpl() override; @@ -25,7 +27,9 @@ protected: public: ReadBufferFromFileDescriptor(int fd_, size_t buf_size = DBMS_DEFAULT_BUFFER_SIZE, char * existing_memory = nullptr, size_t alignment = 0) - : ReadBufferFromFileBase(buf_size, existing_memory, alignment), required_alignment(alignment), fd(fd_) {} + : ReadBufferFromFileBase(buf_size, existing_memory, alignment), required_alignment(alignment), fd(fd_) + { + } int getFD() const { @@ -46,9 +50,23 @@ public: off_t size(); void setProgressCallback(ContextPtr context); + private: /// Assuming file descriptor supports 'select', check that we have data to read or wait until timeout. bool poll(size_t timeout_microseconds); }; + +/** Similar to ReadBufferFromFileDescriptor but it is using 'pread' allowing multiple concurrent reads from the same fd. 
+ */ +class ReadBufferFromFileDescriptorPRead : public ReadBufferFromFileDescriptor +{ +public: + ReadBufferFromFileDescriptorPRead(int fd_, size_t buf_size = DBMS_DEFAULT_BUFFER_SIZE, char * existing_memory = nullptr, size_t alignment = 0) + : ReadBufferFromFileDescriptor(fd_, buf_size, existing_memory, alignment) + { + use_pread = true; + } +}; + } diff --git a/src/Interpreters/AsynchronousMetrics.cpp b/src/Interpreters/AsynchronousMetrics.cpp index 88e5c72a01a..da514759eb5 100644 --- a/src/Interpreters/AsynchronousMetrics.cpp +++ b/src/Interpreters/AsynchronousMetrics.cpp @@ -48,7 +48,7 @@ namespace ErrorCodes static constexpr size_t small_buffer_size = 4096; -static void openFileIfExists(const char * filename, std::optional & out) +static void openFileIfExists(const char * filename, std::optional & out) { /// Ignoring time of check is not time of use cases, as procfs/sysfs files are fairly persistent. @@ -57,11 +57,11 @@ static void openFileIfExists(const char * filename, std::optional openFileIfExists(const std::string & filename) +static std::unique_ptr openFileIfExists(const std::string & filename) { std::error_code ec; if (std::filesystem::is_regular_file(filename, ec)) - return std::make_unique(filename, small_buffer_size); + return std::make_unique(filename, small_buffer_size); return {}; } @@ -89,7 +89,7 @@ AsynchronousMetrics::AsynchronousMetrics( for (size_t thermal_device_index = 0;; ++thermal_device_index) { - std::unique_ptr file = openFileIfExists(fmt::format("/sys/class/thermal/thermal_zone{}/temp", thermal_device_index)); + std::unique_ptr file = openFileIfExists(fmt::format("/sys/class/thermal/thermal_zone{}/temp", thermal_device_index)); if (!file) { /// Sometimes indices are from zero sometimes from one. @@ -113,7 +113,7 @@ AsynchronousMetrics::AsynchronousMetrics( } String hwmon_name; - ReadBufferFromFile hwmon_name_in(hwmon_name_file, small_buffer_size); + ReadBufferFromFilePRead hwmon_name_in(hwmon_name_file, small_buffer_size); readText(hwmon_name, hwmon_name_in); std::replace(hwmon_name.begin(), hwmon_name.end(), ' ', '_'); @@ -134,14 +134,14 @@ AsynchronousMetrics::AsynchronousMetrics( break; } - std::unique_ptr file = openFileIfExists(sensor_value_file); + std::unique_ptr file = openFileIfExists(sensor_value_file); if (!file) continue; String sensor_name; if (sensor_name_file_exists) { - ReadBufferFromFile sensor_name_in(sensor_name_file, small_buffer_size); + ReadBufferFromFilePRead sensor_name_in(sensor_name_file, small_buffer_size); readText(sensor_name, sensor_name_in); std::replace(sensor_name.begin(), sensor_name.end(), ' ', '_'); } @@ -184,7 +184,7 @@ AsynchronousMetrics::AsynchronousMetrics( if (device_name.starts_with("loop")) continue; - std::unique_ptr file = openFileIfExists(device_dir.path() / "stat"); + std::unique_ptr file = openFileIfExists(device_dir.path() / "stat"); if (!file) continue; @@ -1021,7 +1021,7 @@ void AsynchronousMetrics::update(std::chrono::system_clock::time_point update_ti { try { - ReadBufferFromFile & in = *thermal[i]; + ReadBufferFromFilePRead & in = *thermal[i]; in.rewind(); Int64 temperature = 0; @@ -1065,7 +1065,7 @@ void AsynchronousMetrics::update(std::chrono::system_clock::time_point update_ti { if (edac[i].first) { - ReadBufferFromFile & in = *edac[i].first; + ReadBufferFromFilePRead & in = *edac[i].first; in.rewind(); uint64_t errors = 0; readText(errors, in); @@ -1074,7 +1074,7 @@ void AsynchronousMetrics::update(std::chrono::system_clock::time_point update_ti if (edac[i].second) { - ReadBufferFromFile & in = 
*edac[i].second; + ReadBufferFromFilePRead & in = *edac[i].second; in.rewind(); uint64_t errors = 0; readText(errors, in); diff --git a/src/Interpreters/AsynchronousMetrics.h b/src/Interpreters/AsynchronousMetrics.h index 606d117e605..07e117c4dd9 100644 --- a/src/Interpreters/AsynchronousMetrics.h +++ b/src/Interpreters/AsynchronousMetrics.h @@ -82,25 +82,25 @@ private: #if defined(OS_LINUX) MemoryStatisticsOS memory_stat; - std::optional meminfo; - std::optional loadavg; - std::optional proc_stat; - std::optional cpuinfo; - std::optional file_nr; - std::optional uptime; - std::optional net_dev; + std::optional meminfo; + std::optional loadavg; + std::optional proc_stat; + std::optional cpuinfo; + std::optional file_nr; + std::optional uptime; + std::optional net_dev; - std::vector> thermal; + std::vector> thermal; std::unordered_map>> hwmon_devices; + std::unique_ptr>> hwmon_devices; std::vector /* correctable errors */, - std::unique_ptr /* uncorrectable errors */>> edac; + std::unique_ptr /* correctable errors */, + std::unique_ptr /* uncorrectable errors */>> edac; - std::unordered_map> block_devs; + std::unordered_map> block_devs; /// TODO: socket statistics.
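A closing note on why the pread-based classes introduced in the last patch are safe to share across threads: pread takes the file offset as an explicit argument and never moves the descriptor's shared file position, so concurrent readers never race on lseek — which is also why seek() and rewind() skip the lseek syscall when use_pread is set. Below is a minimal standalone demonstration, independent of ClickHouse's buffer classes; /proc/self/status is used only because it is a conveniently always-readable file on Linux, and the offsets and sizes are arbitrary.

// Three threads read different regions of the same descriptor with pread;
// because pread carries its own offset, no thread disturbs the others.
#include <fcntl.h>
#include <unistd.h>

#include <cstdio>
#include <string>
#include <thread>
#include <vector>

static std::string readChunk(int fd, off_t offset, size_t size)
{
    std::string buf(size, '\0');
    ssize_t n = ::pread(fd, buf.data(), size, offset); // does not move fd's file position
    buf.resize(n > 0 ? static_cast<size_t>(n) : 0);
    return buf;
}

int main()
{
    int fd = ::open("/proc/self/status", O_RDONLY | O_CLOEXEC);
    if (fd < 0)
        return 1;

    std::vector<std::thread> readers;
    for (off_t offset : {0, 64, 128})
        readers.emplace_back([fd, offset]
        {
            std::string chunk = readChunk(fd, offset, 64);
            std::printf("offset %ld: %zu bytes\n", static_cast<long>(offset), chunk.size());
        });

    for (auto & t : readers)
        t.join();

    ::close(fd);
    return 0;
}

With plain read(), the same program would need a mutex (or one descriptor per thread) around lseek+read pairs; that is the saving AsynchronousMetrics gets by switching its procfs/sysfs handles to ReadBufferFromFilePRead.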